Callback event for getUserMedia() - javascript

I'm trying to take a snapshot from my webcam with the navigator.mediaDevices.getUserMedia() and canvas.getContext('2d').drawImage() functions.
When I do it like this, it works perfectly:
function init(){
    myVideo = document.getElementById("myVideo");
    myCanvas = document.getElementById("myCanvas");
    videoWidth = myCanvas.width;
    videoHeight = myCanvas.height;
    startVideoStream();
}

function startVideoStream(){
    navigator.mediaDevices.getUserMedia({audio: false, video: { width: videoWidth, height: videoHeight }}).then(function(stream) {
        myVideo.src = URL.createObjectURL(stream);
    }).catch(function(err) {
        console.log("Unable to get video stream: " + err);
    });
}

function snapshot(){
    myCanvas.getContext('2d').drawImage(myVideo, 0, 0, videoWidth, videoHeight);
}
<!DOCTYPE html>
<html>
<head>
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <script src="debug.js"></script>
</head>
<body onload="init()">
    <div id="mainContainer">
        <video id="myVideo" width="640" height="480" autoplay style="display: inline;"></video>
        <canvas id="myCanvas" width="640" height="480" style="display: inline;"></canvas>
        <input type="button" id="snapshotButton" value="Snapshot" onclick="snapshot()"/>
    </div>
</body>
</html>
The thing is, I don't want to use a button click to take the snapshot, but take it as soon as the camera stream is loaded.
I tried calling the snapshot() function directly after setting the video source:
function init(){
    myVideo = document.getElementById("myVideo");
    myCanvas = document.getElementById("myCanvas");
    videoWidth = myCanvas.width;
    videoHeight = myCanvas.height;
    startVideoStream();
}

function startVideoStream(){
    navigator.mediaDevices.getUserMedia({audio: false, video: { width: videoWidth, height: videoHeight }}).then(function(stream) {
        myVideo.src = URL.createObjectURL(stream);
        snapshot();
    }).catch(function(err) {
        console.log("Unable to get video stream: " + err);
    });
}

function snapshot(){
    myCanvas.getContext('2d').drawImage(myVideo, 0, 0, videoWidth, videoHeight);
}
But it doesn't work. My canvas stays white. I guess it's because the camera stream is not fully loaded at that point.
So is there any other event that gets fired which I could use to draw the snapshot as soon as the camera feed is loaded? Or am I totally on the wrong track?
Thanks in advance!

Wait for the loadedmetadata event:
navigator.mediaDevices.getUserMedia({video: true})
  .then(stream => {
    video.srcObject = stream;
    return new Promise(resolve => video.onloadedmetadata = resolve);
  })
  .then(() => canvas.getContext('2d').drawImage(video, 0, 0, 160, 120))
  .catch(e => console.log(e));

<video id="video" width="160" height="120" autoplay></video>
<canvas id="canvas" width="160" height="120"></canvas>
The above should work in all browsers (that do WebRTC).
In Chrome you can also wait for the promise returned by play() instead - but play() doesn't return a promise in any other browser yet.
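The snippet for that Chrome-only variant isn't included here; a minimal sketch of what it might look like, assuming the same video and canvas elements as above:

navigator.mediaDevices.getUserMedia({video: true})
  .then(stream => {
    video.srcObject = stream;
    return video.play(); // in Chrome this resolves once playback has started
  })
  .then(() => canvas.getContext('2d').drawImage(video, 0, 0, 160, 120))
  .catch(e => console.log(e));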
Also note that URL.createObjectURL(stream) is deprecated. Use srcObject.
Update: Thanks to @KyleMcDonald in the comments for pointing out the importance of registering the loadedmetadata listener synchronously with setting the srcObject! Code updated.

Related

HTML canvas not producing image from web cam

I want to capture an image every time socket.on('takePic') is triggered. It works fine the first time, but when socket.on('takePic') fires a second time, the canvas is just blank, and so is the img tag.
I copied the takePicture() function from somewhere and added the setInterval() and the localstream variable to stop the camera.
How can I fix this?
Here is my JS code:
const socket = io('http://localhost:3001');
const params = new Proxy(new URLSearchParams(window.location.search), {
  get: (searchParams, prop) => searchParams.get(prop),
});
let roomId = params.roomId;
socket.emit('joinRoom',roomId);
var takePicture = function () {
  // The width and height of the captured photo. We will set the
  // width to the value defined here, but the height will be
  // calculated based on the aspect ratio of the input stream.
  var width = 320;  // We will scale the photo width to this
  var height = 0;   // This will be computed based on the input stream

  // |streaming| indicates whether or not we're currently streaming
  // video from the camera. Obviously, we start at false.
  var streaming = false;
  var localstream;

  // The various HTML elements we need to configure or control. These
  // will be set by the startup() function.
  var video = null;
  var canvas = null;
  var photo = null;
  var startbutton = null;

  function showViewLiveResultButton() {
    if (window.self !== window.top) {
      // Ensure that if our document is in a frame, we get the user
      // to first open it in its own tab or window. Otherwise, it
      // won't be able to request permission for camera access.
      document.querySelector(".contentarea").remove();
      const button = document.createElement("button");
      button.textContent = "View live result of the example code above";
      document.body.append(button);
      button.addEventListener('click', () => window.open(location.href));
      return true;
    }
    return false;
  }
  function startup() {
    if (showViewLiveResultButton()) { return; }
    video = document.getElementById('video');
    canvas = document.getElementById('canvas');
    photo = document.getElementById('photo');
    startbutton = document.getElementById('startbutton');

    navigator.mediaDevices.getUserMedia({video: true, audio: false})
      .then(function(stream) {
        video.srcObject = stream;
        localstream = stream;
        video.play();
      })
      .catch(function(err) {
        console.log("An error occurred: " + err);
      });

    video.addEventListener('canplay', function(ev){
      if (!streaming) {
        height = video.videoHeight / (video.videoWidth/width);
        // Firefox currently has a bug where the height can't be read from
        // the video, so we will make assumptions if this happens.
        if (isNaN(height)) {
          height = width / (4/3);
        }
        video.setAttribute('width', width);
        video.setAttribute('height', height);
        canvas.setAttribute('width', width);
        canvas.setAttribute('height', height);
        streaming = true;
      }
    }, false);

    startbutton.addEventListener('click', function(ev){
      takepicture();
      ev.preventDefault();
      clearInterval(picInterval);
      $('#heading').css('display','none');
      video.pause();
      video.src = "";
      localstream.getTracks()[0].stop();
    }, false);

    clearphoto();
  }
  // Fill the photo with an indication that none has been
  // captured.
  function clearphoto() {
    var context = canvas.getContext('2d');
    context.fillStyle = "#AAA";
    context.fillRect(0, 0, canvas.width, canvas.height);
    var data = canvas.toDataURL('image/png');
    photo.setAttribute('src', data);
  }

  // Capture a photo by fetching the current contents of the video
  // and drawing it into a canvas, then converting that to a PNG
  // format data URL. By drawing it on an offscreen canvas and then
  // drawing that to the screen, we can change its size and/or apply
  // other changes before drawing it.
  function takepicture() {
    var context = canvas.getContext('2d');
    if (width && height) {
      canvas.width = width;
      canvas.height = height;
      context.drawImage(video, 0, 0, width, height);
      var data = canvas.toDataURL('image/png');
      photo.setAttribute('src', data);
    } else {
      clearphoto();
    }
  }

  // Set up our event listener to run the startup process
  // once loading is complete.
  startup();

  var i = 10;
  let picInterval = setInterval(() => {
    i -= 1;
    $('#heading').html(`taking picture in ${i}`);
    if (i == 0) {
      $('#startbutton').click();
    }
  }, 1000);
}

socket.on('takePic', () => {
  takePicture();
})
And this is the HTML code:
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>StudentVideo</title>
    <link rel="stylesheet" href="css/studentVideo.css">
</head>
<body>
    <div class="contentarea">
        <h1 id="heading">
        </h1>
        <p>
            This example demonstrates how to set up a media stream using your built-in webcam, fetch an image from that stream, and create a PNG using that image.
        </p>
        <div class="camera">
            <video id="video" width="320" height="240">Video stream not available.</video>
            <button id="startbutton">Take photo</button>
        </div>
        <canvas id="canvas" width="320" height="240">
        </canvas>
        <div class="output">
<img id="photo" alt="The screen capture will appear in this box." src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAASwAAACWCAYAAABkW7XSAAAAAXNSR0IArs4c6QAABGlJREFUeF7t1AEJADAMA8HVv5Oa3GAuHq4KwqVkdvceR4AAgYDAGKxASyISIPAFDJZHIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRB46/vA5AUJNVYAAAAASUVORK5CYII=">
        </div>
        <p>
            Visit our article Taking still photos with WebRTC to learn more about the technologies used here.
        </p>
    </div>
</body>
<script src="https://cdn.socket.io/socket.io-3.0.1.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js"></script>
<script src="js/video.js"></script>
</html>
In the click handler of #startbutton your code calls takepicture and then goes on to remove the video's src and stop the MediaStream.
So the next time this handler is called, there is no source attached to the video element and thus nothing left to draw on the canvas.
It's quite unclear why you clear the video in this click handler, so you might want to remove that part of the code, or move it to a "stop" button instead. Either way, you would probably be better off calling takepicture from your interval directly rather than relying on the click handler.
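As a rough sketch of that last suggestion (reusing the existing i, picInterval, takepicture and localstream variables from the question), the countdown could call takepicture() itself and leave the stream running so later 'takePic' events can still capture:

  let picInterval = setInterval(() => {
    i -= 1;
    $('#heading').html(`taking picture in ${i}`);
    if (i === 0) {
      clearInterval(picInterval);
      $('#heading').css('display', 'none');
      takepicture();  // draw the current video frame onto the canvas
      // The video and its MediaStream are deliberately left running here,
      // so the next socket.on('takePic') round still has frames to draw.
      // Stopping localstream could move to a separate "stop" button.
    }
  }, 1000);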

Flutter Web Plugin for HTML and JS

I couldn't find an HTML and JS plugin for Flutter Web. My goal is to open the camera and capture a picture, but it doesn't work on Flutter Web. I wrote the HTML and JS camera code, but it won't run in a Flutter Web app. I tried the flutter_html plugin, but it only renders a few meaningless lines in the Chrome web emulator. Please help me or suggest other plugins.
import "package:flutter/material.dart";
import 'package:flutter_html/flutter_html.dart';
class kamera extends StatefulWidget {
#override
_kameraState createState() => _kameraState();
}
class _kameraState extends State<kamera> {
#override
Widget build(BuildContext context) {
return Html(data:
"""
<video id="video" width="640" height="480" autoplay></video><br>
<button id="snap">Snap Photo</button><br>
<canvas id="canvas" width="640" height="480"></canvas><br>
<script>
var video = document.getElementById('video');
// Get access to the camera!
if(navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
// Not adding `{ audio: true }` since we only want video now
navigator.mediaDevices.getUserMedia({ video: true }).then(function(stream) {
//video.src = window.URL.createObjectURL(stream);
video.srcObject = stream;
video.play();
});
}
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var video = document.getElementById('video');
// Trigger photo take
document.getElementById("snap").addEventListener("click", function() {
context.drawImage(video, 0, 0, 640, 480);
});
</script>
""");
}
}

Clmtrackr cannot work with responsive HTML5 camera video

I use clmtrackr to track my face, but when I set the width of the video tag to 100%, it cannot track the face. I also tried setting the width of the canvas tag to the same as the video tag's width, but it still does not work.
<div style="text-align:center;">
<div style="border: 1px solid #ccc; display:inline-block; position:relative;">
<video id="inputVideo" width="100%" height="auto" autoplay></video>
<canvas id="canvas" width="100%" height="auto" style="position:absolute; top:0; left:0;"></canvas>
</div>
</div>
<script src="clmtrackr.min.js"></script>
<script type="text/javascript">
// Put event listeners into place
window.addEventListener("DOMContentLoaded", function() {
// Grab elements, create settings, etc.
var canvas = document.getElementById("canvas"),
context = canvas.getContext("2d"),
video = document.getElementById("inputVideo"),
videoObj = { "video": true },
videoFace = document.getElementById("video-face"),
errBack = function(error) {
console.log("Video capture error: ", error.code);
};
if(navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
// Not adding `{ audio: true }` since we only want video now
navigator.mediaDevices.getUserMedia({ video: true }).then(function(stream) {
video.src = window.URL.createObjectURL(stream);
video.play();
});
} else if(navigator.getUserMedia) { // Standard
navigator.getUserMedia(videoObj, function(stream) {
video.src = stream;
video.play();
}, errBack);
} else if(navigator.webkitGetUserMedia) { // WebKit-prefixed
navigator.webkitGetUserMedia(videoObj, function(stream){
video.src = window.webkitURL.createObjectURL(stream);
video.play();
}, errBack);
}
var ctracker = new clm.tracker();
ctracker.init();
ctracker.start(video);
function positionLoop() {
requestAnimationFrame(positionLoop);
var positions = ctracker.getCurrentPosition();
}
positionLoop();
function drawLoop() {
requestAnimationFrame(drawLoop);
context.clearRect(0, 0, canvas.width, canvas.height);
ctracker.draw(canvas);
}
drawLoop();
});
</script>
Why can't it track the face unless I set the video's width to 640 and its height to 480?
How can I fix this with a responsive width='100%' and height='auto'?
Your webcam resolution is probably 640x480, and you're trying to grab a frame at a higher resolution (width='100%'). I had the same problem and fixed it by reducing the width and height attributes of the video element.
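A minimal sketch of one way to reconcile that with a responsive layout (my own suggestion, not code from the question): keep the intrinsic video/canvas size at 640x480, which is what clmtrackr works against, and make only the displayed size fluid via CSS.

// Sketch: the tracker sees the element's intrinsic 640x480 size;
// only the *displayed* size is made responsive through CSS.
var video = document.getElementById("inputVideo");
var canvas = document.getElementById("canvas");

video.width = 640;    video.height = 480;    // size used by the tracker
canvas.width = 640;   canvas.height = 480;   // keep the overlay in the same coordinate space

video.style.width = "100%";   video.style.height = "auto";   // responsive display size
canvas.style.width = "100%";  canvas.style.height = "auto";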

Count number of faces using javascript

Is there a way I can count the number of faces in a live camera feed?
For example, I have three people in front of a webcam and two pages, say Success.html and error.html. The condition for deciding the redirect is: if only one face is detected, the user should be sent to Success.html; otherwise, to error.html.
I'm using tracking.js to detect faces in my web page, and my current code is below.
<!doctype html>
<html>
<head>
    <meta charset="utf-8">
    <title>tracking.js - face with camera</title>
    <link rel="stylesheet" href="assets/demo.css">
    <script src="js/tracking-min.js"></script>
    <script src="js/data/face-min.js"></script>
    <script src="js/dat.gui.min.js"></script>
    <script src="assets/stats.min.js"></script>
    <style>
        video, canvas {
            margin-left: 230px;
            margin-top: 120px;
            position: absolute;
        }
    </style>
</head>
<body>
    <div class="demo-title">
        <p>
            tracking.js -
            get user's webcam and detect faces
        </p>
    </div>
    <div class="demo-frame">
        <div class="demo-container">
            <video id="video" width="320" height="240" preload autoplay loop
                muted></video>
            <canvas id="canvas" width="320" height="240"></canvas>
        </div>
    </div>
    <script>
        window.onload = function() {
            var video = document.getElementById('video');
            var canvas = document.getElementById('canvas');
            var context = canvas.getContext('2d');

            var tracker = new tracking.ObjectTracker('face');
            tracker.setInitialScale(4);
            tracker.setStepSize(2);
            tracker.setEdgesDensity(0.1);

            tracking.track('#video', tracker, {
                camera : true
            });

            tracker.on('track', function(event) {
                context.clearRect(0, 0, canvas.width, canvas.height);
                event.data.forEach(function(rect) {
                    console.log('face detected');
                });
            });
        }; // note: this closing brace was missing in the posted snippet
    </script>
</body>
</html>
Here, in my console, I'm able to print "face detected".
Please let me know how I can detect multiple faces in this window.
Thanks
From the documentation:
myTracker.on('track', function(event) {
  if (event.data.length === 0) {
    // No targets were detected in this frame.
  } else {
    event.data.forEach(function(data) {
      // Plots the detected targets here.
    });
  }
});
It seems to be event.data.length that gives you the number of tracked elements.
From the docs at https://trackingjs.com/docs.html#trackers:
myTracker.on('track', function(event) {
  if (event.data.length === 0) {
    // No targets were detected in this frame.
  } else {
    event.data.forEach(function(data) {
      // Plots the detected targets here.
    });
  }
});
Use event.data.length to count the number of faces. Applied to your piece of code:
tracker.on('track', function(event) {
  context.clearRect(0, 0, canvas.width, canvas.height);
  // console log the amount of elements found
  console.log(event.data.length);
  event.data.forEach(function(rect) {
    console.log('face detected');
  });
});
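Tying that back to the original goal (the Success.html and error.html pages named in the question), a naive sketch might redirect on the first frame that contains at least one face; a real page would probably want to wait for a stable count over several frames before deciding:

tracker.on('track', function(event) {
  context.clearRect(0, 0, canvas.width, canvas.height);
  var faces = event.data.length;             // faces detected in this frame
  if (faces === 1) {
    window.location.href = 'Success.html';   // exactly one face
  } else if (faces > 1) {
    window.location.href = 'error.html';     // more than one face
  }
  // faces === 0: keep tracking until someone is in front of the camera
});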

Javascript Camera Snapshot stretched too large

I am trying to take a snapshot from someone's webcam through JavaScript. The code works, except the resulting image is stretched far too much to be readable.
I've messed around with the ctx.drawImage(video, 0, 0); line.
I've tried ctx.drawImage(video, 0, 0, 1280, 720); with no difference.
I've tried ctx.drawImage(video, 0, 0, 100, 100); with a major difference: it made the whole image appear, but far too small for the eye.
Code:
<html>
<head>
    <video autoplay></video>
    <img src="" width=1280, height=720>
    <canvas style="display:none;"></canvas>
    <script>
        var errorCallback = function(e) {
            console.log('Reeeejected!', e);
            video.src = 'failure.mp4'; // fallback.
        };

        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

        var video = document.querySelector('video');
        var canvas = document.querySelector('canvas');
        var ctx = canvas.getContext('2d');
        var localMediaStream = null;

        var hdConstraints = {
            video: {
                mandatory: {
                    minWidth: 1280,
                    minHeight: 720
                }
            }
        };

        function snapshot() {
            if (localMediaStream) {
                ctx.drawImage(video, 0, 0);
                // "image/webp" works in Chrome.
                // Other browsers will fall back to image/png.
                document.querySelector('img').src = canvas.toDataURL('image/webp');
            }
        }

        function success(stream) {
            video.src = window.URL.createObjectURL(stream);
            localMediaStream = stream;
        }

        video.addEventListener('click', snapshot, false);
        navigator.getUserMedia(hdConstraints, success, errorCallback);
    </script>
</head>
</html>
Result: screenshots of the live video and the stretched image output.
So, in conjunction with @Loktar's comment, I determined that the secret was to set the canvas size in the HTML like so:
<canvas style="display:none;" width=1280 height=720></canvas>
This allows for a full-frame (aspect-correct) capture.
Hope this helps someone else!
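As an alternative sketch (my own suggestion, assuming the same markup as above), the canvas can be sized from the video's reported resolution instead of hardcoding 1280x720:

var video = document.querySelector('video');
var canvas = document.querySelector('canvas');
var ctx = canvas.getContext('2d');

video.addEventListener('loadedmetadata', function() {
    // Match the canvas backing store to the actual capture resolution,
    // so drawImage(video, 0, 0) neither crops nor stretches the frame.
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
});

function snapshot() {
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    document.querySelector('img').src = canvas.toDataURL('image/webp');
}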
