I am trying to take a snapshot from someone's webcam through JavaScript. The code works, except the resulting image is stretched so much that it is not readable.
I've messed around with the ctx.drawImage(video, 0, 0); line.
I've tried ctx.drawImage(video, 0, 0, 1280, 720); with no difference.
I've tried ctx.drawImage(video, 0, 0, 100, 100); with a major difference: the whole image appears, but far too small to be useful.
Code:
<html>
<head>
<video autoplay></video>
<img src="" width=1280, height=720>
<canvas style="display:none;"></canvas>
<script>
var errorCallback = function(e) {
console.log('Reeeejected!', e);
video.src = 'failure.mp4'; // fallback.
};
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
var video = document.querySelector('video');
var canvas = document.querySelector('canvas');
var ctx = canvas.getContext('2d');
var localMediaStream = null;
var hdConstraints = {
video: {
mandatory: {
minWidth: 1280,
minHeight: 720
}
}
};
function snapshot() {
if (localMediaStream) {
ctx.drawImage(video, 0, 0);
// "image/webp" works in Chrome.
// Other browsers will fall back to image/png.
document.querySelector('img').src = canvas.toDataURL('image/webp');
}
}
function success(stream) {
video.src = window.URL.createObjectURL(stream);
localMediaStream = stream;
}
video.addEventListener('click', snapshot, false);
navigator.getUserMedia(hdConstraints, success, errorCallback);
</script>
</head>
</html>
Result (screenshots of the live video and the captured image output omitted): the captured image comes out stretched.
So in conjunction with @Loktar's comment, I determined that the secret was to set the canvas size in the HTML like so:
<canvas style="display:none;" width=1280 height=720></canvas>
This allows for a full-frame, aspect-correct capture.
Hope this helps someone else!
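For reference, the same thing can be done from JavaScript by sizing the canvas from the video's intrinsic dimensions just before drawing, so nothing is hard-coded (a minimal sketch based on the snapshot() function above):
function snapshot() {
  if (localMediaStream) {
    // Size the drawing buffer to match the video frame so the capture is not stretched.
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    document.querySelector('img').src = canvas.toDataURL('image/webp');
  }
}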
Related
I want to capture an image every time socket.on('takePic') is triggered. It works fine the first time, but when socket.on('takePic') is triggered a second time, the canvas is just blank and so is the img tag.
I copied the takePicture() function from somewhere and added the setInterval() and the localstream variable to stop the camera.
How can I fix this?
Here is my JS code.
const socket = io('http://localhost:3001');
const params = new Proxy(new URLSearchParams(window.location.search), {
get: (searchParams, prop) => searchParams.get(prop),
});
let roomId = params.roomId;
socket.emit('joinRoom',roomId);
var takePicture = function () {
// The width and height of the captured photo. We will set the
// width to the value defined here, but the height will be
// calculated based on the aspect ratio of the input stream.
var width = 320; // We will scale the photo width to this
var height = 0; // This will be computed based on the input stream
// |streaming| indicates whether or not we're currently streaming
// video from the camera. Obviously, we start at false.
var streaming = false;
var localstream;
// The various HTML elements we need to configure or control. These
// will be set by the startup() function.
var video = null;
var canvas = null;
var photo = null;
var startbutton = null;
function showViewLiveResultButton() {
if (window.self !== window.top) {
// Ensure that if our document is in a frame, we get the user
// to first open it in its own tab or window. Otherwise, it
// won't be able to request permission for camera access.
document.querySelector(".contentarea").remove();
const button = document.createElement("button");
button.textContent = "View live result of the example code above";
document.body.append(button);
button.addEventListener('click', () => window.open(location.href));
return true;
}
return false;
}
function startup() {
if (showViewLiveResultButton()) { return; }
video = document.getElementById('video');
canvas = document.getElementById('canvas');
photo = document.getElementById('photo');
startbutton = document.getElementById('startbutton');
navigator.mediaDevices.getUserMedia({video: true, audio: false})
.then(function(stream) {
video.srcObject = stream;
localstream = stream;
video.play();
})
.catch(function(err) {
console.log("An error occurred: " + err);
});
video.addEventListener('canplay', function(ev){
if (!streaming) {
height = video.videoHeight / (video.videoWidth/width);
// Firefox currently has a bug where the height can't be read from
// the video, so we will make assumptions if this happens.
if (isNaN(height)) {
height = width / (4/3);
}
video.setAttribute('width', width);
video.setAttribute('height', height);
canvas.setAttribute('width', width);
canvas.setAttribute('height', height);
streaming = true;
}
}, false);
startbutton.addEventListener('click', function(ev){
takepicture();
ev.preventDefault();
clearInterval(picInterval);
$('#heading').css('display','none')
video.pause();
video.src = "";
localstream.getTracks()[0].stop();
}, false);
clearphoto();
}
// Fill the photo with an indication that none has been
// captured.
function clearphoto() {
var context = canvas.getContext('2d');
context.fillStyle = "#AAA";
context.fillRect(0, 0, canvas.width, canvas.height);
var data = canvas.toDataURL('image/png');
photo.setAttribute('src', data);
}
// Capture a photo by fetching the current contents of the video
// and drawing it into a canvas, then converting that to a PNG
// format data URL. By drawing it on an offscreen canvas and then
// drawing that to the screen, we can change its size and/or apply
// other changes before drawing it.
function takepicture() {
var context = canvas.getContext('2d');
if (width && height) {
canvas.width = width;
canvas.height = height;
context.drawImage(video, 0, 0, width, height);
var data = canvas.toDataURL('image/png');
photo.setAttribute('src', data);
} else {
clearphoto();
}
}
// Set up our event listener to run the startup process
// once loading is complete.
startup();
var i = 10;
let picInterval = setInterval(()=>{
i -= 1;
$('#heading').html(`taking picture in ${i}`);
if(i==0){
$('#startbutton').click()
}
}, 1000)
}
socket.on('takePic',()=>{
takePicture()
})
And this is the HTML code:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>StudentVideo</title>
<link rel="stylesheet" href="css/studentVideo.css">
</head>
<body>
<div class="contentarea">
<h1 id="heading">
</h1>
<p>
This example demonstrates how to set up a media stream using your built-in webcam, fetch an image from that stream, and create a PNG using that image.
</p>
<div class="camera">
<video id="video" width="320" height="240">Video stream not available.</video>
<button id="startbutton">Take photo</button>
</div>
<canvas id="canvas" width="320" height="240">
</canvas>
<div class="output">
<img id="photo" alt="The screen capture will appear in this box." src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAASwAAACWCAYAAABkW7XSAAAAAXNSR0IArs4c6QAABGlJREFUeF7t1AEJADAMA8HVv5Oa3GAuHq4KwqVkdvceR4AAgYDAGKxASyISIPAFDJZHIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRAwWH6AAIGMgMHKVCUoAQIGyw8QIJARMFiZqgQlQMBg+QECBDICBitTlaAECBgsP0CAQEbAYGWqEpQAAYPlBwgQyAgYrExVghIgYLD8AAECGQGDlalKUAIEDJYfIEAgI2CwMlUJSoCAwfIDBAhkBAxWpipBCRB46/vA5AUJNVYAAAAASUVORK5CYII=">
</div>
<p>
Visit our article Taking still photos with WebRTC to learn more about the technologies used here.
</p>
</div>
</body>
<script src="https://cdn.socket.io/socket.io-3.0.1.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js"></script>
<script src="js/video.js"></script>
</html>
In the click handler of #startbutton your code calls takepicture and then it goes on to remove the video's src and stop the MediaStream.
So the next time this handler is called, there is no source attached to the video element and thus nothing to be drawn on the canvas anymore.
It's quite unclear why you clear the video in this click handler; you might want to remove that part of the code, or move it to a "stop" button instead. Either way, you would probably be better off calling takepicture from your interval directly rather than relying on the click event handler.
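For example, a minimal sketch of that last idea, letting the countdown call takepicture directly and keeping the camera alive between captures (stopCamera() is a hypothetical helper for whenever you really want to tear the stream down):
let picInterval = setInterval(() => {
  i -= 1;
  $('#heading').html(`taking picture in ${i}`);
  if (i === 0) {
    clearInterval(picInterval);
    $('#heading').css('display', 'none');
    takepicture(); // capture directly instead of simulating a click
    // Only stop the tracks once you are done with the camera for good:
    // stopCamera(); // hypothetical helper: video.pause(); localstream.getTracks().forEach(t => t.stop());
  }
}, 1000);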
I am trying to record and download video of a canvas element using the official MediaStream Recording API.
<!DOCTYPE html>
<html>
<body>
<h1>Lets test mediaRecorder</h1>
<canvas id="myCanvas" width="200" height="100" style="border:1px solid #d3d3d3;">
Your browser does not support the HTML canvas tag.
</canvas>
<script>
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
ctx.font = "30px Arial";
ctx.fillText("Hello World", 10, 50);
const stream = c.captureStream(25);
var recordedChunks = [];
console.log(stream);
var options = { mimeType: "video/webm; codecs=vp9" };
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start();
function handleDataAvailable(event) {
console.log("data-available");
if (event.data.size > 0) {
recordedChunks.push(event.data);
console.log(recordedChunks);
download();
} else {
// ...
}
}
function download() {
var blob = new Blob(recordedChunks, {
type: "video/webm"
});
var url = URL.createObjectURL(blob);
var a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
a.href = url;
a.download = "test.webm";
a.click();
window.URL.revokeObjectURL(url);
}
// demo: to download after 10 sec
setTimeout(event => {
console.log("stopping");
mediaRecorder.stop();
}, 10000);
</script>
</body>
</html>
The code runs and I am able to download test.webm, but I suspect it does not contain any data, as I don't see any content when playing the file in VLC Media Player.
What am I missing to make it work?
You are facing a few bugs here.
The first one is somewhat excusable: Chrome doesn't generate seekable webm files. This comes down to how media files are built and how the MediaRecorder API works: to add the seeking metadata, the browser would have to keep the chunk containing the metadata and rewrite it once the recording is done.
I'm not too sure what Firefox does differently here, but VLC prefers its files.
Another Chrome bug, a lot less excusable, is that it doesn't pass a new frame to the MediaRecorder until we draw on the source canvas again.
Since in your case you don't draw anything after starting the MediaRecorder, you get nothing in the output...
To work around that, drawing a frame right before stopping the recorder should have been enough, except that there is nothing that tells us exactly when the browser will push that frame to the recorder...
So the only reliable workaround here is to draw on the canvas continuously while we record it. The good thing is that it doesn't need to paint anything new: we can trick the browser into thinking something new was painted by drawing a transparent rectangle.
A final note: while Chrome does support exporting the canvas with transparency, not all browsers do, and even where it is supported most players default to a black background. So be sure to draw a background in another color when you record.
All that said, here is a fixed demo:
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
// draw a white background
ctx.fillStyle = "white";
ctx.fillRect(0, 0, c.width, c.height);
ctx.fillStyle = "black";
ctx.font = "30px Arial";
ctx.fillText("Hello World", 10, 50);
const stream = c.captureStream(25);
var recordedChunks = [];
var options = {};
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start();
// Chrome requires we draw on the canvas while recording
mediaRecorder.onstart = animationLoop;
function animationLoop() {
// draw nothing, but still draw
ctx.globalAlpha = 0;
ctx.fillRect(0, 0, 1, 1);
// while we're recording
if (mediaRecorder.state !== "inactive") {
requestAnimationFrame(animationLoop);
}
}
// wait for the stop event to export the final video
// the dataavailable can fire before
mediaRecorder.onstop = (evt) => download();
function handleDataAvailable(event) {
recordedChunks.push(event.data);
}
function download() {
var blob = new Blob(recordedChunks, {
type: "video/webm"
});
var url = URL.createObjectURL(blob);
// exporting to a video element for that demo
// the downloaded video will still not work in some programs
// For this one would need to fix the markers using something like ffmpeg.
var video = document.getElementById('video');
video.src = url;
// hack to make the video seekable in the browser
// see https://stackoverflow.com/questions/38443084/
video.onloadedmetadata = (evt) => {
video.currentTime = 10e6;
video.addEventListener("seeked", () => video.currentTime = 0, {
once: true
})
}
}
setTimeout(() => {
console.clear();
mediaRecorder.stop();
}, 10000);
console.log("please wait while recording (10s)");
<h1>Lets test mediaRecorder</h1>
<canvas id="myCanvas" width="200" height="100" style="border:1px solid #d3d3d3;">
Your browser does not support the HTML canvas tag.
</canvas>
<video controls id="video"></video>
I've looked at a sample for streaming from a canvas to a video element, so I can see that the principle works, but I can't get it to play/display a static image in the video.
Here is my code so far, with an image borrowed from Stack Overflow. How can I change my code to display the canvas as a video?
const canvas = document.getElementById('viewport');
const context = canvas.getContext('2d');
const video = document.getElementById('videoPlayBack');
make_base();
function make_base() {
base_image = new Image();
base_image.onload = function () {
context.drawImage(base_image, 0, 0);
}
base_image.src = "https://cdn.sstatic.net/Img/ico-binoculars.svg?v=d4dbaac4eec9";
}
const stream = canvas.captureStream(25);
video.srcObject = stream;
<canvas id="viewport"></canvas>
<video id="videoPlayBack" playsinline autoplay muted></video>
You are drawing a cross-origin image; this taints the canvas and mutes the CanvasCaptureMediaStreamTrack, resulting in no data being passed.
Keep your canvas untainted if you wish to stream it:
const canvas = document.getElementById('viewport');
const context = canvas.getContext('2d');
const video = document.getElementById('videoPlayBack');
make_base();
function make_base() {
base_image = new Image();
base_image.onload = function () {
context.drawImage(base_image, 0, 0, 400, 300);
}
base_image.crossOrigin = "anonymous";
base_image.src = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png";
}
const stream = canvas.captureStream(25);
video.srcObject = stream;
<canvas id="viewport" width="400" height="300"></canvas>
<video id="videoPlayBack" controls playsinline autoplay muted ></video>
You don't get to save from a canvas if you draw "not your images" on it, because of the canvas security model: the moment you draw any image to it that isn't same-origin and doesn't explicitly have a CORS header that okays your use, you no longer have access to the pixels, not through getImageData, not through toDataURL, etc.
To prevent the canvas from flagging itself as "tainted", see https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image for what your options might be.
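The markup-level equivalent is the crossorigin attribute; note it only helps if the server actually answers with Access-Control-Allow-Origin (a minimal sketch, not tied to the code above):
<img src="https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png" crossorigin="anonymous" alt="CORS-enabled test image">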
Basically, people take a picture with their phone, and I should crop the picture and add a watermark.
How can I add a watermark to my picture?
My code is below.
function takeSnapshot(){
// Here we're using a trick that involves a hidden canvas element.
var hidden_canvas = document.querySelector('canvas'),
context = hidden_canvas.getContext('2d');
var width = 480,
height = 480;
hidden_canvas.width = width;
hidden_canvas.height = height;
// Make a copy of the current frame in the video on the canvas.
context.drawImage(video, 100, 0, 480, 480, 0, 0, hidden_canvas.width, hidden_canvas.height);
// Turn the canvas image into a dataURL that can be used as a src for our photo.
return hidden_canvas.toDataURL('image/png');
}
html
<div class="container">
<div class="app">
Touch here to start the app.
<video id="camera-stream" width="640" height="480"></video>
<img id="snap">
<img src="http://localhost/selfie/face-pic.png" style="z-index:100;position:absolute; top:0;left:0;"/>
<p id="error-message"></p>
<div class="controls" style="z-index:200;">
<i class="material-icons">delete</i>
<i class="material-icons">camera_alt</i>
<i class="material-icons">file_download</i>
</div>
<!-- Hidden canvas element. Used for taking snapshot of video. -->
<canvas>
</canvas>
</div>
</div>
js
document.addEventListener('DOMContentLoaded', function () {
// References to all the element we will need.
var video = document.querySelector('#camera-stream'),
image = document.querySelector('#snap'),
start_camera = document.querySelector('#start-camera'),
controls = document.querySelector('.controls'),
take_photo_btn = document.querySelector('#take-photo'),
delete_photo_btn = document.querySelector('#delete-photo'),
download_photo_btn = document.querySelector('#download-photo'),
error_message = document.querySelector('#error-message');
// The getUserMedia interface is used for handling camera input.
// Some browsers need a prefix so here we're covering all the options
navigator.getMedia = ( navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
if(!navigator.getMedia){
displayErrorMessage("Your browser doesn't have support for the navigator.getUserMedia interface.");
}
else{
// Request the camera.
navigator.getMedia(
{
video: true
},
// Success Callback
function(stream){
// Create an object URL for the video stream and
// set it as src of our HTML video element.
video.srcObject=stream;
// Play the video element to start the stream.
video.play();
video.onplay = function() {
showVideo();
};
},
// Error Callback
function(err){
displayErrorMessage("There was an error with accessing the camera stream: " + err.name, err);
}
);
}
// Mobile browsers cannot play video without user input,
// so here we're using a button to start it manually.
start_camera.addEventListener("click", function(e){
e.preventDefault();
// Start video playback manually.
video.play();
showVideo();
});
take_photo_btn.addEventListener("click", function(e){
e.preventDefault();
var snap = takeSnapshot();
// Show image.
image.setAttribute('src', snap);
image.classList.add("visible");
// Enable delete and save buttons
delete_photo_btn.classList.remove("disabled");
download_photo_btn.classList.remove("disabled");
// Set the href attribute of the download button to the snap url.
download_photo_btn.href = snap;
// Pause video playback of stream.
video.pause();
});
delete_photo_btn.addEventListener("click", function(e){
e.preventDefault();
// Hide image.
image.setAttribute('src', "");
image.classList.remove("visible");
// Disable delete and save buttons
delete_photo_btn.classList.add("disabled");
download_photo_btn.classList.add("disabled");
// Resume playback of stream.
video.play();
});
function showVideo(){
// Display the video stream and the controls.
hideUI();
video.classList.add("visible");
controls.classList.add("visible");
}
function takeSnapshot(){
// Here we're using a trick that involves a hidden canvas element.
var hidden_canvas = document.querySelector('canvas'),
context = hidden_canvas.getContext('2d');
var width = 480,
height = 480;
hidden_canvas.width = width;
hidden_canvas.height = height;
// Make a copy of the current frame in the video on the canvas.
context.drawImage(video, 100, 0, 480, 480, 0, 0, hidden_canvas.width, hidden_canvas.height);
// Turn the canvas image into a dataURL that can be used as a src for our photo.
return hidden_canvas.toDataURL('image/png');
}
function displayErrorMessage(error_msg, error){
error = error || "";
if(error){
console.error(error);
}
error_message.innerText = error_msg;
hideUI();
error_message.classList.add("visible");
}
function hideUI(){
// Helper function for clearing the app UI.
controls.classList.remove("visible");
start_camera.classList.remove("visible");
video.classList.remove("visible");
snap.classList.remove("visible");
error_message.classList.remove("visible");
}
});
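One way this could be done (a sketch, assuming the face-pic.png overlay from the markup is served same-origin so the canvas stays untainted) is to preload the watermark once and composite it onto the hidden canvas after the video frame, before exporting:
// Preload the watermark once so it is ready when a snapshot is taken.
var watermark = new Image();
watermark.src = 'http://localhost/selfie/face-pic.png'; // same-origin, so the canvas is not tainted
function takeSnapshot() {
  var hidden_canvas = document.querySelector('canvas'),
      context = hidden_canvas.getContext('2d');
  hidden_canvas.width = 480;
  hidden_canvas.height = 480;
  // Copy the current video frame, then draw the watermark on top of it.
  context.drawImage(video, 100, 0, 480, 480, 0, 0, hidden_canvas.width, hidden_canvas.height);
  if (watermark.complete && watermark.naturalWidth) {
    // Bottom-right corner, 120px wide, height scaled to keep the watermark's aspect ratio.
    var w = 120, h = watermark.naturalHeight * (w / watermark.naturalWidth);
    context.drawImage(watermark, hidden_canvas.width - w - 10, hidden_canvas.height - h - 10, w, h);
  }
  return hidden_canvas.toDataURL('image/png');
}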
How do you enable the front camera in a WebView? I have enabled the features in AndroidManifest.xml:
<uses-feature android:name="android.hardware.camera" android:required="true" />
<uses-feature android:name="android.hardware.camera.front" android:required="true" />
The camera is not going to be used for taking photos or recording, just to switch on the front camera.
When I go to the website using the phone's browser, the camera works once I allow the permission prompt. How can this work in a WebView?
The HTML file has a canvas and a video tag that display the webcam. It doesn't record or take pictures; it just shows the camera view.
Here is the HTML code:
<canvas id="inCanvas" width="500" height="500" style="display:none"></canvas>
<video id="inputVideo" width="100" height="100" autoplay loop ></video>
It works with the webcam in a regular browser, but not in a WebView on Android.
I didn't quite understand, but I think what you want could be one of the following two things.
1) Access the camera and just show the video on the screen (not capturing an image):
html:
<video autoplay></video>
<canvas id='canvas' width='100' height='100'></canvas>
js:
var onFailSoHard = function(e)
{
console.log('failed',e);
}
window.URL = window.URL || window.webkitURL ;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
var video = document.querySelector('video');
if(navigator.getUserMedia)
{
navigator.getUserMedia({video: true},function(stream) {
video.src = window.URL.createObjectURL(stream);
},onFailSoHard);
}
var canvas = document.getElementById('canvas');
var ctx = canvas.getContext('2d');
setInterval(function(){
ctx.drawImage(video,0,0);
}, 100);
2) Capture an image from the camera:
Here is the doc for that (this uses the Cordova/PhoneGap Camera plugin):
navigator.camera.getPicture(onSuccess, onFail, { quality: 50,
destinationType: Camera.DestinationType.DATA_URL
});
function onSuccess(imageData) {
var image = document.getElementById('myImage');
image.src = "data:image/jpeg;base64," + imageData;
}
function onFail(message) {
alert('Failed because: ' + message);
}
I would use something similar to the below as a script to access the phone camera.
<script>
var errorCallback = function(e) {
console.log('Rejected!', e);
};
// Not showing vendor prefixes.
navigator.getUserMedia({video: true, audio: true}, function(localMediaStream) {
var video = document.querySelector('video');
video.src = window.URL.createObjectURL(localMediaStream);
// Note: onloadedmetadata doesn't fire in Chrome when using it with getUserMedia.
// See crbug.com/110938.
video.onloadedmetadata = function(e) {
// Ready to go. Do some stuff.
};
}, errorCallback);
</script>
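A more current equivalent of that snippet (a minimal sketch; modern browsers have dropped the prefixed callback API and no longer accept a MediaStream in URL.createObjectURL):
<script>
navigator.mediaDevices.getUserMedia({ video: true, audio: true })
  .then((stream) => {
    const video = document.querySelector('video');
    video.srcObject = stream; // assign the stream directly instead of createObjectURL
    video.onloadedmetadata = () => video.play();
  })
  .catch((e) => console.log('Rejected!', e));
</script>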
Used the following tutorial to help me.
Hope it sets you on the right track :)