I have two cameras:
- an external web camera
- the laptop's built-in camera
I want to stream both cameras on a website.
I already have some reference code.
Here is an example that works on JSFiddle:
here
<video id="video" width="640" height="480" autoplay></video>
<button id="snap" class="sexyButton">Snap Photo</button>
<canvas id="canvas" width="640" height="480"></canvas>
<script>
// Wire up the camera preview and the snapshot button once the DOM is ready.
window.addEventListener("DOMContentLoaded", function() {
    // Grab elements, create settings, etc.
    var canvas = document.getElementById("canvas"),
        context = canvas.getContext("2d"),
        video = document.getElementById("video"),
        videoObj = { "video": true },
        errBack = function(error) {
            console.log("Video capture error: ", error);
        };

    // Prefer the modern promise-based API. The prefixed variants below are
    // deprecated, and the original "Standard" branch assigned the stream
    // directly to video.src, which never worked (a MediaStream is not a URL).
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia(videoObj).then(function(stream) {
            // srcObject replaces the deprecated createObjectURL(stream) pattern.
            video.srcObject = stream;
            video.play();
        }).catch(errBack);
    } else if (navigator.getUserMedia) { // Legacy unprefixed
        navigator.getUserMedia(videoObj, function(stream) {
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.webkitGetUserMedia) { // WebKit-prefixed
        navigator.webkitGetUserMedia(videoObj, function(stream) {
            video.src = window.webkitURL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.mozGetUserMedia) { // Mozilla-prefixed
        navigator.mozGetUserMedia(videoObj, function(stream) {
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    }

    // Trigger photo take: copy the current video frame onto the canvas.
    document.getElementById("snap").addEventListener("click", function() {
        context.drawImage(video, 0, 0, 640, 480);
    });
}, false);
</script>
That example can only connect to and display one camera at a time.
I want to select and view both of my cameras simultaneously — any suggestions or solutions?
A JSFiddle example would also be appreciated.
You can create two different streams, one for each camera, and show them simultaneously in two <video> tags.
The list of available devices is available using navigator.mediaDevices.enumerateDevices(). After filtering the resulting list for only videoinputs, you have access to the deviceIds without needing permission from the user.
With getUserMedia you can then request a stream from the camera with id camera1Id using
navigator.mediaDevices.getUserMedia({
video: {
deviceId: { exact: camera1Id }
}
});
The resulting stream can be fed into a <video> (referenced here by vid) by calling vid.srcObject = stream.
I have done this for two streams from two webcams simultaneously.
You cannot access two cameras simultaneously. The API would indicate otherwise, but something underlying seems to prevent it from working as expected. You can verify this by opening https://simpl.info/getusermedia/sources/ or http://googlechrome.github.io/webrtc/samples/web/content/getusermedia-source/ in two completely separate windows: despite being able to select two streams, only one is active at a time — and if you pick the same one in both windows, it shows in both places.
The only workaround I was able to do was to flip-flop between the two streams, then draw the video to a canvas. Doing this I was able to do captures at around 1 fps, unfortunately the camera resets between frames, on one of my cameras I had to put in a delay to allow the auto white balance to kick in to get a decent image.
// Round-robins between every attached camera, drawing one frame from each
// into its own half of #canvas. The cameras are not open simultaneously:
// each iteration opens one device, grabs a frame, stops the stream, then
// moves to the next (~1 fps; the 1 s delay lets auto-exposure/white-balance
// settle on cameras that reset between opens).
function webcam() {
    // Normalise the legacy prefixed callback APIs onto navigator.getUserMedia.
    if (!navigator.getUserMedia) {
        navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
    }
    var hasModernApi = !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
    if (!hasModernApi && !navigator.getUserMedia) {
        return alert('getUserMedia not supported in this browser.');
    }

    var canvas = document.getElementById('canvas');
    var context = canvas.getContext('2d');
    var audioSource;
    // Each camera is drawn into half the canvas width.
    var cw = Math.floor(canvas.clientWidth / 2);
    var ch = Math.floor(canvas.clientHeight / 2);

    // Off-DOM video element used purely as a decode surface.
    var video = document.createElement("video");
    video.autoplay = "autoplay";
    video.addEventListener('playing', function() {
        // Delay so the camera's auto white balance / exposure can settle
        // before we sample a frame.
        setTimeout(draw, 1000, this, context, (currentSource * canvas.clientWidth / 2), cw, ch);
    }, false);

    var currentSource = 0;
    var videosources = [];
    var lastStream;

    // Opens the camera at videosources[currentSource].
    function captureVideo() {
        console.log("Capturing " + currentSource, videosources[currentSource]);
        // deviceId replaces the long-deprecated "optional: [{sourceId}]"
        // constraint syntax used by the original code.
        var constraints = {
            audio: audioSource ? { deviceId: audioSource } : false,
            video: { deviceId: { exact: videosources[currentSource].deviceId } }
        };
        if (hasModernApi) {
            navigator.mediaDevices.getUserMedia(constraints).then(success).catch(errorCallback);
        } else {
            navigator.getUserMedia(constraints, success, errorCallback);
        }
    }

    function errorCallback(error) {
        console.log("getUserMedia error: ", error);
    }

    // Attach the freshly opened stream to the off-DOM video and remember it
    // so it can be stopped before the next camera is opened.
    function success(stream) {
        console.log("the stream" + currentSource, stream);
        // srcObject replaces the deprecated createObjectURL(stream) pattern.
        if ('srcObject' in video) {
            video.srcObject = stream;
        } else {
            video.src = window.URL.createObjectURL(stream);
        }
        video.play();
        lastStream = stream;
    }

    // Stops the current stream and advances to the next camera (wrapping).
    function next() {
        if (videosources.length === 0) {
            console.log("no cameras found");
            return;
        }
        if (lastStream) {
            // MediaStream.stop() was removed from browsers; stop each track.
            lastStream.getTracks().forEach(function(track) { track.stop(); });
        }
        if ('srcObject' in video) {
            video.srcObject = null;
        }
        video.src = "";
        currentSource = (currentSource + 1) % videosources.length;
        captureVideo();
    }

    // Draws one frame of video v at x-offset l, then schedules the switch
    // to the next camera.
    function draw(v, c, l, w, h) {
        if (v.paused || v.ended) return false;
        console.log("drawing", l);
        c.drawImage(v, l, 0, w, h);
        setTimeout(next, 500);
    }

    // enumerateDevices() replaces the removed MediaStreamTrack.getSources().
    // Note: device labels are empty until the user has granted permission.
    navigator.mediaDevices.enumerateDevices().then(function(devices) {
        devices.forEach(function(device) {
            if (device.kind === 'audioinput') {
                console.log(device.deviceId, device.label || 'microphone');
                audioSource = device.deviceId;
            } else if (device.kind === 'videoinput') {
                console.log(device.deviceId, device.label || 'camera');
                videosources.push(device);
            } else {
                console.log('Some other kind of source: ', device);
            }
        });
        console.log("sources", videosources);
        next();
    }).catch(errorCallback);
}
Related
Well, I'm having a very weird issue that only happens in the Samsung Internet browser. In Chrome and other browsers, this works well.
When I take a snapshot of the current frame of a video (currently the mobile camera) in JavaScript, I get a distorted, generally bad image.
The code that takes the snapshot is:
// Captures the current video frame and returns it as a JPEG data URL.
// Returns undefined when the video has no intrinsic dimensions yet
// (i.e. no frame has been decoded).
function takeSnapshot() {
    // Freeze playback so the captured frame matches what the user sees.
    video.pause();

    var width = video.videoWidth;
    var height = video.videoHeight;
    if (!width || !height) {
        return;
    }

    // Off-screen canvas sized exactly to the video's intrinsic resolution.
    var snapshotCanvas = document.createElement('canvas');
    snapshotCanvas.width = width;
    snapshotCanvas.height = height;

    // Copy the frame, then serialise it for use as an <img> src.
    snapshotCanvas.getContext('2d').drawImage(video, 0, 0, width, height);
    return snapshotCanvas.toDataURL('image/jpeg');
}
Am I missing something else to make this work in the Samsung browser? Or should I just show a message saying the feature is not compatible with this browser?
Currently tested on a Samsung Galaxy S9, Android 10.
------------- Update
I found what is causing the image to be captured badly.
I'm using custom size for the image, in this case, is a horizontal rectangle.
I do this when init the video:
// Requested capture size. NOTE(review): asking for a *landscape* rectangle
// is what triggers the distorted snapshot in the Samsung Internet browser.
var w = 2000;
var h = 1200;

var userAgent = (typeof navigator !== 'undefined' && navigator.userAgent) || '';
var isSamsungBrowser = userAgent.indexOf('SamsungBrowser') >= 0;

// Quick fix: Samsung Internet captures correctly when the requested
// rectangle is portrait, so swap the dimensions there.
// (The original tested the undefined name `SamsungBrowser`, which would
// throw a ReferenceError before the workaround could ever run; the
// declared variable is `isSamsungBrowser`.)
if (isSamsungBrowser) {
    w = 1200;
    h = 2000;
}
// NOTE(review): `navigator.getMedia` is a legacy alias (presumably assigned
// from getUserMedia/webkitGetUserMedia elsewhere — confirm); the modern
// equivalent is navigator.mediaDevices.getUserMedia(constraints).then(...).
navigator.getMedia(
    {
        video:
        {
            // Pin a specific camera when one has been selected.
            deviceId: videoSource ? { exact: videoSource } : undefined,
            // NOTE(review): w and h are deliberately swapped here
            // (width: h, height: w) — confirm this is intentional for the
            // device orientation being targeted.
            width: { ideal: h },
            height: { ideal: w }
        }
    },
    // Success Callback
    function (stream) {
        // Attach the stream to our HTML video element.
        try {
            currentStream = stream;
            // srcObject is the modern way to attach a MediaStream.
            video.srcObject = stream;
        } catch (error) {
            // Fallback for engines that predate srcObject.
            video.src = window.URL.createObjectURL(stream);
        }
        window.stream = stream;
        // Play the video element to start the stream.
        video.play();
        video.onplay = function () {
            showVideo();
        };
    },
    // Error callback (the original snippet was truncated and never closed
    // the getMedia(...) call, leaving it without an error handler).
    function (error) {
        console.error('getMedia error:', error);
    }
);
I have a camera PWA that works fine for taking photos and uploading them, but I want to use the rear-facing camera instead of the front-facing one. How do I proceed?
These are the current lines of code I'm using for initializing the camera and taking the photo. This is in .js:
// Initialise the camera preview, falling back to the manual image picker
// when no usable getUserMedia implementation exists or access is denied.
function initializeMedia() {
    // Make sure navigator.mediaDevices exists so we can polyfill onto it.
    if (!('mediaDevices' in navigator)) {
        navigator.mediaDevices = {};
    }

    // Polyfill the promise-based getUserMedia on top of the legacy
    // prefixed callback APIs where necessary.
    if (!('getUserMedia' in navigator.mediaDevices)) {
        navigator.mediaDevices.getUserMedia = function(constraints) {
            var legacyGetUserMedia =
                navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

            if (!legacyGetUserMedia) {
                return Promise.reject(new Error('getUserMedia is not implemented!'));
            }

            // Adapt the callback API to a Promise.
            return new Promise(function(resolve, reject) {
                legacyGetUserMedia.call(navigator, constraints, resolve, reject);
            });
        };
    }

    navigator.mediaDevices
        .getUserMedia({ video: true })
        .then(function(stream) {
            // Live preview: attach the stream and reveal the player.
            videoPlayer.srcObject = stream;
            videoPlayer.style.display = 'block';
        })
        .catch(function(err) {
            // Camera unavailable or permission denied — show the picker.
            imagePicker.style.display = 'block';
        });
}
// capture image: copy the current video frame to the canvas, stop the
// camera, and convert the result into a Blob for upload.
captureButton.addEventListener('click', function(event) {
    // Swap the UI from live preview to the captured still.
    canvasElement.style.display = 'block';
    videoPlayer.style.display = 'none';
    captureButton.style.display = 'none';

    var context = canvasElement.getContext('2d');
    // Scale the frame to the canvas width while preserving the video's
    // aspect ratio. (The original referenced the undefined name `canvas`;
    // the element in scope is `canvasElement`.)
    context.drawImage(
        videoPlayer,
        0,
        0,
        canvasElement.width,
        videoPlayer.videoHeight / (videoPlayer.videoWidth / canvasElement.width)
    );

    // Release the camera now that we have our frame.
    videoPlayer.srcObject.getVideoTracks().forEach(function(track) {
        track.stop();
    });

    picture = dataURItoBlob(canvasElement.toDataURL());
});
You can set video.facingMode to either 'user' for the front camera, or 'environment' for the back camera in the constraints object that you pass to navigator.mediaDevices.getUserMedia().
Example from the MDN:
var constraints = { video: { facingMode: "environment" } };
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
My script:
// Socket handle; reassigned below via window.srgt_socket (at top level this
// is the same global binding, so the initial `false` is simply overwritten).
var srgt_socket = false;
// Capture surfaces: frames are drawn from the <video> onto the <canvas>.
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var video = document.getElementById('video');
// Plain (unencrypted ws://) WebSocket to the streaming server.
window.srgt_socket = new WebSocket("ws://185.25.150.192:1987");
window.srgt_socket.addEventListener('open', function (event) {
window.srgt_socket.send('Hello Server!');
// Start capturing and pushing frames only once the socket is open.
start_send();
});
window.srgt_socket.addEventListener('close', function (event) {
console.log('closing');
});
// Opens the default camera, then streams one PNG-encoded frame over the
// WebSocket roughly every 33 ms (~30 fps).
function start_send() {
    var mediaConfig = { video: true };
    var errBack = function(e) {
        console.log('An error has occurred!', e);
    };

    // Put video listeners into place
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia(mediaConfig).then(function(stream) {
            // srcObject replaces the deprecated createObjectURL(stream)
            // pattern — modern browsers throw for MediaStream arguments there.
            video.srcObject = stream;
            video.play();
        }).catch(errBack); // don't leave the rejection unhandled
    }
    /* Legacy code below! */
    else if (navigator.getUserMedia) { // Standard
        navigator.getUserMedia(mediaConfig, function(stream) {
            // createObjectURL fixes the original `video.src = stream`
            // assignment, which never worked (a MediaStream is not a URL).
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.webkitGetUserMedia) { // WebKit-prefixed
        navigator.webkitGetUserMedia(mediaConfig, function(stream) {
            video.src = window.webkitURL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.mozGetUserMedia) { // Mozilla-prefixed
        navigator.mozGetUserMedia(mediaConfig, function(stream) {
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    }

    setInterval(function() {
        // Skip the frame unless the socket is open — sending on a closed
        // socket throws, and pushing a megabyte-scale data URL every 33 ms
        // can also overrun the server and get the connection dropped.
        if (window.srgt_socket.readyState !== WebSocket.OPEN) {
            return;
        }
        context.drawImage(video, 0, 0, 320, 100);
        // Encode the frame once and reuse it (the original called
        // toDataURL twice per tick).
        var frame = canvas.toDataURL("image/png");
        document.getElementById('img').src = frame;
        window.srgt_socket.send(JSON.stringify({ src: frame }));
    }, 33);
}
I'm using WebSockets. On the server side I use Node.js. I don't know why the connection is closed after a while (it works for a few milliseconds). Data is sent, then the connection closes. I'm running this script from localhost.
I'm making a page to take snapshots with the webcam, but it suddenly stopped working on every computer. Did I make a typo I can't see myself, or does anyone have an idea how to fix this?
I've added the drawing part as well.
// Put event listeners into place
// NOTE(review): this snippet is truncated as pasted — the click handler and
// the DOMContentLoaded callback below are never closed with `});`.
window.addEventListener("DOMContentLoaded", function() {
// Grab elements, create settings, etc.
var canvas = document.getElementById("canvas"),
context = canvas.getContext("2d"),
video = document.getElementById("video"),
videoObj = { "video": true },
// Intended snapshot encoding settings (not used in the visible code).
image_format= "jpeg",
jpeg_quality= 85,
errBack = function(error) {
console.log("Video capture error: ", error.code);
};
// Put video listeners into place
// NOTE(review): `video.src = stream` below is invalid — a MediaStream is
// not a URL. Modern browsers need `video.srcObject = stream` (historically
// window.URL.createObjectURL(stream)); createObjectURL(MediaStream) was
// also removed from browsers, which is the likely reason this page
// "suddenly stopped working" everywhere.
if(navigator.getUserMedia) { // Standard
navigator.getUserMedia(videoObj, function(stream) {
video.src = stream;
video.play();
$("#snap").show();
}, errBack);
} else if(navigator.webkitGetUserMedia) { // WebKit-prefixed
navigator.webkitGetUserMedia(videoObj, function(stream){
video.src = window.URL.createObjectURL(stream);
video.play();
$("#snap").show();
}, errBack);
} else if(navigator.mozGetUserMedia) { // moz-prefixed
navigator.mozGetUserMedia(videoObj, function(stream){
video.src = window.URL.createObjectURL(stream);
video.play();
$("#snap").show();
}, errBack);
}
// Get-Save Snapshot - image
// Copy the current video frame onto the 640x480 canvas, then swap the UI
// from live preview to the captured still (jQuery fades/show/hide).
document.getElementById("snap").addEventListener("click", function() {
context.drawImage(video, 0, 0, 640, 480);
// the fade only works on firefox?
$("#video").fadeOut("slow");
$("#canvas").fadeIn("slow");
$("#snap").hide();
$("#reset").show();
$("#upload").show();
You never draw your video to the canvas in this part of the code.
Also, navigator.getUserMedia is not the "standard" anymore, it has been updated to navigator.mediaDevices.getUserMedia which will return a Promise.
// Mirror the webcam into <canvas id="c"> (elements with an id are reachable
// as same-named globals, hence the bare `c`).
var ctx = c.getContext('2d');
var vid = document.createElement('video');

// Size the canvas to the video's intrinsic resolution, then start the
// render loop once frames are decodable.
vid.oncanplay = function() {
    c.width = this.videoWidth;
    c.height = this.videoHeight;
    draw();
};

navigator.mediaDevices.getUserMedia({
    video: true
}).then((stream) => {
    vid.srcObject = stream;
    vid.play();
}).catch((err) => {
    // Surface denial/absence of a camera instead of leaving an
    // unhandled promise rejection.
    console.error('getUserMedia failed:', err);
});

// Copies the current video frame on every animation frame (~display rate).
function draw() {
    ctx.drawImage(vid, 0, 0);
    requestAnimationFrame(draw);
}
<canvas id="c"></canvas>
And a fiddle for chrome since it doesn't allow gUM in SO-snippets.
Ps : if you need to support older implementations, check the official WebRTC polyfill, adapter.js
I want to start camera when clicking on the button and show the preview through javascript.
// Starts the camera + microphone preview; the nested takePicture() copies
// the current frame onto the page's <canvas>.
// NOTE(review): `that` is not defined in this snippet — it presumably refers
// to an enclosing object; confirm its binding at the call site.
function emitStream() {
    // Asking permission to get the user media.
    // If permission granted, assign the stream to the HTML5 video element.
    // (mediaDevices.getUserMedia + srcObject replace the deprecated
    // webkitGetUserMedia + window.URL.createObjectURL(stream) pattern.)
    navigator.mediaDevices.getUserMedia({
        video: true,
        audio: true
    }).then(function(stream) {
        that._video = document.querySelector('video');
        that._video.srcObject = stream;
    }).catch(function(error) {
        console.error('getUserMedia failed:', error);
    });

    function takePicture() {
        // Assigning the video stream to the canvas to create a picture.
        that._canvas = document.querySelector('canvas');
        var context = that._canvas.getContext('2d');
        // Clear the whole canvas first. The original clearRect(0, 0, 0, 0)
        // cleared a zero-area rectangle, i.e. did nothing.
        context.clearRect(0, 0, that._canvas.width, that._canvas.height);
        context.drawImage(that._video, 0, 0, 400, 300);
    }
}
This code was published by David Walsh — "Camera and Video Control with HTML5". Try the following code:
// Wire up the camera preview once the DOM is ready.
window.addEventListener("DOMContentLoaded", function() {
    // Grab elements, create settings, etc.
    var canvas = document.getElementById("canvas"),
        context = canvas.getContext("2d"),
        video = document.getElementById("video"),
        videoObj = { "video": true },
        errBack = function(error) {
            console.log("Video capture error: ", error);
        };

    // Prefer the modern promise-based API. The prefixed entry points below
    // are deprecated, and the original "Standard" branch assigned the stream
    // directly to video.src, which never worked (a MediaStream is not a URL).
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia(videoObj).then(function(stream) {
            // srcObject replaces the deprecated createObjectURL(stream) pattern.
            video.srcObject = stream;
            video.play();
        }).catch(errBack);
    } else if (navigator.getUserMedia) { // Legacy unprefixed
        navigator.getUserMedia(videoObj, function(stream) {
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.webkitGetUserMedia) { // WebKit-prefixed
        navigator.webkitGetUserMedia(videoObj, function(stream) {
            video.src = window.webkitURL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.mozGetUserMedia) { // Firefox-prefixed
        navigator.mozGetUserMedia(videoObj, function(stream) {
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    }
}, false);
The click handler that triggers the snapshot is as follows:
// Take a snapshot: paint the current video frame onto the 640x480 canvas.
document
    .getElementById("snap")
    .addEventListener("click", () => {
        context.drawImage(video, 0, 0, 640, 480);
    });