getUserMedia - facingMode - JavaScript

I am currently using an Android tablet and getUserMedia to take pictures in my program.
Apparently, the default camera used by getUserMedia is the front camera. How do I make the rear camera the default?
Here is my code for getUserMedia:
navigator.getUserMedia({
    "audio": false,
    "video": {
        mandatory: {
            minWidth: this.params.dest_width,
            minHeight: this.params.dest_height,
            //facingMode: "environment",
        },
    }
},
function(stream) {
    // got access, attach stream to video
    video.src = window.URL.createObjectURL( stream ) || stream;
    Webcam.stream = stream;
    Webcam.loaded = true;
    Webcam.live = true;
    Webcam.dispatch('load');
    Webcam.dispatch('live');
    Webcam.flip();
},
function(err) {
    return self.dispatch('error', "Could not access webcam.");
});
I inserted facingMode in the "mandatory" part, but it didn't work.
Please help.

Update: facingMode is now available in Chrome for Android through the adapter.js polyfill!
facingMode is not yet implemented natively in Chrome for Android, but it works natively in Firefox for Android.
You must use the standard constraints, however (use an https fiddle for Chrome):
var gum = mode =>
    navigator.mediaDevices.getUserMedia({video: {facingMode: {exact: mode}}})
    .then(stream => (video.srcObject = stream))
    .catch(e => log(e));

var stop = () => video.srcObject && video.srcObject.getTracks().forEach(t => t.stop());

var log = msg => div.innerHTML += msg + "<br>";

<button onclick="stop();gum('user')">Front</button>
<button onclick="stop();gum('environment')">Back</button>
<div id="div"></div><br>
<video id="video" height="320" autoplay></video>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
The { exact: } syntax means the constraint is required, and the call fails if the user doesn't have a matching camera. If you leave it out, the constraint is optional, which in Firefox for Android means it only changes the default selection in the camera chooser in the permission prompt.
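For comparison, a minimal sketch of the optional (non-exact) form, which only expresses a preference and falls back to whatever camera is available:

navigator.mediaDevices.getUserMedia({ video: { facingMode: "environment" } })
    .then(stream => { video.srcObject = stream; })
    .catch(e => log(e));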

Using Peter's code (https://stackoverflow.com/a/41618462/7723861), I came up with this solution to get the rear camera:
function handleSuccess(stream) {
    window.stream = stream; // make stream available to browser console
    video.srcObject = stream;
}

function handleError(error) {
    console.log('navigator.getUserMedia error: ', error);
}

var DEVICES = [];
var final = null;

navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
    var arrayLength = devices.length;
    for (var i = 0; i < arrayLength; i++) {
        var tempDevice = devices[i];
        // For each device, push to the DEVICES list those of kind "videoinput" (cameras),
        // and if the camera has the right facing mode, assign it to "final"
        if (tempDevice.kind == "videoinput") {
            DEVICES.push(tempDevice);
            if (tempDevice.facingMode == "environment" || tempDevice.label.indexOf("facing back") >= 0) {
                final = tempDevice;
            }
        }
    }
    var totalCameras = DEVICES.length;
    // If we couldn't find a suitable camera, pick the last one... change this to whatever works for you
    if (final == null) {
        //console.log("no suitable camera, getting the last one");
        final = DEVICES[totalCameras - 1];
    }
    // Set the constraints and call getUserMedia
    var constraints = {
        audio: false,
        video: {
            deviceId: {exact: final.deviceId}
        }
    };
    navigator.mediaDevices.getUserMedia(constraints)
    .then(handleSuccess).catch(handleError);
})
.catch(function(err) {
    console.log(err.name + ": " + err.message);
});
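One caveat worth noting: device labels are typically empty until the user has granted camera permission, so the label check above may not match on a first visit. A sketch of requesting (and immediately stopping) a throwaway stream first so that enumerateDevices() returns populated labels:

// Ask for any camera once, stop it, then enumerate with labels available
navigator.mediaDevices.getUserMedia({ video: true }).then(function(tempStream) {
    tempStream.getTracks().forEach(function(t) { t.stop(); });
    return navigator.mediaDevices.enumerateDevices();
}).then(function(devices) {
    // ... same device-picking logic as above
});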

Deploying our web app to Android through Cordova, I tried multiple solutions to access the rear camera. The solution that worked for me was:
constraints = {
    audio: false,
    video: {
        width: 400,
        height: 300,
        deviceId: deviceId ? {exact: deviceId} : undefined
    }
};
Retrieving the deviceId through:
navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
    // devices is an array of accessible audio and video inputs;
    // deviceId is the property I used to switch cameras
})
.catch(function(err) {
    console.log(err.name + ": " + err.message);
});
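A minimal sketch of how the rear camera's deviceId could be picked out of that list (matching on the label is a heuristic, not a guaranteed API):

navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
    var deviceId;
    devices.forEach(function(device) {
        // labels vary between devices and browsers, so this match is best-effort
        if (device.kind === "videoinput" && /back|rear|environment/i.test(device.label)) {
            deviceId = device.deviceId;
        }
    });
    // deviceId can now be plugged into the constraints above
});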
I chose not to use a Cordova plugin so that if we choose to move away from Cordova, there would not be such a hefty migration.

A pretty dandy snippet you can use is:
var front = false;
document.getElementById('flip-button').onclick = function() { front = !front; };

var constraints = { video: { facingMode: (front ? "user" : "environment") } };
Hopefully this works for you.
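Note that toggling the constraints object by itself does not switch the camera; the current stream has to be stopped and getUserMedia called again. A sketch of a complete handler (the video and flip-button element ids are illustrative):

var front = false;
var video = document.getElementById('video');

document.getElementById('flip-button').onclick = function() {
    front = !front;
    // stop the current stream before requesting the other camera
    if (video.srcObject) {
        video.srcObject.getTracks().forEach(function(t) { t.stop(); });
    }
    navigator.mediaDevices.getUserMedia({ video: { facingMode: front ? "user" : "environment" } })
        .then(function(stream) { video.srcObject = stream; })
        .catch(console.error);
};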

In newer versions of Chrome (after v52) the adapter.js solutions no longer seem to work, so I solved the problem by enumerating devices first. Here is my solution. I am not sure if there is a better way to flip the camera and show the video on the screen, but I have to stop the track first and get a new stream.
let Video = function() {
    let cameras = [];
    let currCameraIndex = 0;
    let constraints = {
        audio: true,
        video: {
            deviceId: { exact: "" }
        }
    };
    let videoCanvas = $('video#gum');

    this.initialize = function() {
        return enumerateDevices()
            .then(startVideo);
    };

    this.flipCamera = function() {
        currCameraIndex += 1;
        if (currCameraIndex >= cameras.length) {
            currCameraIndex = 0;
        }
        if (window.stream) {
            window.stream.getVideoTracks()[0].stop();
        }
        return startVideo();
    };

    function enumerateDevices() {
        return navigator.mediaDevices.enumerateDevices()
            .then(function(devices) {
                devices.forEach(function(device) {
                    console.log(device);
                    if (device.kind === "videoinput") {
                        cameras.push(device.deviceId);
                    }
                });
                console.log(cameras);
            });
    }

    function startVideo() {
        constraints.video.deviceId.exact = cameras[currCameraIndex];
        return navigator.mediaDevices.getUserMedia(constraints)
            .then(handleSuccess).catch(handleError);
    }

    function handleSuccess(stream) {
        videoCanvas[0].srcObject = stream;
        window.stream = stream;
    }

    function handleError(error) {
        alert(error);
    }
};
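A short usage sketch (assuming a <video id="gum"> element and a flip button exist on the page):

let videoController = new Video();
videoController.initialize();

// wire a button up to switch cameras
document.getElementById('flip').onclick = function() {
    videoController.flipCamera();
};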

This would work for the rear camera:
navigator.mediaDevices.getUserMedia({
    video: {
        facingMode: "environment",
        height: {<heightValueHere>},
        width: {<widthValueHere>}
    }
})
.then(function(stream) {
    window.stream = stream;
    video.srcObject = stream;
})
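For example, with illustrative values filled in (the numbers below are placeholders, not requirements):

navigator.mediaDevices.getUserMedia({
    video: {
        facingMode: "environment",
        height: { ideal: 720 },
        width: { ideal: 1280 }
    }
})
.then(function(stream) {
    video.srcObject = stream;
})
.catch(function(err) {
    console.error(err);
});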

I use the following for webcam.js; call LoadWebcam():
async function LoadWebcam() {
    var i
    var frontdev
    var backdev
    const tempStream = await navigator.mediaDevices.getUserMedia({video: true})
    const devices = await navigator.mediaDevices.enumerateDevices()
    // Check all the video inputs and find the back camera (backdev)
    devices.forEach(function(device) {
        if (device.kind === 'videoinput') {
            if (device.label && device.label.length > 0) {
                if (device.label.toLowerCase().indexOf('back') >= 0)
                    backdev = device.deviceId
                else if (device.label.toLowerCase().indexOf('front') >= 0)
                    frontdev = device.deviceId
            }
        }
    })
    // Stop the temporary stream
    const tracks = tempStream.getTracks()
    if (tracks)
        for (let t = 0; t < tracks.length; t++) tracks[t].stop()
    // Load the webcam
    Webcam.set({
        width: 320,
        height: 240,
        image_format: 'png',
        jpeg_quality: 90,
        flip_horiz: true,
        constraints: {
            video: true,
            deviceId: {exact: backdev}
        }
    });
    Webcam.attach('#my_camera')
}
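Once the camera is attached, a snapshot can be taken with webcam.js's snap() call; a minimal sketch (the #result element is illustrative):

function takeSnapshot() {
    Webcam.snap(function(data_uri) {
        // data_uri is a base64-encoded image that can be displayed or uploaded
        document.getElementById('result').innerHTML = '<img src="' + data_uri + '"/>';
    });
}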

Related

How to restart track in WebRTC video calling

I implemented video calling using WebRTC with the https://github.com/webtutsplus/videoChat-WebFrontend library. But when the sender switches the camera, the sender's video stops at the receiver's end. I followed the "Unable to change camera / stream for WebRTC call" link to switch the camera in a video call between 2 users, but the receiver no longer gets the sender's video after the switch.
Can anyone please give me a solution to this issue? Thanks.
This is the code to switch the camera:
$(".btn_rear_camera").click(function() {
if (cameratype == "user") {
capture('environment');
} else {
capture('user');
}
});
function capture(facingMode) {
cameratype = facingMode;
localStream.getTracks().forEach(function(track) {
track.stop();
});
var constraints = {
video: {
deviceId: devicesIds[1]
},
audio: true
};
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
replaceTracks(stream);
}).catch(function(error) {
});
}
function replaceTracks(newStream) {
var elementId = "localVideo";
detachMediaStream(elementId);
newStream.getTracks().forEach(function(track) {
localStream.addTrack(track);
});
attachMediaStream(elementId, newStream);
// optionally, if you have active peer connections:
_replaceTracksForPeer(peerConnection);
function _replaceTracksForPeer(peer) {
peer.getSenders().map(function(sender) {
sender.replaceTrack(newStream.getTracks().find(function(track) {
return track.kind === sender.track.kind;
}));
});
}
}
function detachMediaStream(id) {
var elem = document.getElementById(id);
if (elem) {
elem.pause();
if (typeof elem.srcObject === 'object') {
elem.srcObject = null;
} else {
elem.src = '';
}
}
};
function attachMediaStream(id, stream) {
var elem = document.getElementById(id);
if (elem) {
if (typeof elem.srcObject === 'object') {
elem.srcObject = stream;
} else {
elem.src = window.URL.createObjectURL(stream);
}
elem.onloadedmetadata = function(e) {
elem.play();
};
} else {
throw new Error('Unable to attach media stream');
}
};

How to console.log each frame in the video as a byte array?

The following is the code that I use to stream from my webcam onto a video element.
I want each frame to be logged as a byte array in the console; basically, I want to get each frame as a byte array. I'm a newbie, so please provide a detailed explanation of how I can approach this.
var video = document.querySelector("#video");
// Basic settings for the video to get from Webcam
const constraints = {
audio: false,
video: {
width: 475,
height: 475,
},
};
// This condition will ask permission to user for Webcam access
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices
.getUserMedia(constraints)
.then(function (stream) {
video.srcObject = stream;
})
.catch(function (err0r) {
console.log("Something went wrong!");
});
}
//console.log(context.drawImage(video, 0, 0));
let a = document.addEventListener("keydown", function (e) {
if (e.key === "Escape") {
stop(e);
}
});
function stop(e) {
console.log("video stopped!");
var stream = video.srcObject;
var tracks = stream.getTracks();
for (var i = 0; i < tracks.length; i++) {
var track = tracks[i];
track.stop();
}
video.srcObject = null;
}
Thanks.
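A minimal sketch of one common approach: draw the current video frame onto a canvas and read its pixel bytes with getImageData (the canvas size and frame-grabbing loop below are illustrative assumptions):

const canvas = document.createElement("canvas");
canvas.width = 475;
canvas.height = 475;
const context = canvas.getContext("2d");

function logFrame() {
    // copy the current video frame onto the canvas
    context.drawImage(video, 0, 0, canvas.width, canvas.height);
    // getImageData().data is a Uint8ClampedArray of RGBA bytes (4 bytes per pixel)
    const bytes = context.getImageData(0, 0, canvas.width, canvas.height).data;
    console.log(bytes);
    requestAnimationFrame(logFrame); // grab roughly one frame per display refresh
}

video.addEventListener("play", function () {
    requestAnimationFrame(logFrame);
});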

HTML userMedia facingMode: "environment" doesn't work on Android phone

It keeps using the front-facing camera instead of the back camera.
This is my code. I added facingMode: {exact: "environment"}, but it doesn't work:
const constraints = {
    video: true,
    facingMode: { exact: 'environment' }
};

if ('mediaDevices' in navigator && 'getUserMedia' in navigator.mediaDevices) {
    console.log("Let's get this party started")
}

navigator.mediaDevices.getUserMedia(constraints)
    .then((stream) => {video.srcObject = stream});
function displayImage() {
    const selectedFile = document.getElementById('fileinput')
    //var image = document.getElementById('output')
    //image.src = URL.createObjectURL(selectedFile.files[0]);
    //selectedFile.files[0]
    const img = new Image()
    img.src = URL.createObjectURL(selectedFile.files[0])
    canvas.width = video.videoWidth
    canvas.height = video.videoHeight
    video.style.display = "none"
    canvas.style.display = "inline"
    console.log(img)
    console.log("image uploaded")
    img.onload = function() {
        canvas.getContext('2d').drawImage(img, 0, 0, video.videoWidth, video.videoHeight);
        console.log('the image is drawn');
    }
}
Your constraints are not set correctly.
facingMode is a member of the video constraint, so it should be:
const constraints = {
    video: {
        facingMode: {
            exact: "environment"
        }
    }
};
Live Fiddle, to be run from a device with a back camera.
The facingMode constraint is incompletely implemented, especially on mobile devices.
I have found that the label member of the device object contains the string "back" for an environment-facing camera and "front" for a user-facing camera on a wide range of mobile devices, Android and iOS. (Sometimes those strings are partially in upper case.) So you could do something like this. It's a bit of a hairball compared to facingMode, but it works.
/* get user's permission to muck around with video devices */
const tempStream = await navigator.mediaDevices.getUserMedia({video: true})
const devices = await navigator.mediaDevices.enumerateDevices()
let frontDeviceId
let backDeviceId
if (devices.length > 0) {
    /* defaults so all this will work on a desktop */
    frontDeviceId = devices[0].deviceId;
    backDeviceId = devices[0].deviceId;
}
/* look for front and back devices */
devices.forEach(device => {
    if (device.kind === 'videoinput') {
        if (device.label && device.label.length > 0) {
            if (device.label.toLowerCase().indexOf('back') >= 0)
                backDeviceId = device.deviceId
            else if (device.label.toLowerCase().indexOf('front') >= 0)
                frontDeviceId = device.deviceId
        }
    }
})
/* close the temp stream */
const tracks = tempStream.getTracks()
if (tracks)
    for (let t = 0; t < tracks.length; t++) tracks[t].stop()
/* open the device you want (deviceId belongs inside the video constraint) */
const constraints = {
    video: {
        deviceId: {exact: backDeviceId}
    }
}
const stream = await navigator.mediaDevices.getUserMedia(constraints)
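The resulting stream can then be attached to a video element in the usual way (the videoElement name is illustrative):

videoElement.srcObject = stream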
The best way for me is to use "ideal", so it works both on a PC and on a phone:
const constraints = {
    video: {
        facingMode: {
            ideal: "environment"
        }
    }
};

btn.onclick = e => {
    navigator.mediaDevices.getUserMedia(constraints)
        .then((stream) => {video.srcObject = stream})
        .catch(console.error);
};
https://jsfiddle.net/Zibri/pk7en85u/

Choose webcams via getUserMedia

I am a beginner with getUserMedia; I just got some code from Google and have been able to work with it. But I have to show options in my web app from which a user can select a webcam: the primary one (laptop) or a secondary one (connected via USB).
I tried the code below, which works for the primary (laptop) webcam, but when I add the USB webcam it automatically selects the USB webcam.
var canvas = document.getElementById("canvas"),
context = canvas.getContext("2d"),
video = document.getElementById("video"),
imagegrid = document.getElementById("imagegrid"),
videoObj = { "video": true },
errBack = function(error) {
console.log("Video capture error: ", error.code);
};
var video = document.querySelector("#video");
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia || navigator.oGetUserMedia;
if (navigator.getUserMedia) {      
navigator.getUserMedia({video: true}, handleVideo, videoError);
}
function handleVideo(stream) {
video.src = window.URL.createObjectURL(stream);
}
function videoError(e) {
// do something
}
// Trigger photo take
document.getElementById("video").addEventListener("click", function() {
draw(video, canvas, imagegrid);
});
Is it possible to show options for both webcams?
Thanks
The function navigator.getUserMedia() will only give you the default camera (with the exception of Firefox, which gives you an option of which camera to share with the web application).
To avoid this problem you should use navigator.mediaDevices.enumerateDevices() and then navigator.mediaDevices.getUserMedia(constraints).
Example:
navigator.mediaDevices.enumerateDevices()
.then(gotDevices)
.catch(errorCallback);

...

function gotDevices(deviceInfos) {
    ...
    for (var i = 0; i !== deviceInfos.length; ++i) {
        var deviceInfo = deviceInfos[i];
        var option = document.createElement('option');
        option.value = deviceInfo.deviceId;
        if (deviceInfo.kind === 'audioinput') {
            option.text = deviceInfo.label ||
                'Microphone ' + (audioInputSelect.length + 1);
            audioInputSelect.appendChild(option);
        } else if (deviceInfo.kind === 'audiooutput') {
            option.text = deviceInfo.label || 'Speaker ' +
                (audioOutputSelect.length + 1);
            audioOutputSelect.appendChild(option);
        } else if (deviceInfo.kind === 'videoinput') {
            option.text = deviceInfo.label || 'Camera ' +
                (videoSelect.length + 1);
            videoSelect.appendChild(option);
        }
        ...
    }
}

navigator.mediaDevices.getUserMedia(constraints)
.then(function(stream) {
    var videoTracks = stream.getVideoTracks();
    console.log('Got stream with constraints:', constraints);
    console.log('Using video device: ' + videoTracks[0].label);
    stream.onended = function() {
        console.log('Stream ended');
    };
    window.stream = stream; // make variable available to console
    video.srcObject = stream;
})
.catch(function(error) {
    // ...
});
The above functions use promises and require a more complex approach than yours, so you will need to do some reading in order to adapt this method. Have a look at the link below for some examples:
https://developers.google.com/web/updates/2015/10/media-devices
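A minimal sketch of how the constraints might be built from the selected option (the videoSelect element and start() helper follow the same pattern as the linked example and are illustrative):

function start() {
    var videoSource = videoSelect.value; // deviceId chosen in the dropdown
    var constraints = {
        audio: false,
        video: { deviceId: videoSource ? { exact: videoSource } : undefined }
    };
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function(stream) {
            window.stream = stream;
            video.srcObject = stream;
        })
        .catch(function(error) {
            console.log('getUserMedia error: ', error);
        });
}

videoSelect.onchange = start;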

Specify webcam with sourceId in getUserMedia

I have two webcams and am trying to specify which one to show in my video tag. My HTML is simply <video autoplay></video>. Here is my JavaScript:
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

var constraints = {
    video: {
        optional: [{
            sourceId: "64-character-alphanumeric-source-id-here"
        }]
    }
};

var video = document.querySelector("video");

function successCallback(stream) {
    window.stream = stream; // stream available to console
    if (window.URL) {
        video.src = window.URL.createObjectURL(stream);
    } else {
        video.src = stream;
    }
    video.play();
}

function errorCallback(error) {
    console.log("navigator.getUserMedia error: ", error);
}

navigator.getUserMedia(constraints, successCallback, errorCallback);
However, even when I change the sourceId to my second webcam, I can't get it to display that webcam. js.fiddle code
This code is working for me in mobile Chrome; it tries to detect the back-facing video stream.
MediaStreamTrack.getSources(function(sourceInfos) {
    var videoSourceId;
    for (var i = 0; i != sourceInfos.length; ++i) {
        var sourceInfo = sourceInfos[i];
        if (sourceInfo.kind == "video" && sourceInfo.facing == "environment") {
            videoSourceId = sourceInfo.id;
        }
    }
    var constraints = {
        audio: false,
        video: {
            optional: [{sourceId: videoSourceId}]
        }
    };
    ....
});
Note that there is no fallback in this code.
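Also note that MediaStreamTrack.getSources() has since been deprecated in favor of navigator.mediaDevices.enumerateDevices(). A rough modern equivalent of the snippet above (a sketch; enumerateDevices does not expose a facing property, so it matches on the label instead):

navigator.mediaDevices.enumerateDevices().then(function(devices) {
    var videoSourceId;
    devices.forEach(function(device) {
        // labels may be empty until the user has granted camera permission
        if (device.kind === "videoinput" && /back|rear/i.test(device.label)) {
            videoSourceId = device.deviceId;
        }
    });
    var constraints = {
        audio: false,
        video: videoSourceId ? { deviceId: { exact: videoSourceId } } : true
    };
    // ... then call navigator.mediaDevices.getUserMedia(constraints)
});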
