It keeps using the front-facing camera instead of the back camera.
This is my code: I added facingMode: { exact: "environment" }, but it doesn't work.
const constraints = {
video: true,
facingMode: { exact: 'environment' }
};
if ('mediaDevices' in navigator && 'getUserMedia' in navigator.mediaDevices) {
console.log("Let's get this party started")
}
navigator.mediaDevices.getUserMedia(constraints)
.then((stream) => { video.srcObject = stream });
function displayImage()
{
const selectedFile = document.getElementById('fileinput')
//var image =document.getElementById('output')
//image.src = URL.createObjectURL(selectedFile.files[0]);
//selectedFile.files[0]
const img = new Image()
img.src = URL.createObjectURL(selectedFile.files[0])
canvas.width = video.videoWidth
canvas.height = video.videoHeight
video.style.display="none"
canvas.style.display ="inline"
console.log(img)
console.log("image uploaded")
img.onload = function() {
canvas.getContext('2d').drawImage(img, 0, 0,video.videoWidth,video.videoHeight);
console.log('the image is drawn');
}
}
Your constraints are not set correctly.
facingMode is a member of the video constraint, so it should be
const constraints = {
video: {
facingMode: {
exact: "environment"
}
}
};
Live Fiddle, to be run from a device with a back camera.
The facingMode constraint is incompletely implemented, especially on mobile devices.
I have found that the label member of the device object contains the string back for an environment-facing camera and front for a user-facing camera on a wide range of mobile devices, Android and iOS. (Sometimes those strings are partially in upper case.) So you could do something like this. It's a bit of a hairball compared to facingMode, but it works.
/* get user's permission to muck around with video devices */
const tempStream = await navigator.mediaDevices.getUserMedia({video:true})
const devices = await navigator.mediaDevices.enumerateDevices()
let frontDeviceId
let backDeviceId
if (devices.length > 0) {
/* defaults so all this will work on a desktop */
frontDeviceId = devices[0].deviceId;
backDeviceId = devices[0].deviceId;
}
/* look for front and back devices */
devices.forEach (device => {
if( device.kind === 'videoinput' ) {
if( device.label && device.label.length > 0 ) {
if( device.label.toLowerCase().indexOf( 'back' ) >= 0 )
backDeviceId = device.deviceId
else if( device.label.toLowerCase().indexOf( 'front' ) >= 0 )
frontDeviceId = device.deviceId
}
}
})
/* close the temp stream */
const tracks = tempStream.getTracks()
if( tracks )
for( let t = 0; t < tracks.length; t++ ) tracks[t].stop()
/* open the device you want */
const constraints = {
video: {
deviceId: { exact: backDeviceId }
}
}
const stream = await navigator.mediaDevices.getUserMedia(constraints)
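Note that this code uses await, so it has to run inside an async function. For reference, here is a condensed, self-contained sketch of the same approach (openBackCamera is a hypothetical name; it assumes a <video id="video"> element and falls back to the default camera when no 'back' label is found):
async function openBackCamera(videoEl) {
/* temporary stream so device labels are populated */
const tempStream = await navigator.mediaDevices.getUserMedia({video: true})
const devices = await navigator.mediaDevices.enumerateDevices()
const back = devices.find(d =>
d.kind === 'videoinput' && d.label.toLowerCase().includes('back'))
tempStream.getTracks().forEach(t => t.stop())
videoEl.srcObject = await navigator.mediaDevices.getUserMedia({
video: back ? {deviceId: {exact: back.deviceId}} : true
})
}
openBackCamera(document.getElementById('video')).catch(console.error)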
The best way for me is to use "ideal", so it will work both on a PC and on a phone:
const constraints = {
video: {
facingMode: {
ideal: "environment"
}
}
};
btn.onclick = e => {
navigator.mediaDevices.getUserMedia(constraints)
.then((stream) => {video.srcObject = stream})
.catch( console.error );
};
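Unlike exact, an ideal value is only a preference: if no environment-facing camera exists (as on a typical PC), the browser falls back to whatever camera is available instead of rejecting the request.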
https://jsfiddle.net/Zibri/pk7en85u/
Related
I have a project requirement to record and store the currently running video stream. I have used WebRTC for video streaming, and to record the stream I have used the MediaRecorder API. It works fine in desktop browsers, but it does not work in mobile browsers.
Any idea why it is not working in mobile browsers?
Following is the code snippet:
componentDidMount = async () => {
recordingStream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true,
});
const mergedAudio = await this.mergeAudioStreams();
console.log("audio track length... ", mergedAudio.length);
const tracks = [
...recordingStream.getVideoTracks(),
...mergedAudio,
];
captureStream = new MediaStream(tracks);
mediaRecorder = new MediaRecorder(captureStream, options);
mediaRecorder.ondataavailable = this.handleDataAvailable;
mediaRecorder.start();
}
handleDataAvailable = async event => {
console.log("data-available", event);
if (event.data.size > 0) {
recordedChunks.push(event.data);
this.download();
} else {
// ...
console.log("in else");
}
};
download = async () => {
console.log("in download fn");
var blob = new Blob(recordedChunks, {
type: "video/mp4",
});
//called the API to store the recorded video
}
mergeAudioStreams = async () => {
console.log("recordScreen fn called");
const ctx = new AudioContext();
const dest = ctx.createMediaStreamDestination();
let localSource, remoteSource;
if (this.state.localStream.getAudioTracks().length > 0) {
localSource = ctx.createMediaStreamSource(this.state.localStream);
}
if (this.state.selectedVideo.stream.getAudioTracks().length > 0) {
remoteSource = ctx.createMediaStreamSource(
this.state.selectedVideo.stream
);
}
const localGain = ctx.createGain();
const remoteGain = ctx.createGain();
localGain.gain.value = 0.7;
remoteGain.gain.value = 0.7;
if (localSource) localSource.connect(localGain).connect(dest);
if (remoteSource) remoteSource.connect(remoteGain).connect(dest);
console.log("combine tracks..", dest.stream.getAudioTracks());
return dest.stream.getAudioTracks();
};
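(For what it's worth, one likely cause: getDisplayMedia screen capture is generally not supported in mobile browsers, so the very first call in componentDidMount would already reject there.)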
This code is not working correctly: when user1 mutes their video or audio, it is muted on user1's screen, but on user2's screen it does not stop. The same problem occurs with the mic. Please help me out.
I have only tried it on localhost. Please tell me whether this problem occurs only on localhost or whether it will also occur in deployed web applications.
const socket = io("/");
const chatInputBox = document.getElementById("chat_message");
const all_messages = document.getElementById("all_messages");
const main__chat__window = document.getElementById("main__chat__window");
const videotable = document.getElementById("video-table");
const myVideo = document.createElement("video");
myVideo.muted = true; // mute own video element so we don't hear our own audio
var peer = new Peer(undefined, {
path: "/peerjs",
host: "/",
port: "3000",
});
let myVideoStream;
const peers = {};
var getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
//setting initial controls of video and passing them as constraints
const constraints = {
'video': true,
'audio': true
}
navigator.mediaDevices
.getUserMedia(constraints)
.then((stream) => {
myVideoStream = stream;
addVideoStream(myVideo,myVideoStream);//call to function addVideoStream
//answering to calls
peer.on("call", (call) => {
call.answer(myVideoStream);
const video = document.createElement("video");
call.on("stream", (userVideoStream) => {
addVideoStream(video, userVideoStream);// Show stream in some video/canvas element.
});
});
socket.on("user_connected", (userId) => {//recieving info
connectToNewUser(userId, stream);//call function with id and stream
});
//adding event for messages of chat
document.addEventListener("keydown", (e) => {
if (e.key === "Enter" && chatInputBox.value != "") {
socket.emit("message", chatInputBox.value);
chatInputBox.value = "";
}
});
//adding text to chat window
socket.on("createMessage", (msg) => {
//console.log(msg);
let li = document.createElement("li");
li.innerHTML = msg;
all_messages.append(li);
main__chat__window.scrollTop = main__chat__window.scrollHeight;//scrolled to latest message
});
});
//For disconnecting user
socket.on('user_disconnected', userId => {
if (peers[userId]) peers[userId].close()
});
peer.on("call", function (call) {
getUserMedia(constraints,
function (stream) {
call.answer(stream); // Answer the call with an A/V stream.
const video = document.createElement("video");
call.on("stream", function (remoteStream) {
addVideoStream(video, remoteStream); // Show stream in some video/canvas element.
});
},
function (err) {
console.log("Failed to get local stream", err);
}
);
});
peer.on("open", (id) => {//send with an id for user
// on open will be launch when you successfully connect to PeerServ
socket.emit("join_room", ROOM_ID, id);//emiting event
});
// Fetch an array of devices of a certain type
async function getConnectedDevices(type) {
const devices = await navigator.mediaDevices.enumerateDevices();
return devices.filter(device => device.kind === type)
}
// Open camera with echoCancellation for better audio
async function openCamera(cameraId) {
const constraints = {
'audio': {'echoCancellation': true},
'video': {'deviceId': {'exact': cameraId}}
};
return navigator.mediaDevices.getUserMedia(constraints);
}
// enumerateDevices is async, so wait for the camera list before opening one
getConnectedDevices('videoinput').then(async (cameras) => {
if (cameras && cameras.length > 0) {
const stream = await openCamera(cameras[0].deviceId);
}
});
function connectToNewUser (userId, streams) {
const call = peer.call(userId, streams);
//console.log(call);
const video = document.createElement("video");
call.on("stream", (userVideoStream) => {
//console.log(userVideoStream);
addVideoStream(video, userVideoStream);
});
call.on('close', () => {
video.remove() // remove the video element when the call closes
});
peers[userId] = call;
};
const addVideoStream = (videoEl, stream) => {
videoEl.srcObject = stream;
videoEl.addEventListener("loadedmetadata", () => {
videoEl.play();
});
videotable.append(videoEl);//adding video to front-end
let totalUsers = document.getElementsByTagName("video").length;
if (totalUsers > 1) {
for (let index = 0; index < totalUsers; index++) {
document.getElementsByTagName("video")[index].style.width =
100 / totalUsers + "%";
}
}
};
//js for pause and play of video
const playStop = () => {
let enabled = myVideoStream.getVideoTracks()[0].enabled;
if (enabled) {
myVideoStream.getVideoTracks()[0].enabled = false;
setPlayVideo();
} else {
myVideoStream.getVideoTracks()[0].enabled = true;
setStopVideo();
}
};
//js of pause and play of audio
const muteUnmute = () => {
let enabled = myVideoStream.getAudioTracks()[0].enabled;
if (enabled) {
myVideoStream.getAudioTracks()[0].enabled = false;
setUnmuteButton();
} else {
myVideoStream.getAudioTracks()[0].enabled = true;
setMuteButton();
}
};
//setting icon for representing current state of video
const setPlayVideo = () => {
const html = `<i class="unmute fa fa-pause-circle"></i>
<span class="unmute">Resume Video</span>`;
document.getElementById("playPauseVideo").innerHTML = html;
};
//setting icon for representing current state of video
const setStopVideo = () => {
const html = `<i class=" fa fa-video-camera"></i>
<span class="">Pause Video</span>`;
document.getElementById("playPauseVideo").innerHTML = html;
};
//setting icon for representing current state of audio
const setUnmuteButton = () => {
const html = `<i class="unmute fa fa-microphone-slash"></i>
<span class="unmute">Unmute</span>`;
document.getElementById("muteButton").innerHTML = html;
};
//setting icon for representing current state of audio
const setMuteButton = () => {
const html = `<i class="fa fa-microphone"></i>
<span>Mute</span>`;
document.getElementById("muteButton").innerHTML = html;
};
The following is the code that I use to stream from my webcam onto a video element.
I want each frame to be logged as a byte array in the console; basically, I want to get each frame as a byte array. I'm a newbie, so please provide a detailed explanation of how I can approach this.
var video = document.querySelector("#video");
// Basic settings for the video to get from Webcam
const constraints = {
audio: false,
video: {
width: 475,
height: 475,
},
};
// This condition will ask permission to user for Webcam access
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices
.getUserMedia(constraints)
.then(function (stream) {
video.srcObject = stream;
})
.catch(function (err) {
console.log("Something went wrong!", err);
});
}
//console.log(context.drawImage(video, 0, 0));
document.addEventListener("keydown", function (e) {
if (e.key === "Escape") {
stop(e);
}
});
function stop(e) {
console.log("video stopped!");
var stream = video.srcObject;
var tracks = stream.getTracks();
for (var i = 0; i < tracks.length; i++) {
var track = tracks[i];
track.stop();
}
video.srcObject = null;
}
Thanks.
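For what it's worth, a common way to get a frame's raw bytes is to draw the current video frame onto a canvas and read it back with getImageData. A minimal sketch, assuming the video element above is already playing (grabFrameBytes is a hypothetical helper name):
function grabFrameBytes(videoEl) {
const canvas = document.createElement("canvas");
canvas.width = videoEl.videoWidth;
canvas.height = videoEl.videoHeight;
const ctx = canvas.getContext("2d");
ctx.drawImage(videoEl, 0, 0);
// .data is a Uint8ClampedArray of RGBA values, 4 bytes per pixel
return ctx.getImageData(0, 0, canvas.width, canvas.height).data;
}
// e.g. log the current frame's bytes when "f" is pressed
document.addEventListener("keydown", function (e) {
if (e.key === "f") console.log(grabFrameBytes(video));
});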
So I've recently found what looks to be a tidy example of how to read a QR code at https://codesandbox.io/s/qr-code-scanner-ilrm9?file=/index.html
When I try to set this up, I always get an error in the console saying "Uncaught TypeError: Cannot set property 'callback' of undefined", and I'm not entirely sure why, especially as the example above works.
Here is what I have so far: https://jsfiddle.net/snt95wh8/
const qrcode = window.qrcode;
const video = document.createElement("video");
const canvasElement = document.getElementById("qr-canvas");
const canvas = canvasElement.getContext("2d");
const qrResult = document.getElementById("qr-result");
const outputData = document.getElementById("outputData");
const btnScanQR = document.getElementById("btn-scan-qr");
let scanning = false;
qrcode.callback = res => {
if (res) {
outputData.innerText = res;
scanning = false;
video.srcObject.getTracks().forEach(track => {
track.stop();
});
qrResult.hidden = false;
canvasElement.hidden = true;
btnScanQR.hidden = false;
}
};
btnScanQR.onclick = () => {
navigator.mediaDevices
.getUserMedia({ video: { facingMode: "environment" } })
.then(function(stream) {
scanning = true;
qrResult.hidden = true;
btnScanQR.hidden = true;
canvasElement.hidden = false;
video.setAttribute("playsinline", true); // required to tell iOS safari we don't want fullscreen
video.srcObject = stream;
video.play();
tick();
scan();
});
};
function tick() {
canvasElement.height = video.videoHeight;
canvasElement.width = video.videoWidth;
canvas.drawImage(video, 0, 0, canvasElement.width, canvasElement.height);
scanning && requestAnimationFrame(tick);
}
function scan() {
try {
qrcode.decode();
} catch (e) {
setTimeout(scan, 300);
}
}
Any advice would be very much appreciated, thanks.
You forgot to link the qrcode library.
Sometimes JSFiddle does not allow camera permission; it is better to try on your localhost.
Updated Fiddle:
<script src="https://rawgit.com/sitepoint-editors/jsqrcode/master/src/qr_packed.js"></script>
I am currently using an Android tablet and getUserMedia to take pictures in my program.
Apparently, the default camera used by getUserMedia is the front camera. How do I use the rear camera as the default?
Here's my code for getUserMedia:
navigator.getUserMedia({
"audio": false,
"video": {
mandatory: {
minWidth: this.params.dest_width,
minHeight: this.params.dest_height,
//facingMode: "environment",
},
}
},
function(stream) {
// got access, attach stream to video
video.src = window.URL.createObjectURL( stream ) || stream;
Webcam.stream = stream;
Webcam.loaded = true;
Webcam.live = true;
Webcam.dispatch('load');
Webcam.dispatch('live');
Webcam.flip();
},
function(err) {
return self.dispatch('error', "Could not access webcam.");
});
I inserted facingMode in the "mandatory" part, but it didn't work.
Please help.
Update: facingMode is now available in Chrome for Android through the adapter.js polyfill!
facingMode is not yet implemented natively in Chrome for Android, but it works natively in Firefox for Android.
You must use standard constraints, however (use an https fiddle for Chrome):
var gum = mode =>
navigator.mediaDevices.getUserMedia({video: {facingMode: {exact: mode}}})
.then(stream => (video.srcObject = stream))
.catch(e => log(e));
var stop = () => video.srcObject && video.srcObject.getTracks().forEach(t => t.stop());
var log = msg => div.innerHTML += msg + "<br>";
<button onclick="stop();gum('user')">Front</button>
<button onclick="stop();gum('environment')">Back</button>
<div id="div"></div><br>
<video id="video" height="320" autoplay></video>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
The { exact: } syntax means the constraint is required, and things fail if the user doesn't have the right camera. If you leave it out then the constraint is optional, which in Firefox for Android means it only changes the default in the camera chooser in the permission prompt.
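If you want the back camera when one exists but any camera otherwise, one option (just a sketch, reusing the video and log helpers from the snippet above) is to retry without exact when the strict request fails:
var gumPreferBack = () =>
navigator.mediaDevices.getUserMedia({video: {facingMode: {exact: "environment"}}})
.catch(e => {
if (e.name != "OverconstrainedError") throw e;
// no environment-facing camera, so let the browser pick any camera
return navigator.mediaDevices.getUserMedia({video: true});
})
.then(stream => (video.srcObject = stream))
.catch(e => log(e));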
Using Peter's code (https://stackoverflow.com/a/41618462/7723861), I came up with this solution to get the rear camera:
function handleSuccess(stream) {
window.stream = stream; // make stream available to browser console
video.srcObject = stream;
}
function handleError(error) {
console.log('navigator.getUserMedia error: ', error);
}
var DEVICES = [];
var final = null;
navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
var arrayLength = devices.length;
for (var i = 0; i < arrayLength; i++)
{
var tempDevice = devices[i];
//FOR EACH DEVICE, PUSH TO THE DEVICES LIST THOSE OF KIND VIDEOINPUT (cameras),
//AND IF THE CAMERA HAS THE RIGHT FACING MODE, ASSIGN IT TO "final"
if (tempDevice.kind == "videoinput")
{
DEVICES.push(tempDevice);
if(tempDevice.facingMode == "environment" ||tempDevice.label.indexOf("facing back")>=0 )
{final = tempDevice;}
}
}
var totalCameras = DEVICES.length;
//If we couldn't find a suitable camera, pick the last one... change this to whatever works for you
if(final == null)
{
//console.log("no suitable camera, getting the last one");
final = DEVICES[totalCameras-1];
};
//Set the constraints and call getUserMedia
var constraints = {
audio: false,
video: {
deviceId: {exact: final.deviceId}
}
};
navigator.mediaDevices.getUserMedia(constraints).
then(handleSuccess).catch(handleError);
})
.catch(function(err) {
console.log(err.name + ": " + err.message);
});
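One caveat with this approach: browsers generally return empty labels from enumerateDevices() until the user has granted camera permission, so the label check above may match nothing on a first visit. Requesting (and then stopping) a temporary getUserMedia stream first, as in an earlier answer, populates the labels.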
Deploying our web app to Android through Cordova, I tried multiple solutions to access the rear camera. The solution that worked for me was:
constraints = {
audio: false,
video: {
width: 400,
height: 300,
deviceId: deviceId ? {exact: deviceId} : undefined
}
};
Retrieving the deviceId through:
navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
// devices is an array of accessible audio and video inputs. deviceId is the property I used to switch cameras
})
.catch(function(err) {
console.log(err.name + ": " + err.message);
});
I chose not to use a Cordova plugin so that if we choose to move away from Cordova, there would not be such a hefty migration.
A pretty dandy snippet you can use is:
var front = false;
document.getElementById('flip-button').onclick = function() { front = !front; };
var constraints = { video: { facingMode: (front ? "user" : "environment") } };
This should work for you hopefully.
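Note that toggling front by itself does not switch the camera: the constraints are only read when getUserMedia is called, so you also need to stop the current tracks and request a new stream. A minimal sketch, assuming a video element on the page:
document.getElementById('flip-button').onclick = function() {
front = !front;
if (video.srcObject) {
video.srcObject.getTracks().forEach(function(t) { t.stop(); });
}
navigator.mediaDevices.getUserMedia({
video: { facingMode: front ? "user" : "environment" }
}).then(function(stream) {
video.srcObject = stream;
}).catch(console.error);
};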
In newer versions of Chrome (after v52) the adapter.js solutions seem not to work, so I solved the problem by enumerating devices first. Here is my solution. I am not sure if there is a better way to flip the camera and show the video on the screen, but I have to stop the track first and then get a new stream.
let Video = function() {
let cameras = [];
let currCameraIndex = 0;
let constraints = {
audio: true,
video: {
deviceId: { exact: "" }
}
};
let videoCanvas = $('video#gum');
this.initialize = function() {
return enumerateDevices()
.then(startVideo);
};
this.flipCamera = function() {
currCameraIndex += 1;
if (currCameraIndex >= cameras.length) {
currCameraIndex = 0;
}
if (window.stream) {
window.stream.getVideoTracks()[0].stop();
}
return startVideo();
};
function enumerateDevices() {
return navigator.mediaDevices.enumerateDevices()
.then(function(devices) {
devices.forEach(function(device) {
console.log(device);
if (device.kind === "videoinput") {
cameras.push(device.deviceId);
}
});
console.log(cameras);
});
}
function startVideo() {
constraints.video.deviceId.exact = cameras[currCameraIndex];
return navigator.mediaDevices.getUserMedia(constraints)
.then(handleSuccess).catch(handleError);
}
function handleSuccess(stream) {
videoCanvas[0].srcObject = stream;
window.stream = stream;
}
function handleError(error) {
alert(error);
}
};
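A usage sketch (this assumes jQuery is loaded and the page has a <video id="gum" autoplay playsinline> element):
let cam = new Video();
cam.initialize(); // enumerate cameras, then start the first one
// later, e.g. from a "flip" button:
// flipButton.onclick = () => cam.flipCamera();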
This would work for the rear camera:
navigator.mediaDevices.getUserMedia({
video: {
facingMode: "environment",
height: { <heightValueHere> },
width: { <widthValueHere> }
}
})
.then(function(stream){
window.stream = stream;
video.srcObject = stream;
})
I use the following for webcam.js; call LoadWebcam():
async function LoadWebcam () {
var frontdev
var backdev
const tempStream = await navigator.mediaDevices.getUserMedia({video:true})
const devices = await navigator.mediaDevices.enumerateDevices()
//Check all the video input and find the backdev = back Cam
devices.forEach(function(device) {
if (device.kind === 'videoinput') {
if( device.label && device.label.length > 0 ) {
if( device.label.toLowerCase().indexOf( 'back' ) >= 0 )
backdev = device.deviceId
else if( device.label.toLowerCase().indexOf( 'front' ) >= 0 )
frontdev = device.deviceId
}
}
})
//Stop Stream
const tracks = tempStream.getTracks()
if( tracks )
for( let t = 0; t < tracks.length; t++ ) tracks[t].stop()
//Load the webcam,
Webcam.set({
width: 320,
height: 240,
image_format: 'png',
jpeg_quality: 90,
flip_horiz: true,
constraints: {
video: true,
deviceId: {exact: backdev }
}
});
Webcam.attach( '#my_camera' )
}
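A minimal usage sketch; the script path and element id are assumptions, and webcam.js must be loaded before this runs:
<div id="my_camera"></div>
<script src="webcam.min.js"></script>
<script>
LoadWebcam(); // async: finds the back camera, then attaches to #my_camera
</script>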