JavaScript DOMException: The associated Track is in an invalid state

I'm trying to create a photo-capture web app served from a Node.js server, and I'm using the JavaScript code below.
const btn = document.querySelector('#btn');
btn.addEventListener('click', (event) => {
  navigator.mediaDevices.getUserMedia({video: true})
    .then(gotMedia)
    .catch(error => console.error('getUserMedia() error:', error));
  event.preventDefault();
});
And the gotMedia function is this:
function gotMedia(mediaStream) {
  const mediaStreamTrack = mediaStream.getVideoTracks()[0];
  const imageCapture = new ImageCapture(mediaStreamTrack);
  console.log(imageCapture);
  const canvas = document.querySelector('canvas');
  // ...
  imageCapture.grabFrame()
    .then(imageBitmap => {
      canvas.width = imageBitmap.width;
      canvas.height = imageBitmap.height;
      canvas.getContext('2d').drawImage(imageBitmap, 0, 0);
    })
    .catch(error => console.error('grabFrame() error:', error));
}
Everything works fine and the image capture is OK, but when I snap photos rapidly one after another, an error occurs:
grabFrame() error: DOMException: The associated Track is in an invalid state.
This usually happens when I capture too many photos (clicking rapidly for more than about 20 seconds), but it has also happened within the first five snapshots. Does anyone know what's going on and what I should change in order to fix this? Thank you for your time.

According to the spec, this error can be the result of the track being in an unacceptable state. In the Chromium sources I found the method that performs this check: the track must be live, enabled, and not muted.
I've overcome the error using code like this:
const promise = getPrevImagePromise();
if (!promise && !(imageCapture.track.readyState != 'live' || !imageCapture.track.enabled || imageCapture.track.muted)) {
  const imagePromise = imageCapture.grabFrame()
    .then((image) => {
      // do work with image
    })
    .then(() => {
      deletePrevImagePromise();
    })
    .catch((error) => {
      //
    });
  setPrevImagePromise(imagePromise);
}
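The same check can be pulled out into a small helper; a minimal sketch (isTrackGrabbable is my name for it, not something from the Chromium source):
function isTrackGrabbable(track) {
  // mirrors the state check above: grabFrame() only succeeds on a
  // live, enabled, unmuted track
  return track.readyState === 'live' && track.enabled && !track.muted;
}

// usage: skip the grab when a previous one is still in flight
// or the track is in a bad state
if (!getPrevImagePromise() && isTrackGrabbable(imageCapture.track)) {
  setPrevImagePromise(imageCapture.grabFrame());
}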

I ran into the same issue and was unable to get the bleeding-edge grabFrame function to work reliably.
Instead, you can draw from an intermediate video source. In your function, that would look something like this (untested):
function gotMedia(mediaStream) {
  const mediaStreamTrack = mediaStream.getVideoTracks()[0];
  const canvas = document.querySelector('canvas');
  const video = document.createElement('video');
  video.autoplay = true;
  video.srcObject = mediaStream;
  video.onplay = () => {
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    canvas.getContext('2d').drawImage(video, 0, 0);
  };
}
Of course, if you're going to be capturing a lot of frames, it'd be best to create a single video alongside the stream and to capture from that each time.
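A minimal sketch of that persistent-video variant (untested, same caveat as above; assumes mediaStream is the stream from getUserMedia):
// create one video element bound to the stream, once
const video = document.createElement('video');
video.autoplay = true;
video.srcObject = mediaStream;

// then copy the current frame onto the canvas on demand, without
// touching ImageCapture or the track state at all
function captureFrame(canvas) {
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  canvas.getContext('2d').drawImage(video, 0, 0);
}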

My two cents:
let preview = document.getElementById('preview');
let recording = document.getElementById('recording');
let photo = document.getElementById('photo');
let photoButton = document.getElementById('photoButton');
let imageHolder = document.getElementById('image-holder');

async function startWebcam() {
  return navigator.mediaDevices.getUserMedia({
    video: true
  });
}

photoButton.addEventListener('click', async () => {
  let webcamStream = await startWebcam();
  preview.srcObject = webcamStream;
  const track = webcamStream.getVideoTracks()[0];
  const imageCapture = new ImageCapture(track);
  const bitmap = await imageCapture.grabFrame();
  const canvas = document.getElementById('canvas');
  canvas.width = bitmap.width;
  canvas.height = bitmap.height;
  const context = canvas.getContext('2d');
  context.drawImage(bitmap, 0, 0, bitmap.width, bitmap.height);
  let image = canvas.toDataURL();
  imageHolder.innerHTML += `<img src="${image}">`;
  track.stop();
});
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>Desktoprecord</title>
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bulma@0.9.0/css/bulma.min.css">
  <script defer src="https://use.fontawesome.com/releases/v5.3.1/js/all.js"></script>
  <script src="node_modules/axios/dist/axios.min.js"></script>
  <script src="app4.js" defer></script>
</head>
<div class="container">
  <video id="preview" width="25%" height="auto" autoplay muted poster="https://placekitten.com/g/150"></video>
  <canvas id="canvas"></canvas>
  <img src="http://placekitten.com/g/320/261" id="photo" alt="photo">
  <div id="image-holder">
    To be replaced with pretty pictures
  </div>
  <div class="buttons are-large">
    <button id="photoButton" class="button is-success"><i class="fas fa-camera"></i></button>
  </div>
</div>
Each time you are going to take a photo you start the camera, and once you have the picture you stop the track.
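Note that this means a fresh getUserMedia() call (permission check and camera spin-up) on every click, so there is some latency per photo; for rapid captures it may be smoother to keep one track alive and guard grabFrame() as in the answers above.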

I wanted to call grabFrame() in a loop (reusing the track without stop()). Thanks to fomasha, my code works as follows:
async function aSleep(ms) {
  return new Promise(R => setTimeout(R, ms));
}

var prevP = null;
function getPrevImagePromise() { return prevP; }
function setPrevImagePromise(p) { prevP = p; }
function deletePrevImagePromise() { prevP = null; }

async function aRun() {
  // gImageTrack and context are assumed to be set up elsewhere
  imageCapture = new ImageCapture(gImageTrack);
  while (true) {
    const promise = getPrevImagePromise();
    if (!promise && !(imageCapture.track.readyState != 'live' || !imageCapture.track.enabled || imageCapture.track.muted)) {
      const imagePromise = imageCapture.grabFrame()
        .then((image) => {
          context.drawImage(image, 0, 0, 640, 480);
        })
        .then(() => {
          deletePrevImagePromise();
        })
        .catch((error) => {
          //
        });
      setPrevImagePromise(imagePromise);
    }
    await aSleep(500);
  }
}
Refactored:
function zCamera() {
  var T = this;
  T.aInit = async function() {
    T.mStm = await navigator.mediaDevices.getUserMedia({ video: { width: 4096, height: 3200, facingMode: 'environment' } });
    T.mTrk = T.mStm.getTracks()[0];
    T.mCap = new ImageCapture(T.mTrk);
  };
  T.getSetting = function() {
    return T.mTrk.getSettings();
  };
  T.aGrabFrame = async function() {
    var P = new Promise((R) => {
      T.mCap.grabFrame().then((vImg) => {
        R(vImg);
      }).catch((error) => {
        R(null); // resolve with null instead of rejecting
      });
    });
    return P;
  };
}

var gContext;
var oCamera;

async function aInitCamera() {
  var canvas = document.getElementById('canvas');
  gContext = canvas.getContext('2d');
  oCamera = new zCamera();
  await oCamera.aInit();
}

async function aRun() {
  while (true) {
    var wImg = await oCamera.aGrabFrame();
    if (wImg != null)
      gContext.drawImage(wImg, 0, 0, 640, 480);
    await aSleep(100);
  }
}

async function jMain() {
  await aInitCamera();
  aRun();
}
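Note the design choice in aGrabFrame(): resolving to null instead of rejecting means the polling loop in aRun() simply skips the frame whenever the track is in a bad state and tries again on the next iteration, with no try/catch needed inside the loop.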

Related

JavaScript img.onload() not firing and/or promises always pending on Firefox but not Chrome

I'm loading one or more pictures from local storage and drawing them onto a canvas on top of another picture.
In the code below, I first wrap each image load in a promise that resolves only after the image has loaded. Then, after all the promises have resolved, I send the created image data.
var Promises = [];
for (const selectedOverlayImage of selectedOverlayImages.values()) {
  Promises.push(new Promise((resolve, reject) => {
    const overlayImg = new Image();
    overlayImg.onerror = () => { console.log(`Image ${selectedOverlayImage.value} loading error`); reject(); };
    overlayImg.onload = () => {
      canvasContext.drawImage(overlayImg, 0, 0, canvas.width, canvas.height);
      resolve();
    };
    overlayImg.src = `overlayImages/${selectedOverlayImage.value}.png`;
    console.log(selectedOverlayImage.value);
  }));
}
console.log("1");
var ret = Promise.all(Promises).then(() => {
  console.log("IN");
  let imageData = canvas.toDataURL('image/png');
  fetch("http://localhost:8000/picture-editing.php", { // call PHP to create picture in database
    method: "POST",
    headers: {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"},
    body: `imageData=${imageData}`
  });
  alert('Images successfully loaded!'); // this creates a gain of time before the refresh, allowing the new picture to be present more often after refresh
  window.location.reload();
}).catch((error) => { console.log(error); });
console.log("2");
console.log(ret);
setTimeout(() => {
  console.log('the queue is now empty');
  console.log(ret);
});
On Chrome I am able to create one image consisting of multiple pictures; the console logs from a successful run with wine as the overlay image show that the Promise.all callback gets entered.
On Firefox I am not able to create one image out of multiple pictures: the code never enters the Promise.all callback, and Promise.all never resolves.
Thank you for your help!
Reproduce the error
Set the following in index.php.
<h1 class="pageTitle">Create Picture</h1>
<form method="get">
  <input name="modeOfPicture" type="submit" value="webcam"/>
  <input name="modeOfPicture" type="submit" value="upload"/>
</form>
<div class="wrapperPictureEditing">
  <div id="getPicture" class="block1PictureEditing"></div>
  <canvas id="takePictureCanvas" style="display:none;" width="320" height="240"></canvas>
  <form id="takePictureButton" class="block2PictureEditing">
    <p>Choose Overlay Image:</p>
    <label><input type="checkbox" name="overlay" value="wine"> Wine</label><br>
    <button type="submit">Take photo</button><br>
  </form>
<script>
function displaySelectedPicture(input) {
  var selectedPictureDisplay = document.getElementById('selectedPictureDisplay');
  if (input.files && input.files[0]) {
    var reader = new FileReader();
    reader.onload = function (e) {
      selectedPictureDisplay.src = e.target.result;
    };
    reader.readAsDataURL(input.files[0]);
  } else { selectedPictureDisplay.removeAttribute('src') }
}

function takePicture(camera) {
  const takePictureButton = document.getElementById('takePictureButton');
  takePictureButton.addEventListener("submit", () => {
    let canvas = document.getElementById('takePictureCanvas');
    let canvasContext = canvas.getContext('2d');
    selectedOverlayImages = document.querySelectorAll('input[name="overlay"]:checked');
    console.log(selectedOverlayImages);
    if (camera) {
      const stream = document.querySelector('video');
      canvasContext.drawImage(stream, 0, 0, canvas.width, canvas.height);
    } else {
      const selectedImage = document.getElementById('selectedPictureDisplay');
      canvasContext.drawImage(selectedImage, 0, 0, canvas.width, canvas.height);
    }
    var Promises = [];
    for (const selectedOverlayImage of selectedOverlayImages.values()) {
      Promises.push(new Promise((resolve, reject) => {
        const overlayImg = new Image();
        overlayImg.onerror = () => { console.log(`Image ${selectedOverlayImage.value} loading error`); reject(); };
        overlayImg.onload = () => {
          canvasContext.drawImage(overlayImg, 0, 0, canvas.width, canvas.height);
          resolve();
        };
        overlayImg.src = `image.png`;
        console.log(selectedOverlayImage.value);
      }));
    }
    console.log("1");
    var ret = Promise.all(Promises).then(() => {
      console.log("IN");
      let imageData = canvas.toDataURL('image/png');
      alert('Images successfully loaded!'); // this creates a gain of time before the refresh, allowing the new picture to be present more often after refresh
      window.location.reload();
    }).catch((error) => { console.log(error); });
    console.log("2");
    console.log(ret);
    setTimeout(() => {
      console.log('the queue is now empty');
      console.log(ret);
    });
  });
}

function setupVideoStream(stream) {
  const video = document.querySelector('video');
  video.srcObject = stream;
  video.play();
}

function errorMsg(msg, error) {
  const errorElement = document.getElementById('error');
  errorElement.innerHTML += `${msg}`;
  if (typeof error !== 'undefined') {
    console.error(error);
  }
}

function streamError(error) {
  if (error.name === 'NotAllowedError') {
    errorMsg('Permissions have not been granted to use your camera' +
      ', you need to allow the page access to your camera in ' +
      'order to take pictures. Instead you can upload an image.');
  } else if (error.name === "NotFoundError") {
    errorMsg('No camera has been found on this device. ' +
      'Instead you can upload an image.');
  } else {
    errorMsg(`getUserMedia error: ${error.name}`, error);
  }
}

async function init() {
  var getPictureHTML = document.getElementById('getPicture');
  var modeOfPicture = (new URLSearchParams(document.location.search)).get('modeOfPicture');
  if (modeOfPicture === null || modeOfPicture === "webcam") {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({video: true});
      getPictureHTML.insertAdjacentHTML('afterbegin',
        "<video width='320' height='240' autoplay></video><br>");
      setupVideoStream(stream);
      takePicture(true);
    } catch (error) {
      getPictureHTML.insertAdjacentHTML('afterbegin',
        "<p id='error' class='error'></p>" +
        "<label><input type='file' name='selectedPicture' accept='image/*' " +
        "onchange='displaySelectedPicture(this);'><br>Upload image<br></label>" +
        "<br><img id='selectedPictureDisplay' width='320' height='240'/>");
      streamError(error);
      takePicture(false);
    }
  } else {
    getPictureHTML.insertAdjacentHTML('afterbegin',
      "<label><input type='file' name='selectedPicture' accept='image/*' " +
      "onchange='displaySelectedPicture(this);'><br>Upload image<br></label>" +
      "<br><img id='selectedPictureDisplay' width='320' height='240'/>");
    takePicture(false);
  }
}
init();
</script>
In the same directory put an image named image.png (any example image will do).
Then run the server with this command in the same directory:
php -S localhost:8000
Visit localhost:8000 in Chrome and Firefox and watch the console: on success, "IN" should appear after taking a picture with the wine overlay image.
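As an aside (not from the original post): modern browsers also expose HTMLImageElement.decode(), which returns a promise directly and avoids wiring up onload/onerror by hand. A minimal sketch of the loop body using it:
const overlayImg = new Image();
overlayImg.src = `overlayImages/${selectedOverlayImage.value}.png`;
// decode() resolves once the image is ready to draw and rejects on failure
Promises.push(overlayImg.decode().then(() => {
  canvasContext.drawImage(overlayImg, 0, 0, canvas.width, canvas.height);
}));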

How can I differentiate the sound level of an audio track in a mediastream in JavaScript? [duplicate]

I'm using the Google Cloud Speech-to-Text API with a Node.js back-end.
The app needs to be able to listen for voice commands and transmit them to the back-end as a buffer. For this, I need to send the buffer of the preceding audio when silence is detected.
Any help would be appreciated, including with the JS code below.
if (!navigator.getUserMedia)
  navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia || navigator.msGetUserMedia;

if (navigator.getUserMedia) {
  navigator.getUserMedia({audio: true}, success, function (e) {
    alert('Error capturing audio.');
  });
} else alert('getUserMedia not supported in this browser.');

var recording = false;
window.startRecording = function () {
  recording = true;
};
window.stopRecording = function () {
  recording = false;
  // window.Stream.end();
};

function success(e) {
  audioContext = window.AudioContext || window.webkitAudioContext;
  context = new audioContext();
  // the sample rate is in context.sampleRate
  audioInput = context.createMediaStreamSource(e);
  var bufferSize = 4096;
  recorder = context.createScriptProcessor(bufferSize, 1, 1);
  recorder.onaudioprocess = function (e) {
    if (!recording) return;
    console.log('recording');
    var left = e.inputBuffer.getChannelData(0);
    console.log(convertoFloat32ToInt16(left));
  };
  audioInput.connect(recorder);
  recorder.connect(context.destination);
}
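(Side note, not part of the original question: createScriptProcessor and ScriptProcessorNode are deprecated; in current browsers the replacement is an AudioWorklet. A rough sketch, assuming a separate module file named capture-processor.js:)
// capture-processor.js -- runs on the audio rendering thread
class CaptureProcessor extends AudioWorkletProcessor {
  process(inputs) {
    const channel = inputs[0][0]; // first input, first channel (Float32Array)
    if (channel) this.port.postMessage(channel.slice(0));
    return true; // keep the node alive
  }
}
registerProcessor('capture-processor', CaptureProcessor);

// main thread (inside an async replacement for success(stream))
const context = new AudioContext();
await context.audioWorklet.addModule('capture-processor.js');
const source = context.createMediaStreamSource(stream);
const worklet = new AudioWorkletNode(context, 'capture-processor');
worklet.port.onmessage = (e) => {
  // e.data is a Float32Array block, analogous to getChannelData(0) above
};
source.connect(worklet);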
I'm not too sure what exactly is being asked in the question, so this answer is only intended to give a way to detect silences in an AudioStream.
To detect silence in an AudioStream, you can use an AnalyserNode, on which you call the getByteFrequencyData method at regular intervals and check whether there were sounds higher than your expected level for a given time.
You can set the threshold level directly with the minDecibels property of the AnalyserNode.
function detectSilence(
  stream,
  onSoundEnd = _ => {},
  onSoundStart = _ => {},
  silence_delay = 500,
  min_decibels = -80
) {
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  const streamNode = ctx.createMediaStreamSource(stream);
  streamNode.connect(analyser);
  analyser.minDecibels = min_decibels;

  const data = new Uint8Array(analyser.frequencyBinCount); // will hold our data
  let silence_start = performance.now();
  let triggered = false; // trigger only once per silence event

  function loop(time) {
    requestAnimationFrame(loop); // we'll loop every 60th of a second to check
    analyser.getByteFrequencyData(data); // get current data
    if (data.some(v => v)) { // if there is data above the given db limit
      if (triggered) {
        triggered = false;
        onSoundStart();
      }
      silence_start = time; // set it to now
    }
    if (!triggered && time - silence_start > silence_delay) {
      onSoundEnd();
      triggered = true;
    }
  }
  loop();
}

function onSilence() {
  console.log('silence');
}
function onSpeak() {
  console.log('speaking');
}

navigator.mediaDevices.getUserMedia({
  audio: true
})
  .then(stream => {
    detectSilence(stream, onSilence, onSpeak);
    // do something else with the stream
  })
  .catch(console.error);
And as a fiddle, since Stack Snippets may block gUM.
You can use the SpeechRecognition result event to determine when a word or phrase has been recognized (for example, ls, cd, pwd or other commands), and pass the .transcript of the SpeechRecognitionAlternative to speechSynthesis.speak(). In the attached start and end events of the SpeechSynthesisUtterance, call .start() or .resume() on a MediaRecorder object to which the MediaStream is passed; then convert the Blob from the dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().
We could alternatively use the audiostart or soundstart and audioend or soundend events of SpeechRecognition to record the user's actual voice, though the end events may not fire consistently in relation to the actual start and end of audio captured by a standard system microphone.
<!DOCTYPE html>
<html>
<head>
  <title>Speech Recognition Recording</title>
</head>
<body>
  <input type="button" value="Stop speech command recognition" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
      audio: true
    })
    .then(stream => {
      const recorder = new MediaRecorder(stream);
      const recognition = new webkitSpeechRecognition();
      const synthesis = new SpeechSynthesisUtterance();
      const handleResult = e => {
        recognition.onresult = null;
        console.log(e.results);
        const result = e.results[e.results.length - 1];
        if (result.isFinal) {
          const [{transcript}] = result;
          console.log(transcript);
          synthesis.text = transcript;
          window.speechSynthesis.speak(synthesis);
        }
      }
      synthesis.onstart = () => {
        if (recorder.state === "inactive") {
          recorder.start()
        } else {
          if (recorder.state === "paused") {
            recorder.resume();
          }
        }
      }
      synthesis.onend = () => {
        recorder.pause();
        recorder.requestData();
      }
      recorder.ondataavailable = async(e) => {
        if (stream.active) {
          try {
            const blobURL = URL.createObjectURL(e.data);
            const request = await fetch(blobURL);
            const ab = await request.arrayBuffer();
            console.log(blobURL, ab);
            recognition.onresult = handleResult;
            // URL.revokeObjectURL(blobURL);
          } catch (err) {
            throw err
          }
        }
      }
      recorder.onpause = e => {
        console.log("recorder " + recorder.state);
      }
      recognition.continuous = true;
      recognition.interimResults = false;
      recognition.maxAlternatives = 1;
      recognition.start();
      recognition.onend = e => {
        console.log("recognition ended, stream.active", stream.active);
        if (stream.active) {
          console.log(e);
          // the service disconnects after a period of time
          recognition.start();
        }
      }
      recognition.onresult = handleResult;
      stream.oninactive = () => {
        console.log("stream ended");
      }
      document.getElementById("stop")
        .onclick = () => {
          console.log("stream.active:", stream.active);
          if (stream && stream.active && recognition) {
            recognition.abort();
            recorder.stop();
            for (let track of stream.getTracks()) {
              track.stop();
            }
            console.log("stream.active:", stream.active);
          }
        }
    })
    .catch(err => {
      console.error(err)
    });
  </script>
</body>
</html>
plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview
The simplest approach would be to use the .pause(), .resume(), and .stop() methods of MediaRecorder() to let the user start, pause, and stop recording the audio captured via navigator.mediaDevices.getUserMedia(), then convert the resulting Blob to an ArrayBuffer, if that is what the API expects to be POSTed to the server.
<!DOCTYPE html>
<html>
<head>
  <title>User Media Recording</title>
</head>
<body>
  <input type="button" value="Start/resume recording audio" id="start">
  <input type="button" value="Pause recording audio" id="pause">
  <input type="button" value="Stop recording audio" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
      audio: true
    })
    .then(stream => {
      const recorder = new MediaRecorder(stream);
      recorder.ondataavailable = async(e) => {
        if (stream.active) {
          try {
            const blobURL = URL.createObjectURL(e.data);
            const request = await fetch(blobURL);
            const ab = await request.arrayBuffer();
            // do stuff with `ArrayBuffer` of recorded audio
            console.log(blobURL, ab);
            // we do not need the `Blob URL`, we can revoke the object
            // URL.revokeObjectURL(blobURL);
          } catch (err) {
            throw err
          }
        }
      }
      recorder.onpause = e => {
        console.log("recorder " + recorder.state);
        recorder.requestData();
      }
      stream.oninactive = () => {
        console.log("stream ended");
      }
      document.getElementById("start")
        .onclick = () => {
          if (recorder.state === "inactive") {
            recorder.start();
          } else {
            recorder.resume();
          }
          console.log("recorder.state:", recorder.state);
        }
      document.getElementById("pause")
        .onclick = () => {
          if (recorder.state === "recording") {
            recorder.pause();
          }
          console.log("recorder.state:", recorder.state);
        }
      document.getElementById("stop")
        .onclick = () => {
          if (recorder.state === "recording" || recorder.state === "paused") {
            recorder.stop();
          }
          for (let track of stream.getTracks()) {
            track.stop();
          }
          document.getElementById("start").onclick = null;
          document.getElementById("pause").onclick = null;
          console.log("recorder.state:", recorder.state,
            "stream.active", stream.active);
        }
    })
    .catch(err => {
      console.error(err)
    });
  </script>
</body>
</html>
plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview

Unmute/mute and stop video in a video call

This code is not working: when user1 mutes their video, it stops on user1's screen, but on user2's screen it doesn't stop. The same problem occurs with the mic. Please help me out.
I have only tried it on localhost. Please tell me whether this problem occurs on localhost only or will also occur in deployed web applications.
const socket = io("/");
const chatInputBox = document.getElementById("chat_message");
const all_messages = document.getElementById("all_messages");
const main__chat__window = document.getElementById("main__chat__window");
const videotable = document.getElementById("video-table");
const myVideo = document.createElement("video");
myVideo.muted = true; // so we don't hear our own voice

var peer = new Peer(undefined, {
  path: "/peerjs",
  host: "/",
  port: "3000",
});

let myVideoStream;
const peers = {};
var getUserMedia =
  navigator.getUserMedia ||
  navigator.webkitGetUserMedia ||
  navigator.mozGetUserMedia;

// setting initial controls of video and passing them as constraints
const constraints = {
  'video': true,
  'audio': true
}

navigator.mediaDevices
  .getUserMedia(constraints)
  .then((stream) => {
    myVideoStream = stream;
    addVideoStream(myVideo, myVideoStream); // call to function addVideoStream
    // answering calls
    peer.on("call", (call) => {
      call.answer(myVideoStream);
      const video = document.createElement("video");
      call.on("stream", (userVideoStream) => {
        addVideoStream(video, userVideoStream); // show stream in some video/canvas element
      });
    });
    socket.on("user_connected", (userId) => { // receiving info
      connectToNewUser(userId, stream); // call function with id and stream
    });
    // adding event for chat messages
    document.addEventListener("keydown", (e) => {
      if (e.which === 13 && chatInputBox.value != "") {
        socket.emit("message", chatInputBox.value);
        chatInputBox.value = "";
      }
    });
    // adding text to chat window
    socket.on("createMessage", (msg) => {
      //console.log(msg);
      let li = document.createElement("li");
      li.innerHTML = msg;
      all_messages.append(li);
      main__chat__window.scrollTop = main__chat__window.scrollHeight; // scroll to latest message
    });
  });

// for disconnecting a user
socket.on('user_disconnected', userId => {
  if (peers[userId]) peers[userId].close()
});

peer.on("call", function (call) {
  getUserMedia(constraints,
    function (stream) {
      call.answer(stream); // answer the call with an A/V stream
      const video = document.createElement("video");
      call.on("stream", function (remoteStream) {
        addVideoStream(video, remoteStream); // show stream in some video/canvas element
      });
    },
    function (err) {
      console.log("Failed to get local stream", err);
    }
  );
});

peer.on("open", (id) => { // send with an id for user
  // "open" fires when you successfully connect to the PeerServer
  socket.emit("join_room", ROOM_ID, id); // emitting event
});

// fetch an array of devices of a certain type
async function getConnectedDevices(type) {
  const devices = await navigator.mediaDevices.enumerateDevices();
  return devices.filter(device => device.kind === type)
}

// open camera with echoCancellation for better audio
// (note: cameraId is unused here and only audio constraints are set)
async function openCamera(cameraId) {
  const constraints = {
    'audio': {'echoCancellation': true}
  }
  return await navigator.mediaDevices.getUserMedia(constraints);
}

// note: getConnectedDevices() returns a promise, so this check
// would need an await (inside an async function) to actually work
const cameras = getConnectedDevices('videoinput');
if (cameras && cameras.length > 0) {
  const stream = openCamera(cameras[0].deviceId);
}

function connectToNewUser(userId, streams) {
  const call = peer.call(userId, streams);
  //console.log(call);
  const video = document.createElement("video");
  call.on("stream", (userVideoStream) => {
    //console.log(userVideoStream);
    addVideoStream(video, userVideoStream);
  });
  call.on('close', () => {
    video.remove() // remove the video element when the call closes
  });
  peers[userId] = call;
};

const addVideoStream = (videoEl, stream) => {
  videoEl.srcObject = stream;
  videoEl.addEventListener("loadedmetadata", () => {
    videoEl.play();
  });
  videotable.append(videoEl); // adding video to front-end
  let totalUsers = document.getElementsByTagName("video").length;
  if (totalUsers > 1) {
    for (let index = 0; index < totalUsers; index++) {
      document.getElementsByTagName("video")[index].style.width =
        100 / totalUsers + "%";
    }
  }
};

// JS for pause and play of video
const playStop = () => {
  let enabled = myVideoStream.getVideoTracks()[0].enabled;
  if (enabled) {
    myVideoStream.getVideoTracks()[0].enabled = false;
    setPlayVideo();
  } else {
    myVideoStream.getVideoTracks()[0].enabled = true;
    setStopVideo();
  }
};

// JS for pause and play of audio
const muteUnmute = () => {
  let enabled = myVideoStream.getAudioTracks()[0].enabled;
  if (enabled) {
    myVideoStream.getAudioTracks()[0].enabled = false;
    setUnmuteButton();
  } else {
    myVideoStream.getAudioTracks()[0].enabled = true;
    setMuteButton();
  }
};

// setting icon representing the current state of video
const setPlayVideo = () => {
  const html = `<i class="unmute fa fa-pause-circle"></i>
    <span class="unmute">Resume Video</span>`;
  document.getElementById("playPauseVideo").innerHTML = html;
};

// setting icon representing the current state of video
const setStopVideo = () => {
  const html = `<i class=" fa fa-video-camera"></i>
    <span class="">Pause Video</span>`;
  document.getElementById("playPauseVideo").innerHTML = html;
};

// setting icon representing the current state of audio
const setUnmuteButton = () => {
  const html = `<i class="unmute fa fa-microphone-slash"></i>
    <span class="unmute">Unmute</span>`;
  document.getElementById("muteButton").innerHTML = html;
};

// setting icon representing the current state of audio
const setMuteButton = () => {
  const html = `<i class="fa fa-microphone"></i>
    <span>Mute</span>`;
  document.getElementById("muteButton").innerHTML = html;
};
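One thing to check, as a sketch with hypothetical event names (not from the original code): setting track.enabled = false only changes what your own track sends; the other browser gets no explicit notification, so if user2's UI should reflect the mute, you have to signal it yourself, for example over the existing socket.io connection (the server must relay the event to the rest of the room):
// sender side: toggle, then broadcast the new state
const muteUnmuteAndNotify = () => {
  muteUnmute();
  socket.emit("mute_state", {
    audio: myVideoStream.getAudioTracks()[0].enabled,
    video: myVideoStream.getVideoTracks()[0].enabled,
  });
};

// receiver side: update the remote user's tile accordingly
socket.on("mute_state", (state) => {
  console.log("peer mute state changed:", state);
  // e.g. show or hide a muted-microphone icon on that user's video
});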

How to remove this [object Promise] tag from the camera window in JS?

In this code, when you run the webpage a countdown runs from 5 to 0 and then the camera window opens to capture your picture, but in that camera window this [object Promise] text also gets flashed. Any insight would be appreciated on how to remove this line from my camera window.
I am a newbie to JS.
Code:
<body>
  <div class="container">
    <h1 id="count">5</h1>
  </div>
  <!-- Stream video via webcam -->
  <div class="video-wrap">
    <video id="video" playsinline autoplay></video>
  </div>
  <!-- Trigger canvas web API -->
  <div class="controller">
    <button id="snap">Capture</button>
  </div>
  <!-- Webcam video snapshot -->
  <canvas id="canvas" width="640" height="480"></canvas>
  <script type="text/javascript">
    'use strict';
    const video = document.getElementById('video');
    const canvas = document.getElementById('canvas');
    const snap = document.getElementById('snap');
    const errorMsgElement = document.getElementById('span#ErrorMsg');
    const constraints = {
      audio: true,
      video: {
        width: 1280, height: 720
      }
    };
    async function init() {
      try {
        const stream = await navigator.mediaDevices.getUserMedia(constraints);
        handleSuccess(stream);
      }
      catch (e) {
        errorMsgElement.innerHTML = `navigator.getUserMedia.error:${e.toString()}`;
      }
    }
    // Success
    function handleSuccess(stream) {
      window.stream = stream;
      video.srcObject = stream;
    }
    var counter = 5;
    setInterval(function () {
      counter--;
      if (counter >= 0) {
        var id = document.getElementById("count");
        id.innerHTML = counter;
      }
      if (counter == 0) {
        id.innerHTML = init();
      }
    }, 1000);
    var context = canvas.getContext('2d');
    snap.addEventListener("click", function () {
      context.drawImage(video, 0, 0, 640, 480);
    });
  </script>
</body>
init is an async function, meaning that it returns a promise, which only later resolves (or rejects) once the asynchronous work completes. When you set your innerHTML to the return value of init, you are stringifying that still-pending promise, which is where "[object Promise]" comes from.
To solve this, make the setInterval callback an async function and await init(). Note that init() doesn't return a value, so there is no point assigning its result to innerHTML at all; just await the call:
setInterval(async () => {
  counter--;
  if (counter >= 0) {
    var id = document.getElementById("count");
    id.innerHTML = counter;
  }
  if (counter == 0) {
    await init();
  }
}, 1000);

Why is there only one preview when there are actually 2 (JavaScript, Screen-casting/Camera recording simultaneously)?

I'm working on a quiz (as you may gather from my previous posts), and I've come across this problem: I need two previews, one for the camera and one for the screen recording, but I only get one preview. Can anybody explain why this is happening?
Also, is there a way to remove the previews entirely and instead save the streams to a video file?
<p><button id="start">Start Capture</button>
<br><button id="stop">Stop Capture</button></p>
<video id="screen" autoplay></video>
<video id="camera" autoplay></video>
<pre id="log" style="display: none;"></pre>
<script>
  const screenElem = document.getElementById("screen");
  const cameraElem = document.getElementById("camera");
  const logElem = document.getElementById("log");
  const startElem = document.getElementById("start");
  const stopElem = document.getElementById("stop");

  // Options for getDisplayMedia()
  var displayMediaOptions = {
    video: {
      cursor: "always"
    },
    audio: false
  };

  // Set event listeners for the start and stop buttons
  startElem.addEventListener("click", function() {
    startCapture1();
    startCapture2();
  }, false);
  stopElem.addEventListener("click", function() {
    stopCapture();
  }, false);

  console.log = msg => logElem.innerHTML += `${msg}<br>`;
  console.error = msg => logElem.innerHTML += `<span class="error">${msg}</span><br>`;
  console.warn = msg => logElem.innerHTML += `<span class="warn">${msg}<span><br>`;
  console.info = msg => logElem.innerHTML += `<span class="info">${msg}</span><br>`;

  async function startCapture1() {
    logElem.innerHTML = "";
    try {
      cameraElem.srcObject = await navigator.mediaDevices.getUserMedia(displayMediaOptions);
      dumpOptionsInfo();
    } catch(err) {
      console.error("Error: " + err);
    }
  }

  async function startCapture2() {
    try {
      screenElem.srcObject = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
      dumpOptionsInfo();
    } catch(err) {
      console.error("Error: " + err);
    }
  }

  function stopCapture(evt) {
    let tracksCam = cameraElem.srcObject.getTracks();
    let tracksScr = screenElem.srcObject.getTracks();
    // stop both sets of tracks (the original referenced an undefined `tracks` variable here)
    tracksCam.forEach(track => track.stop());
    tracksScr.forEach(track => track.stop());
    cameraElem.srcObject = null;
    screenElem.srcObject = null;
  }

  function dumpOptionsInfo() {
    const cameraTrack = cameraElem.srcObject.getVideoTracks()[0];
    const screenTrack = screenElem.srcObject.getVideoTracks()[0];
    console.info("(Camera)Track settings:");
    console.info(JSON.stringify(cameraTrack.getSettings(), null, 2));
    console.info("(Camera)Track constraints:");
    console.info(JSON.stringify(cameraTrack.getConstraints(), null, 2));
    console.info("(Screen)Track settings:");
    console.info(JSON.stringify(screenTrack.getSettings(), null, 2));
    console.info("(Screen)Track constraints:");
    console.info(JSON.stringify(screenTrack.getConstraints(), null, 2));
  }
</script>
Thanks for any help!
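On the second part of the question: instead of previewing, you can feed each stream to a MediaRecorder and download the result as a file. A rough sketch (saveStream is a made-up helper name; .webm output assumed):
// records a MediaStream and offers the result as a downloadable .webm file
function saveStream(stream, filename) {
  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = e => chunks.push(e.data);
  recorder.onstop = () => {
    const blob = new Blob(chunks, { type: 'video/webm' });
    const a = document.createElement('a');
    a.href = URL.createObjectURL(blob);
    a.download = filename;
    a.click();
  };
  recorder.start();
  return recorder; // call .stop() on the returned recorder to finish and download
}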
