I am building a video conferencing web application using WebRTC and I have implemented features for toggling the camera, microphone, and screen sharing. The camera and screen sharing features are working as expected, but I am having an issue with the microphone button.
The issue is that after using screen sharing and then stopping it, the microphone on/off button is not working properly. I am getting an error in the console saying
"Cannot read properties of undefined (reading 'enabled')".
Before using screen sharing, the microphone button works fine.
Here's my current code for handling the buttons:
let screenStream = null;
let localStream = null;
let audioTrack = null;
let pc = null;

// Toggle screen sharing on/off
document.getElementById("share-screen-btn").addEventListener("click", async () => {
    try {
        const localVideo = document.getElementById("localVideo");
        const displayMediaOptions = {
            video: true,
            audio: true,
        };
        if (!screenStream) {
            screenStream = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
            const videoTracks = screenStream.getVideoTracks();
            await pc.getSenders().find(sender => sender.track.kind === 'video').replaceTrack(videoTracks[0]);
            localVideo.srcObject = screenStream;
            document.getElementById("share-screen-btn").classList.remove("btn-danger");
            document.getElementById("share-screen-btn").classList.add("btn-primary");
            // Disable audio track from localStream
            if (localStream) {
                audioTrack = localStream.getAudioTracks()[0];
                audioTrack.enabled = false;
            }
        } else {
            const localVideoStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
            const sender = pc.getSenders().find(sender => sender.track.kind === 'video');
            const localVideoTrack = localVideoStream.getVideoTracks()[0];
            const localAudioTrack = localVideoStream.getAudioTracks()[0];
            const localStream = new MediaStream([localVideoTrack, localAudioTrack]);
            await sender.replaceTrack(localVideoTrack);
            localVideo.srcObject = localStream;
            document.getElementById("share-screen-btn").classList.remove("btn-primary");
            document.getElementById("share-screen-btn").classList.add("btn-danger");
            screenStream.getTracks().forEach(track => track.stop());
            screenStream = null;
            // Set audioTrack from localAudioTrack
            audioTrack = localAudioTrack;
        }
    } catch (e) {
        console.error("Error sharing screen: ", e);
    }
});
// Toggle microphone on/off
document.getElementById("mute-audio-btn").addEventListener("click", () => {
    let localStream = document.getElementById("localVideo").srcObject;
    if (localStream) {
        let audioTrack = localStream.getAudioTracks()[0];
        let enabled = audioTrack.enabled;
        if (enabled) {
            audioTrack.enabled = false;
            document.getElementById("mute-audio-btn").innerHTML = '<i class="fa-solid fa-microphone-slash"></i>';
        } else {
            audioTrack.enabled = true;
            document.getElementById("mute-audio-btn").innerHTML = '<i class="fa-solid fa-microphone"></i>';
        }
    }
});

// Toggle camera on/off
document.getElementById("mute-video-btn").addEventListener("click", () => {
    let localStream = document.getElementById("localVideo").srcObject;
    if (localStream) {
        let videoTrack = localStream.getVideoTracks()[0];
        let enabled = videoTrack.enabled;
        if (enabled) {
            videoTrack.enabled = false;
            document.getElementById("mute-video-btn").innerHTML = '<i class="fa fa-video-slash"></i>';
        } else {
            videoTrack.enabled = true;
            document.getElementById("mute-video-btn").innerHTML = '<i class="fa fa-video"></i>';
        }
    }
});
If I see it correctly, audioTrack or videoTrack is undefined at the time this happens.
Try console.log()-ing the arrays returned by localStream.getAudioTracks() or screenStream.getVideoTracks() and work your way up the chain from there.
Also, it seems that to use audio while sharing the screen you are supposed to use addTrack; have a look here:
Is it possible broadcast audio with screensharing with WebRTC
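As a minimal sketch of that guard (assuming the same element IDs as in your code; note that a getDisplayMedia stream often carries no audio track at all, e.g. when the user declines to share tab or system audio), the mute handler could bail out instead of throwing:

// Hedged sketch: same handler, but tolerant of a stream without an audio track.
document.getElementById("mute-audio-btn").addEventListener("click", () => {
    const stream = document.getElementById("localVideo").srcObject;
    const audioTrack = stream && stream.getAudioTracks()[0];
    if (!audioTrack) {
        // Screen-share streams frequently have no audio track, hence the crash on `.enabled`.
        console.warn("No audio track on current stream:", stream && stream.getTracks());
        return;
    }
    audioTrack.enabled = !audioTrack.enabled;
    document.getElementById("mute-audio-btn").innerHTML = audioTrack.enabled
        ? '<i class="fa-solid fa-microphone"></i>'
        : '<i class="fa-solid fa-microphone-slash"></i>';
});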
I want the browser to ask for permission to use the mic.
It asks for permission on PC but doesn't on mobile.
Can someone tell me what I did wrong?
navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
        var mediaRecorder = new MediaRecorder(stream)
        mediaRecorder.start()
        var mediaChunks = []
        mediaRecorder.addEventListener("dataavailable", function (e) {
            mediaChunks.push(e.data)
        })
        mediaRecorder.addEventListener("stop", function () {
            var mediaBlob = new Blob(mediaChunks)
            var url = URL.createObjectURL(mediaBlob)
            var audio = document.createElement("AUDIO")
            audio.src = url
            audio.controls = true
            //audio.classList.add("ay")
            audio.setAttribute('id', 'ay')
            big.appendChild(audio)
        })
        setTimeout(function () {
            mediaRecorder.stop()
            alert("stopped")
        }, 3000)
    })
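One thing worth checking (an assumption on my part, since the question doesn't say how the page is served): getUserMedia is only exposed in secure contexts, so a page opened over plain http:// from a phone will never prompt. A quick probe:

// getUserMedia exists only in secure contexts (HTTPS or localhost).
if (!window.isSecureContext || !navigator.mediaDevices) {
    console.warn("getUserMedia unavailable: serve the page over HTTPS (or localhost)")
} else {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => stream.getTracks().forEach(track => track.stop())) // prompt worked; release the mic
        .catch(err => console.error(err.name, err.message)) // e.g. NotAllowedError, NotFoundError
}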
I have a web page that tries to play multiple video streams from the webcams attached to the system. Three cameras are attached to my system: the first is the built-in camera, the second is a USB camera, and the third is a DroidCam client. I can't play video from the built-in cam and the USB cam at the same time; the DroidCam always plays, but only one of the other two cameras works at a time.
For example:
DroidCam and USB cam = works
DroidCam and built-in camera = works
USB and built-in camera = not working
My code is:
let devices = await navigator.mediaDevices.enumerateDevices();
if (devices.length > 0) {
    log(`Available Device Count ${devices.length}`);
    for (const device of devices) {
        let localConstraints = { audio: false };
        if (device.kind === "videoinput") {
            localConstraints.video = { deviceId: device.deviceId ? { exact: device.deviceId } : undefined };
            var newStream = await navigator.mediaDevices.getUserMedia(localConstraints).catch(err => console.log(err + device.label));
            if (newStream) {
                console.log(`Device Added ${device.label}`);
                window.stream.addTrack(newStream.getVideoTracks()[0]);
            }
        }
    }
} else {
    log(`No Devices Available`);
}
Error: couldn't load 'camera label'
Two camera streams get added to the window object, and one of them is always the DroidCam.
First of all, I want to know: is this even possible?
After digging into the issue I found the real problem and a solution. The real problem was the asynchronous behavior of JavaScript, so I rewrote the loop. This should help others who are facing a similar issue.
$(document).ready(async () => {
    let leftVideo = document.querySelector('video#left');
    let rightVideo = document.querySelector('video#right');
    let middleVideo = document.querySelector('video#middle');
    let videoElemArray = [leftVideo, middleVideo, rightVideo];
    let devices = await navigator.mediaDevices.enumerateDevices();
    let i = 0;
    let videoIndx = 0;
    await new Promise(async (resolve, reject) => {
        try {
            if (devices.length == 0) return resolve();
            let funSync = async () => {
                if (devices[i].kind === "videoinput") {
                    var newStream = await navigator.mediaDevices.getUserMedia({ audio: false, video: { deviceId: devices[i].deviceId } });
                    videoElemArray[videoIndx].srcObject = newStream;
                    videoIndx++;
                }
                i++;
                if (i == devices.length) return resolve();
                else funSync();
            };
            funSync();
        } catch (e) {
            reject(e);
        }
    });
});
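For readers who prefer a flatter version: the same open-one-camera-at-a-time logic can be expressed with a plain for...of loop and await, without the manual Promise and recursion. A sketch, assuming the same three video elements:

// Open each video input in turn and attach it to the next free <video> element.
async function attachAllCameras() {
    const videoElems = [
        document.querySelector('video#left'),
        document.querySelector('video#middle'),
        document.querySelector('video#right'),
    ];
    const devices = await navigator.mediaDevices.enumerateDevices();
    let videoIndx = 0;
    for (const device of devices) {
        if (device.kind !== "videoinput" || videoIndx >= videoElems.length) continue;
        try {
            // Awaiting here guarantees the cameras are opened strictly one after another.
            const stream = await navigator.mediaDevices.getUserMedia({
                audio: false,
                video: { deviceId: { exact: device.deviceId } },
            });
            videoElems[videoIndx++].srcObject = stream;
        } catch (err) {
            console.log(`${err} ${device.label}`);
        }
    }
}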
I’ve created a minimal WebRTC test site that is able to request the user’s webcam/audio stream, to record it, and to playback the recording after it has been stopped.
Demo: https://output.jsbin.com/tabosipefo/
Edit1: https://jsbin.com/tabosipefo/edit?html,console,output
Since this all happens within one Promise, navigator.mediaDevices.getUserMedia(), I was wondering if it is actually possible to detect an on-going stream and to (a) record it, and (b) stop and save it.¹
¹ WebRTC does not work in JSBin when in edit view, for some reason...
If you use no framework and want to use vanilla JS, your best bet is to tack the stream object onto the global window object.
Preview stream
const showWebcamStream = () => {
    navigator.mediaDevices
        .getUserMedia({ audio: true, video: true })
        .then(stream => {
            window.localStream = stream; // ⭠ tack it to the window object
            // grab the <video> object
            const video = document.querySelector("#video-preview");
            video.srcObject = stream;
            // Display stream
            video.onloadedmetadata = () => video.play();
        })
        .catch(err => console.log(err.name, err.message));
};
Now the video will be displayed within the video element (id: #video-preview).
Stop Stream(s)
const hideWebcamStream = () => localStream.getTracks().forEach(track => track.stop());
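One detail the one-liner skips (my addition, not part of the original snippet): stopping the tracks freezes the preview on the last frame, so the helper could be extended to detach the stream as well:

const hideWebcamStream = () => {
    localStream.getTracks().forEach(track => track.stop());
    document.querySelector("#video-preview").srcObject = null; // clear the frozen last frame
};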
You should put the mediaRecorder in the window object in order to stop it later.
Record Stream
const startWebcamRecorder = () => {
    // check if localStream is in window and if it is active
    if ("localStream" in window && localStream.active) {
        // save the mediaRecorder also to window in order to stop it independently
        window.mediaRecorder = new MediaRecorder(localStream);
        window.dataChunks = [];
        mediaRecorder.start();
        console.log(mediaRecorder.state);
        mediaRecorder.ondataavailable = e => dataChunks.push(e.data);
    }
};
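A side note (my addition): if you want ondataavailable to fire periodically rather than only once when recording stops, MediaRecorder.start() accepts a timeslice in milliseconds, e.g. mediaRecorder.start(1000); for roughly one chunk per second.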
Stop Recording and Preview the recording
You need another video element (#video-playback) to play back your recording.
const stopWebcamRecorder = () => {
    if ("mediaRecorder" in window && mediaRecorder.state === "recording") {
        mediaRecorder.stop();
        console.log(mediaRecorder.state);
        mediaRecorder.onstop = () => {
            let blob = new Blob(dataChunks, { type: "video/mp4" });
            dataChunks = [];
            let videoURL = window.URL.createObjectURL(blob);
            const videoPlayback = document.getElementById("video-playback");
            videoPlayback.src = videoURL;
        };
    }
};
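Wired together (the button IDs below are my assumption, not from the demo), the four helpers might be hooked up like this:

document.getElementById("btn-preview").addEventListener("click", showWebcamStream);
document.getElementById("btn-hide").addEventListener("click", hideWebcamStream);
document.getElementById("btn-record").addEventListener("click", startWebcamRecorder);
document.getElementById("btn-stop").addEventListener("click", stopWebcamRecorder);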
I had to write a program for facial recognition in JavaScript, for which I used the opencv4nodejs API, since there are NOT many working examples. Now I somehow want to record and save the stream (for saving on the client side or uploading to the server) along with the audio. This is where I am stuck. Any help is appreciated.
In simple words, I need to use the webcam input for multiple purposes: one, for facial recognition, and two, to somehow save it; the latter is what I'm unable to do. Also, in the worst case, if it's not possible, instead of recording and saving the webcam video I could also save the complete screen recording. Please answer if there's a workaround for this.
Below is what I tried to do, but it doesn't work for obvious reasons.
$(document).ready(function () {
    run1()
})

let chunks = []

// run1() for uploading the model and for the facecam
async function run1() {
    const MODELS = "/models";
    await faceapi.loadSsdMobilenetv1Model(MODELS)
    await faceapi.loadFaceLandmarkModel(MODELS)
    await faceapi.loadFaceRecognitionModel(MODELS)
    var _stream
    // Accessing the user webcam
    const videoEl = document.getElementById('inputVideo')
    navigator.mediaDevices.getUserMedia({
        video: true,
        audio: true
    }).then(
        (stream) => {
            _stream = stream
            recorder = new MediaRecorder(_stream);
            recorder.ondataavailable = (e) => {
                chunks.push(e.data);
                console.log(chunks, i);
                if (i == 20) makeLink(); // Trying to make a link from the blob for some i == 20
            };
            videoEl.srcObject = stream
        },
        (err) => {
            console.error(err)
        }
    )
}

// run2() main recognition code and training
async function run2() {
    // wait for the results of mtcnn
    const input = document.getElementById('inputVideo')
    const mtcnnResults = await faceapi.ssdMobilenetv1(input)
    // Detect all the faces in the webcam
    const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()
    // Training the algorithm with the given data of the current student
    const labeledFaceDescriptors = await Promise.all(
        CurrentStudent.map(
            async function (label) {
                // Training the algorithm with the current students
                for (let i = 1; i <= 10; i++) {
                    // console.log(label);
                    const imgUrl = `http://localhost:5500/StudentData/${label}/${i}.jpg`
                    const img = await faceapi.fetchImage(imgUrl)
                    // detect the face with the highest score in the image and compute its landmarks and face descriptor
                    const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
                    if (!fullFaceDescription) {
                        throw new Error(`no faces detected for ${label}`)
                    }
                    const faceDescriptors = [fullFaceDescription.descriptor]
                    return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
                }
            }
        )
    )
    const maxDescriptorDistance = 0.65
    const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
    const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))
    i++;
}

// I somehow want this to work
function makeLink() {
    alert("ML")
    console.log("IN MAKE LINK");
    let blob = new Blob(chunks, {
            type: media.type
        }),
        url = URL.createObjectURL(blob),
        li = document.createElement('li'),
        mt = document.createElement(media.tag),
        hf = document.createElement('a');
    mt.controls = true;
    mt.src = url;
    hf.href = url;
    hf.download = `${counter++}${media.ext}`;
    hf.innerHTML = `download ${hf.download}`;
    li.appendChild(mt);
    li.appendChild(hf);
    ul.appendChild(li);
}

// onPlay(video) function
async function onPlay(videoEl) {
    run2()
    setTimeout(() => onPlay(videoEl), 50)
}
I'm not familiar with JavaScript. But in general only one program may communicate with the camera. You will probably need to write a server which will read the data from the camera. Then the server will send the data to your facial recognition, recording, etc.
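That said, within a single browser page one getUserMedia stream can feed several consumers at once. A minimal sketch of recording the same stream the recognition video element uses (the inputVideo ID comes from the question; everything else here is my assumption):

// One stream, two consumers: the <video> feeds face-api, the MediaRecorder saves the footage.
navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then(stream => {
    document.getElementById('inputVideo').srcObject = stream // recognition reads from here
    const recorder = new MediaRecorder(stream)               // recording taps the same stream
    const recordedChunks = []
    recorder.ondataavailable = e => recordedChunks.push(e.data)
    recorder.onstop = () => {
        const url = URL.createObjectURL(new Blob(recordedChunks, { type: recorder.mimeType }))
        const a = document.createElement('a')
        a.href = url
        a.download = 'recording.webm' // assumed container; most browsers record WebM by default
        a.click() // trigger a client-side download of the recording
    }
    recorder.start()
})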
I am trying to figure out how to change the microphone or webcam while you are in a video chat with someone.
I have been trying for a few days now and nothing works.
I was following this example, but it seems it is much harder to achieve the change while someone is already connected.
The issues I have: if I change the mic, the sound is lost or the mic doesn't react at all, and I cannot change it back to the default.
A similar thing happens if I change the webcam: the stream hangs and the last frame stays frozen.
I get no error message; in fact it tells me that the changes were successful.
Changing the webcam/mic WORKS before the call is established.
Here is the relevant code block. Everywhere I read, the advice is to just create new constraints and pass the desired deviceId for the audio/video stream:
function ChangeDevice() {
    if (localStream) {
        localStream.getTracks().forEach(track => {
            track.stop();
        });
    }
    var audioSource = audioInputSelect.value;
    var videoSource = videoSelect.value;
    console.log(videoSource);
    console.log(audioSource);
    const newConstraints = {
        audio: { deviceId: audioSource ? { exact: audioSource } : undefined },
        video: { deviceId: videoSource ? { exact: videoSource } : undefined }
    };
    navigator.mediaDevices.getUserMedia(newConstraints).then(gotStream).then(gotDevices).catch(handleError);
}
function gotStream(stream) {
    console.log('Adding local stream.');
    localStream = stream;
    localVideo.srcObject = stream;
    sendMessage(['got user media', room]);
    if (isInitiator) {
        maybeStart();
    }
    return navigator.mediaDevices.enumerateDevices(); // I added this
}
I think these two are the relevant functions. ChangeDevice is called when I select a new device from a dropdown; the IDs are correct.
Here is the whole code I use:
pastebin.com/6JrK4jJD
Luckily replaceTrack seems to work now on all browsers, so there is no need to renegotiate.
I had to edit my gotStream function like this:
function gotStream(stream) {
    // If already started
    // Need this if webcam or mic changes
    if (isStarted) {
        var videoTrack = stream.getVideoTracks()[0];
        var audioTrack = stream.getAudioTracks()[0];
        var sender = pc.getSenders().find(function (s) {
            return s.track.kind == videoTrack.kind;
        });
        var sender2 = pc.getSenders().find(function (s) {
            return s.track.kind == audioTrack.kind;
        });
        console.log('found sender:', sender);
        sender.replaceTrack(videoTrack);
        sender2.replaceTrack(audioTrack);
        localStream = stream;
        localVideo.srcObject = stream;
    } else {
        console.log('Adding local stream.');
        localStream = stream;
        localVideo.srcObject = stream;
        sendMessage(['got user media', room]);
        if (isInitiator) {
            maybeStart();
        }
    }
    return navigator.mediaDevices.enumerateDevices(); // I added this
}
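The same idea, condensed into a reusable helper (a sketch on my part; pc is assumed to be the RTCPeerConnection from the rest of the code):

// Swap each track of a freshly acquired stream into the sender of the matching kind.
async function replaceSenderTracks(pc, stream) {
    for (const track of stream.getTracks()) {
        const sender = pc.getSenders().find(s => s.track && s.track.kind === track.kind);
        if (sender) {
            await sender.replaceTrack(track); // no renegotiation required
        }
    }
}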