I want the browser to ask for permission to use the mic.
It asks for permission on PC but doesn't on mobile.
Can someone tell me what I did wrong?
navigator.mediaDevices.getUserMedia({ audio: true })
  .then(stream => {
    var mediaRecorder = new MediaRecorder(stream)
    mediaRecorder.start()
    var mediaChunks = []
    mediaRecorder.addEventListener("dataavailable", function (e) {
      mediaChunks.push(e.data)
    })
    mediaRecorder.addEventListener("stop", function () {
      var mediaBlob = new Blob(mediaChunks)
      var url = URL.createObjectURL(mediaBlob)
      var audio = document.createElement("AUDIO")
      audio.src = url
      audio.controls = true
      //audio.classList.add("ay")
      audio.setAttribute('id', 'ay')
      big.appendChild(audio)
    })
    setTimeout(function () {
      mediaRecorder.stop()
      alert("stopped")
    }, 3000)
  })
I am building a video conferencing web application using WebRTC and I have implemented features for toggling the camera, microphone, and screen sharing. The camera and screen sharing features are working as expected, but I am having an issue with the microphone button.
The issue is that after using screen sharing and then stopping it, the microphone on/off button is not working properly. I am getting an error in the console saying
"Cannot read properties of undefined (reading 'enabled')".
Before using screen sharing, the microphone button works fine.
Here's my current code for handling the buttons:
let screenStream = null;
let localStream = null;
let audioTrack = null;
let pc = null;
// Toggle screen sharing on/off
document.getElementById("share-screen-btn").addEventListener("click", async () => {
try {
const localVideo = document.getElementById("localVideo");
const displayMediaOptions = {
video: true,
audio: true,
};
if (!screenStream) {
screenStream = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
const videoTracks = screenStream.getVideoTracks();
await pc.getSenders().find(sender => sender.track.kind === 'video').replaceTrack(videoTracks[0], videoTracks[0].clone());
localVideo.srcObject = screenStream;
document.getElementById("share-screen-btn").classList.remove("btn-danger");
document.getElementById("share-screen-btn").classList.add("btn-primary");
// Disable audio track from localStream
if (localStream) {
audioTrack = localStream.getAudioTracks()[0];
audioTrack.enabled = false;
}
} else {
const localVideoStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
const sender = pc.getSenders().find(sender => sender.track.kind === 'video');
const localVideoTrack = localVideoStream.getVideoTracks()[0];
const localAudioTrack = localVideoStream.getAudioTracks()[0];
const localStream = new MediaStream([localVideoTrack, localAudioTrack]);
await sender.replaceTrack(localVideoTrack);
localVideo.srcObject = localStream;
document.getElementById("share-screen-btn").classList.remove("btn-primary");
document.getElementById("share-screen-btn").classList.add("btn-danger");
screenStream.getTracks().forEach(track => track.stop());
screenStream = null;
// Set audioTrack from localAudioTrack
audioTrack = localAudioTrack;
}
} catch (e) {
console.error("Error sharing screen: ", e);
}
})
// Toggle microphone on/off
document.getElementById("mute-audio-btn").addEventListener("click", () => {
let localStream = document.getElementById("localVideo").srcObject;
if (localStream) {
let audioTrack = localStream.getAudioTracks()[0];
let enabled = audioTrack.enabled;
if (enabled) {
audioTrack.enabled = false;
document.getElementById("mute-audio-btn").innerHTML = '<i class="fa-solid fa-microphone-slash"></i>'
} else {
audioTrack.enabled = true;
document.getElementById("mute-audio-btn").innerHTML = '<i class="fa-solid fa-microphone"></i>'
}
}
})
// Toggle camera on/off
document.getElementById("mute-video-btn").addEventListener("click", () => {
let localStream = document.getElementById("localVideo").srcObject;
if (localStream) {
let videoTrack = localStream.getVideoTracks()[0];
let enabled = videoTrack.enabled;
if (enabled) {
videoTrack.enabled = false;
document.getElementById("mute-video-btn").innerHTML = '<i class="fa fa-video-slash"></i>';
} else {
videoTrack.enabled = true;
document.getElementById("mute-video-btn").innerHTML = '<i class="fa fa-video"></i>';
}
}
})
If I see it correctly, audioTrack or videoTrack is undefined at the moment this happens.
Try to console.log() the arrays returned by localStream.getAudioTracks() or screenStream.getVideoTracks() and work your way up the chain from there.
It also seems that to use audio while sharing the screen you are supposed to use addTrack; have a look here:
Is it possible broadcast audio with screensharing with WebRTC
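A minimal sketch of both suggestions, assuming the pc, screenStream, and element ids from the question (illustrative only, not the asker's exact fix):

// Guard the mute button against a stream that has no audio track
// (the likely source of the "reading 'enabled'" error).
document.getElementById("mute-audio-btn").addEventListener("click", () => {
  const stream = document.getElementById("localVideo").srcObject;
  console.log("audio tracks:", stream ? stream.getAudioTracks() : stream);
  const track = stream && stream.getAudioTracks()[0];
  if (!track) return; // e.g. the screen was shared without audio
  track.enabled = !track.enabled;
});

// To send the screen share's audio to the remote peer, add the track to the
// connection instead of relying on the video replaceTrack call.
const screenAudio = screenStream.getAudioTracks()[0];
if (screenAudio) {
  pc.addTrack(screenAudio, screenStream); // note: this triggers renegotiation
}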
I'm new to Django. I'm trying to record a video in the browser and then automatically save it to the server as an mp4 file. I managed to do it, but the videos are sent as a blob and saved in the database. But I would need an mp4 file on the server. For recording I'm using MediaRecorder. BTW this only needs to work on desktop, not on mobile.
This is my JS code:
const videoElem = document.getElementById('stream-elem')
var startBtn = document.getElementById('start-stream')
var endBtn = document.getElementById('stop-media')
console.log("recorder");
var recorder;
const settings = {
  video: true,
  audio: true
}
startBtn.addEventListener('click', function (e) {
  console.log("StartStream button is clicked");
  navigator.mediaDevices.getUserMedia(settings).then((stream) => {
    console.log(stream);
    videoElem.srcObject = stream
    recorder = new MediaRecorder(stream)
    console.log(recorder);
    recorder.start();
    const blobContainer = [];
    recorder.ondataavailable = (e) => {
      blobContainer.push(e.data)
    }
    recorder.onerror = (e) => {
      return console.log(e.error || new Error(e.name));
    }
    recorder.onstop = (e) => {
      console.log(window.URL.createObjectURL(new Blob(blobContainer)));
      var newVideoEl = document.createElement('video')
      newVideoEl.height = '400'
      newVideoEl.width = '600'
      newVideoEl.autoplay = true
      newVideoEl.controls = true
      newVideoEl.innerHTML = `<source src="${window.URL.createObjectURL(new Blob(blobContainer))}"
        type="video/mp4">`
      document.body.removeChild(videoElem)
      document.body.insertBefore(newVideoEl, startBtn);
      var formdata = new FormData();
      formdata.append('blobFile', new Blob(blobContainer));
      fetch('/upload', {
        method: 'POST',
        body: formdata
      }).then(() => {
        alert('streamed video file uploaded')
      })
    }
  })
})
endBtn.addEventListener('click', function (e) {
  videoElem.pause();
  recorder.stop();
})
And this is my view function:
@csrf_exempt
def upload(request):
    video = request.FILES['blobFile']
    video_upload = VideoUpload()
    video_upload.video = video
    video_upload.name = request.user.username
    video_upload.save()
    return HttpResponseRedirect('/')
With these I can record the video and save it in the DB as a blob. So how can I save an mp4 file to the server or convert the blob into an mp4 file? What I'm trying to achieve is a transcription of the videos, but I can't pass the blob to something like Whisper; I need a file for that.
Any ideas will be greatly appreciated. Thanks.
I tried saving a video file to the server, but what got saved was a blob.
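A minimal sketch on the browser side, assuming the blobContainer and /upload endpoint from the question: MediaRecorder usually produces WebM rather than MP4, so giving the blob an explicit type and a filename at least leaves a recognizable, playable file on the server (converting it to mp4 would still be a separate server-side step, e.g. with ffmpeg):

// Sketch only: most browsers record WebM with MediaRecorder, so label the upload accordingly.
const recordedBlob = new Blob(blobContainer, { type: 'video/webm' });
const formdata = new FormData();
// The third argument gives Django a real filename for request.FILES['blobFile'].
formdata.append('blobFile', recordedBlob, 'recording.webm');
fetch('/upload', { method: 'POST', body: formdata })
  .then(() => alert('streamed video file uploaded'));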
I am pretty sure I did everything correctly, but when I try to play or download the file, nothing plays. I am using the Web Audio API to record audio from the microphone to a WAV format. I am using this library to create the .wav file. It seems like nothing is being encoded.
navigator.mediaDevices.getUserMedia({
  audio: true, video: false
})
.then((stream) => {
  var data
  context = new AudioContext()
  var source = context.createMediaStreamSource(stream)
  var scriptNode = context.createScriptProcessor(8192, 1, 1)
  source.connect(scriptNode)
  scriptNode.connect(context.destination)
  encoder = new WavAudioEncoder(16000, 1)
  scriptNode.onaudioprocess = function (e) {
    data = e.inputBuffer.getChannelData('0')
    console.log(data)
    encoder.encode(data)
  }
  $('#stop').click(() => {
    source.disconnect()
    scriptNode.disconnect()
    blob = encoder.finish()
    console.log(blob)
    url = window.URL.createObjectURL(blob)
    // audio source
    $('#player').attr('src', url)
    // audio control
    $("#pw")[0].load()
  })
})
I figured it out! To help anyone who needs to do the same thing: it uses the Web Audio API and this JavaScript library.
navigator.mediaDevices.getUserMedia({
  audio: true, video: false
})
.then((stream) => {
  context = new AudioContext()
  var source = context.createMediaStreamSource(stream)
  var rec = new Recorder(source)
  rec.record()
  $('#stop').click(() => {
    rec.stop()
    blob = rec.exportWAV(somefunction) // exportWAV() returns your file
  })
})
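For reference, a minimal sketch of what that callback might look like, assuming the library is the common Recorder.js (somefunction is the asker's placeholder and #player is the audio element from the earlier snippet): exportWAV hands the WAV blob to the callback rather than returning it.

// Hypothetical callback for exportWAV: it receives the finished WAV blob.
rec.exportWAV(function (blob) {
  var url = window.URL.createObjectURL(blob)
  $('#player').attr('src', url) // same #player element as in the question above
})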
Use RecordRTC for recording video and audio. I used it in my project and it's working well. Here is the code to record audio using recordrtc.org:
startRecording(event) { // call this to start recording the Audio( or video or Both)
  this.recording = true;
  let mediaConstraints = {
    audio: true
  };
  // Older browsers might not implement mediaDevices at all, so we set an empty object first
  if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
  }
  // Some browsers partially implement mediaDevices. We can't just assign an object
  // with getUserMedia as it would overwrite existing properties.
  // Here, we will just add the getUserMedia property if it's missing.
  if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function (constraints) {
      // First get ahold of the legacy getUserMedia, if present
      var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
      // Some browsers just don't implement it - return a rejected promise with an error
      // to keep a consistent interface
      if (!getUserMedia) {
        return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
      }
      // Otherwise, wrap the call to the old navigator.getUserMedia with a Promise
      return new Promise(function (resolve, reject) {
        getUserMedia.call(navigator, constraints, resolve, reject);
      });
    }
  }
  navigator.mediaDevices.getUserMedia(mediaConstraints)
    .then(successCallback.bind(this), errorCallback.bind(this));
}
successCallback(stream: MediaStream) {
  var options = {
    type: 'audio'
  };
  this.stream = stream;
  this.recordRTC = RecordRTC(stream, options);
  this.recordRTC.startRecording();
}

errorCallback(stream: MediaStream) {
  console.log(stream);
}

stopRecording() { // call this to stop recording
  this.recording = false;
  this.converting = true;
  let recordRTC = this.recordRTC;
  if (!recordRTC) return;
  recordRTC.stopRecording(this.processAudio.bind(this));
  this.stream.getAudioTracks().forEach(track => track.stop());
}

processAudio(audioVideoWebMURL) {
  let recordRTC = this.recordRTC;
  var recordedBlob = recordRTC.getBlob(); // you can save the recorded media data in various formats, refer the link below.
  console.log(recordedBlob)
  this.recordRTC.save('audiorecording.wav');
  let base64Data = '';
  this.recordRTC.getDataURL((dataURL) => {
    base64Data = dataURL.split('base64,')[1];
    console.log(RecordRTC.getFromDisk('audio', function (dataURL, type) {
      type == 'audio'
    }));
    console.log(dataURL);
  })
}
Note that you cannot record audio/video from a live site in Google Chrome unless your site is served over HTTPS.
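A short follow-up usage sketch, assuming a hypothetical /api/recordings endpoint (not part of RecordRTC): the recordedBlob obtained in processAudio can be uploaded with plain fetch and FormData.

// Hedged sketch: upload the blob returned by recordRTC.getBlob().
// '/api/recordings' is a made-up endpoint for illustration.
uploadRecording(recordedBlob) {
  const form = new FormData();
  form.append('audio', recordedBlob, 'audiorecording.wav');
  return fetch('/api/recordings', { method: 'POST', body: form });
}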
I’ve created a minimal WebRTC test site that is able to request the user’s webcam/audio stream, to record it, and to play back the recording after it has been stopped.
Demo: https://output.jsbin.com/tabosipefo/
Edit1: https://jsbin.com/tabosipefo/edit?html,console,output
Since this all happens within one navigator.mediaDevices.getUserMedia() Promise, I was wondering if it is actually possible to detect an on-going stream and to (a) record it, and (b) stop and save it.
1 WebRTC does not work in jsbin when in edit view for some reason...
If you use no framework and want to use vanilla JS, your best bet is to tack the stream object onto the global window object.
Preview stream
const showWebcamStream = () => {
  navigator.mediaDevices
    .getUserMedia({ audio: true, video: true })
    .then(stream => {
      window.localStream = stream; // ⭠ tack it to the window object
      // grab the <video> object
      const video = document.querySelector("#video-preview");
      video.srcObject = stream;
      // Display stream
      video.onloadedmetadata = () => video.play();
    })
    .catch(err => console.log(err.name, err.message));
};
Now the video will be displayed within the video element (id: #video-preview).
Stop Stream(s)
const hideWebcamStream = () => localStream.getTracks().forEach(track => track.stop());
You should put the mediaRecorder in the window object in order to stop it later.
Record Stream
const startWebcamRecorder = () => {
  // check if localStream is in window and if it is active
  if ("localStream" in window && localStream.active) {
    // save the mediaRecorder also to the window in order to independently stop it
    window.mediaRecorder = new MediaRecorder(localStream);
    window.dataChunks = [];
    mediaRecorder.start();
    console.log(mediaRecorder.state);
    mediaRecorder.ondataavailable = e => dataChunks.push(e.data);
  }
};
Stop Recording and Preview the recording
You need another video element (id: #video-playback) to play back your recording.
const stopWebcamRecorder = () => {
  if ("mediaRecorder" in window && mediaRecorder.state === "recording") {
    mediaRecorder.stop();
    console.log(mediaRecorder.state);
    mediaRecorder.onstop = () => {
      let blob = new Blob(dataChunks, { type: "video/mp4;" });
      dataChunks = [];
      let videoURL = window.URL.createObjectURL(blob);
      const videoPlayback = document.getElementById("video-playback");
      videoPlayback.src = videoURL;
    };
  }
};
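A short usage sketch to wire these helpers together, assuming four hypothetical buttons in the page markup (the ids below are illustrative, not part of the demo):

// Illustrative button ids; any existing UI elements could call the same helpers.
document.getElementById("btn-preview").addEventListener("click", showWebcamStream);
document.getElementById("btn-record").addEventListener("click", startWebcamRecorder);
document.getElementById("btn-stop").addEventListener("click", stopWebcamRecorder);
document.getElementById("btn-hide").addEventListener("click", hideWebcamStream);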
I had to write a program for facial recognition in JavaScript, for which I used the opencv4nodejs API, since there aren't many working examples. Now I also want to record and save the stream (for saving on the client side or uploading to the server) along with the audio. This is where I am stuck. Any help is appreciated.
In simple words, I need to use the webcam input for multiple purposes: one, for facial recognition, and two, to somehow save it; the latter is what I'm unable to do. In the worst case, if that's not possible, instead of recording and saving the webcam video I could also save a complete screen recording. Please answer if there's a workaround for this.
Below is what I tried to do, but it doesn't work for obvious reasons.
$(document).ready(function () {
  run1()
})

let chunks = []

// run1() for uploading model and for facecam
async function run1() {
  const MODELS = "/models";
  await faceapi.loadSsdMobilenetv1Model(MODELS)
  await faceapi.loadFaceLandmarkModel(MODELS)
  await faceapi.loadFaceRecognitionModel(MODELS)
  var _stream
  // Accessing the user webcam
  const videoEl = document.getElementById('inputVideo')
  navigator.mediaDevices.getUserMedia({
    video: true,
    audio: true
  }).then(
    (stream) => {
      _stream = stream
      recorder = new MediaRecorder(_stream);
      recorder.ondataavailable = (e) => {
        chunks.push(e.data);
        console.log(chunks, i);
        if (i == 20) makeLink(); // Trying to make a link from the blob for some i == 20
      };
      videoEl.srcObject = stream
    },
    (err) => {
      console.error(err)
    }
  )
}

// run2() main recognition code and training
async function run2() {
  // wait for the results of mtcnn
  const input = document.getElementById('inputVideo')
  const mtcnnResults = await faceapi.ssdMobilenetv1(input)
  // Detect all the faces in the webcam
  const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()
  // Training the algorithm with given data of the current student
  const labeledFaceDescriptors = await Promise.all(
    CurrentStudent.map(
      async function (label) {
        // Training the algorithm with the current students
        for (let i = 1; i <= 10; i++) {
          // console.log(label);
          const imgUrl = `http://localhost:5500/StudentData/${label}/${i}.jpg`
          const img = await faceapi.fetchImage(imgUrl)
          // detect the face with the highest score in the image and compute its landmarks and face descriptor
          const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
          if (!fullFaceDescription) {
            throw new Error(`no faces detected for ${label}`)
          }
          const faceDescriptors = [fullFaceDescription.descriptor]
          return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
        }
      }
    )
  )
  const maxDescriptorDistance = 0.65
  const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
  const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))
  i++;
}

// I somehow want this to work
function makeLink() {
  alert("ML")
  console.log("IN MAKE LINK");
  let blob = new Blob(chunks, {
      type: media.type
    }),
    url = URL.createObjectURL(blob),
    li = document.createElement('li'),
    mt = document.createElement(media.tag),
    hf = document.createElement('a');
  mt.controls = true;
  mt.src = url;
  hf.href = url;
  hf.download = `${counter++}${media.ext}`;
  hf.innerHTML = `download ${hf.download}`;
  li.appendChild(mt);
  li.appendChild(hf);
  ul.appendChild(li);
}

// onPlay(video) function
async function onPlay(videoEl) {
  run2()
  setTimeout(() => onPlay(videoEl), 50)
}
I'm not familiar with JavaScript. But in general only one program may communicate with the camera. You will probably need to write a server which will read the data from the camera. Then the server will send the data to your facial recognition, recording, etc.