Screen Recording Portion of Screen with JavaScript

I am attempting to implement a screen recorder in JavaScript that records a video feed rather than the entire screen. My question is whether this is possible using getDisplayMedia, or if there is a library to achieve it. This is my current implementation, which prompts for and records the entire screen:
const handleRecord = async () => {
    let stream = await navigator.mediaDevices.getDisplayMedia({
        video: true
    })
    // Prefer VP9, but fall back to plain WebM for better browser support
    const mime = MediaRecorder.isTypeSupported("video/webm; codecs=vp9")
        ? "video/webm; codecs=vp9"
        : "video/webm"
    let mediaRecorder = new MediaRecorder(stream, { mimeType: mime })
    let chunks = []
    mediaRecorder.addEventListener('dataavailable', ({ data }) => chunks.push(data))
    mediaRecorder.addEventListener('stop', function () {
        let blob = new Blob(chunks, { type: chunks[0].type })
        let url = URL.createObjectURL(blob)
        let video = document.querySelector("#cameraFeed")
        video.src = url
        let a = document.createElement('a')
        a.href = url
        a.download = 'video.webm'
        a.click()
    })
    // The recorder has to be started manually
    mediaRecorder.start()
}
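For the portion-of-screen part: getDisplayMedia only captures whole surfaces (a screen, window, or tab), but a common workaround is to draw just the region you want onto a canvas and record the canvas stream instead. A hedged sketch, assuming a 640x480 crop taken from the top-left corner; the coordinates, frame rate, and function name are illustrative:

const cropAndRecord = async () => {
    const stream = await navigator.mediaDevices.getDisplayMedia({ video: true })
    const video = document.createElement('video')
    video.srcObject = stream
    await video.play()
    // draw only the desired region into a canvas
    const canvas = document.createElement('canvas')
    canvas.width = 640
    canvas.height = 480
    const ctx = canvas.getContext('2d')
    const draw = () => {
        // source rect (0, 0, 640, 480) -> destination rect (0, 0, 640, 480)
        ctx.drawImage(video, 0, 0, 640, 480, 0, 0, 640, 480)
        requestAnimationFrame(draw)
    }
    draw()
    // record the cropped canvas stream instead of the raw display stream
    return new MediaRecorder(canvas.captureStream(30), { mimeType: "video/webm" })
}

The MediaRecorder returned here can then be used with the same dataavailable/stop handlers as above.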

As far as I understand your problem, you want to record the user's camera. If yes, here you go:
let stream = await navigator.mediaDevices.getUserMedia({
    video: { facingMode: "user" },
})
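As a hedged sketch, that camera stream can drop straight into the recording logic from the question (reusing mime and the same event handlers defined there):

// Same flow as the question's code, with the camera stream as the source
let mediaRecorder = new MediaRecorder(stream, { mimeType: mime })
let chunks = []
mediaRecorder.addEventListener('dataavailable', ({ data }) => chunks.push(data))
mediaRecorder.start()
// calling mediaRecorder.stop() later triggers the same download logic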
If you run into any difficulty, feel free to ask!

Related

Javascript MediaRecorder audio recording corrupt

I am struggling to record audio in the browser and make it work properly on mobile as well as on desktop.
I am using MediaRecorder to start the recording, and I want to send it as a file to my Flask server through a form. However, what I receive is a corrupt file that sometimes plays on my desktop, but not on my mobile phone. I think it is connected to which mimeTypes are supported and how the blob gets converted.
Here is the JavaScript code:
function record_audio(){
    // state, audioChunks, seconds_rec and seconds_int are globals defined elsewhere
    if (state == "empty") {
        navigator.mediaDevices.getUserMedia({ audio: true })
            .then(stream => {
                mediaRecorder = new MediaRecorder(stream);
                mediaRecorder.start();
                state = "recording";
                document.getElementById('stop_btn').style.display = 'block';
                seconds_int = setInterval(
                    function () {
                        document.getElementById("record_btn").innerHTML = seconds_rec + " s";
                        seconds_rec += 1;
                    }, 1000);
                mediaRecorder.addEventListener("dataavailable", event => {
                    audioChunks.push(event.data);
                    if (mediaRecorder.state == 'inactive') makeLink();
                });
            });
    }
}
function makeLink(){
    const audioBlob = new Blob(audioChunks, { type: 'audio/mpeg' });
    const audioUrl = URL.createObjectURL(audioBlob);
    var sound = document.createElement('audio');
    sound.id = 'audio-player';
    sound.controls = 'controls';
    sound.src = audioUrl;
    console.log(audioBlob);
    sound.type = 'audio/mpeg';
    document.getElementById("audio-player-container").innerHTML = sound.outerHTML;
    let file = new File([audioBlob], "audio.mp3", { type: "audio/mpeg", lastModifiedDate: new Date() });
    let container = new DataTransfer();
    container.items.add(file);
    document.getElementById("uploadedFile").files = container.files;
};
Thanks for your help!
The audio that you recorded is most likely not of type 'audio/mpeg'. No browser supports that out of the box.
If you call new MediaRecorder(stream) without the optional second argument, the browser will pick the codec it likes best. You can use the mimeType property to find out which codec the browser chose. It can, for example, be used to construct the Blob.
const audioBlob = new Blob(
    audioChunks,
    {
        type: mediaRecorder.mimeType
    }
);
You would also need to use it in a similar way when creating the File, and you would probably also need to adapt your backend logic to handle files which aren't MP3s.
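A hedged sketch of that, assuming a WebM/Opus recording (hence the "audio.webm" name; in real code, derive the extension from mediaRecorder.mimeType):

let file = new File([audioBlob], "audio.webm", {
    type: mediaRecorder.mimeType,
    lastModified: Date.now() // the standard option is lastModified, not lastModifiedDate
});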

Javascript - Unable to save recorded screen using MediaRecorder API

I'm trying this code in the Chrome console to record the desktop or a selected tab of the browser.
(async () => {
    const displayOptions = {
        video: {
            cursor: "never"
        },
        audio: {
            echoCancellation: true,
            noiseSuppression: true,
            sampleRate: 44100
        }
    };
    const chunks = [];
    const stream = await navigator.mediaDevices.getDisplayMedia(displayOptions);
    const recorder = new MediaRecorder(stream);
    recorder.ondataavailable = e => chunks.push(e);
    recorder.start();
    recorder.onstop = e => {
        const blob = new Blob(chunks, { type: 'video/mp4' });
        const el = document.createElement('a');
        el.download = 'testvideo.mp4';
        el.href = URL.createObjectURL(blob);
        el.click();
    };
})();
When I run the script, it gives me the ability to select the source to capture. I've tried recording a few seconds, and when I stop the capture, it opens the download prompt. The problem is that the output file is empty, and QuickTime and VLC are unable to open it because its length is zero seconds. Is there something wrong in the code? How can I save the file as MP4 correctly? If I want to use the code inside a Node CLI script to target a browser tab, will it work, or do I need to use it in combination with Puppeteer?
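No answer was recorded for this question, but a likely culprit is that ondataavailable receives a BlobEvent, so the code above pushes event objects instead of Blob chunks; browsers also record WebM by default, not MP4. A hedged sketch of both fixes:

recorder.ondataavailable = e => chunks.push(e.data); // push the data, not the event
recorder.onstop = e => {
    // label the Blob with what the recorder actually produced (WebM by default)
    const blob = new Blob(chunks, { type: recorder.mimeType });
    const el = document.createElement('a');
    el.download = 'testvideo.webm';
    el.href = URL.createObjectURL(blob);
    el.click();
};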

Encode Audio Back From getChannelData()

I call getChannelData and perform some actions and remove values from the Float32Array.
How can I encode this data back into a form that can be saved?
const blob = new Blob(this.chunks, { type: audioType });
// generate audio url from blob
const audioContext = new (window.AudioContext ||
    window.webkitAudioContext)();
// reading the file with a FileReader wrapped in a promise
ReadFile(blob).then((arrayBuffer) => {
    audioContext.decodeAudioData(arrayBuffer).then((audioBuffer) => {
        const audioBufferSourceNode = audioContext.createBufferSource();
        const numChannels = audioBuffer.numberOfChannels;
        const leftChannelArray = audioBuffer.getChannelData(0);
        // audioBufferSourceNode.buffer = leftChannelArray;
        let rightChannelArray;
        if (numChannels > 1) {
            rightChannelArray = audioBuffer.getChannelData(1);
        }
        // we look on both sides for silence, delete those values and merge the channels
        const monoChannelTrimmed = trimSilence(leftChannelArray, rightChannelArray);
        // Now I want to turn monoChannelTrimmed into a usable audio file
    });
});
Turning this channel back into something usable is what I have been struggling with. I have tried suggestions from other questions in this area, such as Converting Float32Array to Uint8Array while Preserving IEEE 754 Representation, but nothing has worked. If anyone has suggestions, I would be very eager to try them.
You can probably use the MediaStream Recording API.
Here is a small snippet of how to use it, mostly taken from the example, but modified to use a WebAudio OscillatorNode as the source. You can replace that with an AudioBufferSourceNode that plays out your monoChannelTrimmed.
let c; // AudioContext
let s; // source node (an oscillator here)
let d; // MediaStreamAudioDestinationNode
let mediaRecorder;
let recordedChunks = [];

function handleDataAvailable(event) {
    console.log("data-available");
    if (event.data.size > 0) {
        recordedChunks.push(event.data);
        download();
    } else {
        // ...
    }
}

function download() {
    let blob = new Blob(recordedChunks, {
        type: "audio/webm"
    });
    let url = URL.createObjectURL(blob);
    let a = document.createElement("a");
    document.body.appendChild(a);
    a.style = "display: none";
    a.href = url;
    a.download = "test.webm";
    a.click();
    window.URL.revokeObjectURL(url);
}

setTimeout(event => {
    console.log("stopping");
    mediaRecorder.stop();
}, 9000);

function start() {
    console.log("start");
    c = new AudioContext();
    s = new OscillatorNode(c);
    d = new MediaStreamAudioDestinationNode(c);
    s.connect(d);
    mediaRecorder = new MediaRecorder(d.stream, {
        mimeType: "audio/webm"
    });
    mediaRecorder.ondataavailable = handleDataAvailable;
    s.start();
    mediaRecorder.start();
}
I tested this locally and it creates a test.webm file that plays a nice oscillator tone as expected. You'll probably want to tweak some things.
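A hedged sketch of that substitution, assuming monoChannelTrimmed (a Float32Array) and the recording's original sample rate are available:

function startFromBuffer(monoChannelTrimmed, sampleRate) {
    c = new AudioContext();
    // wrap the trimmed samples in a single-channel AudioBuffer
    const buffer = c.createBuffer(1, monoChannelTrimmed.length, sampleRate);
    buffer.copyToChannel(monoChannelTrimmed, 0);
    s = new AudioBufferSourceNode(c, { buffer });
    d = new MediaStreamAudioDestinationNode(c);
    s.connect(d);
    mediaRecorder = new MediaRecorder(d.stream, { mimeType: "audio/webm" });
    mediaRecorder.ondataavailable = handleDataAvailable;
    s.onended = () => mediaRecorder.stop(); // stop recording when playback ends
    s.start();
    mediaRecorder.start();
}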

How do I record AND download / upload the webcam stream on server (javascript) WHILE using the webcam input for facial recognition (opencv4nodejs)?

I had to write a program for facial recognition in JavaScript, for which I used the opencv4nodejs API, since there are not many working examples. Now I somehow want to record and save the stream (for saving on the client side or uploading to the server) along with the audio. This is where I am stuck. Any help is appreciated.
In simple words, I need to use the webcam input for multiple purposes: one, for facial recognition, and two, to somehow save it; the latter is what I'm unable to do. Also, in the worst case, if it's not possible, instead of recording and saving the webcam video I could also save the complete screen recording. Please answer if there's a workaround for this.
Below is what I tried to do, but it doesn't work for obvious reasons.
$(document).ready(function () {
    run1();
});

let chunks = [];
let i = 0; // frame counter, incremented in run2()

// run1() loads the models and starts the facecam
async function run1() {
    const MODELS = "/models";
    await faceapi.loadSsdMobilenetv1Model(MODELS);
    await faceapi.loadFaceLandmarkModel(MODELS);
    await faceapi.loadFaceRecognitionModel(MODELS);
    var _stream;
    // Accessing the user webcam
    const videoEl = document.getElementById('inputVideo');
    navigator.mediaDevices.getUserMedia({
        video: true,
        audio: true
    }).then(
        (stream) => {
            _stream = stream;
            recorder = new MediaRecorder(_stream);
            recorder.ondataavailable = (e) => {
                chunks.push(e.data);
                console.log(chunks, i);
                if (i == 20) makeLink(); // Trying to make a link from the blob once i == 20
            };
            videoEl.srcObject = stream;
        },
        (err) => {
            console.error(err);
        }
    );
}
// run2() main recognition code and training
async function run2() {
    // wait for the results of mtcnn
    const input = document.getElementById('inputVideo');
    const mtcnnResults = await faceapi.ssdMobilenetv1(input);
    // Detect all the faces in the webcam
    const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors();
    // Training the algorithm with the given data for the current student
    const labeledFaceDescriptors = await Promise.all(
        CurrentStudent.map(
            async function (label) {
                // Training the algorithm with the current students
                for (let i = 1; i <= 10; i++) {
                    const imgUrl = `http://localhost:5500/StudentData/${label}/${i}.jpg`;
                    const img = await faceapi.fetchImage(imgUrl);
                    // detect the face with the highest score in the image and compute its landmarks and face descriptor
                    const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor();
                    if (!fullFaceDescription) {
                        throw new Error(`no faces detected for ${label}`);
                    }
                    const faceDescriptors = [fullFaceDescription.descriptor];
                    return new faceapi.LabeledFaceDescriptors(label, faceDescriptors);
                }
            }
        )
    );
    const maxDescriptorDistance = 0.65;
    const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance);
    const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor));
    i++;
}
// I somehow want this to work
function makeLink() {
    alert("ML");
    console.log("IN MAKE LINK");
    // NOTE: `media`, `counter`, and `ul` are assumed to be defined elsewhere
    let blob = new Blob(chunks, {
            type: media.type
        }),
        url = URL.createObjectURL(blob),
        li = document.createElement('li'),
        mt = document.createElement(media.tag),
        hf = document.createElement('a');
    mt.controls = true;
    mt.src = url;
    hf.href = url;
    hf.download = `${counter++}${media.ext}`;
    hf.innerHTML = `download ${hf.download}`;
    li.appendChild(mt);
    li.appendChild(hf);
    ul.appendChild(li);
}
// onPlay(video) function
async function onPlay(videoEl) {
    run2();
    setTimeout(() => onPlay(videoEl), 50);
}
I'm not familiar with JavaScript, but in general only one program may communicate with the camera. You will probably need to write a server which reads the data from the camera; the server can then send the data to your facial recognition, recording, etc.
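That said, within a single page one getUserMedia stream can feed several consumers at once, which is roughly what the question's code attempts. A hedged sketch, assuming the same videoEl and reusing the download pattern from the other answers on this page:

navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then(stream => {
    videoEl.srcObject = stream;                  // consumer 1: face-api.js reads this element
    const recorder = new MediaRecorder(stream);  // consumer 2: recording
    const chunks = [];
    recorder.ondataavailable = e => chunks.push(e.data);
    recorder.onstop = () => {
        const blob = new Blob(chunks, { type: recorder.mimeType });
        // build a download link from the blob, as in makeLink() above
    };
    recorder.start();
});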

How can I store / download the recording from the Screen Capture web API?

I'm using the Screen Capture API and am trying to save the final capture to a video file (WebM, MP4, etc.). I have these two JavaScript functions:
async function startCapture() {
    try {
        videoElem.srcObject = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
    } catch (err) {
        console.error("Error: " + err);
    }
}

function stopCapture() {
    let tracks = videoElem.srcObject.getTracks();
    tracks.forEach(track => track.stop());
    videoElem.srcObject = null;
}
The video displays live fine when the capture is started, but I'm not sure how to actually store its contents. getDisplayMedia() returns a Promise that resolves to a MediaStream, which is assigned to videoElem.srcObject; tracks is an array of MediaStreamTrack objects. This is my first time doing any kind of web development, so I'm a bit lost!
async function startRecording() {
    stream = await navigator.mediaDevices.getDisplayMedia({
        video: true,
        audio: true
    });
    recorder = new MediaRecorder(stream);
    const chunks = [];
    recorder.ondataavailable = e => chunks.push(e.data);
    recorder.onstop = e => {
        const blob = new Blob(chunks, { type: chunks[0].type });
        console.log(blob);
        stream.getVideoTracks()[0].stop();
        const filename = "yourCustomFileName.webm";
        if (window.navigator.msSaveOrOpenBlob) {
            // legacy IE/Edge path
            window.navigator.msSaveOrOpenBlob(blob, filename);
        } else {
            var elem = window.document.createElement('a');
            elem.href = window.URL.createObjectURL(blob);
            elem.download = filename;
            document.body.appendChild(elem);
            elem.click();
            document.body.removeChild(elem);
        }
    };
    recorder.start();
}
startRecording(); // start the recording
// ... later ...
recorder.stop(); // end the recording; this fires the onstop handler above
This will save your recording as a .webm file.
Recording a media element on the MDN docs helped me a ton. Basically, instead of using getUserMedia(), we use getDisplayMedia().
Like with any MediaStream, you can record it using the MediaRecorder API.
