Django record and save mp4 - javascript

I'm new to Django. I'm trying to record a video in the browser, then automatically save it to the server as an mp4 file. I managed to do it, but the videos are sent as a blob and saved in the database. But I would need an mp4 file on the server. For recording I'm using MediaRecorder. BTW this only needs to work on desktop, not on mobile.
This is my js code:
// --- DOM handles for the live preview and the start/stop controls ---
const videoElem = document.getElementById('stream-elem')
var startBtn = document.getElementById('start-stream')
var endBtn = document.getElementById('stop-media')
console.log("recorder");
// Shared MediaRecorder instance; created on start, stopped by the end button.
var recorder;
// getUserMedia constraints: capture both camera video and microphone audio.
const settings = {
video: true,
audio: true
}
// Start button: open the camera/mic, show the live stream, and begin recording.
startBtn.addEventListener('click', function (e) {
console.log("StartStream button is clicked");
navigator.mediaDevices.getUserMedia(settings).then((stream) => {
console.log(stream);
videoElem.srcObject = stream
// No mimeType option is passed, so the browser picks its preferred
// container/codec. NOTE(review): that is typically WebM, not MP4 — see the
// type mismatch flagged below.
recorder = new MediaRecorder(stream)
console.log(recorder);
// start() without a timeslice: 'dataavailable' usually fires once, on stop().
recorder.start();
const blobContainer = [];
recorder.ondataavailable = (e) => {
blobContainer.push(e.data)
}
recorder.onerror = (e) => {
return console.log(e.error || new Error(e.name));
}
// When recording stops: replace the live preview with a playback element
// and upload the recorded bytes to the server.
recorder.onstop = (e) => {
console.log(window.URL.createObjectURL(new Blob(blobContainer)));
var newVideoEl = document.createElement('video')
newVideoEl.height = '400'
newVideoEl.width = '600'
newVideoEl.autoplay = true
newVideoEl.controls = true
// NOTE(review): the source is labelled video/mp4, but the Blob holds whatever
// container the recorder actually produced (no transcoding happens here) —
// check recorder.mimeType before treating the upload as a real .mp4 file.
newVideoEl.innerHTML = `<source src="${window.URL.createObjectURL(new Blob(blobContainer))}"
type="video/mp4">`
document.body.removeChild(videoElem)
document.body.insertBefore(newVideoEl, startBtn);
// Upload the raw recording as multipart form data under the key 'blobFile'
// (matched by request.FILES['blobFile'] in the Django view below).
var formdata = new FormData();
formdata.append('blobFile', new Blob(blobContainer));
fetch('/upload', {
method: 'POST',
body: formdata
}).then(()=>{
alert('streamed video file uploaded')
})
}
})
})
// Stop button: pause the preview and stop the recorder (fires onstop above).
// NOTE(review): the camera/mic tracks are never stop()ped anywhere in this
// script, so the device indicator stays on after recording ends.
endBtn.addEventListener('click', function (e) {
videoElem.pause();
recorder.stop();
})
And this is my view function:
# FIX: the paste had "#csrf_exempt", which Python treats as a comment; the
# decorator must be applied with "@" or the CSRF middleware will reject the
# browser's fetch() POST (which sends no CSRF token).
@csrf_exempt
def upload(request):
    """Persist the uploaded recording as a VideoUpload row.

    Expects a multipart POST with the recording under the 'blobFile' key,
    as appended to FormData by the front-end script. The bytes end up
    wherever VideoUpload.video's field/storage puts them.

    Raises KeyError (-> 500) if 'blobFile' is missing from the request.
    """
    video = request.FILES['blobFile']
    video_upload = VideoUpload()
    video_upload.video = video
    # NOTE(review): assumes the user is authenticated; for anonymous users
    # request.user.username is '' — confirm whether that is acceptable.
    video_upload.name = request.user.username
    video_upload.save()
    return HttpResponseRedirect('/')
With these I can record the video and save it in the DB as a blob. So how can I save an mp4 file to the server or convert the blob into an mp4 file? What I'm trying to achieve is to do a transcription of the videos. But I can't pass the blob to something like whisper. I need a file for that.
Any ideas will be greatly appreciated. Thanks
I tried saving a video file to the server. I saved a blob.

Related

Javascript MediaRecorder audio recording corrupt

I am struggling to record audio in the browser and make it work properly on mobile as well as on desktop.
I am using MediaRecorder to start the recording and I want to send it as a file to my Flask server through a form. However, what I receive is a corrupt file, that sometimes plays on my desktop, but not on my mobile phone. I think it is connected to different mimeTypes that are supported and how the blob gets converted.
Here is the JavaScript Code:
// Starts microphone recording when idle; relies on outer globals:
// state, mediaRecorder, audioChunks, seconds_rec, seconds_int, makeLink().
function record_audio(){
if(state == "empty"){
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
// No mimeType argument: the browser picks its own codec (see the answer
// below — it is almost certainly NOT audio/mpeg).
mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start();
state = "recording";
document.getElementById('stop_btn').style.display = 'block'
// Update the record button's label with an elapsed-seconds counter.
seconds_int = setInterval(
function () {
document.getElementById("record_btn").innerHTML = seconds_rec + " s";
seconds_rec += 1;
}, 1000);
// Collect chunks; once the recorder goes inactive (after stop()), build
// the player and the form file via makeLink().
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
if(mediaRecorder.state == 'inactive') makeLink();
});
// NOTE(review): as posted, the .then(...) callback and call are never
// closed — the snippet appears truncated.
}
}
// Builds an <audio> player for the recording, attaches it to the page, then
// stuffs the recording into the page's <input type="file"> (via DataTransfer)
// so it can be submitted through a normal form.
function makeLink(){
// NOTE(review): the chunks are wrapped as 'audio/mpeg', but no browser
// records MP3 natively — the bytes are whatever codec the browser chose
// (see mediaRecorder.mimeType). This mislabelling is the likely source of
// the "corrupt file" described above.
const audioBlob = new Blob(audioChunks, {type: 'audio/mpeg'});
const audioUrl = URL.createObjectURL(audioBlob);
var sound = document.createElement('audio');
sound.id = 'audio-player';
sound.controls = 'controls';
sound.src = audioUrl;
console.log(audioBlob)
sound.type = 'audio/mpeg';
document.getElementById("audio-player-container").innerHTML = sound.outerHTML;
// NOTE(review): the standard File constructor option is `lastModified`
// (a timestamp); `lastModifiedDate` is nonstandard — confirm against MDN.
let file = new File([audioBlob], "audio.mp3",{ type:"audio/mpeg",lastModifiedDate: new Date()});
// DataTransfer lets us programmatically populate a file input's .files.
let container = new DataTransfer();
container.items.add(file);
document.getElementById("uploadedFile").files = container.files;
};
Thanks for your help!
The audio that you recorded is most likely not of type 'audio/mpeg'. No browser supports that out of the box.
If you call new MediaRecorder(stream) without the optional second argument the browser will pick the codec it likes best. You can use the mimeType property to find out which codec is used by the browser. It can for example be used to construct the Blob.
// Label the Blob with the codec the recorder actually used, instead of
// hard-coding 'audio/mpeg'.
const audioBlob = new Blob(
audioChunks,
{
type: mediaRecorder.mimeType
}
);
You would also need to use it in a similar way when creating the File. And you probably also need to adapt your backend logic to handle files which aren't MP3s.

Screen Recording Portion of Screen with JavaScript

I am attempting to implement a screen recorder in JavaScript that records a video feed rather than the entire screen. My question is whether or not this is possible using getDisplayMedia or if there is a library to achieve this. This is my current implementation, which will ask and record the entire screen.
// Start a screen capture and download the result as video.webm when the
// recorder is stopped.
const handleRecord = async () => {
  // Prompt the user to pick a screen/window/tab to share.
  const captureStream = await navigator.mediaDevices.getDisplayMedia({
    video: true
  })
  // Prefer VP9 when supported; fall back to plain WebM for broader support.
  const mimeType = MediaRecorder.isTypeSupported("video/webm; codecs=vp9")
    ? "video/webm; codecs=vp9"
    : "video/webm"
  const recorder = new MediaRecorder(captureStream, { mimeType: mimeType })
  const parts = []
  recorder.ondataavailable = (event) => {
    parts.push(event.data)
  }
  recorder.onstop = () => {
    // Stitch the chunks together, show them in the feed, and trigger a download.
    const recording = new Blob(parts, { type: parts[0].type })
    const objectUrl = URL.createObjectURL(recording)
    const preview = document.querySelector("#cameraFeed")
    preview.src = objectUrl
    const anchor = document.createElement('a')
    anchor.href = objectUrl
    anchor.download = 'video.webm'
    anchor.click()
  }
  // Recording does not begin until start() is called explicitly.
  recorder.start()
}
As far as I understand your problem, you want to record the user's cam. If yes, here you go:
// Capture the user-facing camera instead of the screen.
let stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: "user" },
})
In case you feel any difficulty feel free to ask!

Javascript - Unable to save recorded screen using MediaRecorder API

I'm trying into chrome console this code to record the desktop or a selected tab of the browser.
// Self-invoking capture script: prompts for a display surface, records it,
// and downloads the recording when the recorder stops.
( async () => {
const displayOptions = {
video: {
cursor: "never"
},
audio: {
echoCancellation: true,
noiseSuppression: true,
sampleRate: 44100
}
}
const chunks = [];
const stream = await navigator.mediaDevices.getDisplayMedia(displayOptions);
const recorder = new MediaRecorder(stream);
// FIX: the original pushed the whole event object (e) instead of the
// recorded media (e.data), so the Blob contained no playable data and the
// downloaded file had zero length — which is exactly the symptom described.
recorder.ondataavailable = e => chunks.push(e.data);
recorder.start();
recorder.onstop = e => {
// NOTE(review): MediaRecorder picks its own container (typically WebM) when
// no mimeType is requested; labelling the Blob/file as mp4 only renames it,
// it does not transcode. Check recorder.mimeType before relying on .mp4.
const blob = new Blob(chunks, {type: 'video/mp4'});
const el = document.createElement('a');
el.download = 'testvideo.mp4';
el.href = URL.createObjectURL(blob);
el.click();
}
})();
When I run the script, it will give me the ability to select the source to capture. I've tried to record a few seconds and when I stop the capture, it will open the download prompt. The problem is that the output file is empty, and QuickTime or VLC are unable to open it because its length is zero seconds. Is there something wrong in the code? How can I save the file as mp4 correctly? If I want to use the code inside a Node CLI script to target a browser tab, will it work or do I need to use it in combination with puppeteer?

How do I record AND download / upload the webcam stream on server (javascript) WHILE using the webcam input for facial recognition (opencv4nodejs)?

I had to write a program for facial recognition in JavaScript, for which I used the opencv4nodejs API, since there are NOT many working examples; Now I somehow want to record and save the stream (for saving on the client-side or uploading on the server) along with the audio. This is where I am stuck. Any help is appreciated.
In simple words I need to use the Webcam input for multiple purposes , one for facial recognition and two to somehow save , latter is what i'm unable to do. Also in the worst case, If it's not possible Instead of recording and saving the webcam video I could also save the Complete Screen recording , Please Answer if there's a workaround to this.
Below is what i tried to do, But it doesn't work for obvious reasons.
// On DOM ready, load the models and start the webcam preview.
$(document).ready(function () {
run1()
})
// Recorded data chunks, shared between run1()'s handler and makeLink().
let chunks = []
// run1() for uploading model and for facecam
// Loads the face-api models, opens the webcam (video + audio), wires a
// MediaRecorder data handler, and shows the live stream in #inputVideo.
async function run1() {
const MODELS = "/models";
await faceapi.loadSsdMobilenetv1Model(MODELS)
await faceapi.loadFaceLandmarkModel(MODELS)
await faceapi.loadFaceRecognitionModel(MODELS)
var _stream
//Accessing the user webcam
const videoEl = document.getElementById('inputVideo')
navigator.mediaDevices.getUserMedia({
video: true,
audio: true
}).then(
(stream) => {
_stream = stream
// NOTE(review): `recorder` is assigned without declaration (implicit
// global), and recorder.start() is never called in this snippet, so no
// 'dataavailable' events will ever fire.
recorder = new MediaRecorder(_stream);
recorder.ondataavailable = (e) => {
chunks.push(e.data);
// NOTE(review): `i` is not declared anywhere in this snippet — it is only
// incremented in run2(), so this handler depends on a global `i`.
console.log(chunks, i);
if (i == 20) makeLink(); //Trying to make Link from the blob for some i==20
};
videoEl.srcObject = stream
},
(err) => {
console.error(err)
}
)
}
// run2() main recognition code and training
// Detects all faces in the webcam frame and matches them against descriptors
// built from each student's reference images.
async function run2() {
// wait for the results of mtcnn ,
const input = document.getElementById('inputVideo')
const mtcnnResults = await faceapi.ssdMobilenetv1(input)
// Detect All the faces in the webcam
const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()
// Training the algorithm with given data of the Current Student
const labeledFaceDescriptors = await Promise.all(
CurrentStudent.map(
async function (label) {
// Training the Algorithm with the current students
for (let i = 1; i <= 10; i++) {
// console.log(label);
const imgUrl = `http://localhost:5500/StudentData/${label}/${i}.jpg`
const img = await faceapi.fetchImage(imgUrl)
// detect the face with the highest score in the image and compute it's landmarks and face descriptor
const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
if (!fullFaceDescription) {
throw new Error(`no faces detected for ${label}`)
}
const faceDescriptors = [fullFaceDescription.descriptor]
// NOTE(review): this `return` exits the map callback on the FIRST loop
// iteration, so only 1.jpg of the intended 10 images is ever used.
return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
}
}
)
)
const maxDescriptorDistance = 0.65
const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))
// NOTE(review): `i` is not declared here; this increments the global that
// run1()'s dataavailable handler reads.
i++;
}
// I somehow want this to work
// Builds a playable media element plus a download link for the recorded
// chunks and appends both to a list.
// NOTE(review): `media` (.type/.tag/.ext), `counter` and `ul` are not defined
// anywhere in this snippet — as posted, this function throws a ReferenceError.
function makeLink() {
alert("ML")
console.log("IN MAKE LINK");
let blob = new Blob(chunks, {
type: media.type
}),
url = URL.createObjectURL(blob),
li = document.createElement('li'),
mt = document.createElement(media.tag),
hf = document.createElement('a');
mt.controls = true;
mt.src = url;
hf.href = url;
hf.download = `${counter++}${media.ext}`;
// NOTE(review): "donwload" is a typo in the user-visible link text.
hf.innerHTML = `donwload ${hf.download}`;
li.appendChild(mt);
li.appendChild(hf);
ul.appendChild(li);
}
// onPlay(video) function
// Runs one recognition pass and re-schedules itself roughly every 50 ms.
async function onPlay(videoEl) {
  // run2() is not awaited, so the next poll is scheduled immediately.
  run2();
  // Re-arm the poll.
  setTimeout(function () {
    onPlay(videoEl);
  }, 50);
}
I'm not familiar with JavaScript. But in general only one program may communicate with the camera. You will probably need to write a server which will read the data from the camera. Then the server will send the data to your facial recognition, recording, etc.

How can I store / download the recording from the Screen Capture web API?

I'm using the Screen Capture API and am trying to save the final capture to a video file (WebM, MP4, etc.). I have these two JavaScript functions:
// Ask the user to pick a display surface and pipe it into the preview element.
async function startCapture() {
  try {
    const capture = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
    videoElem.srcObject = capture;
  } catch(err) {
    console.error("Error: " + err);
  }
}
// Stop every track of the current capture and clear the preview element.
function stopCapture() {
  const currentStream = videoElem.srcObject;
  for (const track of currentStream.getTracks()) {
    track.stop();
  }
  videoElem.srcObject = null;
}
The video is displaying live fine when the capture is started, but I'm not sure how to actually store its contents. videoElem is a Promise that resolves to a MediaStream. tracks is an array of MediaStreamTrack objects. This is my first time doing any kind of web development, so I'm a bit lost!
// Record the selected screen (plus audio) with MediaRecorder and, when the
// recorder stops, save the capture via a download link (with a legacy
// msSaveBlob path for old Edge/IE).
// NOTE: `stream` and `recorder` are deliberately left undeclared here so the
// surrounding page can call recorder.stop() later, as in the original usage.
async function startRecording() {
  stream = await navigator.mediaDevices.getDisplayMedia({
    video: true,
    audio: true
  });
  recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = e => chunks.push(e.data);
  recorder.onstop = e => {
    const blob = new Blob(chunks, { type: chunks[0].type });
    console.log(blob);
    // FIX: stop every captured track, not just the first video track. The
    // original stopped only getVideoTracks()[0], which left the audio track
    // live and the browser's "sharing" indicator on after recording ended.
    stream.getTracks().forEach(track => track.stop());
    // const avoids leaking `filename` as an implicit global (the original
    // used a bare assignment).
    const filename = "yourCustomFileName";
    if(window.navigator.msSaveOrOpenBlob) {
      window.navigator.msSaveBlob(blob, filename);
    }
    else{
      var elem = window.document.createElement('a');
      elem.href = window.URL.createObjectURL(blob);
      elem.download = filename;
      document.body.appendChild(elem);
      elem.click();
      document.body.removeChild(elem);
    }
  };
  recorder.start();
}
startRecording(); //Start of the recording
-----------
recorder.stop() // End your recording by emitting this event
This will save your recording as a webm file
Recording a media element on the MDN docs helped me a ton. Basically, instead of using getUserMedia(), we use getDisplayMedia().
Like with any MediaStream, you can record it using the MediaRecorder API.

Categories