Save canvas data as mp4 - JavaScript

I have an animated canvas and I want to convert it into mp4. I am using MediaRecorder to capture the canvas stream and then converting the resulting Blob. I learned that MediaRecorder does not allow recording directly to mp4, so I am forced to capture the canvas as webm. Here is what I have tried:
<canvas id="canvas"></canvas>
// (this runs inside a function, hence the `return`)
var recordedChunks = [];
var time = 0;
var canvas = document.getElementById("canvas");
return new Promise(function (res, rej) {
    var stream = canvas.captureStream(60);
    mediaRecorder = new MediaRecorder(stream, {
        mimeType: "video/webm; codecs=vp9"
    });
    mediaRecorder.start(time);
    mediaRecorder.ondataavailable = function (e) {
        recordedChunks.push(e.data); // was `event.data`, which relied on the global event object
        if (mediaRecorder.state === 'recording') {
            mediaRecorder.stop();
        }
    };
    mediaRecorder.onstop = function (event) {
        var blob = new Blob(recordedChunks, {
            "type": "video/webm"
        });
        var url = URL.createObjectURL(blob);
        res(url);
        var xhr = new XMLHttpRequest();
        xhr.responseType = 'blob';
        xhr.onload = function () {
            var recoveredBlob = xhr.response;
            var reader = new FileReader();
            reader.onload = function () {
                var blobAsDataUrl = reader.result;
                document.getElementById("my-video").setAttribute("src", blobAsDataUrl);
            };
            reader.readAsDataURL(recoveredBlob);
        };
        xhr.open('GET', url);
        xhr.send();
    };
});
Any solution is highly appreciated.
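As a side note, you can probe what the current browser's MediaRecorder will actually record before picking a mimeType. A minimal check (mp4 recording is rare outside Safari, which is why webm is the usual choice):

['video/webm; codecs=vp9', 'video/webm; codecs=vp8', 'video/mp4'].forEach(function (type) {
    console.log(type, MediaRecorder.isTypeSupported(type));
});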

Quick demo of transcoding using ffmpeg.wasm:
const { createFFmpeg } = FFmpeg;
const ffmpeg = createFFmpeg({
    log: true
});

const transcode = async (webcamData) => {
    const message = document.getElementById('message');
    const name = 'record.webm';
    await ffmpeg.load();
    message.innerHTML = 'Start transcoding';
    await ffmpeg.write(name, webcamData);
    await ffmpeg.transcode(name, 'output.mp4');
    message.innerHTML = 'Complete transcoding';
    const data = ffmpeg.read('output.mp4');
    const video = document.getElementById('output-video');
    video.src = URL.createObjectURL(new Blob([data.buffer], { type: 'video/mp4' }));
    dl.href = video.src;
    dl.innerHTML = "download mp4";
};

fn().then(async ({ url, blob }) => {
    transcode(new Uint8Array(await blob.arrayBuffer()));
});

function fn() {
    var recordedChunks = [];
    var time = 0;
    var canvas = document.getElementById("canvas");
    return new Promise(function (res, rej) {
        var stream = canvas.captureStream(60);
        mediaRecorder = new MediaRecorder(stream, {
            mimeType: "video/webm; codecs=vp9"
        });
        mediaRecorder.start(time);
        mediaRecorder.ondataavailable = function (e) {
            recordedChunks.push(e.data);
            // for the demo, the stop() call was removed here so more than one frame is captured
        };
        mediaRecorder.onstop = function (event) {
            var blob = new Blob(recordedChunks, {
                "type": "video/webm"
            });
            var url = URL.createObjectURL(blob);
            res({ url, blob }); // resolve both blob and url in an object
            myVideo.src = url;
            // data url conversion removed for brevity
        };
        // for the demo, draw random lines and then stop recording
        var i = 0,
            tid = setInterval(() => {
                if (i++ > 20) { // draw 20 lines
                    clearInterval(tid);
                    mediaRecorder.stop();
                }
                let canvas = document.querySelector("canvas");
                let cx = canvas.getContext("2d");
                cx.beginPath();
                cx.strokeStyle = 'green';
                cx.moveTo(Math.random() * 100, Math.random() * 100);
                cx.lineTo(Math.random() * 100, Math.random() * 100);
                cx.stroke();
            }, 200);
    });
}
<script src="https://unpkg.com/@ffmpeg/ffmpeg@0.8.1/dist/ffmpeg.min.js"></script>
<canvas id="canvas" style="height:100px;width:100px"></canvas>
<video id="myVideo" controls="controls"></video>
<video id="output-video" controls="controls"></video>
<a id="dl" href="" download="download.mp4"></a>
<div id="message"></div>

Related

How to stop MediaSource from loading all content automatically?

The code below works perfectly, but the whole video has to be loaded before it is played. Is there any way to load it partially?
const video = document.querySelector('video');
const assetURL = '/e_dashinit.mp4';
const mimeCodec = 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"';
const mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener('sourceopen', sourceOpen);

function sourceOpen(_) {
    const mediaSource = this;
    const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
    fetchAB(assetURL, function (buf) {
        sourceBuffer.addEventListener('updateend', function (_) {
            mediaSource.endOfStream();
            video.play();
        });
        sourceBuffer.appendBuffer(buf);
    });
}

function fetchAB(url, cb) {
    const xhr = new XMLHttpRequest();
    xhr.open('get', url);
    xhr.responseType = 'arraybuffer';
    xhr.onload = function () {
        cb(xhr.response);
    };
    xhr.send();
}
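One approach to partial loading (a sketch, assuming the server honors HTTP Range requests and the mp4 is fragmented, which a DASH-initialized file like e_dashinit.mp4 should be): fetch the file in slices and append each slice only after the previous append has finished.

const CHUNK_SIZE = 256 * 1024; // bytes per request; arbitrary choice

async function streamInChunks(url, sourceBuffer, mediaSource) {
    let offset = 0;
    while (true) {
        const resp = await fetch(url, {
            headers: { Range: `bytes=${offset}-${offset + CHUNK_SIZE - 1}` }
        });
        const buf = await resp.arrayBuffer();
        if (buf.byteLength === 0) break;
        await appendChunk(sourceBuffer, buf);
        offset += buf.byteLength;
        if (buf.byteLength < CHUNK_SIZE) break; // short read means last chunk
    }
    mediaSource.endOfStream();
}

function appendChunk(sourceBuffer, buf) {
    // appendBuffer is asynchronous; wait for 'updateend' before the next append
    return new Promise(resolve => {
        sourceBuffer.addEventListener('updateend', resolve, { once: true });
        sourceBuffer.appendBuffer(buf);
    });
}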

Detect silence in audio recording

There are similar questions for Java and iOS, but I'm wondering about detecting silence in JavaScript for audio recordings made via getUserMedia(). So given:
navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorder.start();

        const audioChunks = [];
        mediaRecorder.addEventListener("dataavailable", event => {
            audioChunks.push(event.data);
        });

        mediaRecorder.addEventListener("stop", () => {
            const audioBlob = new Blob(audioChunks);
            const audioUrl = URL.createObjectURL(audioBlob);
            const audio = new Audio(audioUrl);
            audio.play();
        });
    });
I'm wondering if there is anything that can be checked on the Blob, URL, or Audio objects in the stop event for an absence of audio - in the case of a bad microphone or a virtual device being selected, anything along those lines. I was previously checking the blob's size, but silent audio still has a file size. I can do this on the backend via ffmpeg, but I'm hoping there is a way to simplify this in pure JS.
With this solution, inspired by Visualizations with Web Audio API, you can set a minimum required decibel level and detect whether anything was recorded.
const MIN_DECIBELS = -45;

navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorder.start();

        const audioChunks = [];
        mediaRecorder.addEventListener("dataavailable", event => {
            audioChunks.push(event.data);
        });

        const audioContext = new AudioContext();
        const audioStreamSource = audioContext.createMediaStreamSource(stream);
        const analyser = audioContext.createAnalyser();
        analyser.minDecibels = MIN_DECIBELS;
        audioStreamSource.connect(analyser);

        const bufferLength = analyser.frequencyBinCount;
        const domainData = new Uint8Array(bufferLength);

        let soundDetected = false;

        const detectSound = () => {
            if (soundDetected) {
                return;
            }
            analyser.getByteFrequencyData(domainData);
            for (let i = 0; i < bufferLength; i++) {
                if (domainData[i] > 0) {
                    soundDetected = true;
                }
            }
            window.requestAnimationFrame(detectSound);
        };
        window.requestAnimationFrame(detectSound);

        mediaRecorder.addEventListener("stop", () => {
            const audioBlob = new Blob(audioChunks);
            const audioUrl = URL.createObjectURL(audioBlob);
            const audio = new Audio(audioUrl);
            audio.play();
            console.log({ soundDetected });
        });
    });
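The trick in the loop above is that getByteFrequencyData maps anything quieter than analyser.minDecibels to 0, so a single non-zero bin means the input crossed the threshold. If you would rather tune against an actual volume figure, a small variant (my sketch, reusing analyser, domainData and bufferLength from the answer above):

const getAverageVolume = () => {
    analyser.getByteFrequencyData(domainData);
    let sum = 0;
    for (let i = 0; i < bufferLength; i++) {
        sum += domainData[i];
    }
    return sum / bufferLength; // 0..255, where 0 means "below minDecibels"
};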
This code can run a function for every dialog it detects. It runs in a loop until the user stops it:
let VOICE_MIN_DECIBELS = -35;
let DELAY_BETWEEN_DIALOGS = 400;
let DIALOG_MAX_LENGTH = 60 * 1000;
let MEDIA_RECORDER = null;
let IS_RECORDING = false;

//startRecording:
function startRecording() {
    IS_RECORDING = true;
    record();
}

//stopRecording:
function stopRecording() {
    IS_RECORDING = false;
    if (MEDIA_RECORDER !== null)
        MEDIA_RECORDER.stop();
}

//record:
function record() {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
            //start recording:
            MEDIA_RECORDER = new MediaRecorder(stream);
            MEDIA_RECORDER.start();

            //save audio chunks:
            const audioChunks = [];
            MEDIA_RECORDER.addEventListener("dataavailable", event => {
                audioChunks.push(event.data);
            });

            //analysis:
            const audioContext = new AudioContext();
            const audioStreamSource = audioContext.createMediaStreamSource(stream);
            const analyser = audioContext.createAnalyser();
            analyser.minDecibels = VOICE_MIN_DECIBELS;
            audioStreamSource.connect(analyser);
            const bufferLength = analyser.frequencyBinCount;
            const domainData = new Uint8Array(bufferLength);

            //loop:
            let time = new Date();
            let startTime = time.getTime(),        // was declared but never assigned
                lastDetectedTime = time.getTime();
            let anySoundDetected = false;
            const detectSound = () => {
                //recording stopped by user:
                if (!IS_RECORDING)                 // was `if(IS_RECORDING)`, which killed the loop immediately
                    return;

                time = new Date();
                let currentTime = time.getTime();

                //time out:
                if (currentTime > startTime + DIALOG_MAX_LENGTH) {
                    MEDIA_RECORDER.stop();
                    return;
                }

                //a dialog detected:
                if (anySoundDetected === true &&
                    currentTime > lastDetectedTime + DELAY_BETWEEN_DIALOGS
                ) {
                    MEDIA_RECORDER.stop();
                    return;
                }

                //check for detection:
                analyser.getByteFrequencyData(domainData);
                for (let i = 0; i < bufferLength; i++)
                    if (domainData[i] > 0) {
                        anySoundDetected = true;
                        time = new Date();
                        lastDetectedTime = time.getTime();
                    }

                //continue the loop:
                window.requestAnimationFrame(detectSound);
            };
            window.requestAnimationFrame(detectSound);

            //stop event:
            MEDIA_RECORDER.addEventListener('stop', () => {
                //stop all the tracks:
                stream.getTracks().forEach(track => track.stop());
                if (!anySoundDetected) return;

                //send to server:
                const audioBlob = new Blob(audioChunks, { 'type': 'audio/mp3' });
                doWhateverWithAudio(audioBlob);

                //start recording again:
                record();
            });
        });
}

//doWhateverWithAudio:
function doWhateverWithAudio(audioBlob) {
    //.... send to server, download, etc.
}
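Hypothetical wiring for the two entry points above, with doWhateverWithAudio offering the clip as a download (the 'start'/'stop' button IDs are made up; note MediaRecorder typically produces webm or ogg, regardless of the 'audio/mp3' label in the Blob above):

document.getElementById('start').onclick = startRecording;
document.getElementById('stop').onclick = stopRecording;

function doWhateverWithAudio(audioBlob) {
    // e.g. offer the detected dialog as a download
    const a = document.createElement('a');
    a.href = URL.createObjectURL(audioBlob);
    a.download = 'dialog.webm';
    a.click();
}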

AudioContext stops working after being used exactly 50 times

Hi, I'm trying to make a drum kit using the AudioContext API. My issue is that after using it exactly 50 times, it stops working. The only thing I found that keeps it working is closing the previous AudioContext, but that makes a "clicky" sound because the audio stops abruptly.
Any ideas on what to do?
Here is what I'm working with to avoid it stopping at 50 uses:
let i = 0;
let audioContext;
let volumeControl;

// Credit to MDN for the AudioContext and StereoPannerNode tutorial.
function playSound(src, volume, pitch, stereo) {
    if (audioContext != null) {
        //volumeControl.gain.value = 0;
        audioContext.close();
    }
    console.log(i++);
    audioContext = new AudioContext();
    const stereoControl = new StereoPannerNode(audioContext);
    volumeControl = audioContext.createGain();
    volumeControl.gain.value = volume;
    stereoControl.pan.value = stereo;
    const source = audioContext.createBufferSource();
    const request = new XMLHttpRequest();
    request.open('GET', src, true);
    request.responseType = 'arraybuffer';
    request.onload = function () {
        const audioData = request.response;
        audioContext.decodeAudioData(audioData, function (buffer) {
            source.buffer = buffer;
            source.playbackRate.value = pitch;
            source.connect(volumeControl).connect(stereoControl).connect(audioContext.destination);
        });
    };
    request.send();
    source.play = source.start;
    source.play();
}
Don't create an audio context for each sound, but create a single one for your page and add nodes to it. Something like this...
const audioContext = new AudioContext();

function playSound(src, volume, pitch, stereo) {
    const stereoControl = audioContext.createStereoPanner();
    const volumeControl = audioContext.createGain();
    volumeControl.gain.value = volume;
    stereoControl.pan.value = stereo;
    const source = audioContext.createBufferSource();
    const request = new XMLHttpRequest();
    request.open("GET", src, true);
    request.responseType = "arraybuffer";
    request.onload = function () {
        const audioData = request.response;
        audioContext.decodeAudioData(audioData, function (buffer) {
            source.buffer = buffer;
            source.playbackRate.value = pitch;
            source
                .connect(volumeControl)
                .connect(stereoControl)
                .connect(audioContext.destination);
            source.start();
        });
    };
    request.send();
}
In addition, for a drumkit thing, you'll want to either preload all samples, or at the very least cache the decoded audio buffers and not do a request every time for them:
const cache = {};
const audioContext = new AudioContext();

function loadSound(src) {
    if (cache[src]) {
        // Already cached
        return Promise.resolve(cache[src]);
    }
    return new Promise(resolve => {
        const request = new XMLHttpRequest();
        request.open("GET", src, true);
        request.responseType = "arraybuffer";
        request.onload = function () {
            const audioData = request.response;
            audioContext.decodeAudioData(audioData, function (buffer) {
                cache[src] = buffer;
                resolve(buffer);
            });
        };
        request.send();
    });
}

function playSound(src, volume, pitch, stereo) {
    loadSound(src).then(buffer => {
        const stereoControl = audioContext.createStereoPanner();
        const volumeControl = audioContext.createGain();
        volumeControl.gain.value = volume;
        stereoControl.pan.value = stereo;
        const source = audioContext.createBufferSource();
        source.buffer = buffer;
        source.playbackRate.value = pitch;
        source
            .connect(volumeControl)
            .connect(stereoControl)
            .connect(audioContext.destination);
        source.start();
    });
}
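Usage sketch: preload the kit once up front (the sample paths and key binding here are made up), so the first hit isn't delayed by a network request and decode:

const SAMPLES = ['sounds/kick.mp3', 'sounds/snare.mp3', 'sounds/hihat.mp3'];

// warm the cache at startup
Promise.all(SAMPLES.map(loadSound)).then(() => {
    document.addEventListener('keydown', e => {
        if (e.key === 'a') playSound('sounds/kick.mp3', 1, 1, 0);
    });
});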

Browser's audio recording or microphone permission on button click rather than on page load

I have used the MediaStream Recording API for recording audio from the microphone. Recording works well, but if the user denies the browser permission to record and then clicks the record button, no error is shown.
What I want: if the user denied permission and still clicks the record button, the browser should ask for audio recording permission again.
I have tried several options with the following code but unfortunately none of them work. Can anyone help me out with this issue?
The same or a similar question was asked in "reprompt for permissions with getUserMedia() after initial denial", but there is still no resolution for that issue. Hoping for an answer that resolves re-prompting the permission popup after the page has been initialized.
<body>
    <div class="wrapper">
        <section class="main-controls">
            <canvas class="visualizer" height="60px" style="display:none"></canvas>
            <div id="buttons" style="margin-bottom:20px;">
                <button class="record">Record</button>
                <button class="stop">Stop</button>
            </div>
        </section>
        <section class="sound-clips"></section>
    </div>
    <script>
        const record = document.querySelector('.record');
        const stop = document.querySelector('.stop');
        const soundClips = document.querySelector('.sound-clips');
        const mainSection = document.querySelector('.main-controls');
        stop.disabled = true;
        let audioCtx;

        if (navigator.mediaDevices.getUserMedia) {
            const constraints = {
                audio: true
            };
            let chunks = [];

            let onSuccess = function (stream) {
                const mediaRecorder = new MediaRecorder(stream);
                visualize(stream);

                record.onclick = function () {
                    //ask for the browser's audio record or microphone permission here
                    if (navigator.mediaDevices.getUserMedia) {
                        mediaRecorder.start();
                        console.log("recorder started");
                        record.style.background = "red";
                        stop.disabled = false;
                        record.disabled = true;
                    }
                };

                stop.onclick = function () {
                    mediaRecorder.stop();
                    console.log(mediaRecorder.state);
                    console.log("recorder stopped");
                    record.style.background = "";
                    record.style.color = "";
                    stop.disabled = true;
                    record.disabled = false;
                };

                mediaRecorder.onstop = function (e) {
                    const clipContainer = document.createElement('article');
                    const clipLabel = document.createElement('p');
                    const audio = document.createElement('audio');
                    clipContainer.classList.add('clip');
                    audio.setAttribute('controls', '');
                    clipContainer.appendChild(audio);
                    clipContainer.appendChild(clipLabel);
                    soundClips.appendChild(clipContainer);
                    audio.controls = true;
                    const blob = new Blob(chunks, {
                        'type': 'audio/ogg; codecs=opus'
                    });
                    chunks = [];
                    const audioURL = window.URL.createObjectURL(blob);
                    audio.src = audioURL;
                    console.log("recorder stopped");

                    clipLabel.onclick = function () {
                        const existingName = clipLabel.textContent;
                        const newClipName = prompt('Enter a new name for your sound clip?');
                        if (newClipName === null) {
                            clipLabel.textContent = existingName;
                        } else {
                            clipLabel.textContent = newClipName;
                        }
                    };
                };

                mediaRecorder.ondataavailable = function (e) {
                    chunks.push(e.data);
                };
            };

            let onError = function (err) {
                console.log('The following error occurred: ' + err);
            };

            navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
        } else {
            console.log('getUserMedia not supported on your browser!');
        }

        function visualize(stream) {
            if (!audioCtx) {
                audioCtx = new AudioContext();
            }
            const source = audioCtx.createMediaStreamSource(stream);
            const analyser = audioCtx.createAnalyser();
            analyser.fftSize = 2048;
            const bufferLength = analyser.frequencyBinCount;
            const dataArray = new Uint8Array(bufferLength);
            source.connect(analyser);
        }
    </script>
</body>
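One approach (a sketch, not a guaranteed fix: browsers remember a hard "Block" and a page cannot force a re-prompt) is to defer getUserMedia until the button is clicked, so the permission prompt itself happens on the click, and to surface the denial to the user. This reuses the record, stop and chunks names from the snippet above:

record.onclick = async function () {
    try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorder.ondataavailable = e => chunks.push(e.data);
        mediaRecorder.start();
        record.disabled = true;
        stop.disabled = false;
    } catch (err) {
        // NotAllowedError: permission denied; the user has to re-enable the
        // microphone from the browser's site settings
        alert('Microphone access is blocked: ' + err.name);
    }
};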

FileReader.readAsArrayBuffer() from a source other than <input type="file">

I have the following HTML:
<script type="text/javascript">
    window.onload = function () {
        var volumeBars = {
            mono: document.getElementById("monoFill")
        };

        document.getElementById("open-file").onchange = function (evt) {
            var file = evt.target.files[0];
            var reader = new FileReader();
            reader.onload = function (e) {
                playSound(e.target.result);
                console.log(e.target.result);
            };
            reader.readAsArrayBuffer(file);
        };

        var context = new AudioContext();

        function playSound(arraybuffer) {
            context.close();
            context = new AudioContext();
            var source = context.createBufferSource();
            context.decodeAudioData(arraybuffer, function (buffer) {
                source.buffer = buffer;
            });
            console.log(arraybuffer);
            var analyser = context.createAnalyser();
            analyser.smoothingTimeConstant = .9;
            analyser.fftSize = 1024;
            jsNode = context.createScriptProcessor(2048, 1, 1);
            jsNode.onaudioprocess = function () {
                var array = new Uint8Array(analyser.frequencyBinCount);
                analyser.getByteFrequencyData(array);
                volumeBars.mono.style.height = Math.average(array) * 2 + "px";
                volumeBars.mono.innerHTML = Math.floor(Math.average(array));
                console.log(volumeBars.mono.innerHTML);
            };
            source.connect(analyser);
            source.connect(context.destination);
            jsNode.connect(context.destination);
            analyser.connect(jsNode);
            source.start();
        }

        Math.average = function (arguments) {
            var numbers;
            if (arguments[0] instanceof Array) {
                numbers = arguments[0];
            } else if (typeof arguments[0] == "number") {
                numbers = arguments;
            }
            var sum = 0;
            var average = 0;
            for (var i = 0; i < numbers.length; i++) {
                sum += numbers[i];
            }
            average = sum / numbers.length;
            return average;
        };
    }
</script>
<div id="container">
    <div class="bar" id="mono">
        <div class="fill" id="monoFill"></div>
    </div>
</div>
<input type="file" id="open-file" accept="audio/*">
The issue I am facing is that I want to replace the line
var file = evt.target.files[0];
with a local file that doesn't need to be picked every time. I have been unable to find a method that encodes that file as an ArrayBuffer. I'm sure this is a very amateur question with an obvious answer, but any assistance is welcome.
Since what you want is to access a file on your server, you simply need to make an Ajax request with the response type set to ArrayBuffer; there is no need for a FileReader here.
Using the fetch API, it would look like this:
const a_ctx = new (window.AudioContext || window.webkitAudioContext)();

fetch('https://dl.dropboxusercontent.com/s/1cdwpm3gca9mlo0/kick.mp3')
    .then(resp => resp.arrayBuffer()) // request as ArrayBuffer
    .then(buf => a_ctx.decodeAudioData(buf))
    .then(a_buf => {
        btn.onclick = e => {
            let node = a_ctx.createBufferSource();
            node.buffer = a_buf;
            node.connect(a_ctx.destination);
            node.start(0);
        };
        btn.disabled = false;
    });
<button id="btn" disabled>play</button>
<!-- Promising decodeAudioData for Safari https://github.com/mohayonao/promise-decode-audio-data/ [MIT] -->
<script src="https://cdn.rawgit.com/mohayonao/promise-decode-audio-data/eb4b1322/build/promise-decode-audio-data.min.js"></script>
And using the XMLHttpRequest API:
var a_ctx = new (window.AudioContext || window.webkitAudioContext)();
var xhr = new XMLHttpRequest();
xhr.open('get', 'https://dl.dropboxusercontent.com/s/1cdwpm3gca9mlo0/kick.mp3');
xhr.responseType = 'arraybuffer'; // request as ArrayBuffer
xhr.onload = function () {
    var buf = xhr.response;
    a_ctx.decodeAudioData(buf, function (a_buf) {
        btn.onclick = function (e) {
            let node = a_ctx.createBufferSource();
            node.buffer = a_buf;
            node.connect(a_ctx.destination);
            node.start(0);
        };
        btn.disabled = false;
    });
};
xhr.send();
<button id="btn" disabled>play</button>
