MediaRecorder-derived mp4 is missing duration metadata [duplicate] - javascript

I am in the process of replacing RecordRTC with the built-in MediaRecorder for recording audio in Chrome. The recorded audio is then played back in the program with the audio API. I am having trouble getting the audio.duration property to work. The documentation says:
If the video (audio) is streamed and has no predefined length, "Inf" (Infinity) is returned.
With RecordRTC, I had to use ffmpeg_asm.js to convert the audio from wav to ogg. My guess is that somewhere in that process RecordRTC sets the predefined audio length. Is there any way to set the predefined length using MediaRecorder?

This is a Chrome bug.
Firefox does expose the duration of the recorded media, and if you set the currentTime of the recorded media to a value greater than its actual duration, then the property becomes available in Chrome too...
var recorder,
  chunks = [],
  ctx = new AudioContext(),
  aud = document.getElementById('aud');

function exportAudio() {
  var blob = new Blob(chunks);
  aud.src = URL.createObjectURL(blob);
  aud.onloadedmetadata = function() {
    // it should already be available here
    log.textContent = ' duration: ' + aud.duration;
    // handle chrome's bug
    if (aud.duration === Infinity) {
      // set it to bigger than the actual duration
      aud.currentTime = 1e101;
      aud.ontimeupdate = function() {
        this.ontimeupdate = () => {
          return;
        };
        log.textContent += ' after workaround: ' + aud.duration;
        aud.currentTime = 0;
      };
    }
  };
}
function getData() {
  var request = new XMLHttpRequest();
  request.open('GET', 'https://upload.wikimedia.org/wikipedia/commons/4/4b/011229beowulf_grendel.ogg', true);
  request.responseType = 'arraybuffer';
  request.onload = decodeAudio;
  request.send();
}

function decodeAudio(evt) {
  var audioData = this.response;
  ctx.decodeAudioData(audioData, startRecording);
}

function startRecording(buffer) {
  var source = ctx.createBufferSource();
  source.buffer = buffer;
  var dest = ctx.createMediaStreamDestination();
  source.connect(dest);
  recorder = new MediaRecorder(dest.stream);
  recorder.ondataavailable = saveChunks;
  recorder.onstop = exportAudio;
  source.start(0);
  recorder.start();
  log.innerHTML = 'recording...';
  // record only 5 seconds
  setTimeout(function() {
    recorder.stop();
  }, 5000);
}

function saveChunks(evt) {
  if (evt.data.size > 0) {
    chunks.push(evt.data);
  }
}

// we need user-activation
document.getElementById('button').onclick = function(evt) {
  getData();
  this.remove();
};
<button id="button">start</button>
<audio id="aud" controls></audio><span id="log"></span>
So the advice here would be to star the bug report so that Chromium's team takes some time to fix it, even if this workaround can do the trick...

Thanks to @Kaiido for identifying the bug and offering the working fix.
I prepared an npm package called get-blob-duration that you can install to get a nice Promise-wrapped function to do the dirty work.
Usage is as follows:
// Returns Promise<Number>
getBlobDuration(blob).then(function(duration) {
  console.log(duration + ' seconds');
});
Or ECMAScript 6:
// yada yada async
const duration = await getBlobDuration(blob)
console.log(duration + ' seconds')

A bug in Chrome, detected in 2016, but still open today (March 2019), is the root cause behind this behavior. Under certain scenarios audioElement.duration will return Infinity.
Chrome Bug information here and here
The following code provides a workaround to avoid the bug.
Usage: create your audioElement and call this function a single time, passing a reference to your audioElement. When the returned promise resolves, the audioElement.duration property should contain the right value. (It also fixes the same problem with videoElements.)
/**
 * calculateMediaDuration()
 * Force media element duration calculation.
 * Returns a promise that resolves when the duration is calculated.
 **/
function calculateMediaDuration(media) {
  return new Promise((resolve, reject) => {
    media.onloadedmetadata = function() {
      // set the mediaElement.currentTime to a high value beyond its real duration
      media.currentTime = Number.MAX_SAFE_INTEGER;
      // listen to time position change
      media.ontimeupdate = function() {
        media.ontimeupdate = function() {};
        // setting player currentTime back to 0 can be buggy too, set it first to .1 sec
        media.currentTime = 0.1;
        media.currentTime = 0;
        // media.duration should now have its correct value, return it...
        resolve(media.duration);
      };
    };
  });
}

// USAGE EXAMPLE:
calculateMediaDuration(yourAudioElement).then(() => {
  console.log(yourAudioElement.duration);
});

Thanks @colxi for the actual solution; I've added some validation steps (the solution was working fine but had problems with long audio files).
It took me about 4 hours to get it to work with long audio files; it turns out the validation was the fix.
function fixInfinity(media) {
  return new Promise((resolve, reject) => {
    // Wait for media to load metadata
    media.onloadedmetadata = () => {
      // Change the current time to trigger ontimeupdate
      media.currentTime = Number.MAX_SAFE_INTEGER;
      // Check if the duration is Infinity, NaN or undefined
      if (ifNull(media)) {
        media.ontimeupdate = () => {
          // If the duration is now valid, resolve the promise with it
          if (!ifNull(media)) {
            resolve(media.duration);
          }
          // The second ontimeupdate is a fallback in case the first one fails
          media.ontimeupdate = () => {
            if (!ifNull(media)) {
              resolve(media.duration);
            }
          };
        };
      } else {
        // If the media duration was never Infinity, return it directly
        resolve(media.duration);
      }
    };
  });
}

// Check whether the duration is still unusable.
// Note: `media.duration === NaN` is always false, so use Number.isNaN instead.
function ifNull(media) {
  return media.duration === Infinity || Number.isNaN(media.duration) || media.duration === undefined;
}
// USAGE EXAMPLE
// Get the audio player from the HTML
const AudioPlayer = document.getElementById('audio');
const getInfinity = async () => {
  // Await the promise
  await fixInfinity(AudioPlayer).then(val => {
    // Reset the audio's current time
    AudioPlayer.currentTime = 0;
    // Log the duration
    console.log(val);
  });
};

I wrapped the webm-duration-fix package to solve the webm duration problem; it can be used in Node.js and web browsers, and supports video files over 2GB without excessive memory usage.
Usage is as follows:
import fixWebmDuration from 'webm-duration-fix';

const mimeType = 'video/webm;codecs=vp9';
// use let, since the array is reset after writing
let blobSlice: BlobPart[] = [];

mediaRecorder = new MediaRecorder(stream, {
  mimeType
});

mediaRecorder.ondataavailable = (event: BlobEvent) => {
  blobSlice.push(event.data);
};

mediaRecorder.onstop = async () => {
  // fix blob, supports fixing webm files larger than 2GB
  const fixBlob = await fixWebmDuration(new Blob([...blobSlice], { type: mimeType }));
  // to write locally (Node.js), it is recommended to use fs.createWriteStream to reduce memory usage
  const fileWriteStream = fs.createWriteStream(inputPath);
  const blobReadstream = fixBlob.stream();
  const blobReader = blobReadstream.getReader();
  while (true) {
    let { done, value } = await blobReader.read();
    if (done) {
      console.log('write done.');
      fileWriteStream.close();
      break;
    }
    fileWriteStream.write(value);
    value = null;
  }
  blobSlice = [];
};

If you want to fix the video file itself, you can use the "webmFixDuration" package; the other methods are applied only at the display level, on the video tag, whereas this method modifies the complete video file.
webmFixDuration github example
mediaRecorder.onstop = async () => {
  const duration = Date.now() - startTime;
  const buggyBlob = new Blob(mediaParts, { type: 'video/webm' });
  const fixedBlob = await webmFixDuration(buggyBlob, duration);
  displayResult(fixedBlob);
};
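For context, here is a minimal sketch of the recording setup this handler assumes; mediaParts, startTime, stream and displayResult come from the snippet above or are placeholders, not part of the webmFixDuration API.
// Hypothetical surrounding setup (names assumed from the snippet above):
const mediaParts = [];  // recorded chunks
let startTime = 0;      // wall-clock time when recording began

const mediaRecorder = new MediaRecorder(stream, { mimeType: 'video/webm' }); // `stream` e.g. from getUserMedia
mediaRecorder.ondataavailable = (event) => {
  if (event.data.size > 0) mediaParts.push(event.data);
};
mediaRecorder.onstart = () => {
  startTime = Date.now(); // used in onstop to compute the real duration
};
mediaRecorder.start();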

Related

How to change value of global var when it is being used as a parameter in a function? (Javascript)

I want to be able to change the value of a global variable while it is being used by a function as a parameter.
My JavaScript:
function playAudio(audioFile, canPlay) {
  if (canPlay < 2 && audioFile.paused) {
    canPlay = canPlay + 1;
    audioFile.play();
  } else {
    if (canPlay >= 2) {
      alert("This audio has already been played twice.");
    } else {
      alert("Please wait for the audio to finish playing.");
    }
  }
}
const btnPitch01 = document.getElementById("btnPitch01");
const audioFilePitch01 = new Audio("../aud/Pitch01.wav");
var canPlayPitch01 = 0;
btnPitch01.addEventListener("click", function() {
  playAudio(audioFilePitch01, canPlayPitch01);
});
My HTML:
<body>
  <button id="btnPitch01">Play Pitch01</button>
  <button id="btnPitch02">Play Pitch02</button>
  <script src="js/js-master.js"></script>
</body>
My scenario:
I'm building a Musical Aptitude Test for personal use that won't be hosted online. There are going to be hundreds of buttons each corresponding to their own audio files. Each audio file may only be played twice and no more than that. Buttons may not be pressed while their corresponding audio files are already playing.
All of that was working completely fine until I optimised the function to use parameters. I know this is good for avoiding copy-pasting the same function hundreds of times, but it has broken the solution I used to prevent the audio from being played too many times. The "canPlayPitch01" variable, when used as a parameter, no longer gets incremented, which makes the [if (canPlay < 2)] check useless.
How would I go about solving this? Even if it is bad coding practise, I would prefer to keep using the method I'm currently using, because I think it is a very logical one.
I'm a beginner and know very little, so please forgive any mistakes or poor coding practises. I welcome corrections and tips.
Thank you very much!
It's not possible, since primitive arguments are passed by value, not by reference. You should return the new value and have the caller assign it back to the variable.
function playAudio(audioFile, canPlay) {
  if (canPlay < 2 && audioFile.paused) {
    canPlay = canPlay + 1;
    audioFile.play();
  } else {
    if (canPlay >= 2) {
      alert("This audio has already been played twice.");
    } else {
      alert("Please wait for the audio to finish playing.");
    }
  }
  return canPlay;
}
const btnPitch01 = document.getElementById("btnPitch01");
const audioFilePitch01 = new Audio("../aud/Pitch01.wav");
var canPlayPitch01 = 0;
btnPitch01.addEventListener("click", function() {
  canPlayPitch01 = playAudio(audioFilePitch01, canPlayPitch01);
});
A little improvement of the data will fix the stated problem and probably have quite a few side benefits elsewhere in the code.
Your data looks like this:
const btnPitch01 = document.getElementById("btnPitch01");
const audioFilePitch01 = new Audio("../aud/Pitch01.wav");
var canPlayPitch01 = 0;
// and, judging by the naming used, there's probably more like this:
const btnPitch02 = document.getElementById("btnPitch02");
const audioFilePitch02 = new Audio("../aud/Pitch02.wav");
var canPlayPitch02 = 0;
// and so on
Now consider the global data looking like this:
const model = {
  btnPitch01: {
    canPlay: 0,
    el: document.getElementById("btnPitch01"),
    audioFile: new Audio("../aud/Pitch01.wav")
  },
  btnPitch02: { /* and so on */ }
};
Your event listener(s) can say:
btnPitch01.addEventListener("click", function(event) {
  // notice how (if this is all that's done here) we can shrink this even further later
  playAudio(event);
});
And your playAudio function can have a side-effect on the data:
function playAudio(event) {
  // here's how we get from the button to the model item
  const item = model[event.target.id];
  if (item.canPlay < 2 && item.audioFile.paused) {
    item.canPlay++;
    item.audioFile.play();
  } else {
    if (item.canPlay >= 2) {
      alert("This audio has already been played twice.");
    } else {
      alert("Please wait for the audio to finish playing.");
    }
  }
}
Side note: the model can probably be built in code...
// you can automate this even more using String padStart() on 1,2,3...
const baseIds = [ '01', '02', ... ];
const model = Object.fromEntries(
  baseIds.map(baseId => {
    const id = `btnPitch${baseId}`;
    const value = {
      canPlay: 0,
      el: document.getElementById(id),
      audioFile: new Audio(`../aud/Pitch${baseId}.wav`)
    };
    return [id, value];
  })
);

// you can build the event listeners in a loop, too
// (or in the loop above)
Object.values(model).forEach(value => {
  value.el.addEventListener("click", playAudio);
});
Below is an example of the function:
btnPitch01.addEventListener("click", function() {
  // dataset values are strings, so convert them before comparing
  if (Number(this.dataset.numberOfPlays) >= Number(this.dataset.allowedNumberOfPlays)) return;
  playAudio(audioFilePitch01, canPlayPitch01);
  this.dataset.numberOfPlays++;
});
You would want to select all of your buttons and assign this to them after your HTML is loaded.
https://developer.mozilla.org/en-US/docs/Web/API/Document/getElementsByClassName
const listOfButtons = document.getElementsByClassName('pitchButton');
// HTMLCollection has no forEach, so spread it into an array first
[...listOfButtons].forEach(item => {
  item.addEventListener("click", () => {
    // arrow functions don't bind their own `this`, so refer to `item` instead
    if (Number(item.dataset.numberOfPlays) >= Number(item.dataset.allowedNumberOfPlays)) return;
    playAudio("audioFilePitch" + item.id);
    item.dataset.numberOfPlays++;
  });
});
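For this approach to work, each button would need those data attributes in the markup; a minimal sketch, with attribute names assumed from the code above:
<!-- hypothetical markup: each button starts at 0 plays and allows 2 -->
<button id="01" class="pitchButton" data-number-of-plays="0" data-allowed-number-of-plays="2">Play Pitch01</button>
<button id="02" class="pitchButton" data-number-of-plays="0" data-allowed-number-of-plays="2">Play Pitch02</button>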

Output of model in tensorflow js is an object, not a tensor

I am trying to create word detection on a live camera feed using TensorFlow.js in the web (NOT Node.js).
I found a model shared on tfhub and tried to load it in the web and get some output. I somewhat finished the code so it gives some output to check before drawing on the canvas. I have tried many tfjs versions to get the code working, because some parts of the code didn't work on specific versions, so I did some reading of the documentation and copy-pasted code.
Finally, the code below is working, but I don't understand the output. How do I use that output to draw anything on the screen? Am I missing a step?
Just for reference, I am also including the tfjs scripts that I am using in my code.
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-cpu"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-tflite/dist/tf-tflite.min.js"></script>
Now the code for the detection ...
const videoElement = document.getElementById("webcam");
var model = undefined;

// Check if webcam access is supported.
function getUserMediaSupported() {
  var state = !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
  console.log(state);
  return state;
}

if (getUserMediaSupported()) {
  enableCam();
} else {
  console.warn("getUserMedia() is not supported by your browser");
}

// Enable the live webcam view and start classification.
function enableCam() {
  // getUserMedia parameters to force video but not audio.
  const constraints = {
    video: true,
  };
  // Activate the webcam stream.
  navigator.mediaDevices.getUserMedia(constraints).then(function (stream) {
    videoElement.srcObject = stream;
  });
}

videoElement.addEventListener('loadeddata', (e) => {
  // Video should now be loaded but we can add a second check
  if (videoElement.readyState >= 3) {
    detect();
  }
});

async function detect() {
  $('.loading').text("loading...");
  if (!model) {
    model = await tflite.loadTFLiteModel("./ocr.tflite");
  }
  const outputTensor = tf.tidy(() => {
    // Get pixels data from an image.
    let img = tf.browser.fromPixels(videoElement);
    img = tf.image.resizeBilinear(img, [320, 320]);
    // Normalize (might also do resize here if necessary).
    // const input = tf.sub(tf.div(tf.expandDims(img), 127.5), 1);
    // Run the inference.
    let inputTensor = tf.expandDims(img);
    console.log("INPUT-->", inputTensor);
    let outputTensor = model.predict(inputTensor);
    console.log("OUTPUT-->", outputTensor);
    // De-normalize the result.
    return tf.mul(tf.add(outputTensor, 1), 127.5);
  });
  console.log(outputTensor);

  // const input_tensor = await tf.browser.fromPixels(videoElement);
  // // const input = tf.sub(tf.div(tf.expandDims(input_tensor), 127.5), 1);
  // // const tensor = await tf.browser.fromPixels(imageElement)
  // const input = tf.tidy(() => {
  //   return tf.reshape(input_tensor, [320, 320])
  //     .div(255).expandDims(0);
  // });
  // model.predict(input).then(predictions => {
  //   console.log('Predictions: ');
  //   console.log(predictions);
  // });

  // Run inference and get output tensors.
  // let outputTensor = model.predict(input);
  // console.log(outputTensor.dataSync());
  // let input = tf.image.resizeBilinear(tf.browser.fromPixels(input_tensor), [320, 320]);
  // input = tf.cast(tf.sub(tf.div(tf.expandDims(input), 127.5), 1), 'int32');
  // // Run the inference and get the output tensors.
  // let result = await model.predict(input);
  // console.log(result)
  // // renderDetectionResult(result);
  // $('.loading').text("loaded")
}
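One note that may clarify the output: when a TFLite model has several outputs, model.predict in tfjs-tflite can return a plain object mapping output names to tensors rather than a single tensor. A minimal sketch of how one might inspect such a result before drawing anything; the output names and shapes vary per model, so treat them as assumptions:
async function inspectOutput(outputs) {
  // If predict() returned a single tensor, wrap it for uniform handling.
  if (outputs instanceof tf.Tensor) {
    outputs = { output: outputs };
  }
  for (const [name, tensor] of Object.entries(outputs)) {
    // The shape hints at what each output holds (e.g. boxes, scores, classes).
    console.log(name, tensor.shape);
    const values = await tensor.data(); // TypedArray with the raw numbers
    console.log(name, values.slice(0, 10));
  }
}
Calling inspectOutput on the prediction would log each named output; once you know which tensor holds the boxes and which the scores, you can map those values to canvas coordinates and draw them.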

JavaScript: errors when adding 0s (silence) to the beginning and end of audio arrays

I am attempting to add silence before and after an audio file using JavaScript. My idea was to add zero values at the beginning and end of the audio. Both of the methods I tried failed.
Method 1: playback audio via AudioContext
const context = new AudioContext();

async function createSoundArray() {
  await fetch('file.mp3')
    .then(response => response.arrayBuffer())
    .then(arrayBuffer => context.decodeAudioData(arrayBuffer))
    .then(audioBuffer => {
      const data = audioBuffer.getChannelData(0); // Float32Array(100000)
      editAudio(data);
    });
}
createSoundArray();

function editAudio(data) {
  const editedData = [];
  // adds 25000 zero values (silence) to the beginning and end of the array
  for (var i = 0; i < data.length + 50000; i++) {
    if (i > 25000 && i <= 125000) {
      editedData.push(data[i - 25000]);
    } else {
      editedData.push(0);
    }
  }
  console.log(editedData); // array(150000)
  const Uint32 = Uint32Array.from(editedData);
  const audioCtx = new AudioContext();
  console.log(Uint32.buffer); // ArrayBuffer(150000)
  audioCtx.decodeAudioData(Uint32.buffer, function (buf) { // error: could not decode
    const playback = audioCtx.createBufferSource();
    playback.buffer = buf;
    playback.connect(audioCtx.destination);
    audioCtx.resume();
    console.log(playback.buffer);
  });
}
This method resulted in the following error:
The buffer passed to decodeAudioData contains an unknown content type.
Method 2, Playback audio via audio element
const context = new AudioContext();

async function createSoundArray() {
  await fetch('file.mp3')
    .then(response => response.arrayBuffer())
    .then(arrayBuffer => context.decodeAudioData(arrayBuffer))
    .then(audioBuffer => {
      const data = audioBuffer.getChannelData(0); // Float32Array(100000)
      editAudio(data);
    });
}
createSoundArray();

function editAudio(data) {
  const editedData = [];
  // adds 25000 zero values (silence) to the beginning and end of the array
  for (var i = 0; i < data.length + 50000; i++) {
    if (i > 25000 && i <= 125000) {
      editedData.push(data[i - 25000]);
    } else {
      editedData.push(0);
    }
  }
  const audioBlob = new Blob(editedData);
  const audioUrl = URL.createObjectURL(audioBlob);
  const audio = new Audio(audioUrl);
  audio.play(); // error: no supported source found
}
This method resulted in the following error:
Failed to play audio because no supported source was found
Because both of these methods failed, this leads me to believe that the added zeros are causing the data to not be recognized as audio. However, I don't understand why, because I am simply adding zeros, which are values that already exist inside the audio data before editing it. Does anyone know what I am doing wrong or why this is not working?
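A note that may explain both failures: decodeAudioData and the audio element expect an encoded media file (mp3, wav, etc., with headers), while getChannelData returns raw PCM samples, so the padded array is not decodable as a file. A minimal sketch of padding with silence while staying inside the Web Audio API, under that assumption:
// Sketch (not from the original post): pad with silence by building a longer
// AudioBuffer and copying the decoded samples into the middle, instead of
// trying to re-decode the raw samples.
function padWithSilence(ctx, audioBuffer, padSamples) {
  // New AudioBuffers are zero-filled (silent), so only the real samples need copying.
  const padded = ctx.createBuffer(
    audioBuffer.numberOfChannels,
    audioBuffer.length + 2 * padSamples,
    audioBuffer.sampleRate
  );
  for (let ch = 0; ch < audioBuffer.numberOfChannels; ch++) {
    padded.copyToChannel(audioBuffer.getChannelData(ch), ch, padSamples);
  }
  return padded;
}
// Usage (hypothetical): play the padded buffer directly, no re-decoding needed.
// const source = context.createBufferSource();
// source.buffer = padWithSilence(context, audioBuffer, 25000);
// source.connect(context.destination);
// source.start();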

How to slice an audio blob at a specific time?

I have an audio blob and I want to chop it up at a specific time. How should I do that in JavaScript?
Example:
sliceAudioBlob( audio_blob, 0, 10000 ); // Time in milliseconds [ start = 0, end = 10000 ]
Note: I have no clue how to do that, so a little hint would be really appreciated.
Update:
I'm trying to build a simple audio recorder, but the problem is that there are differences in recorded duration between browsers: some of them add a few seconds (Firefox) and others don't (Chrome). So I came up with the idea of coding a method that returns only the slice I want.
Full HTML code :
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <title>Audio Recorder</title>
  <style>
    audio {
      display: block;
    }
  </style>
</head>
<body>
  <button type="button" onclick="mediaRecorder.start(1000)">Start</button>
  <button type="button" onclick="mediaRecorder.stop()">Stop</button>
  <script type="text/javascript">
    var mediaRecorder = null,
        chunks = [],
        max_duration = 10000; // in milliseconds.

    function onSuccess(stream) {
      mediaRecorder = new MediaRecorder(stream);
      mediaRecorder.ondataavailable = function(event) {
        // chunks.length is the number of recorded seconds
        // since every chunk is 1 second duration.
        if (chunks.length < max_duration / 1000) {
          chunks.push(event.data);
        } else {
          if (mediaRecorder.state === 'recording') {
            mediaRecorder.stop();
          }
        }
      };
      mediaRecorder.onstop = function() {
        var audio = document.createElement('audio'),
            audio_blob = new Blob(chunks, {
              'type': 'audio/mpeg'
            });
        audio.controls = 'controls';
        audio.autoplay = 'autoplay';
        audio.src = window.URL.createObjectURL(audio_blob);
        document.body.appendChild(audio);
      };
    }

    var onError = function(err) {
      console.log('Error: ' + err);
    };

    navigator.mediaDevices.getUserMedia({ audio: true }).then(onSuccess, onError);
  </script>
</body>
</html>
There is no straightforward way to slice audio media like that, because your file is made of more than the sound signal: there are multiple segments in it, with headers etc., whose positions can't be determined just from a byteLength. It is like trying to crop a jpeg image by keeping only its first x bytes.
There might be ways, using the Web Audio API, to convert your media File to an AudioBuffer and then slice this AudioBuffer's raw PCM data as you wish, before packing it back into a media File with the correct new descriptors. But I think you are facing an X-Y problem, and if I got it correctly, there is a simple way to fix the X problem.
Indeed, the problem you describe is that either Chrome or Firefox doesn't produce a 10s media file from your code.
But that is because you are relying on the timeslice argument of MediaRecorder.start(timeslice) to give you chunks of perfect duration.
It won't. This argument should only be understood as a hint you are giving to the browser; browsers may well impose their own minimum timeslice and thus not respect your argument (2.3[Methods].5.4).
Instead, you'll be better off using a simple setTimeout to trigger your recorder's stop() method when you want:
start_btn.onclick = function() {
  mediaRecorder.start(); // we don't even need timeslice
  // now we'll get a similar max duration in every browser
  setTimeout(stopRecording, max_duration);
};
stop_btn.onclick = stopRecording;

function stopRecording() {
  if (mediaRecorder.state === "recording")
    mediaRecorder.stop();
}
Here is a live example using gUM hosted on jsfiddle.
And a live snippet using a silent stream from the Web Audio API because StackSnippet's protection doesn't run well with gUM...
var start_btn = document.getElementById('start'),
    stop_btn = document.getElementById('stop');
var mediaRecorder = null,
    chunks = [],
    max_duration = 10000; // in milliseconds.

start_btn.onclick = function() {
  mediaRecorder.start(); // we don't even need timeslice
  // now we'll get a similar max duration in every browser
  setTimeout(stopRecording, max_duration);
  this.disabled = !(stop_btn.disabled = false);
};
stop_btn.onclick = stopRecording;

function stopRecording() {
  if (mediaRecorder.state === "recording")
    mediaRecorder.stop();
  stop_btn.disabled = true;
}

function onSuccess(stream) {
  mediaRecorder = new MediaRecorder(stream);
  mediaRecorder.ondataavailable = function(event) {
    // simply always push here, the stop will be controlled by setTimeout
    chunks.push(event.data);
  };
  mediaRecorder.onstop = function() {
    var audio_blob = new Blob(chunks);
    var audio = new Audio(URL.createObjectURL(audio_blob));
    audio.controls = 'controls';
    document.body.appendChild(audio);
    // workaround https://crbug.com/642012
    audio.currentTime = 1e12;
    audio.onseeked = function() {
      audio.onseeked = null;
      console.log(audio.duration);
      audio.currentTime = 0;
      audio.play();
    };
  };
  start_btn.disabled = false;
}

var onError = function(err) {
  console.log('Error: ' + err);
};

onSuccess(SilentStream());

function SilentStream() {
  var ctx = new (window.AudioContext || window.webkitAudioContext),
      gain = ctx.createGain(),
      dest = ctx.createMediaStreamDestination();
  gain.connect(dest);
  return dest.stream;
}
<button id="start" disabled>start</button>
<button id="stop" disabled>stop</button>
In your line:
<button type="button" onclick="mediaRecorder.start(1000)">Start</button>
mediaRecorder.start receives the timeslice as a parameter. The timeslice specifies the size, in milliseconds, of each chunk. So in order to cut your audio you should modify the chunks array that you have been building in mediaRecorder.ondataavailable.
E.g.:
You pass 1000 as the timeslice, which means you get slices of 1 second each, and say you want to cut off the first 2 seconds of the recording.
You just have to do something like this:
mediaRecorder.onstop = function() {
  // Remove the first 2 seconds of the audio
  var chunksSliced = chunks.slice(2);
  var audio = document.createElement('audio'),
      // create the audio from the sliced chunks
      audio_blob = new Blob(chunksSliced, {
        'type': 'audio/mpeg'
      });
  audio.controls = 'controls';
  audio.autoplay = 'autoplay';
  audio.src = window.URL.createObjectURL(audio_blob);
  document.body.appendChild(audio);
};
You can reduce the size of the chunks in milliseconds if needed; just pass a different number to start() and slice the array where you want.
For a more specific answer, you could do this for audioSlice:
const TIMESLICE = 1000;

// @param chunks Array with the audio chunks
// @param start  where to start cutting, in seconds
// @param end    where to stop cutting, in seconds
function audioSlice(chunks, start, end) {
  const timeSliceToSeconds = TIMESLICE / 1000;
  const startIndex = Math.round(start / timeSliceToSeconds);
  const endIndex = Math.round(end / timeSliceToSeconds);
  if (startIndex < chunks.length && endIndex < chunks.length) {
    return chunks.slice(startIndex, endIndex);
  }
  throw Error('You cannot cut this at those points');
}
If you change TIMESLICE to your own value, the function will calculate where to cut from the start and end times given in seconds.
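For example, with the 1000 ms TIMESLICE above, keeping only seconds 2 through 5 could look like this sketch (chunks being the array filled in ondataavailable):
// keep only seconds 2..5 of the recording, then build a playable blob
const slicedChunks = audioSlice(chunks, 2, 5);
const slicedBlob = new Blob(slicedChunks, { 'type': 'audio/mpeg' });
const audio = new Audio(URL.createObjectURL(slicedBlob));
audio.play();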
Try to collect the timesliced chunks on pause, then filter these out (this snippet assumes lodash for _.filter and _.indexOf):
if (this.isPaused) {
  this.deleteChunks.push(...audioChunks);
}
// keep only the chunks that were not collected while paused
const pushChunks = _.filter(audioChunks, chunk => _.indexOf(this.deleteChunks, chunk) > -1 ? false : true);
console.log("audioChunks::", this.deleteChunks, pushChunks.length, audioChunks.length);
