AudioContext stops working after being used exactly 50 times - javascript

Hi, so I'm trying to make a drum kit using the AudioContext API. My issue is that after I use it exactly 50 times, it stops working. The only fix I've found is to close the previous AudioContext before creating a new one, but that produces a "clicky" sound because the audio stops abruptly.
Any ideas on what to do?
Here is what I'm working with to stop it from failing after 50 uses:
let i = 0;
let audioContext;
let volumeControl;

// Credit to MDN for the AudioContext and StereoPannerNode tutorial.
function playSound(src, volume, pitch, stereo) {
  if (audioContext != null) {
    //volumeControl.gain.value = 0;
    audioContext.close();
  }
  console.log(i++);
  audioContext = new AudioContext();
  const stereoControl = new StereoPannerNode(audioContext);
  volumeControl = audioContext.createGain();
  volumeControl.gain.value = volume;
  stereoControl.pan.value = stereo;
  const source = audioContext.createBufferSource();
  const request = new XMLHttpRequest();
  request.open('GET', src, true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    const audioData = request.response;
    audioContext.decodeAudioData(audioData, function(buffer) {
      source.buffer = buffer;
      source.playbackRate.value = pitch;
      source.connect(volumeControl).connect(stereoControl).connect(audioContext.destination);
    });
  };
  request.send();
  source.play = source.start;
  source.play();
}

Don't create a new audio context for each sound; create a single one for your page and add nodes to it. Something like this:
const audioContext = new AudioContext();

function playSound(src, volume, pitch, stereo) {
  const stereoControl = audioContext.createStereoPanner();
  const volumeControl = audioContext.createGain();
  volumeControl.gain.value = volume;
  stereoControl.pan.value = stereo;
  const source = audioContext.createBufferSource();
  const request = new XMLHttpRequest();
  request.open("GET", src, true);
  request.responseType = "arraybuffer";
  request.onload = function() {
    const audioData = request.response;
    audioContext.decodeAudioData(audioData, function(buffer) {
      source.buffer = buffer;
      source.playbackRate.value = pitch;
      source
        .connect(volumeControl)
        .connect(stereoControl)
        .connect(audioContext.destination);
      source.start();
    });
  };
  request.send();
}
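
As for the "clicky" sound you got when closing a context: it comes from cutting the waveform off mid-cycle. If a context ever does need to be torn down, ramping the gain to zero just before closing avoids the click. A minimal sketch (the 50 ms fade length is an arbitrary choice):

// Sketch: fade the master gain out, then close the context once the
// fade has finished, so the waveform isn't cut off abruptly.
function closeQuietly(context, masterGain, fadeSeconds = 0.05) {
  const now = context.currentTime;
  masterGain.gain.setValueAtTime(masterGain.gain.value, now);
  masterGain.gain.linearRampToValueAtTime(0, now + fadeSeconds);
  setTimeout(() => context.close(), fadeSeconds * 1000);
}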
In addition, for a drum kit, you'll want to either preload all the samples or, at the very least, cache the decoded audio buffers rather than issuing a request for them every time:
const cache = {};
const audioContext = new AudioContext();

function loadSound(src) {
  if (cache[src]) {
    // Already cached
    return Promise.resolve(cache[src]);
  }
  return new Promise(resolve => {
    const request = new XMLHttpRequest();
    request.open("GET", src, true);
    request.responseType = "arraybuffer";
    request.onload = function() {
      const audioData = request.response;
      audioContext.decodeAudioData(audioData, function(buffer) {
        cache[src] = buffer;
        resolve(buffer);
      });
    };
    request.send();
  });
}

function playSound(src, volume, pitch, stereo) {
  loadSound(src).then(buffer => {
    const stereoControl = audioContext.createStereoPanner();
    const volumeControl = audioContext.createGain();
    volumeControl.gain.value = volume;
    stereoControl.pan.value = stereo;
    const source = audioContext.createBufferSource();
    source.buffer = buffer;
    source.playbackRate.value = pitch;
    source
      .connect(volumeControl)
      .connect(stereoControl)
      .connect(audioContext.destination);
    source.start();
  });
}
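
If you'd rather preload every sample up front (the first option mentioned above), the same loadSound helper can warm the cache before the user starts playing. A minimal sketch, where SAMPLE_URLS is a hypothetical list of your own files:

// Hypothetical sample list -- substitute your actual drum-kit files.
const SAMPLE_URLS = ['kick.wav', 'snare.wav', 'hihat.wav'];

// Fetch and decode everything once, so the first hit of each pad
// plays with no network or decode delay.
Promise.all(SAMPLE_URLS.map(loadSound))
  .then(() => console.log('All samples preloaded'));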

Related

How to stop MediaSource from loading all content automatically?

The code below works perfectly, but the whole video has to be loaded before it plays. Is there any way to load it partially?
const video = document.querySelector('video');
const assetURL = '/e_dashinit.mp4';
const mimeCodec = 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"';
const mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener('sourceopen', sourceOpen);

function sourceOpen(_) {
  const mediaSource = this;
  const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
  fetchAB(assetURL, function (buf) {
    sourceBuffer.addEventListener('updateend', function (_) {
      mediaSource.endOfStream();
      video.play();
    });
    sourceBuffer.appendBuffer(buf);
  });
}

function fetchAB(url, cb) {
  const xhr = new XMLHttpRequest;
  xhr.open('get', url);
  xhr.responseType = 'arraybuffer';
  xhr.onload = function () {
    cb(xhr.response);
  };
  xhr.send();
}
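
One possible approach, sketched under the assumption that the server honors HTTP Range requests and that the asset is a fragmented MP4 whose bytes can be appended in order: fetch and append the file in fixed-size chunks instead of all at once.

// Sketch: stream the file into the SourceBuffer chunk by chunk.
// Could be called from sourceOpen() in place of fetchAB().
async function appendInChunks(url, sourceBuffer, chunkSize = 256 * 1024) {
  let offset = 0;
  while (true) {
    const res = await fetch(url, {
      headers: { Range: 'bytes=' + offset + '-' + (offset + chunkSize - 1) }
    });
    const chunk = await res.arrayBuffer();
    if (chunk.byteLength === 0) break;
    // Wait for the previous append to finish before starting the next.
    await new Promise(resolve => {
      sourceBuffer.addEventListener('updateend', resolve, { once: true });
      sourceBuffer.appendBuffer(chunk);
    });
    offset += chunk.byteLength;
    if (chunk.byteLength < chunkSize) break; // short read: end of file
  }
}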

Save canvas data as mp4 - Javascript

I have an animated canvas that I want to convert into mp4. I am using MediaRecorder to capture the canvas stream and then converting the resulting Blob. I learned that MediaRecorder does not allow recording in mp4, so I am forced to capture the canvas as webm. Here is what I have tried:
<canvas id="canvas"></canvas>

var recordedChunks = [];
var time = 0;
var canvas = document.getElementById("canvas");

return new Promise(function (res, rej) {
  var stream = canvas.captureStream(60);
  mediaRecorder = new MediaRecorder(stream, {
    mimeType: "video/webm; codecs=vp9"
  });
  mediaRecorder.start(time);
  mediaRecorder.ondataavailable = function (e) {
    recordedChunks.push(e.data);
    if (mediaRecorder.state === 'recording') {
      mediaRecorder.stop();
    }
  };
  mediaRecorder.onstop = function (event) {
    var blob = new Blob(recordedChunks, {
      "type": "video/webm"
    });
    var url = URL.createObjectURL(blob);
    res(url);
    var xhr = new XMLHttpRequest;
    xhr.responseType = 'blob';
    xhr.onload = function() {
      var recoveredBlob = xhr.response;
      var reader = new FileReader;
      reader.onload = function() {
        var blobAsDataUrl = reader.result;
        document.getElementById("my-video").setAttribute("src", blobAsDataUrl);
      };
      reader.readAsDataURL(recoveredBlob);
    };
    xhr.open('GET', url);
    xhr.send();
  };
});
Any solution is highly appreciated.
Quick demo of transcoding using ffmpeg.wasm:
const { createFFmpeg } = FFmpeg;
const ffmpeg = createFFmpeg({
  log: true
});

const transcode = async (webcamData) => {
  const message = document.getElementById('message');
  const name = 'record.webm';
  await ffmpeg.load();
  message.innerHTML = 'Start transcoding';
  await ffmpeg.write(name, webcamData);
  await ffmpeg.transcode(name, 'output.mp4');
  message.innerHTML = 'Complete transcoding';
  const data = ffmpeg.read('output.mp4');
  const video = document.getElementById('output-video');
  video.src = URL.createObjectURL(new Blob([data.buffer], { type: 'video/mp4' }));
  dl.href = video.src;
  dl.innerHTML = "download mp4";
};

fn().then(async ({url, blob}) => {
  transcode(new Uint8Array(await blob.arrayBuffer()));
});
function fn() {
  var recordedChunks = [];
  var time = 0;
  var canvas = document.getElementById("canvas");

  return new Promise(function (res, rej) {
    var stream = canvas.captureStream(60);
    var mediaRecorder = new MediaRecorder(stream, {
      mimeType: "video/webm; codecs=vp9"
    });
    mediaRecorder.start(time);
    mediaRecorder.ondataavailable = function (e) {
      recordedChunks.push(e.data);
      // for the demo, stop() is not called here, so more than one frame is captured
    };
    mediaRecorder.onstop = function (event) {
      var blob = new Blob(recordedChunks, {
        "type": "video/webm"
      });
      var url = URL.createObjectURL(blob);
      res({url, blob}); // resolve both blob and url in an object
      myVideo.src = url;
      // data-url conversion removed for brevity
    };

    // for the demo, draw random lines and then stop recording
    var i = 0,
      tid = setInterval(() => {
        if (i++ > 20) { // draw 20 lines
          clearInterval(tid);
          mediaRecorder.stop();
        }
        let canvas = document.querySelector("canvas");
        let cx = canvas.getContext("2d");
        cx.beginPath();
        cx.strokeStyle = 'green';
        cx.moveTo(Math.random() * 100, Math.random() * 100);
        cx.lineTo(Math.random() * 100, Math.random() * 100);
        cx.stroke();
      }, 200);
  });
}
<script src="https://unpkg.com/@ffmpeg/ffmpeg@0.8.1/dist/ffmpeg.min.js"></script>
<canvas id="canvas" style="height:100px;width:100px"></canvas>
<video id="myVideo" controls="controls"></video>
<video id="output-video" controls="controls"></video>
<a id="dl" href="" download="download.mp4"></a>
<div id="message"></div>
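
As an aside: some browsers (notably Safari) can record mp4 natively, in which case the transcoding step can be skipped entirely. A sketch of the feature check, reusing the stream from above:

// Prefer native mp4 recording when the browser supports it.
const mimeType = MediaRecorder.isTypeSupported('video/mp4')
  ? 'video/mp4'
  : 'video/webm; codecs=vp9';
mediaRecorder = new MediaRecorder(stream, { mimeType });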

How to loop a wav in Web Audio a certain number of times?

I know there's an option loop=true on an AudioBufferSourceNode. But I just want to repeat the wav a certain number of times. For example, how might I repeat it twice?
var url = 'main.wav';
var context = new AudioContext();
var source = context.createBufferSource();
source.connect(context.destination);

var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function() {
  context.decodeAudioData(request.response, function(response) {
    source.buffer = response;
    source.start(0);
    source.start(source.buffer.duration); // doesn't work: "cannot call start more than once."
    //source.loop = true; // this loops an infinite number of times, not good
  }, function () { console.error('The request failed.'); });
};
request.send();
I also tried to create a second buffer:
var source2 = context.createBufferSource();
// inside the callback
source2.buffer = response; // same response, so I don't need to load it separately
source2.start(source.buffer.duration); // doesn't work
But that didn't work either. Any ideas?
Your approach using a second buffer source is almost correct; you just need to use an absolute start time, not a relative one. To get the absolute time, add the sound's duration to the audio context's current time:
// Play the first sound now
var source1 = context.createBufferSource();
source1.buffer = response;
source1.connect(context.destination);
source1.start(context.currentTime); // This is the same as source1.start();

// Play the second sound with a delay
var source2 = context.createBufferSource();
source2.buffer = response;
source2.connect(context.destination);
source2.start(context.currentTime + response.duration);
This approach provides gapless playback (if your files are gapless).
You can wrap this into a simple function:
function play(context, buffer, delay = 0) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(context.destination);
  source.start(context.currentTime + delay);
  return source;
}
...
// Play the sound twice, one after another
play(context, response, 0);
play(context, response, response.duration);
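
To repeat the sound n times back to back, the same helper can simply be scheduled in a loop:

// Schedule n gapless repetitions of the same buffer
const n = 3;
for (let i = 0; i < n; i++) {
  play(context, response, i * response.duration);
}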
You can set the AudioBufferSourceNode's loop to true and schedule a stop after the desired number of repeats:
source.stop( ctx.currentTime + audioBuffer.duration * repeatCount )
(async () => {
  const url = 'https://dl.dropboxusercontent.com/s/1cdwpm3gca9mlo0/kick.mp3';
  const buf = await fetch(url).then((r) => r.arrayBuffer());
  const ctx = new AudioContext();
  const audioBuffer = await ctx.decodeAudioData(buf);
  btn.onclick = (e) => {
    const source = ctx.createBufferSource();
    source.buffer = audioBuffer;
    source.loop = true;
    source.connect(ctx.destination);
    source.start(0);
    source.stop(ctx.currentTime + audioBuffer.duration * inp.valueAsNumber);
  };
  btn.disabled = false;
})().catch(console.error);
<label> repeat<input id="inp" type="number" min="1" max="20" value="5"></label>
<button id="btn" disabled>start</button>
Another alternative is to use the 3-argument version of start(when, offset, duration), with a duration long enough to cover the repeats. Something like:
let nReps = 2; // Number of times to repeat the source
let s = new AudioBufferSourceNode(context, { buffer: buffer, loop: true });
s.start(startTime, 0, buffer.duration * nReps);
One solution is to enable the source's loop behavior and then schedule a callback that stop()s the source after enough time has passed for the desired number of playbacks:
const url = 'main.wav';
const context = new AudioContext();
const source = context.createBufferSource();
// Number of playback cycles we want
const playbackCount = 5;
source.connect(context.destination);

const request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = () => {
  context.decodeAudioData(request.response, (response) => {
    source.buffer = response;
    // Schedule the sound to stop at a future time, by which point it
    // should have played "playbackCount" times
    setTimeout(() => {
      source.stop();
    }, response.duration * playbackCount * 1000);
    // Enable loop behavior
    source.loop = true;
    source.start(0);
  }, () => {
    console.error('The request failed.');
  });
};
request.send();

Two mono audio streams: play one on the right speaker and the other on the left speaker, in parallel

I have two mono audio streams (mp3 files) that I want to play in parallel: right.mp3 should only be heard on the right channel (speaker) and left.mp3 only on the left channel (speaker). How can I do this?
I have already tried a merger, see below.
var contextL = new AudioContext() || new webkitAudioContext(),
  requestL = new XMLHttpRequest();
requestL.open("GET", "./left.mp3", true);
requestL.responseType = "arraybuffer";
requestL.onload = function() {
  contextL.decodeAudioData(requestL.response, onDecodedL);
};

var contextR = new AudioContext() || new webkitAudioContext(),
  requestR = new XMLHttpRequest();
requestR.open("GET", "./right.mp3", true);
requestR.responseType = "arraybuffer";
requestR.onload = function() {
  contextR.decodeAudioData(requestR.response, onDecodedR);
};

function onDecodedL(buffer) {
  var bufferSourceL = contextL.createBufferSource();
  bufferSourceL.buffer = buffer;
  var splitterL = contextL.createChannelSplitter(2);
  bufferSourceL.connect(splitterL);
  var mergerL = contextL.createChannelMerger(2);
  var gainL = contextL.createGain();
  gainL.value = 0;
  splitterL.connect(gainL, 0);
  bufferSourceL.start();
}

function onDecodedR(buffer) {
  var bufferSourceR = contextR.createBufferSource();
  bufferSourceR.buffer = buffer;
  var splitterR = contextR.createChannelSplitter(2);
  bufferSourceR.connect(splitterR);
  var mergerR = contextR.createChannelMerger(2);
  var gainR = contextR.createGain();
  gainR.value = 0;
  splitterR.connect(gainR, 1);
  bufferSourceR.start();
}
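
One way this can be done, sketched under the assumption that the browser supports StereoPannerNode: use a single AudioContext and pan each source hard to one side.

const context = new AudioContext();

// Load and decode one file into an AudioBuffer
async function loadBuffer(url) {
  const data = await fetch(url).then(r => r.arrayBuffer());
  return context.decodeAudioData(data);
}

// Play a buffer panned fully to one side: pan = -1 is left, 1 is right
function playPanned(buffer, pan) {
  const source = context.createBufferSource();
  source.buffer = buffer;
  const panner = context.createStereoPanner();
  panner.pan.value = pan;
  source.connect(panner).connect(context.destination);
  source.start();
}

Promise.all([loadBuffer('./left.mp3'), loadBuffer('./right.mp3')])
  .then(([left, right]) => {
    playPanned(left, -1); // left speaker only
    playPanned(right, 1); // right speaker only
  });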

Why does this code work in Safari but not Chrome? Arrrgh

Chrome v6, Safari v7 (works in v6)
if ('AudioContext' in window) {
  var myAudioContext = new AudioContext();
  const PATH = 'Sounds/',
    SOUNDS = ['Tock08'];
  var soundBuffers = {}, myNodes = {};

  function fetchSounds() {
    var request = new XMLHttpRequest();
    for (var i = 0, len = SOUNDS.length; i < len; i++) {
      request = new XMLHttpRequest();
      request._soundName = SOUNDS[i];
      request.open('GET', PATH + request._soundName + '.aiff', true);
      request.responseType = 'arraybuffer';
      request.addEventListener('load', bufferSound, false);
      request.send();
    }
  }

  function bufferSound(event) {
    var request = event.target;
    var buffer = myAudioContext.createBuffer(request.response, false);
    soundBuffers[request._soundName] = buffer;
  }
}

function clickSound() {
  if ('AudioContext' in window) { // Chrome doesn't work with this or the code above
    // create a new AudioBufferSourceNode
    source = myAudioContext.createBufferSource();
    source.buffer = soundBuffers['Tock08'];
    source.loop = false;
    source.connect(myNodes.volume);
    myNodes.volume.gain.value = 1;
    myNodes.volume.connect(myAudioContext.destination);
    source.noteOn(0); // play right now (0 seconds from now)
  }
}
In Safari, all is well: an array of sound buffers is created, and a call to clickSound results in a satisfying "click".
In Chrome, things are different. The line:
  var buffer = myAudioContext.createBuffer(request.response, false);
is flagged on loading with:
  Uncaught SyntaxError: An invalid or illegal string was specified.
Then, if I call clickSound, I get:
  source.buffer = soundBuffers['Tock08'];
  Uncaught TypeError: Value is not of type AudioBuffer.
Does anyone know why this problem occurs?
Chrome still uses the older, prefixed webkitAudioContext. This is how I set up the context in SoundJS, and it works everywhere:
if (window.webkitAudioContext) {
  s.context = new webkitAudioContext();
} else if (window.AudioContext) {
  s.context = new AudioContext();
}
Hope that helps.
