Audio playback slows down game - javascript

I am trying to develop a simple game using nw.js (node.js + chromium page).
<canvas width="1200" height="800" id="main"></canvas>
<script>
var Mouse = {x: 0, y: 0, fire: false};
(async function() {
"use strict";
const reload = 25;
var ireload = 0;
const audioCtx = new AudioContext();
let fire = await fetch('shotgun.mp3');
let bgMusic = await fetch('hard.mp3');
fire = await fire.arrayBuffer();
bgMusic = await bgMusic.arrayBuffer();
const bgMdecoded = await audioCtx.decodeAudioData(bgMusic);
const fireDecoded = await audioCtx.decodeAudioData(fire);
const bgM = audioCtx.createBufferSource();
bgM.buffer = bgMdecoded;
bgM.loop = true;
bgM.connect(audioCtx.destination)
bgM.start(0);
let shot = audioCtx.createBufferSource();
shot.buffer = fireDecoded;
shot.connect(audioCtx.destination);
document.getElementById('main').onmousedown = function(e) {
Mouse.x = e.layerX;
Mouse.y = e.layerY;
Mouse.fire = true;
}
function main(tick) {
var dt = lastTick - tick;
lastTick = tick;
///take fire
if(--ireload < 0 && Mouse.fire) {
ireload = reload;
shot.start(0);
shot = audioCtx.createBufferSource();
shot.buffer = fireDecoded;
shot.connect(audioCtx.destination);
Mouse.fire = false;
}
/* moving objects, rendering on thread with offscreen canvas */
requestAnimationFrame(main);
}
let lastTick = performance.now();
main(lastTick);
})();
</script>
I have stripped the code down to a minimal working example.
The problem is with shooting: every time I fire (///take fire), the game drops FPS. Exactly the same happens in Kaiido's example (https://jsfiddle.net/sLpx6b3v/). It works great over long periods, but playing multiple sounds several times (the game is a shooter) causes a framerate drop and, after some time, GC hiccups.
A gaming laptop less than a year old drops from 60 fps to about 40 fps, and to about 44 fps on Kaiido's example.
What can be fixed about the sound?
The desired behaviour is no lagging / no GC / no frame drops due to sound. The sound in the background works well.
I will try AudioWorklet, but it is hard to create one that processes instantaneous sounds (probably another question).

It is possible to reuse the buffer source, in a slightly hackish way.
First, create
const audioCtx = new AudioContext();
then fetch the resource as usual:
let fire = await fetch('shotgun.mp3');
fire = await fire.arrayBuffer();
fire = await audioCtx.decodeAudioData(fire);
const shot = audioCtx.createBufferSource();
shot.buffer = fire;
shot.loopEnd = 0.00001; //some small value to make it unplayable
shot.start(0);
Then, during the event (mouse down, in my case):
shot.loopEnd = 1; //this restarts the sound and plays it in a loop
Next, after it has played, set it back again:
shot.loopEnd = 0.00001;
In my case, I stop it inside requestAnimationFrame:
<canvas width="1200" height="800" id="main"></canvas>
<script>
var Mouse = {x: 0, y: 0, fire: false};
(async function() {
"use strict";
const reload = 25;
var ireload = 0;
const audioCtx = new AudioContext();
let fire = await fetch('shotgun.mp3');
let bgMusic = await fetch('hard.mp3');
fire = await fire.arrayBuffer();
bgMusic = await bgMusic.arrayBuffer();
const bgMdecoded = await audioCtx.decodeAudioData(bgMusic);
const fireDecoded = await audioCtx.decodeAudioData(fire);
const bgM = audioCtx.createBufferSource();
bgM.buffer = bgMdecoded;
bgM.loop = true;
bgM.connect(audioCtx.destination)
bgM.start(0);
let shot = audioCtx.createBufferSource();
shot.buffer = fireDecoded;
shot.connect(audioCtx.destination);
shot.loopEnd = 0.00001; //some small value to make it unplayable
shot.start(0);
document.getElementById('main').onmousedown = function(e) {
Mouse.x = e.layerX;
Mouse.y = e.layerY;
Mouse.fire = true;
}
function main(tick) {
var dt = lastTick - tick;
lastTick = tick;
///take fire
//asuming 60fps, which is true in my case, I stop it after a second
if(reload < -35) {
shot.loopEnd = 0.00001;
}
if(--ireload < 0 && Mouse.fire) {
ireload = reload;
shot.loopEnd = 1; //that restarts sound and plays in a loop.
Mouse.fire = false;
}
/* moving objects, rendering on thread with offscreen canvas */
requestAnimationFrame(main);
}
let lastTick = performance.now();
main(lastTick);
})();
</script>
A note about GC: it is true that it handles audio buffers quickly, but I have checked, and GC fires only when there are allocations and memory reallocations. The Garbage Collector interrupts all script execution, so there is jank and lag.
I use a memory pool in tandem with this trick, allocating the pool at initialisation and then only reusing objects, and I get literally no GC after the second sweep: it runs once after initialisation and kicks in a second time after optimisation to reclaim unused memory. After that, there is no GC at all. Using typed arrays and workers gives a really performant combo, with 60 fps, crisp sound and no lags at all.
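For illustration, a minimal sketch of the kind of pool I mean (the names and pool size are made up for the example):
// Object-pool sketch: allocate once at initialisation, recycle forever.
// Nothing is allocated per shot or per frame, so the GC has nothing to do.
const POOL_SIZE = 256;
const bulletPool = [];
for (let i = 0; i < POOL_SIZE; i++) {
    bulletPool.push({ x: 0, y: 0, vx: 0, vy: 0, active: false });
}
function spawnBullet(x, y, vx, vy) {
    for (let i = 0; i < POOL_SIZE; i++) {
        const b = bulletPool[i];
        if (!b.active) { // reuse the first free slot
            b.x = x; b.y = y; b.vx = vx; b.vy = vy;
            b.active = true;
            return b;
        }
    }
    return null; // pool exhausted: skip rather than allocate mid-frame
}
function despawnBullet(b) {
    b.active = false; // returns the slot to the pool, no deallocation
}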
You may think that locking out the GC is a bad idea. Maybe you are right, but after all, wasting resources just because there is a GC doesn't seem like a good idea either.
After tests, AudioWorklets seem to work as intended, but they are heavy, hard to maintain and consume a lot of resources, and writing a processor that simply copies inputs to outputs defeats its purpose. The postMessage system is a really heavy mechanism, and you have to either connect the standard way and recreate buffers, or copy data into Worklet space and manage it manually via shared arrays and atomic operations.
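For reference, a minimal sketch of the copy-inputs-to-outputs processor meant above (the file and registration names are illustrative):
// passthrough-processor.js -- copies input straight to output, which is
// exactly the kind of worklet that defeats its own purpose.
class PassthroughProcessor extends AudioWorkletProcessor {
    process(inputs, outputs) {
        const input = inputs[0];
        const output = outputs[0];
        for (let channel = 0; channel < input.length; channel++) {
            output[channel].set(input[channel]);
        }
        return true; // keep the processor alive
    }
}
registerProcessor('passthrough-processor', PassthroughProcessor);
// On the main thread:
// await audioCtx.audioWorklet.addModule('passthrough-processor.js');
// const node = new AudioWorkletNode(audioCtx, 'passthrough-processor');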
You may also be interested in this writeup about WebAudio design, where the author shares the same concerns and hits exactly the same problem. A quote:
I know I’m fighting an uphill battle here, but a GC is not what we need during realtime audio playback.
Keeping a pool of AudioBuffers seems to work, though in my own test app I still see slow growth to 12MB over time before a major GC wipes, according to the Chrome profiler.
And a writeup about GC, where memory leaks in JavaScript are described. A quote:
Consider the following scenario:
A sizable set of allocations is performed.
Most of these elements (or all of them) are marked as unreachable (suppose we null a reference pointing to a cache we no longer need).
No further allocations are performed.
In this scenario, most GCs will not run any further collection passes. In other words, even though there are unreachable references available for collection, these are not claimed by the collector. These are not strictly leaks but still result in higher-than-usual memory usage.

Related

Save ALL video frames to folder in Javascript [duplicate]

I'm working on a client-side project which lets a user supply a video file and apply basic manipulations to it. I'm trying to extract the frames from the video reliably. At the moment I have a <video> which I'm loading the selected video into, and then pulling out each frame as follows:
Seek to the beginning
Pause the video
Draw <video> to a <canvas>
Capture the frame from the canvas with .toDataURL()
Seek forward by 1 / 30 seconds (1 frame).
Rinse and repeat
This is a rather inefficient process and, more specifically, is proving unreliable, as I'm often getting stuck frames. This seems to stem from the actual <video> element not being updated before it is drawn to the canvas.
I'd rather not have to upload the original video to the server just to split the frames, and then download them back to the client.
Any suggestions for a better way to do this are greatly appreciated. The only caveat is that I need it to work with any format the browser supports (decoding in JS isn't a great option).
[2021 update]: Since this question (and answer) was first posted, things have evolved in this area, and it is finally time for an update; the method that was exposed here is now out of date, but luckily a few new or incoming APIs can help us do better at extracting video frames:
The most promising and powerful one, but still under development, with a lot of restrictions: WebCodecs
This new API unleashes access to the media decoders and encoders, enabling us to access raw data from video frames (YUV planes), which may be a lot more useful for many applications than rendered frames; and for the ones who need rendered frames, the VideoFrame interface that this API exposes can be drawn directly to a <canvas> element or converted to an ImageBitmap, avoiding the slow route of the MediaElement.
However there is a catch: apart from its currently low support, this API requires that the input has already been demuxed.
There are some demuxers online; for instance, for MP4 videos, GPAC's mp4box.js will help a lot.
A full example can be found on the proposal's repo.
The key part consists of
const decoder = new VideoDecoder({
    output: onFrame, // the callback to handle all the VideoFrame objects
    error: e => console.error(e),
});
decoder.configure(config); // depends on the input file, your demuxer should provide it
demuxer.start((chunk) => { // depends on the demuxer, but you need it to return chunks of video data
    decoder.decode(chunk); // will trigger our onFrame callback
});
Note that we can even grab the frames of a MediaStream, thanks to MediaCapture Transform's MediaStreamTrackProcessor.
This means that we should be able to combine HTMLMediaElement.captureStream() and this API in order to get our VideoFrames, without the need for a demuxer. However this is true only for a few codecs, and it means that we will extract frames at reading speed...
Anyway, here is an example working on the latest Chromium-based browsers, with chrome://flags/#enable-experimental-web-platform-features switched on:
const frames = [];
const button = document.querySelector("button");
const select = document.querySelector("select");
const canvas = document.querySelector("canvas");
const ctx = canvas.getContext("2d");
button.onclick = async (evt) => {
    if (window.MediaStreamTrackProcessor) {
        let stopped = false;
        const track = await getVideoTrack();
        const processor = new MediaStreamTrackProcessor(track);
        const reader = processor.readable.getReader();
        readChunk();
        function readChunk() {
            reader.read().then(async ({ done, value }) => {
                if (value) {
                    const bitmap = await createImageBitmap(value);
                    const index = frames.length;
                    frames.push(bitmap);
                    select.append(new Option("Frame #" + (index + 1), index));
                    value.close();
                }
                if (!done && !stopped) {
                    readChunk();
                } else {
                    select.disabled = false;
                }
            });
        }
        button.onclick = (evt) => stopped = true;
        button.textContent = "stop";
    } else {
        console.error("your browser doesn't support this API yet");
    }
};
select.onchange = (evt) => {
    const frame = frames[select.value];
    canvas.width = frame.width;
    canvas.height = frame.height;
    ctx.drawImage(frame, 0, 0);
};
async function getVideoTrack() {
    const video = document.createElement("video");
    video.crossOrigin = "anonymous";
    video.src = "https://upload.wikimedia.org/wikipedia/commons/a/a4/BBH_gravitational_lensing_of_gw150914.webm";
    document.body.append(video);
    await video.play();
    const [track] = video.captureStream().getVideoTracks();
    video.onended = (evt) => track.stop();
    return track;
}
video, canvas {
    max-width: 100%;
}
<button>start</button>
<select disabled></select>
<canvas></canvas>
The easiest to use, but still with relatively poor browser support, and subject to the browser dropping frames: HTMLVideoElement.requestVideoFrameCallback
This method allows us to schedule a callback to whenever a new frame will be painted on the HTMLVideoElement.
It is higher level than WebCodecs, and thus may have more latency; moreover, with it we can only extract frames at reading speed.
const frames = [];
const button = document.querySelector("button");
const select = document.querySelector("select");
const canvas = document.querySelector("canvas");
const ctx = canvas.getContext("2d");
button.onclick = async (evt) => {
    if (HTMLVideoElement.prototype.requestVideoFrameCallback) {
        let stopped = false;
        const video = await getVideoElement();
        const drawingLoop = async (timestamp, frame) => {
            const bitmap = await createImageBitmap(video);
            const index = frames.length;
            frames.push(bitmap);
            select.append(new Option("Frame #" + (index + 1), index));
            if (!video.ended && !stopped) {
                video.requestVideoFrameCallback(drawingLoop);
            } else {
                select.disabled = false;
            }
        };
        // the last call to rVFC may happen before .ended is set but never resolve
        video.onended = (evt) => select.disabled = false;
        video.requestVideoFrameCallback(drawingLoop);
        button.onclick = (evt) => stopped = true;
        button.textContent = "stop";
    } else {
        console.error("your browser doesn't support this API yet");
    }
};
select.onchange = (evt) => {
    const frame = frames[select.value];
    canvas.width = frame.width;
    canvas.height = frame.height;
    ctx.drawImage(frame, 0, 0);
};
async function getVideoElement() {
    const video = document.createElement("video");
    video.crossOrigin = "anonymous";
    video.src = "https://upload.wikimedia.org/wikipedia/commons/a/a4/BBH_gravitational_lensing_of_gw150914.webm";
    document.body.append(video);
    await video.play();
    return video;
}
video, canvas {
    max-width: 100%;
}
<button>start</button>
<select disabled></select>
<canvas></canvas>
For your Firefox users, Mozilla's non-standard HTMLMediaElement.seekToNextFrame()
As its name implies, this will make your <video> element seek to the next frame.
Combining this with the seeked event, we can build a loop that will grab every frame of our source, faster than reading speed (yeah!).
But this method is proprietary, available only in Gecko-based browsers, not on any standards track, and will probably be removed in the future once the methods exposed above are implemented.
But for the time being, it is the best option for Firefox users:
const frames = [];
const button = document.querySelector("button");
const select = document.querySelector("select");
const canvas = document.querySelector("canvas");
const ctx = canvas.getContext("2d");
button.onclick = async (evt) => {
    if (HTMLMediaElement.prototype.seekToNextFrame) {
        let stopped = false;
        const video = await getVideoElement();
        const requestNextFrame = (callback) => {
            video.addEventListener("seeked", () => callback(video.currentTime), {
                once: true
            });
            video.seekToNextFrame();
        };
        const drawingLoop = async (timestamp, frame) => {
            if (video.ended) {
                select.disabled = false;
                return; // FF apparently doesn't like to create ImageBitmaps
                        // from ended videos...
            }
            const bitmap = await createImageBitmap(video);
            const index = frames.length;
            frames.push(bitmap);
            select.append(new Option("Frame #" + (index + 1), index));
            if (!video.ended && !stopped) {
                requestNextFrame(drawingLoop);
            } else {
                select.disabled = false;
            }
        };
        requestNextFrame(drawingLoop);
        button.onclick = (evt) => stopped = true;
        button.textContent = "stop";
    } else {
        console.error("your browser doesn't support this API yet");
    }
};
select.onchange = (evt) => {
    const frame = frames[select.value];
    canvas.width = frame.width;
    canvas.height = frame.height;
    ctx.drawImage(frame, 0, 0);
};
async function getVideoElement() {
    const video = document.createElement("video");
    video.crossOrigin = "anonymous";
    video.src = "https://upload.wikimedia.org/wikipedia/commons/a/a4/BBH_gravitational_lensing_of_gw150914.webm";
    document.body.append(video);
    await video.play();
    return video;
}
video, canvas {
    max-width: 100%;
}
<button>start</button>
<select disabled></select>
<canvas></canvas>
The least reliable, which did stop working over time: HTMLVideoElement.ontimeupdate
The strategy pause - draw - play - wait for timeupdate used to be (in 2015) a quite reliable way to know when a new frame had been painted to the element, but since then browsers have put serious limitations on this event, which used to fire at a great rate; now there isn't much information we can grab from it...
I am not sure I can still advocate its use. I haven't checked how Safari (currently the only browser without a solution) handles this event (their handling of media is very weird to me), and there is a good chance that a simple setTimeout(fn, 1000 / 30) loop is actually more reliable in most cases.
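For completeness, that setTimeout fallback could look something like this rough sketch; it simply samples whatever the playing <video> currently shows, so frames may be duplicated or skipped:
// Draw the current video frame to a canvas roughly 30 times per second.
function captureLoop(video, ctx) {
    if (video.ended) return;
    ctx.drawImage(video, 0, 0, ctx.canvas.width, ctx.canvas.height);
    setTimeout(() => captureLoop(video, ctx), 1000 / 30);
}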
Here's a working function that was tweaked from this question:
async function extractFramesFromVideo(videoUrl, fps = 25) {
    return new Promise(async (resolve) => {
        // fully download it first (no buffering):
        let videoBlob = await fetch(videoUrl).then((r) => r.blob());
        let videoObjectUrl = URL.createObjectURL(videoBlob);
        let video = document.createElement("video");
        let seekResolve;
        video.addEventListener("seeked", async function () {
            if (seekResolve) seekResolve();
        });
        video.src = videoObjectUrl;
        // workaround chromium metadata bug (https://stackoverflow.com/q/38062864/993683)
        while ((video.duration === Infinity || isNaN(video.duration)) && video.readyState < 2) {
            await new Promise((r) => setTimeout(r, 1000));
            video.currentTime = 10000000 * Math.random();
        }
        let duration = video.duration;
        let canvas = document.createElement("canvas");
        let context = canvas.getContext("2d");
        let [w, h] = [video.videoWidth, video.videoHeight];
        canvas.width = w;
        canvas.height = h;
        let frames = [];
        let interval = 1 / fps;
        let currentTime = 0;
        while (currentTime < duration) {
            video.currentTime = currentTime;
            await new Promise((r) => (seekResolve = r));
            context.drawImage(video, 0, 0, w, h);
            let base64ImageData = canvas.toDataURL();
            frames.push(base64ImageData);
            currentTime += interval;
        }
        resolve(frames);
    });
}
Usage:
let frames = await extractFramesFromVideo("https://example.com/video.webm");
Note that there's currently no easy way to determine the actual/natural frame rate of a video, unless perhaps you use ffmpeg.js, but that's a 10+ megabyte JavaScript file (since it's an Emscripten port of the actual ffmpeg library, which is obviously huge).
2023 answer:
If you want to extract all frames reliably (i.e. no "seeking" and missing frames), and do so as fast as possible (i.e. not limited by playback speed or other factors) then you probably want to use the WebCodecs API. As of writing it's supported in Chrome and Edge. Other browsers will soon follow - hopefully by the end of 2023 there will be wide support.
I put together a simple library for this, but it currently only supports mp4 files. Here's an example:
<canvas id="canvasEl"></canvas>
<script type="module">
import getVideoFrames from "https://deno.land/x/get_video_frames#v0.0.8/mod.js"
let ctx = canvasEl.getContext("2d");
// `getVideoFrames` requires a video URL as input.
// If you have a file/blob instead of a videoUrl, turn it into a URL like this:
let videoUrl = URL.createObjectURL(fileOrBlob);
await getVideoFrames({
videoUrl,
onFrame(frame) { // `frame` is a VideoFrame object: https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame
ctx.drawImage(frame, 0, 0, canvasEl.width, canvasEl.height);
frame.close();
},
onConfig(config) {
canvasEl.width = config.codedWidth;
canvasEl.height = config.codedHeight;
},
});
URL.revokeObjectURL(fileOrBlob); // revoke URL to prevent memory leak
</script>
Demo: https://jsbin.com/mugoguxiha/edit?html,output
Github: https://github.com/josephrocca/getVideoFrames.js
(Note that the WebCodecs API is mentioned in @Kaiido's excellent answer, but that API alone unfortunately doesn't solve the issue - the example above uses mp4box.js to handle the container side that WebCodecs doesn't handle. Perhaps WebCodecs will eventually support containers too, and this answer will become mostly irrelevant, but until then I hope it is useful.)

How to screen record a rolling n-second window

I'm aware of the MediaRecorder API and how to record screen/audio/video, and then download those recordings. I'm also aware of npm modules such as react-media-recorder that leverage that API.
I would like to record a rolling n-second window of screen recording, to allow the user to create clips and then be able to share them. I cannot record the entire session, as I don't know how long it will last, meaning I don't know how big the recordings might get (I assume there is a limit to what the recording can hold in memory).
Is there any easy way to use MediaRecorder to record a rolling window (i.e. to always have in memory the last 30 seconds recorded)?
I spent quite a while trying to make this work. Unfortunately, the only solution that works for me involves making 30 recorders.
The naive solution to this problem is to call recorder.start(1000) to record data in one-second intervals, then maintain a circular buffer on the dataavailable event (sketched below). The issue with this is that MediaRecorder supports a very, very limited number of encodings, and none of these encodings allow data packets to be dropped from the beginning, since they contain important metadata. With a better understanding of the protocols, I'm sure it is to some extent possible to make this strategy work. However, simply concatenating the packets together (when some are missing) does not create a valid file.
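For the record, roughly what that naive attempt looks like (stream being any MediaStream); the concatenated result is not a valid file because only the first chunk carries the container headers:
// Naive rolling buffer: one recorder, 1-second chunks, keep the last 30.
const recorder = new MediaRecorder(stream, { mimeType: 'video/webm;codecs=vp8' });
const chunks = [];
recorder.addEventListener('dataavailable', (e) => {
    chunks.push(e.data);
    if (chunks.length > 30) chunks.shift(); // drops the metadata-bearing first chunk!
});
recorder.start(1000); // request a chunk every second
// later: new Blob(chunks, { type: recorder.mimeType }) plays only if
// chunks[0] is still the very first packet of the recording.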
Another attempt I made used two MediaRecorder objects at once. One of them would record second-long start packets, and the other would record regular data packets. When taking a clip, it would then combine a start packet from the first recorder with the packets from the second. However, this usually resulted in corrupted recordings.
This solution is not fantastic, but it does work: the idea is to keep 30 MediaRecorder objects, each offset by one second. For the sake of this demo, the clips are 5 seconds long, not 30:
<canvas></canvas><button>Clip!</button>
<style>
    canvas, video, button {
        display: block;
    }
</style>
<!-- draw to the canvas to create a stream for testing -->
<script>
    const canvas = document.querySelector('canvas');
    const ctx = canvas.getContext('2d');
    // fill background with white
    ctx.fillStyle = 'white';
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    // randomly draw stuff
    setInterval(() => {
        const x = Math.floor(Math.random() * canvas.width);
        const y = Math.floor(Math.random() * canvas.height);
        const radius = Math.floor(Math.random() * 30);
        ctx.beginPath();
        ctx.arc(x, y, radius, 0, Math.PI * 2);
        ctx.stroke();
    }, 100);
</script>
<!-- actual recording -->
<script>
    // five second clips
    const LENGTH = 5;
    const codec = 'video/webm;codecs=vp8,opus';
    const stream = canvas.captureStream();
    // circular buffer of recorders
    let head = 0;
    const recorders = new Array(LENGTH)
        .fill()
        .map(() => new MediaRecorder(stream, { mimeType: codec }));
    // start them all
    recorders.forEach((recorder) => recorder.start());
    let data = undefined;
    recorders.forEach((r) => r.addEventListener('dataavailable', (e) => {
        data = e.data;
    }));
    setInterval(() => {
        recorders[head].stop();
        recorders[head].start();
        head = (head + 1) % LENGTH;
    }, 1000);
    // download the data
    const download = () => {
        if (data === undefined) return;
        const url = URL.createObjectURL(data);
        // download the url
        const a = document.createElement('a');
        a.download = 'test.webm';
        a.href = url;
        a.click();
        URL.revokeObjectURL(url);
    };
    // stackoverflow doesn't allow downloads,
    // so we show the clip instead
    const show = () => {
        if (data === undefined) return;
        const url = URL.createObjectURL(data);
        // display url in new video element
        const v = document.createElement('video');
        v.src = url;
        v.controls = true;
        document.body.appendChild(v);
    };
    document.querySelector('button').addEventListener('click', show);
</script>

RTCDataChannel's "bufferedamountlow" event not firing in Safari?

I'm working on a project that utilizes WebRTC for file transfers. Recently someone reported an issue saying that transfers end prematurely for bigger files. I've found the problem, and my solution was to rely on the bufferedamountlow event to coordinate the sending of chunks. I've also stopped closing the connection when the sender thinks it's complete.
For some reason, though, in Safari that event does not fire.
Here is the relevant code:
const connection = new RTCPeerConnection(rtcConfiguration);
const channel = connection.createDataChannel('sendDataChannel');
channel.binaryType = 'arraybuffer';
channel.addEventListener('open', () => {
    const fileReader = new FileReader();
    let offset = 0;
    const nextSlice = (currentOffset: number) => {
        // Do asynchronous thing with FileReader, that will result in
        // channel.send(buffer) getting called.
        // Also, offset gets increased by 16384 (the size of the buffer).
    };
    channel.bufferedAmountLowThreshold = 0;
    channel.addEventListener('bufferedamountlow', () => nextSlice(offset));
    nextSlice(0);
});
The longer version of my code is available here.
While researching the issue, I've realized that on Safari, my connection.sctp is undefined. (Since I've switched to connection.sctp.maxMessageSize instead of 16384 for my buffer size.) I would assume the problem is related to that.
What could be the cause of this problem? Let me add that on Chrome and Firefox everything works just fine, without any issues whatsoever.
The bufferedamountlow event is not required for the proper functioning of my code; I would like it to work, though, to get more precise estimates of current progress and speed on the sending end of the file transfer.
After some investigation, it turns out that Safari has issues with 0 as a value for the bufferedAmountLowThreshold property.
When set to a non-zero value, the code functions properly.
Checking the bufferedAmount inside the nextSlice function also increases the speed at which the chunks are sent:
const bufferSize = connection.sctp?.maxMessageSize || 65535;
channel.addEventListener('open', () => {
    const fileReader = new FileReader();
    let offset = 0;
    const nextSlice = (currentOffset: number) => {
        const slice = file.slice(offset, currentOffset + bufferSize);
        fileReader.readAsArrayBuffer(slice);
    };
    fileReader.addEventListener('load', e => {
        const buffer = e.target.result as ArrayBuffer;
        try {
            channel.send(buffer);
        } catch {
            // Deal with failure...
        }
        offset += buffer.byteLength;
        if (channel.bufferedAmount < bufferSize / 2) {
            nextSlice(offset);
        }
    });
    channel.bufferedAmountLowThreshold = bufferSize / 2;
    channel.addEventListener('bufferedamountlow', () => nextSlice(offset));
    nextSlice(0);
});

JS AudioContext Interface: Am I doing this right?

I have the following function:
var PE_AudioManager_playSe = AudioManager.playSe;
AudioManager.playSe = function(se) {
if (se.name.substring(0,5) === `data:`) {
let audioContext = new (window.AudioContext || window.webkitAudioContext)();
let gainNode = audioContext.createGain();
gainNode.gain.value = (se.volume / 100) || 0;
let panNode = audioContext.createStereoPanner();
panNode.pan.value = (se.pan / 100) || 0;
let source = audioContext.createBufferSource();
audioContext.decodeAudioData(se.name.split(`,`)[1].base64ToArrayBuffer(), function(buffer) {
source.buffer = buffer;
source.connect(gainNode);
source.connect(panNode);
source.connect(audioContext.destination);
source.detune.value = (se.pitch - 100);
source.start(0);
});
} else {
PE_AudioManager_playSe.call(this,se);
};
};
It is an alias for an existing function that handles the playing of audio sound effects. This alias "intercepts" the routine and uses the AudioContext interface to play the sound if the source object's .name property is a data URI / base64 string rather than a filename.
The sound effect plays without problem, except I don't think I am doing the panning (.createStereoPanner) or volume (.createGain) correctly: I don't hear a difference when I adjust the pan or volume. But I could be wrong / crazy.
Does this code look correct? Can anybody point me in the right direction? Thank you in advance.
The GainNode and StereoPannerNode have min and max values; control your input so that those ranges are honored. But the problem lies elsewhere.
const ctx = new AudioContext();
const gainNode = ctx.createGain();
const panNode = ctx.createStereoPanner();
console.log(gainNode.gain.minValue, gainNode.gain.maxValue);
console.log(panNode.pan.minValue, panNode.pan.maxValue);
The connection of the nodes is critical. What helps me is to look at it as if it were a guitar (or any other electric instrument) with wires that have to be connected: one wire goes from the guitar to the gain pedal, that wire goes on to the pan pedal, and that wire goes to the amp to output the signal.
The same goes for your nodes. Connect the source (guitar) to the gainNode (gain pedal), then the gainNode to the panNode (pan pedal), and the panNode to the audioContext.destination (the amp):
audioContext.decodeAudioData(se.name.split(`,`)[1].base64ToArrayBuffer(), function(buffer) {
    source.buffer = buffer;
    source.connect(gainNode);
    gainNode.connect(panNode);
    panNode.connect(audioContext.destination);
    source.detune.value = (se.pitch - 100);
    source.start(0);
});
Really try to visualize it like that. Maybe even draw it on paper if you make it more complex.
Multiple nodes can be connected to a single destination, like having multiple sources which flow through the same effects to the destination. You can even make a switchboard out of this by connecting and disconnecting your nodes to and from different destinations, depending on what you need.
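As a small sketch of that switchboard idea (assuming the source and audioContext from the snippet above):
// Two alternative routes to the destination; flip the source between them.
const routeA = audioContext.createGain(); // e.g. a plain "dry" path
const routeB = audioContext.createStereoPanner(); // e.g. a panned path
routeA.connect(audioContext.destination);
routeB.connect(audioContext.destination);
source.connect(routeA); // start on route A
// later, reroute the same source:
source.disconnect(routeA);
source.connect(routeB);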
Hope this helps. If you have any questions, or I have been unclear, please let me know.

Optimize audio for iOS Web App

I'm currently developing and testing a game for iOS using JavaScript with the Cordova framework. I'm attempting to add sound effects when certain nodes are touched. Since nodes can be touched repeatedly at any rate, I'm using...
var snd = new Audio("audio/note_"+currentChain.length+".mp3");
snd.play();
This works for what I need, but when I enable these effects I find that the game lags. I'm working with mp3 files that have been shrunk down to about 16 kB in size, and even so the lag is substantial.
What's the best way to optimize sound in my situation? Am I limited on quality because the application is not native?
Thanks!
The best option would be to preload the sounds and have them ready when needed. I just wrote up a quick self-contained closure that I think will show you most of what you'd like to know how to do.
var playSound = (function () {
    var sounds = 15, // idk how many mp3 files you have
        loaded = new Array(sounds),
        current = -1,
        chain = [],
        audio,
        i;
    function incrementLoaded(index) {
        loaded[index] = true;
    }
    // preloads the sound data
    for (i = 0; i < sounds; i++) {
        audio = new Audio();
        audio.addEventListener('canplay', incrementLoaded.bind(audio, i));
        audio.src = 'audio/note_' + i + '.mp3';
        chain.push(audio);
    }
    // this will play audio only when ready, in sequential order automatically,
    // or at the "which" index, if supplied
    return function (which) {
        if (typeof which === 'number') {
            current = which;
        } else {
            current = (current + 1) % sounds;
        }
        // only play if loaded
        if (loaded[current]) {
            chain[current].pause();
            chain[current].currentTime = 0;
            chain[current].play();
        }
    };
}());
// would play sounds in order every second
setInterval(function () {
    playSound();
}, 1000);
If you are using multiple files, I'd suggest you change that to a single file, using the idea of sound sprites (sketched below). This link has more details about it: http://www.ibm.com/developerworks/library/wa-ioshtml5/
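A rough sketch of the sound-sprite idea (the cue offsets are made up; yours would come from how the sprite file was assembled):
// One audio file containing all the effects; play a named slice of it.
// Assumes the file's metadata has already loaded before playCue is called.
var sprite = new Audio('audio/notes_sprite.mp3');
var cues = { note_0: [0.0, 0.4], note_1: [0.5, 0.4], note_2: [1.0, 0.4] };
function playCue(name) {
    var start = cues[name][0],
        length = cues[name][1];
    sprite.currentTime = start; // jump to the cue's offset
    sprite.play();
    setTimeout(function () { sprite.pause(); }, length * 1000); // stop at cue end
}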
From my own experience, try increasing the file bitrate if you are not getting the sound to play exactly where you want it to. Ref: http://pupunzi.open-lab.com/2013/03/13/making-html5-audio-actually-work-on-mobile/
