I would like my audio2 file to play when audio1.currentTime is 3 seconds, but I'm not able to make it work. I'm a newbie in JavaScript; what am I missing? This is my current JavaScript code:
function initAudioPlayer(){
    var audio1, audio2, ext, agent;
    ext = ".mp3";
    agent = navigator.userAgent.toLocaleLowerCase();
    if(agent.indexOf('firefox') != -1 || agent.indexOf('opera') != -1) { ext = ".ogg"; }
    //Audio Objects: audio1 and audio2
    audio1 = new Audio();
    audio1.src = "folder/Audio1"+ext;
    audio1.loop = false;
    audio1.play();
    audio2 = new Audio();
    audio2.src = "folder/Audio2"+ext;
    audio2.loop = false;
    audio2.play();
    //Function that reproduces the second audio file at second 3 of the first audio file
    function audio2(){
        if(audio1.currentTime == 3) { audio2.play(); }
    };
}
window.addEventListener("load", initAudioPlayer);
window.addEventListener("load", initAudioPlayer);
You must use the Web Audio API and fetch the buffers of your files. Then you add the samples of each buffer together and copy the result into a new buffer (a sketch of fetching and decoding the files follows the append method below). This code can help you:
let index = 0;
samples.forEach(buffer => {
    if (index === 0) {
        tempBuf = buffer;
    } else {
        tempBuf = this.appendBuffer(tempBuf, buffer);
    }
    index++;
});
And with this method you can append (mix) two buffers:
private appendBuffer(buffer1, buffer2) {
    const numberOfChannels = Math.min(buffer1.numberOfChannels, buffer2.numberOfChannels);
    const tmp = this.audioContextService.createBuffer(
        Math.max(buffer1.numberOfChannels, buffer2.numberOfChannels),
        Math.max(buffer1.length, buffer2.length), buffer1.sampleRate);
    for (let i = 0; i < numberOfChannels; i++) {
        const channel = tmp.getChannelData(i);
        let finalArray = [];
        let d = [];
        const channelTemp = buffer1.getChannelData(i);
        if (buffer2.numberOfChannels <= i) {
            finalArray = channelTemp;
        } else {
            const c = buffer2.getChannelData(i);
            if (channelTemp.length > c.length) {
                finalArray = channelTemp;
                d = c;
            } else {
                finalArray = c;
                d = channelTemp;
            }
            for (let j = 0; j < d.length; j++) {
                finalArray[j] += d[j] / 2;
            }
        }
        channel.set(finalArray, 0); // write from the start of the channel, not at offset i
    }
    return tmp;
}
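For context, here is a minimal sketch (not part of the original answer) of how the samples array of AudioBuffers might be fetched and decoded, and how the merged result could be played. The file names are placeholders, and appendBuffer is assumed to be the method above adapted into a standalone function that uses the same AudioContext:
// Hedged sketch: fetch and decode the files into AudioBuffers ("samples"),
// then mix them with appendBuffer and play the result.
const ctx = new (window.AudioContext || window.webkitAudioContext)();

function loadSamples(urls) {
    return Promise.all(urls.map(url =>
        fetch(url)
            .then(response => response.arrayBuffer())
            .then(data => ctx.decodeAudioData(data))
    ));
}

loadSamples(["folder/Audio1.mp3", "folder/Audio2.mp3"]).then(samples => {
    // Mix all decoded buffers into one, using the appendBuffer logic above.
    const mixed = samples.reduce((acc, buffer) => acc ? appendBuffer(acc, buffer) : buffer, null);
    const source = ctx.createBufferSource();
    source.buffer = mixed;
    source.connect(ctx.destination);
    source.start();
});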
You can see my demo here.
Also, you can see this answer.
If you truly want this to be accurate in time, you can't use Audio() - that's the HTML5 <audio> element, which is not sample-accurate. Javascript is also not accurate enough in event delivery to use a callback to do this (to be fair, neither are most general-purpose native OS APIs). You need to schedule the playback in advance, which is where Web Audio (https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) comes in. You need to load both samples, decode them into AudioBuffers, and then schedule playing back each of them with an AudioBufferSourceNode.
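To tie this back to the original question, a minimal sketch of that approach might look like the following; the file names and the 3-second offset come from the question, everything else is illustrative:
// Hedged sketch: decode both files, then schedule audio2 to start
// exactly 3 seconds after audio1 using the AudioContext clock.
const ctx = new (window.AudioContext || window.webkitAudioContext)();

function loadBuffer(url) {
    return fetch(url)
        .then(response => response.arrayBuffer())
        .then(data => ctx.decodeAudioData(data));
}

Promise.all([loadBuffer("folder/Audio1.mp3"), loadBuffer("folder/Audio2.mp3")])
    .then(([buffer1, buffer2]) => {
        const startTime = ctx.currentTime + 0.1; // schedule slightly in the future
        const source1 = ctx.createBufferSource();
        source1.buffer = buffer1;
        source1.connect(ctx.destination);
        source1.start(startTime);                // audio1 starts first

        const source2 = ctx.createBufferSource();
        source2.buffer = buffer2;
        source2.connect(ctx.destination);
        source2.start(startTime + 3);            // audio2 starts 3 seconds later, sample-accurately
    });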
I'm using the Sketch.js plugin in this example. I would like to use my pre-signed URLs as well, but they don't work. The expiration time is set long enough (1 day), so there's something wrong with the JS itself.
I have an S3 bucket where I store some protected music.
Using the official AWS SDK I can generate URLs like:
https://d225******.cloudfront.net/song.m4a?Expires=1493381986&Signature=***&Key-Pair-Id=***
I'm using pre-signed URLs across my website without any problem, but they won't work in this script:
<script>
var ALPHA, AudioAnalyser, COLORS, MP3_PATH, NUM_BANDS, NUM_PARTICLES, Particle, SCALE, SIZE, SMOOTHING, SPEED, SPIN;
MP3_PATH = 'my_presigned_url';
AudioAnalyser = (function() {
AudioAnalyser.AudioContext = self.AudioContext || self.webkitAudioContext;
AudioAnalyser.enabled = AudioAnalyser.AudioContext != null;
function AudioAnalyser(audio, numBands, smoothing) {
var src;
this.audio = audio != null ? audio : new Audio();
this.numBands = numBands != null ? numBands : 256;
this.smoothing = smoothing != null ? smoothing : 0.3;
if (typeof this.audio === 'string') {
src = this.audio;
this.audio = new Audio();
this.audio.crossOrigin = "anonymous";
this.audio.controls = true;
this.audio.src = src;
}
this.context = new AudioAnalyser.AudioContext();
this.jsNode = this.context.createScriptProcessor(2048, 1, 1);
this.analyser = this.context.createAnalyser();
this.analyser.smoothingTimeConstant = this.smoothing;
this.analyser.fftSize = this.numBands * 2;
this.bands = new Uint8Array(this.analyser.frequencyBinCount);
this.audio.addEventListener('canplay', (function(_this) {
return function() {
_this.source = _this.context.createMediaElementSource(_this.audio);
_this.source.connect(_this.analyser);
_this.analyser.connect(_this.jsNode);
_this.jsNode.connect(_this.context.destination);
_this.source.connect(_this.context.destination);
return _this.jsNode.onaudioprocess = function() {
_this.analyser.getByteFrequencyData(_this.bands);
if (!_this.audio.paused) {
return typeof _this.onUpdate === "function" ? _this.onUpdate(_this.bands) : void 0;
}
};
};
})(this));
}
AudioAnalyser.prototype.start = function() {
return this.audio.play();
};
AudioAnalyser.prototype.stop = function() {
return this.audio.pause();
};
return AudioAnalyser;
})();
Sketch.create({
particles: [],
setup: function() {
var analyser, error, i, intro, j, particle, ref, warning, x, y;
for (i = j = 0, ref = NUM_PARTICLES - 1; j <= ref; i = j += 1) {
x = random(this.width);
y = random(this.height * 2);
particle = new Particle(x, y);
particle.energy = random(particle.band / 256);
this.particles.push(particle);
}
if (AudioAnalyser.enabled) {
try {
analyser = new AudioAnalyser(MP3_PATH, NUM_BANDS, SMOOTHING);
analyser.onUpdate = (function(_this) {
return function(bands) {
var k, len, ref1, results;
ref1 = _this.particles;
results = [];
for (k = 0, len = ref1.length; k < len; k++) {
particle = ref1[k];
results.push(particle.energy = bands[particle.band] / 256);
}
return results;
};
})(this);
analyser.start();
document.getElementById('player-container').appendChild(analyser.audio);
document.getElementsByTagName("audio")[0].setAttribute("id", "dy_wowaudio");
intro = document.getElementById('intro');
intro.style.display = 'none';
} catch (_error) {
error = _error;
}
}
}
});
// generated by coffee-script 1.9.2
</script>
The script works fine without a pre-signed URL (as you can see in the example above), so what can I do to use my pre-signed URLs inside the AudioAnalyser function?
I've seen HTML5 video tags make multiple requests, I assume to fetch some metadata like play length and the first frame of video to use as a thumbnail. You might try playing with the preload attribute to prevent this.
Specifically, if the clip is small, preload="auto" might be everything you need. If the browser has to make follow-up requests, you're going to have a hard time, I think. Here's some relevant info.
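As a hedged example, the preload hint could be set on the audio element that the question's script builds; MP3_PATH is assumed to hold the pre-signed URL from the question:
var audio = new Audio();
audio.crossOrigin = "anonymous";
audio.preload = "auto";  // hint the browser to fetch the whole clip up front
audio.controls = true;
audio.src = MP3_PATH;    // the pre-signed URL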
Another way to go about this that I think may work more reliably is to generate temporary credentials as needed.
See the docs for more on this:
- Requesting temp creds
- Accessing resources with temp creds
Combine that with a JS package for signing AWS requests, such as binoculars/aws-sigv4, or copy someone else who's doing this in-browser.
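A rough sketch with the AWS SDK for JavaScript (v2) might look like this; the duration and logging are illustrative only:
// Hedged sketch: request temporary credentials via STS, then use them
// to sign requests (e.g. with an in-browser SigV4 signer).
var sts = new AWS.STS();
sts.getSessionToken({ DurationSeconds: 3600 }, function (err, data) {
    if (err) { console.error(err); return; }
    var creds = data.Credentials; // AccessKeyId, SecretAccessKey, SessionToken, Expiration
    console.log("Temporary credentials expire at", creds.Expiration);
});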
Share your browser error message, if any. It may be a problem related to cross-origin (CORS) configuration for the S3 bucket data.
I am trying to play one audio file after another using JavaScript. I get the audio file name from a vector (int) and from some strings I have created.
This is my code, but I can't make it work.
var audio = new Audio();
function playAll(){
    var folder_name = "synthex/";
    var file_name = ".sentence";
    var audio_file = ".wav";
    for(i = 0; i < 18; ++i)
    {
        var dir_file = folder_name + voicesClicks[i];
        dir_file = dir_file + file_name;
        if(voicesClicks[i] == null){
            continue;
        }
        //var value = voicesClicks[i];
        dir_file = dir_file + i;
        dir_file = dir_file + audio_file;
        console.log(dir_file);
        audio.src = dir_file;
        audio.play();
    }
}
One major problem that I find with your code is that audio's src keeps changing in each iteration, so even if every audio.src value is a valid wav file, your code would end up playing only the last one.
I had a similar requirement; this is what I did:
function playAll() {
    // CHANGE this list as per your requirement.
    var urls = ['http://upload.wikimedia.org/wikipedia/en/f/f9/Beatles_eleanor_rigby.ogg',
        'http://upload.wikimedia.org/wikipedia/commons/5/5b/Ludwig_van_Beethoven_-_Symphonie_5_c-moll_-_1._Allegro_con_brio.ogg'
    ];
    var idx = 0;
    var aud = new Audio();
    aud.src = urls[idx];
    aud.addEventListener('ended', function() { // for automatically starting the next track
        idx++;
        if (idx === urls.length) idx = 0;
        aud.src = urls[idx];
        aud.play();
    });
    aud.play();
}
document.addEventListener("DOMContentLoaded", playAll); // you can change this to button.onclick handler.
Edit: you can change the urls to ...
var urls = [];
for(i = 0; i < 18; ++i){
    if(!voicesClicks[i]){
        continue;
    }
    urls.push(folder_name + voicesClicks[i] + file_name + i + audio_file);
}
console.log(JSON.stringify(urls)); // for debug
I have a webcam streaming app based on the webcam.fla example by Wowza. The app streams audio and video from Flash to a Wowza server where it's transcoded etc.
We're trying to add a feature that lets the audio source be changed to any other system audio source. So far we've successfully created a dropdown containing all the interfaces and handled the callback, but despite starting and stopping the stream with the doConnect() function, the audio source seems to remain the default.
import flash.media.*;
import flash.geom.*;
import flash.net.*;
import flash.media.*;// Should this be duplicated
var parsed:Object = root.loaderInfo.parameters;
var nc:NetConnection = null;
var nsPublish:NetStream = null;
var nsPlay:NetStream = null;
var camera:Camera = null;
var microphone:Microphone = null;
// Testing
var serverName:String = "rtmp://stream-na.example.tv:1935/live";
var movieName:String = "streamName";
var flushVideoBufferTimer:Number = 0;
// Quality settings
var videoBitrate:Number = 200000;
var videoQuality:Number = 80; // Quality %
var videoWidth:Number = 640;
var videoHeight:Number = 360;
var videoFrameRate:Number = 30;
//////////////// UI Functions Bellow
import fl.controls.ComboBox;
import fl.data.DataProvider;
var aCb:ComboBox = new ComboBox();
function createAudioComboBox(sources)
{
var sourcesArray:Array = new Array();
aCb.dropdownWidth = 210;
aCb.width = 200;
aCb.move(0, 365);
aCb.prompt = "Change Audio Source";
aCb.dataProvider = new DataProvider(sourcesArray);
aCb.addEventListener(Event.CHANGE, changeAudioHandler);
addChild(aCb);
for (var index in sources)
{
//ExternalInterface.call("logBrowserStreaming", sources[index]);
aCb.addItem( { label: sources[index], data: index} );
}
function changeAudioHandler(event:Event):void
{
doConnect();
//var request:URLRequest = new URLRequest();
//request.url = ComboBox(event.target).selectedItem.data;
//navigateToURL(request);
//aCb.selectedIndex = -1;
var audioSource = ComboBox(event.target).selectedItem.data;
//microphone:Microphone = null;
microphone = Microphone.getMicrophone(audioSource);
microphone.rate = 16;
microphone.codec = SoundCodec.SPEEX;
microphone.encodeQuality = 10; // This is shit!! offer better audio in native app?
microphone.setSilenceLevel(0, -1);
microphone.setUseEchoSuppression(true);
//ExternalInterface.call("logBrowserStreaming", audioSource);
// Trigger restart camera...
//startCamera(); // Nope
doConnect();
}
}
//////////////// Core Streaming Functions Bellow
function startCamera()
{
// get the default Flash camera and microphone
camera = Camera.getCamera();
microphone = Microphone.getMicrophone();
// here are all the quality and performance settings
// here are all the quality and performance settings
if (camera != null)
{
//camera.setMode(1280, 720, 30, false);
camera.setMode(videoWidth, videoHeight, videoFrameRate, false); // false gives framerate priority apparently?? http://www.flash-communications.net/technotes/setMode/index.html
camera.setQuality(videoBitrate, videoQuality);
// Max 800kbps;
camera.setKeyFrameInterval(2);
// List audio sources names
// sourceVideoLabel.text += Camera.names;
// Create audio sources dropdown
// Hide video sources for now...
//createVideoComboBox(Camera.names);
}
else
{
sourceVideoLabel.text = "No Camera Found\n";
}
if ( microphone != null)
{
microphone.rate = 16;
microphone.codec = SoundCodec.SPEEX;
microphone.encodeQuality = 10; // This is shit!! offer better audio in native app?
microphone.setSilenceLevel(0, -1);
microphone.setUseEchoSuppression(true);
// List audio sources names;
// sourceVideoLabel.text += Microphone.names;
// Create audio sources dropdown
createAudioComboBox(Microphone.names);
// Don't show audio slider for now...
// createAudioSlider();
// Don't monitor audio level for now...
//monitorAudioLevel();
}
else
{
sourceVideoLabel.text += "No Microphone Found\n";
}
nameStr.text = movieName;
AppendCheckbox.selected = false;
connect.connectStr.text = serverName;
connect.connectButton.addEventListener(MouseEvent.CLICK, doConnect);
//enablePlayControls(false);
doConnect();
}
function ncOnStatus(infoObject:NetStatusEvent)
{
trace("nc: "+infoObject.info.code+" ("+infoObject.info.description+")");
if (infoObject.info.code == "NetConnection.Connect.Failed")
{
prompt.text = "Connection failed. Try again or email support#chew.tv";
}
else if (infoObject.info.code == "NetConnection.Connect.Rejected")
{
// Hide connect fail...
prompt.text = infoObject.info.description;
}
}
// Ask for permission to use the camera and show the preview to the user
// event:MouseEvent
// doConnect toggles connections on and off.
function doConnect()
{
// connect to the Wowza Media Server
if (nc == null)
{
// create a connection to the wowza media server
nc = new NetConnection();
nc.addEventListener(NetStatusEvent.NET_STATUS, ncOnStatus);
nc.connect(connect.connectStr.text);
//connect.connectButton.label = "Disconnect";
// uncomment this to monitor frame rate and buffer length
//setInterval("updateStreamValues", 500);
// Attach camera to preview
videoCamera.clear();
videoCamera.attachCamera(camera);
//enablePlayControls(true);
// Pass status to
// ExternalInterface.call("logBrowserStreaming", "cameraagreed");
}
else
{
nsPublish = null;
nsPlay = null;
videoCamera.attachNetStream(null);
videoCamera.clear();
videoRemote.attachNetStream(null);
videoRemote.clear();
nc.close();
nc = null;
//enablePlayControls(false);
doSubscribe.label = 'Play';
doPublish.label = 'Stream';
AppendCheckbox.selected = false;
connect.connectButton.label = "Connect";
prompt.text = "";
}
}
// function to monitor the frame rate and buffer length
function updateStreamValues()
{
if (nsPlay != null)
{
fpsText.text = (Math.round(nsPlay.currentFPS*1000)/1000)+" fps";
bufferLenText.text = (Math.round(nsPlay.bufferLength*1000)/1000)+" secs";
}
else
{
fpsText.text = "";
bufferLenText.text = "";
}
}
function nsPlayOnStatus(infoObject:NetStatusEvent)
{
trace("nsPlay: onStatus: "+infoObject.info.code+" ("+infoObject.info.description+")");
if (infoObject.info.code == "NetStream.Play.StreamNotFound" || infoObject.info.code == "NetStream.Play.Failed")
{
prompt.text = infoObject.info.description;
}
}
function doCloseRecord()
{
// after we have hit "Stop" recording and after the buffered video data has been
// sent to the Wowza Media Server close the publishing stream
nsPublish.publish("null");
}
// this function gets called every 250 ms to monitor the;
// progress of flushing the video buffer. Once the video
// buffer is empty we close publishing stream
function flushVideoBuffer()
{
var buffLen:Number = nsPublish.bufferLength;
if (buffLen == 0)
{
clearInterval(flushVideoBufferTimer);
flushVideoBufferTimer = 0;
doCloseRecord();
doPublish.label = 'Stream';
}
}
function nsPublicOnStatus(infoObject:NetStatusEvent)
{
trace("nsPublish: "+infoObject.info.code+" ("+infoObject.info.description+")");
// After calling nsPublish.publish(false); we wait for a status;
// event of "NetStream.Unpublish.Success" which tells us all the video
// and audio data has been written to the flv file. It is at this time
// that we can start playing the video we just recorded.
if (infoObject.info.code == "NetStream.Unpublish.Success")
{
//doPlayStart();
}
if (infoObject.info.code == "NetStream.Play.StreamNotFound" || infoObject.info.code == "NetStream.Play.Failed")
{
prompt.text = infoObject.info.description;
}
}
function initH264Recording(nsPublish:NetStream)
{
var h264Settings:H264VideoStreamSettings = new H264VideoStreamSettings();
h264Settings.setProfileLevel(H264Profile.BASELINE, H264Level.LEVEL_3);
nsPublish.videoStreamSettings = h264Settings;
}
// Start recording video to the server
function doStreamStart()
{
//prompt.text = "Starting stream with mic...";
//prompt.text = microphone;
ExternalInterface.call("logBrowserStreaming", "starting stream");
// stop video playback
//doPlayStop();
// create a new NetStream object for publishing
nsPublish = new NetStream(nc);
var nsPublishClient:Object = new Object();
nsPublish.client = nsPublishClient;
// Set the H.264 encoding parameters
if (testVersion(11,0,0,0))
{
initH264Recording(nsPublish);
}
else
{
prompt.text = "Flash player 11 or greater is required for H.264 encoding (" + Capabilities.version + ").";
}// trace the NetStream status information
nsPublish.addEventListener(NetStatusEvent.NET_STATUS, nsPublicOnStatus);
// publish the stream by name;
nsPublish.publish(nameStr.text, (AppendCheckbox.selected?"append":"record"));
// add custom metadata to the header of the .flv file;
var metaData:Object = new Object();
metaData["description"] = "Recorded using WebcamRecording example.";
nsPublish.send("#setDataFrame", "onMetaData", metaData);
// attach the camera and microphone to the server;
nsPublish.attachCamera(camera);
nsPublish.attachAudio(microphone);
ExternalInterface.call("logBrowserStreaming", microphone);
// set the buffer time to 20 seconds to buffer 20 seconds of video;
// data for better performance and higher quality video
nsPublish.bufferTime = 20;
// Disable the audio choice dropdown
aCb.enabled = false;
}
function doStreamStop()
{
ExternalInterface.call("logBrowserStreaming", "stopping stream");
// stop streaming video and audio to the publishing
// NetStream object
nsPublish.attachAudio(null);
nsPublish.attachCamera(null);
// After stopping the publishing we need to check if there is;
// video content in the NetStream buffer. If there is data
// we are going to monitor the video upload progress by calling
// flushVideoBuffer every 250ms. If the buffer length is 0
// we close the recording immediately.
var buffLen:Number = nsPublish.bufferLength;
if (buffLen > 0)
{
flushVideoBufferTimer = setInterval(flushVideoBuffer,250);
doPublish.label = 'Wait...';
}
else
{
trace("nsPublish.publish(null)");
doCloseRecord();
doPublish.label = 'Start';
}
// Disable the audio choice dropdown
aCb.enabled = true;
}
// Test version function checks if the current flash version supports H.264 Encoding.
function testVersion(v0:Number, v1:Number, v2:Number, v3:Number):Boolean
{
var version:String = Capabilities.version;
var index:Number = version.indexOf(" ");
version = version.substr(index+1);
var verParts:Array = version.split(",");
var i:Number;
var ret:Boolean = true;
while (true)
{
if (Number(verParts[0]) < v0)
{
ret = false;
break;
}
else if (Number(verParts[0]) > v0)
{
break;
}
if (Number(verParts[1]) < v1)
{
ret = false;
break;
}
else if (Number(verParts[1]) > v1)
{
break;
}
if (Number(verParts[2]) < v2)
{
ret = false;
break;
}
else if (Number(verParts[2]) > v2)
{
break;
}
if (Number(verParts[3]) < v3)
{
ret = false;
break;
}
break;
}
trace("testVersion: "+Capabilities.version+">="+v0+","+v1+","+v2+","+v3+": "+ret);
return ret;
}
// External trigger from Javascript;
// Allow stream to start with startBrowserStreaming call from js
ExternalInterface.addCallback("startBrowserStreaming", doStreamStart);
// Allow stream to stop with stopBrowserStreaming call from js;
ExternalInterface.addCallback("stopBrowserStreaming", doStreamStop);
stage.align = "TL";
stage.scaleMode = "noScale";
startCamera();
You can switch your audio source without touching the NetConnection and/or the NetStream.
Take this simple example, where I used a button to change my audio source:
const server:String = 'rtmp://localhost/live';
const stream:String = 'live';
var nc:NetConnection;
var ns_publish:NetStream;

nc = new NetConnection();
nc.addEventListener(
    NetStatusEvent.NET_STATUS,
    function(e:NetStatusEvent):void {
        if(e.info.code == 'NetConnection.Connect.Success'){
            publish();
        }
    }
);
nc.addEventListener(AsyncErrorEvent.ASYNC_ERROR, function(e:AsyncErrorEvent):void {});
nc.connect(server);

function publish():void {
    var cam:Camera = Camera.getCamera();
    // in my case I have 2 mics, and I start with the first
    var mic:Microphone = Microphone.getMicrophone(0);
    ns_publish = new NetStream(nc);
    ns_publish.attachAudio(mic);
    ns_publish.attachCamera(cam);
    ns_publish.publish(stream, 'record');
}

btn_switch_mic.addEventListener(MouseEvent.CLICK, function(e){
    // I can switch to the second mic without re-initializing my NetConnection and/or my NetStream
    var mic:Microphone = Microphone.getMicrophone(1);
    ns_publish.attachAudio(mic);
});
I tested this code with Wowza Streaming Engine 4.1.1 (the free version, without the Wowza Transcoder AddOn of course) and Flash Media Server 4.5, and it's working fine.
Note: we can use the same manner to change the video source (Camera).
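For example (a hedged sketch; btn_switch_cam is a hypothetical button, and the camera name is illustrative):
// Switch the video source the same way, without re-creating
// the NetConnection or the NetStream.
btn_switch_cam.addEventListener(MouseEvent.CLICK, function(e){
    var cam:Camera = Camera.getCamera("1"); // pick another entry from Camera.names
    ns_publish.attachCamera(cam);
});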
I hope all of that helps you.
Is it possible to achieve low-latency audio playback using HTML5? I'm currently using the AudioContext API. However, I am getting a latency of about 4 seconds, which is way too much for my use case.
if (!window.audioContextInstance) {
window.audioContextInstance = new webkitAudioContext();
}
var context = window.audioContextInstance;
context.sampleRate = 48000;
var buffers = [];
var src = new Float32Array();
var srcIdx = 0;
var bufferSize = 2048;
var sourceNode = context.createScriptProcessor(bufferSize, 1, 2);
sourceNode.onaudioprocess = function(evt) {
var c0 = evt.outputBuffer.getChannelData(0);
var c1 = evt.outputBuffer.getChannelData(1);
var sample = 0;
while(sample < bufferSize) {
if (srcIdx >= src.length) {
if (!buffers.length) {
console.log("Warning: Audio Buffer Underflow")
return;
}
src = buffers.shift();
srcIdx = 0;
}
while(sample < bufferSize && srcIdx < src.length) {
c0[sample] = src[srcIdx++];
c1[sample] = src[srcIdx++];
sample++;
}
}
};
scope.$on('frame', function (event, frame) {
while (buffers.length > 1) {
buffers.shift();
}
buffers.push(new Float32Array(frame.data));
if (buffers.length > 0) {
sourceNode.connect(context.destination);
}
});
}
You may be interested in riffwave.js, which appears to have much lower latency than 4 seconds.
I have several tracks to a song that I want to play together and be able to mute some and play others. So I need to be able to start them all at the same time. Right now, they all start slightly out of sync:
// Start playing
for ( i = 0; i < 5; i++ ) {
tracks[i].audio.play();
}
Even this is apparently not fast enough to start them all at the same time.
Is there any way in javascript to guarantee that HTML5 audio tags will start playing simultaneously?
Not sure if you're already doing this, but here's some sample code for preloading audio.
var audios = [];
var loading = 0;

AddNote("2C");
AddNote("2E");
AddNote("2G");
AddNote("3C");

function AddNote(name) {
    loading++;
    var audio = document.createElement("audio");
    audio.loop = true;
    audio.addEventListener("canplaythrough", function () {
        loading--;
        if (loading == 0) // All files are preloaded
            StartPlayingAll();
    }, false);
    audio.src = "piano/" + name + ".mp3";
    audios.push(audio);
}

function StartPlayingAll() {
    for (var i = 0; i < audios.length; i++)
        audios[i].play();
}
The other thing you can try is setting audio.currentTime on each of the tracks to manually sync up the audio.
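A hedged sketch of that idea, assuming a tracks array shaped like the one in the question:
// Nudge every track back to the first track's position. Note that seeking
// an <audio> element is itself not sample-accurate, so small offsets remain.
function syncTracks(tracks) {
    var reference = tracks[0].audio.currentTime;
    for (var i = 1; i < tracks.length; i++) {
        tracks[i].audio.currentTime = reference;
    }
}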
You could use setTimeout to sync them after a brief delay in the beginning (you may want to wait for all the audio objects to load though).
JSFiddle: http://jsfiddle.net/bmAYb/35/
var au1 = document.getElementById('au1');
var au2 = document.getElementById('au2');
au1.volume = 1;
au2.volume = 0; //mute
au1.play();
au2.play();
var notfirstRun = false;
//sync for the first time
setTimeout(function() {
    au2.currentTime = au1.currentTime;
    au2.volume = 1;
}, 250);
My initial thought was to sync every x milliseconds using setInterval, but the audio pops audibly when you do that if the volume is set to 1.
My fiddle isn't totally in sync, but it's pretty close. You can get it 100% in sync, but you either need to mute the audio on the other tracks or deal with popping.
The code (and music in the fiddle) are from Hungry Media.
I had the same issue and found a solution using the Audio API.
The problem is that the audio output has a delay of a few milliseconds, so it is impossible to start multiple audios at the same time. However, you can get around this by merging the audio sources into one using a ChannelMergerNode. By putting GainNodes in between, you can control the volume of each audio source separately.
I wrote a simple javascript class for this. This is how you can use it:
var audioMerger = new AudioMerger(["file1.ogg", "file2.mp3", "file3.mp3",
"file4.ogg", "file5.mp3"]);
audioMerger.onBuffered(() => audioMerger.play());
// Make sure it's always in sync (delay should be less than 50 ms)
setInterval(() => {
    if (audioMerger.getDelay() >= 0.05) {
        audioMerger.setTime(audioMerger.getTime());
    }
}, 200);
// Set volume of 3rd audio to 50%
audioMerger.setVolume(0.5, 2);
// When you want to turn it off:
audioMerger.pause();
This code reduced the delay between the audios to less than 10 milliseconds in Firefox on my PC. This delay is so small you won't notice it. Unfortunately, it doesn't work in older browsers like Internet Explorer.
And here's the code for the class:
class AudioMerger {
    constructor(files) {
        this.files = files;
        this.audios = files.map(file => new Audio(file));
        var AudioContext = window.AudioContext || window.webkitAudioContext;
        var ctx = new AudioContext();
        this.merger = ctx.createChannelMerger(this.audios.length);
        this.merger.connect(ctx.destination);
        this.gains = this.audios.map(audio => {
            var gain = ctx.createGain();
            var source = ctx.createMediaElementSource(audio);
            source.connect(gain);
            gain.connect(this.merger);
            return gain;
        });
        this.buffered = false;
        var load = files.length;
        this.audios.forEach(audio => {
            audio.addEventListener("canplaythrough", () => {
                load--;
                if (load === 0) {
                    this.buffered = true;
                    if (this.bufferCallback != null) this.bufferCallback();
                }
            });
        });
    }
    onBuffered(callback) {
        if (this.buffered) callback();
        else this.bufferCallback = callback;
    }
    play() {
        this.audios.forEach(audio => audio.play());
    }
    pause() {
        this.audios.forEach(audio => audio.pause());
    }
    getTime() {
        return this.audios[0].currentTime;
    }
    setTime(time) {
        this.audios.forEach(audio => audio.currentTime = time);
    }
    getDelay() {
        var times = [];
        for (var i = 0; i < this.audios.length; i++) {
            times.push(this.audios[i].currentTime);
        }
        var minTime = Math.min.apply(Math, times);
        var maxTime = Math.max.apply(Math, times);
        return maxTime - minTime;
    }
    setVolume(volume, audioID) {
        this.gains[audioID].gain.value = volume;
    }
}