I'm new to programming and I'm messing around with the Web Audio API. Right now, I have three samples (kick, clap, hihat) that make up a simple drumkit beat when a Play button is pressed.
I want to be able to illustrate this visually on the front end as a sequencer that plays through this drumkit. For instance, every time the "kick.wav" sample is played, I want to change the color of a div that is associated with it.
My questions are:
How do I associate each time a kick, clap, or hihat is played with a div in the HTML?
How can I make this association step through the sequence when the play button is hit?
HTML:
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<button id="play">play</button>
<script src="javascript/tween.js"></script>
<script src="javascript/shared.js"></script>
<script src="javascript/seq.js"></script>
</body>
Javascript:
function playSound(buffer, time) {
var source = context.createBufferSource();
source.buffer = buffer;
source.connect(context.destination);
source.start(time);
}
function loadSounds(obj, soundMap, callback) {
// Array-ify
var names = [];
var paths = [];
for (var name in soundMap) {
var path = soundMap[name];
names.push(name);
paths.push(path);
}
bufferLoader = new BufferLoader(context, paths, function(bufferList) {
for (var i = 0; i < bufferList.length; i++) {
var buffer = bufferList[i];
var name = names[i];
obj[name] = buffer;
}
if (callback) {
callback();
}
});
bufferLoader.load();
}
function BufferLoader(context, urlList, callback) {
this.context = context;
this.urlList = urlList;
this.onload = callback;
this.bufferList = new Array();
this.loadCount = 0;
}
BufferLoader.prototype.loadBuffer = function(url, index) {
// Load buffer asynchronously
var request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
var loader = this;
request.onload = function() {
// Asynchronously decode the audio file data in request.response
loader.context.decodeAudioData(
request.response,
function(buffer) {
if (!buffer) {
alert('error decoding file data: ' + url);
return;
}
loader.bufferList[index] = buffer;
if (++loader.loadCount == loader.urlList.length)
loader.onload(loader.bufferList);
},
function(error) {
console.error('decodeAudioData error', error);
}
);
}
request.onerror = function() {
alert('BufferLoader: XHR error');
}
request.send();
};
BufferLoader.prototype.load = function() {
for (var i = 0; i < this.urlList.length; ++i)
this.loadBuffer(this.urlList[i], i);
};
var RhythmSample = function() {
loadSounds(this, {
kick: 'sounds/kick.wav',
claps: 'sounds/claps.wav',
hihat: 'sounds/hihat.wav'
});
};
RhythmSample.prototype.play = function() {
// We'll start playing the rhythm 100 milliseconds from "now"
var startTime = context.currentTime + 0.100;
var tempo = 120; // BPM (beats per minute)
var eighthNoteTime = (60 / tempo) / 2;
var allDivs = document.getElementsByName('colorchangingdivs[]');
// Play 2 bars of the following:
for (var bar = 0; bar < 2; bar++) {
var time = startTime + bar * 8 * eighthNoteTime;
// Play the bass (kick) drum on beats 1, 5
playSound(this.kick, time);
playSound(this.kick, time + 4 * eighthNoteTime);
console.log("4")
// Play the snare drum on beats 3, 7
playSound(this.claps, time + 2 * eighthNoteTime);
playSound(this.claps, time + 6 * eighthNoteTime);
// Play the hi-hat every eighth note.
for (var i = 0; i < 8; ++i) {
playSound(this.hihat, time + i * eighthNoteTime);
}
}
};
var sample = new RhythmSample();
document.querySelector('#play').addEventListener('click', function() {
sample.play();
});
THANKS SO MUCH!
Add another parameter to playSound() that holds the id of the div you want to color, plus logic that changes the color when the sound plays, selecting the div by the id you've passed in.
function playSound(buffer, time, colorID) {
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(context.destination);
    source.start(time);
    document.getElementById(colorID).style.backgroundColor = 'blue'; // or a hex/rgb value
}
Then you just have to add the right parameter when you call playSound:
playSound(this.kick, time + 4 * eighthNoteTime, "your-kick-div-id");
If you need different colors for different sounds/divs, just add an if/else if statement when setting the background color.
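One subtlety worth noting: source.start(time) schedules playback in the future, while the style change above happens the moment playSound is called. To make the div change color in sync with the scheduled sound, you can translate the audio-clock offset into a setTimeout delay. A minimal sketch under that assumption (the 100 ms reset is arbitrary):

function playSound(buffer, time, colorID) {
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(context.destination);
    source.start(time);
    // Wait until the sample is actually due to play before changing the color.
    var delayMs = Math.max(0, (time - context.currentTime) * 1000);
    setTimeout(function() {
        var div = document.getElementById(colorID);
        div.style.backgroundColor = 'blue';
        // Reset shortly afterwards so repeated hits stay visible.
        setTimeout(function() { div.style.backgroundColor = ''; }, 100);
    }, delayMs);
}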
I have this record.js script to toggle a recording that is currently working as expected.
function Record_Current(config) {
config = config || {};
var self = this;
var audioInput;
var audioNode;
var bufferSize = config.bufferSize || 4096;
var recordedData = [];
var recording = false;
var recordingLength = 0;
var startDate;
var audioCtx;
this.toggleRecording = function() {
if (recording) {
self.stop();
} else {
self.start();
}
};
this.start = function() {
// reset any previous data
recordedData = [];
recordingLength = 0;
// webkit audio context shim
audioCtx = new(window.AudioContext || window.webkitAudioContext)();
if (audioCtx.createJavaScriptNode) {
audioNode = audioCtx.createJavaScriptNode(bufferSize, 1, 1);
} else if (audioCtx.createScriptProcessor) {
audioNode = audioCtx.createScriptProcessor(bufferSize, 1, 1);
} else {
throw 'WebAudio not supported!';
}
audioNode.connect(audioCtx.destination);
navigator.mediaDevices.getUserMedia({ audio: true })
.then(onMicrophoneCaptured)
.catch(onMicrophoneError);
};
this.stop = function() {
stopRecording(function(blob) {
self.blob = blob;
config.onRecordingStop && config.onRecordingStop(blob);
});
};
this.upload = function(url, params, callback) {
var formData = new FormData();
formData.append("audio", self.blob, config.filename || 'recording.wav');
for (var i in params)
formData.append(i, params[i]);
var request = new XMLHttpRequest();
request.upload.addEventListener("progress", function(e) {
callback('progress', e, request);
});
request.upload.addEventListener("load", function(e) {
callback('load', e, request);
});
request.onreadystatechange = function(e) {
var status = 'loading';
if (request.readyState === 4) {
status = request.status === 200 ? 'done' : 'error';
}
callback(status, e, request);
};
request.open("POST", url);
request.send(formData);
};
function stopRecording(callback) {
// stop recording
recording = false;
// to make sure onaudioprocess stops firing
window.localStream.getTracks().forEach((track) => { track.stop(); });
audioInput.disconnect();
audioNode.disconnect();
exportWav({
sampleRate: sampleRate,
recordingLength: recordingLength,
data: recordedData
}, function(buffer, view) {
self.blob = new Blob([view], { type: 'audio/wav' });
callback && callback(self.blob);
});
}
function onMicrophoneCaptured(microphone) {
if (config.visualizer)
visualize(microphone);
// save the stream so we can disconnect it when we're done
window.localStream = microphone;
audioInput = audioCtx.createMediaStreamSource(microphone);
audioInput.connect(audioNode);
audioNode.onaudioprocess = onAudioProcess;
recording = true;
self.startDate = new Date();
config.onRecordingStart && config.onRecordingStart();
sampleRate = audioCtx.sampleRate;
}
function onMicrophoneError(e) {
console.log(e);
alert('Unable to access the microphone.');
}
function onAudioProcess(e) {
if (!recording) {
return;
}
recordedData.push(new Float32Array(e.inputBuffer.getChannelData(0)));
recordingLength += bufferSize;
self.recordingLength = recordingLength;
self.duration = new Date().getTime() - self.startDate.getTime();
config.onRecording && config.onRecording(self.duration);
}
function visualize(stream) {
var canvas = config.visualizer.element;
if (!canvas)
return;
var canvasCtx = canvas.getContext("2d");
var source = audioCtx.createMediaStreamSource(stream);
var analyser = audioCtx.createAnalyser();
analyser.fftSize = 2048;
var bufferLength = analyser.frequencyBinCount;
var dataArray = new Uint8Array(bufferLength);
source.connect(analyser);
function draw() {
// get the canvas dimensions
var width = canvas.width,
height = canvas.height;
// ask the browser to schedule a redraw before the next repaint
requestAnimationFrame(draw);
// clear the canvas
canvasCtx.fillStyle = config.visualizer.backcolor || '#fff';
canvasCtx.fillRect(0, 0, width, height);
if (!recording)
return;
canvasCtx.lineWidth = config.visualizer.linewidth || 2;
canvasCtx.strokeStyle = config.visualizer.forecolor || '#f00';
canvasCtx.beginPath();
var sliceWidth = width * 1.0 / bufferLength;
var x = 0;
analyser.getByteTimeDomainData(dataArray);
for (var i = 0; i < bufferLength; i++) {
var v = dataArray[i] / 128.0;
var y = v * height / 2;
i === 0 ? canvasCtx.moveTo(x, y) : canvasCtx.lineTo(x, y);
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
}
draw();
}
function exportWav(config, callback) {
function inlineWebWorker(config, cb) {
var data = config.data.slice(0);
var sampleRate = config.sampleRate;
data = joinBuffers(data, config.recordingLength);
console.log(data);
function joinBuffers(channelBuffer, count) {
var result = new Float64Array(count);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function writeUTFBytes(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
var dataLength = data.length;
// create wav file
var buffer = new ArrayBuffer(44 + dataLength * 2);
var view = new DataView(buffer);
writeUTFBytes(view, 0, 'RIFF'); // RIFF chunk descriptor/identifier
view.setUint32(4, 44 + dataLength * 2, true); // RIFF chunk length
writeUTFBytes(view, 8, 'WAVE'); // RIFF type
writeUTFBytes(view, 12, 'fmt '); // format chunk identifier, FMT sub-chunk
view.setUint32(16, 16, true); // format chunk length
view.setUint16(20, 1, true); // sample format (raw)
view.setUint16(22, 1, true); // mono (1 channel)
view.setUint32(24, sampleRate, true); // sample rate
view.setUint32(28, sampleRate * 2, true); // byte rate (sample rate * block align)
view.setUint16(32, 2, true); // block align (channel count * bytes per sample)
view.setUint16(34, 16, true); // bits per sample
writeUTFBytes(view, 36, 'data'); // data sub-chunk identifier
view.setUint32(40, dataLength * 2, true); // data chunk length
// write the PCM samples
var index = 44;
for (var i = 0; i < dataLength; i++) {
view.setInt16(index, data[i] * 0x7FFF, true);
index += 2;
}
if (cb) {
return cb({
buffer: buffer,
view: view
});
}
postMessage({
buffer: buffer,
view: view
});
}
var webWorker = processInWebWorker(inlineWebWorker);
webWorker.onmessage = function(event) {
callback(event.data.buffer, event.data.view);
// release memory
URL.revokeObjectURL(webWorker.workerURL);
};
webWorker.postMessage(config);
}
function processInWebWorker(_function) {
var workerURL = URL.createObjectURL(new Blob([_function.toString(),
';this.onmessage = function (e) {' + _function.name + '(e.data);}'
], {
type: 'application/javascript'
}));
var worker = new Worker(workerURL);
worker.workerURL = workerURL;
console.log(worker);
return worker;
}
function renderRecording(workerURL, list) {
const worker_url = URL.createObjectURL(workerURL);
const li = document.createElement('li');
const audio = document.createElement('audio');
const anchor = document.createElement('a');
anchor.setAttribute('href', workerURL);
const now = new Date();
anchor.setAttribute(
'download',
`recording-${now.getFullYear()}-${(now.getMonth() + 1).toString().padStart(2, '0')}-${now.getDay().toString().padStart(2, '0')}--${now.getHours().toString().padStart(2, '0')}-${now.getMinutes().toString().padStart(2, '0')}-${now.getSeconds().toString().padStart(2, '0')}.webm`
);
anchor.innerText = 'Download';
audio.setAttribute('src', worker_url);
audio.setAttribute('controls', 'controls');
li.appendChild(audio);
li.appendChild(anchor);
list.appendChild(li);
}
}
and this Google Drive script:
var driveLink = require('stream');
module.exports.uploadFile = function(req) {
var file;
console.log("driveApi upload reached")
function blobToFile(req) {
file = req.body.blob
file.lastModifiedDate = new Date();
file.name = req.body.word;
return file;
}
var bufStream = new stream.PassThrough();
bufStream.end(file);
console.log(typeof 42);
var folderId = "Folder"; // Enter Folder Name
var fileMetadata = {
"name": req.body.word,
parents: [folderId]
}
var media = {
mimeType: "audio/mp3",
body: bufStream
}
drive.files.create({
auth: jwToken,
resource: fileMetadata,
media: media,
fields: "id"
}, function(err, file) {
if (err) {
console.error(err);
} else {
console.log("File Id: ", file.id);
}
console.log("driveApi upload accomplished")
});
}
I have tried a few different approaches to combining the two so that it automatically saves the .wav file to Google Drive, but it does not work.
I believe I am either not merging the two scripts the right way or I am missing something. Do I need to use v3 of the Google Drive API?
If anyone could provide some guidance on how to merge the two properly that would be greatly appreciated. Thank you!
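For what it's worth, one likely gap when merging the two: record.js sends the recording as a multipart form field (formData.append("audio", self.blob, ...)), so on the server it arrives as an uploaded file, not as req.body.blob. A rough sketch of the server side under that assumption, reusing the drive and jwToken objects from the script above (the Express/multer wiring is hypothetical):

var stream = require('stream');

// Hypothetical route wiring:
// app.post('/upload', multer({ storage: multer.memoryStorage() }).single('audio'), uploadFile);
module.exports.uploadFile = function(req, res) {
    var bufStream = new stream.PassThrough();
    bufStream.end(req.file.buffer); // multer's memory storage exposes the raw bytes here

    drive.files.create({
        auth: jwToken,
        resource: { name: req.body.word, parents: ["yourFolderId"] },
        media: { mimeType: "audio/wav", body: bufStream },
        fields: "id"
    }, function(err, file) {
        if (err) {
            console.error(err);
            return res.status(500).end();
        }
        // Depending on the googleapis version, the id lives at file.id or file.data.id.
        res.json({ id: file.id || (file.data && file.data.id) });
    });
};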
The ScriptProcessorNode doesn't work with an OfflineAudioContext.
It works in Chrome and Mozilla Firefox.
It doesn't work in Edge 25 or Safari 10.
The issue is that the onaudioprocess event is called only once when the OfflineAudioContext is processed.
Example on jsfiddle without BufferSource.
Example on jsfiddle based on MDN example with BufferSource.
console.clear();
var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var current = 0;
var buffer_size = 4096;
var buffer_length = buffer_size * 10;
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var scriptNode = audioCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNode.onaudioprocess = whiteNoise;
function whiteNoise(audioProcessingEvent) {
console.log('onaudioprocess', current);
// The output buffer contains the samples that will be modified and played
var outputBuffer = audioProcessingEvent.outputBuffer;
// Loop through the output channel
for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
var outputData = outputBuffer.getChannelData(channel);
for (var sample = 0; sample < buffer_size; sample++) {
// add noise to each output sample
outputData[sample] += ((Math.random() * 2) - 1);
}
}
current += buffer_size;
if (current > buffer_length)
scriptNode.disconnect();
}
playButton.onclick = function() {
current = 0;
scriptNode.connect(audioCtx.destination);
}
playButtonOffline.onclick = function() {
var offlineCtx = new(window.OfflineAudioContext || window.webkitOfflineAudioContext)(1, buffer_length, 48000);
var scriptNodeOffline = offlineCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNodeOffline.onaudioprocess = whiteNoise;
current = 0;
offlineCtx.oncomplete = function(e) {
console.log('rendered buffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
}
scriptNodeOffline.connect(offlineCtx.destination);
offlineCtx.startRendering();
}
<button class="play">
play
</button>
<button class="play-offline">
Render offline
</button>
Update
Clicking on Render offline many times in Chrome and Firefox produced the same output.
Clicking on Render offline many times in Safari and Edge produced different output.
Example on jsfiddle.
// Create AudioContext and buffer source
console.clear();
var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var myBuffer = null;
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var source = audioCtx.createBufferSource();
// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
// load in an audio track via XHR and decodeAudioData
function getData() {
var request = new XMLHttpRequest();
request.open('GET', 'https://s3-ap-northeast-1.amazonaws.com/storage.cowrite.decodeapps.io/Materials/Media/Audio/59f2b85dd3aed-20171027-043853.mp3', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
myBuffer = buffer;
source.buffer = myBuffer;
},
function(e) {
"Error with decoding audio data" + e.err
});
}
request.send();
}
function addNoise(audioProcessingEvent) {
console.log("onaudioprocess")
// The input buffer is the song we loaded earlier
var inputBuffer = audioProcessingEvent.inputBuffer;
// The output buffer contains the samples that will be modified and played
var outputBuffer = audioProcessingEvent.outputBuffer;
// Loop through the output channels (in this case there is only one)
for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
var inputData = inputBuffer.getChannelData(channel);
var outputData = outputBuffer.getChannelData(channel);
// Loop through the 4096 samples
for (var sample = 0; sample < inputBuffer.length; sample++) {
// make output equal to the same as the input
outputData[sample] = inputData[sample];
// add noise to each output sample
outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
}
}
}
// Give the node a function to process audio events
scriptNode.onaudioprocess = addNoise;
getData();
// wire up play button
playButton.onclick = function() {
source.connect(scriptNode);
scriptNode.connect(audioCtx.destination);
source.start();
}
// When the buffer source stops playing, disconnect everything
source.onended = function() {
source.disconnect(scriptNode);
scriptNode.disconnect(audioCtx.destination);
}
// When the buffer source stops playing, disconnect everything
// wire up play button
playButtonOffline.onclick = function() {
var offlineCtx = new(window.OfflineAudioContext || window.webkitOfflineAudioContext)(2, myBuffer.length, myBuffer.sampleRate);
var scriptNodeOffline = offlineCtx.createScriptProcessor(4096, 1, 1);
var sourceOffline = offlineCtx.createBufferSource();
sourceOffline.buffer = myBuffer;
sourceOffline.onended = function() {
console.log('sourceOffline.onended');
sourceOffline.disconnect(scriptNodeOffline);
scriptNodeOffline.disconnect(offlineCtx.destination);
}
scriptNodeOffline.onaudioprocess = addNoise;
sourceOffline.connect(scriptNodeOffline);
scriptNodeOffline.connect(offlineCtx.destination);
sourceOffline.start();
offlineCtx.oncomplete = function(e) {
console.log('renderedBuffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
listenRendered(e.renderedBuffer);
};
offlineCtx.startRendering();
}
var _audioCtx = new(window.AudioContext || window.webkitAudioContext)();
function listenRendered(buffer) {
var _source = _audioCtx.createBufferSource();
_source.buffer = buffer;
_source.connect(_audioCtx.destination);
_source.start();
}
<button class="play">
play
</button>
<button class="play-offline">
Render offline
</button>
These are bugs in Safari and Edge. A ScriptProcessorNode should work fine in an offline context. File bugs with Safari and Edge.
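As an aside, since ScriptProcessorNode is deprecated, the modern route for this kind of per-sample processing is an AudioWorklet, which also works with an OfflineAudioContext in browsers that support it. A rough sketch of the white-noise example above as a worklet (the 'noise' name and file layout are illustrative):

// noise-processor.js - runs on the audio rendering thread
class NoiseProcessor extends AudioWorkletProcessor {
    process(inputs, outputs) {
        var output = outputs[0];
        for (var channel = 0; channel < output.length; channel++) {
            var outputData = output[channel];
            for (var sample = 0; sample < outputData.length; sample++) {
                outputData[sample] = Math.random() * 2 - 1; // white noise
            }
        }
        return true; // keep the processor alive
    }
}
registerProcessor('noise', NoiseProcessor);

// main thread
var offlineCtx = new OfflineAudioContext(1, 48000, 48000);
offlineCtx.audioWorklet.addModule('noise-processor.js').then(function() {
    var node = new AudioWorkletNode(offlineCtx, 'noise');
    node.connect(offlineCtx.destination);
    return offlineCtx.startRendering();
}).then(function(renderedBuffer) {
    console.log('rendered', renderedBuffer.length, 'samples');
});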
I'm currently implementing an upload for files. Because I have to handle huge files here and there, I've started to slice files and send them in 1 MB chunks, which works great as long as files are smaller than roughly 500 MB. After that it seems that memory isn't freed anymore, seemingly at random, and I can't figure out what I'm missing here.
Prepare chunks
var sliceCount = 0;
var sendCount = 0;
var fileID = generateUUID();
var maxChunks = 0;
var userNotified = false;
function parseFile(file)
{
var fileSize = file.size;
var chunkSize = 1024 * 1024;//64 * 1024; // bytes
var offset = 0;
var self = this; // we need a reference to the current object
var chunkReaderBlock = null;
var numberOfChunks = fileSize / chunkSize;
maxChunks = Math.ceil(numberOfChunks);
// gets called if chunk is read into memory
var readEventHandler = function (evt)
{
if (evt.target.error == null) {
offset += evt.target.result.byteLength;
sendChunkAsBinary(evt.target.result);
}
else
{
console.log("Read error: " + evt.target.error);
return;
}
if (offset >= fileSize) {
console.log("Done reading file");
return;
}
// off to the next chunk
chunkReaderBlock(offset, chunkSize, file);
}
chunkReaderBlock = function (_offset, length, _file)
{
var r = new FileReader();
var blob = _file.slice(_offset, length + _offset);
sliceCount++;
console.log("Slicecount: " + sliceCount);
r.onload = readEventHandler;
r.readAsArrayBuffer(blob);
blob = null;
r = null;
}
// now let's start the read with the first block
chunkReaderBlock(offset, chunkSize, file);
}
Send Chunks
function sendChunkAsBinary(chunk)
{
var progressbar = $("#progressbar"), bar = progressbar.find('.uk-progress-bar');
// create XHR instance
var xhr = new XMLHttpRequest();
// send the file through POST
xhr.open("POST", 'upload.php', true);
var progressHandler = function (e)
{
// get percentage of how much of the current file has been sent
var position = e.loaded || e.position;
var total = e.total || e.totalSize;
var percentage = Math.round((sendCount / maxChunks) * 100);
// set bar width to keep track of progress
bar.css("width", percentage + "%").text(percentage + "%");
}
// let's track upload progress
var eventSource = xhr.upload || xhr;
eventSource.addEventListener("progress", progressHandler);
// state change observer - we need to know when and if the file was successfully uploaded
xhr.onreadystatechange = function ()
{
if (xhr.readyState == 4)
{
if (xhr.status == 200)
{
eventSource.removeEventListener("progress", progressHandler);
if (sendCount == maxChunks && !userNotified)
{
userNotified = true;
notifyUserSuccess("Datei hochgeladen!"); // German: "File uploaded!"
setTimeout(function ()
{
progressbar.addClass("uk-invisible");
bar.css("width", "0%").text("0%");
}, 250);
updateDocList();
}
}
else
{
notifyUser("Fehler beim hochladen der Datei!");
}
}
};
var blob;
if (typeof window.Blob == "function") {
blob = new Blob([chunk]);
} else {
var bb = new (window.MozBlobBuilder || window.WebKitBlobBuilder || window.BlobBuilder)();
bb.append(chunk);
blob = bb.getBlob();
}
sendCount++;
var formData = new FormData();
formData.append("chunkNumber", sendCount);
formData.append("maxChunks", maxChunks);
formData.append("fileID", fileID);
formData.append("chunkpart", blob);
xhr.send(formData);
progressbar.removeClass("uk-invisible");
console.log("Sendcount: " + sendCount);
}
If I attach the debugger in Visual Studio 2015, it takes a bit, but soon I get an OutOfMemoryException in the send function at exactly this line: blob = new Blob([chunk]);. It's always the same line where the exception occurs.
As soon as the exception happens I get POST [...]/upload.php net::ERR_FILE_NOT_FOUND, yet I still receive the chunks in my PHP file.
Here's a timeline graph of my error.
What I don't understand: I'm not able to see memory increasing in the Task Manager (a few MB of course, but nothing close to the 16 GB of RAM I have).
So can anyone tell me where this leak comes from? What am I missing?
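One thing that stands out in the code above: chunkReaderBlock reads the next chunk as soon as the previous read finishes, while every xhr.send is fire-and-forget. If the disk reads outpace the network, all the sliced blobs pile up in the browser's send queue at once, which would match the symptoms. A sketch of one possible fix, chaining the next read to the completion of the previous upload (same globals and upload.php endpoint as above):

function sendChunkAsBinary(chunk, onDone) {
    var xhr = new XMLHttpRequest();
    xhr.open("POST", "upload.php", true);
    xhr.onreadystatechange = function() {
        if (xhr.readyState === 4) {
            // Only continue once this chunk is off the wire, so at most
            // one chunk is held in memory at a time.
            onDone(xhr.status === 200);
        }
    };
    sendCount++;
    var formData = new FormData();
    formData.append("chunkNumber", sendCount);
    formData.append("maxChunks", maxChunks);
    formData.append("fileID", fileID);
    formData.append("chunkpart", new Blob([chunk]));
    xhr.send(formData);
}

// inside readEventHandler, instead of recursing immediately:
// sendChunkAsBinary(evt.target.result, function(ok) {
//     if (ok && offset < fileSize) chunkReaderBlock(offset, chunkSize, file);
// });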
I'm trying to build an amplifier using a ScriptProcessorNode (yes, I know it is deprecated, and yes, I'm fully aware of createGainNode). The reason is that createGainNode is better suited to attenuating gain than boosting it. I'm an audio engineer and I want to be able to clip my audio. I built a static version that handles a gain of at least 150 (as long as you limit the value of the floats after the boost). However, this new version, which is controlled by a range input, just cuts the volume off completely.
I'm not getting any errors in the console; why is the volume cutting out?
<body>
<div id="button" style="background:#000;width:50px;height:50px;" onclick="connect()"></div>
<input id="range" type="range" min="1" max="128" value="1" oninput="myFunction(value)"/>
<script>
var context,
soundSource,
scriptNode,
buffer,
value = 1;
if(typeof AudioContext !== 'undefined') {
context = new AudioContext();
}else if(typeof webkitAudioContext !== 'undefined') {
context = new webkitAudioContext();
}else {
throw new Error('AudioContext not supported. :(');
}
function xhr() {
var request = new XMLHttpRequest();
request.open("GET", 'bass.wav', true);
request.responseType = 'arraybuffer';
request.onload = function() {
context.decodeAudioData(request.response, function onSuccess(decodedData) {
buffer = decodedData;
}, function onFailure() {
alert("Decoding the audio buffer failed");
});
}
request.send();
}
function connect() {
scriptNode = context.createScriptProcessor(256, 2, 2);
scriptNode.onaudioprocess = function(audioProcessingEvent) {
var input = audioProcessingEvent.inputBuffer;
var output = audioProcessingEvent.outputBuffer;
for(var channel = 0; channel < datum.numberOfChannels; channel++) {
var I = input.getChannelData(channel);
var O = output.getChannelData(channel);
for(var i = 0; i < input.length; i++) {
O[i] = I[i];
O[i] *= value;
}
}
}
soundSource = context.createBufferSource();
soundSource.buffer = buffer;
soundSource.connect(scriptNode);
scriptNode.connect(context.destination);
soundSource.start();
}
connect();
function myFunction(val) {
value = val;
soundSource.disconnect();
scriptNode.disconnect();
soundSource.connect(scriptNode);
scriptNode.connect(context.destination);
}
</script>
EDIT
Instead of trying to build my own amplifier, I should have just used the createGainNode because it works exactly as intended.
EDIT 2
After playing with the createGainNode more, I figured out the original problem. In Chrome, the createGainNode works fine, but in Firefox it didn't hold up as well as my amplification code. It will amplify the quieter parts of the signal, but when the louder parts come in, the audio drops. Is there any workaround for this? My amplification code:
function connect() {
soundSource = context.createBufferSource();
soundSource.buffer = buffer;
volume = context.createGain();
volume.gain.value = 1500;
soundSource.connect(volume);
volume.connect(context.destination);
soundSource.start();
}
function amplify() {
soundSource.stop();
var left = new Float32Array;
var right = new Float32Array;
left = buffer.getChannelData(0);
right = buffer.getChannelData(1);
for(var i = 0; i < left.length; i++) {
left[i] = left[i] * 1500;
if(left[i] > 0) {
left[i] = 128;
}
if(left[i] < 0) {
left[i] = -128;
}
right[i] = right[i] * 1500;
if(right[i] > 0) {
right[i] = 128;
}
if(right[i] < 0) {
right[i] = -128;
}
}
buffer.copyToChannel(left,0);
buffer.copyToChannel(right,1);
var amp = context.createBufferSource();
amp.buffer = buffer;
amp.connect(context.destination);
amp.start();
}
EDIT 3
I've opened a bug report on bugzilla here: https://bugzilla.mozilla.org/show_bug.cgi?id=1233821
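A possible workaround, if the goal is deliberate hard clipping at a large boost: a WaveShaperNode with a hard-clip curve keeps the processing inside the audio graph and sidesteps both the ScriptProcessorNode and browser-specific gain behavior. A sketch reusing the context, buffer, and 1500x gain from above (the curve resolution is arbitrary):

function makeHardClipCurve(gain, samples) {
    var curve = new Float32Array(samples);
    for (var i = 0; i < samples; i++) {
        var x = (i * 2) / (samples - 1) - 1; // map index to [-1, 1]
        curve[i] = Math.max(-1, Math.min(1, x * gain)); // boost, then clip
    }
    return curve;
}

function connectClipped() {
    var source = context.createBufferSource();
    var shaper = context.createWaveShaper();
    source.buffer = buffer;
    shaper.curve = makeHardClipCurve(1500, 4096);
    source.connect(shaper);
    shaper.connect(context.destination);
    source.start();
}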
I am trying to set up some web audio to load/play multiple sound sources simultaneously.
The sounds are being loaded for now and play is triggered through a button input.
My problem is, I want all the sounds to run through one BiquadFilter (in this case type: 0, a LOWPASS filter).
I believe I have created the filter correctly (in two different places, not sure which; look at the attached code), but I cannot get a range input to control the frequency; something isn't communicating very well and I'm completely lost.
Also, around the same topic, I want each individual sound to run through its own independent gainNode (volume control); this again will be changed via a range input.
Basically there will be 6 audio files, running through their own gainNodes and then coming together to go through a LOWPASS filter before the destination (i.e. the speakers).
I'm hoping to eventually run each sound through individual pannerNodes too, but right now I'm close to giving up on the project altogether.
Below is my code (like I said before, the button is triggering all the sounds, but the filter is a BIG problem):
HTML:
<body>
<div id="startbtn">
<p><input type="button" onClick="tracks.toggle();">PLAY!</p>
</div> <!-- startbtn div -->
<div id="frequency">
<p><input type="range" id="freq1" min="0" max="1" step="0.01" value="1" onchange="sound.changeFrequency(this);" style="width:180px; background-color:#FFF;"> Frequency</p>
</p>
</div>
<script>
var tracks = new SongTracks();
var sound = new playSound();
</script>
</body>
JAVASCRIPT:
var context = new webkitAudioContext();
var myAudioAnalyser;
function init() {
if('webkitAudioContext' in window) {
myAudioContext = new webkitAudioContext();
// an analyser is used for the spectrum
myAudioAnalyser = myAudioContext.createAnalyser();
myAudioAnalyser.smoothingTimeConstant = 0.85;
myAudioAnalyser.connect(myAudioContext.destination);
fetchSounds();
};
};
// shim layer with setTimeout fallback
window.requestAnimFrame = (function(){
return window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function( callback ){
window.setTimeout(callback, 1000 / 60);
};
})();
function playSound(buffer, time) {
var source = context.createBufferSource();
source.buffer = buffer;
var filter = context.createBiquadFilter(); ///////////////// HERE
filter.type = filter.LOWPASS;
filter.frequency.value = 5000;
source.connect(filter);
filter.connect(context.destination);
source.start(time);
this.filter = filter;
};
function loadSounds(obj, soundMap, callback) {
var names = []
var paths = []
for (var name in soundMap) {
var path = soundMap[name];
names.push(name);
paths.push(path);
}
bufferLoader = new BufferLoader(context, paths, function(bufferList) {
for (var i = 0; i < bufferList.length; i++) {
var buffer = bufferList[i];
var name = names[i];
obj[name] = buffer;
}
if (callback) {
callback();
}
});
bufferLoader.load();
};
function BufferLoader(context, urlList, callback) {
this.context = context;
this.urlList = urlList;
this.onload = callback;
this.bufferList = new Array();
this.loadCount = 0;
}
BufferLoader.prototype.loadBuffer = function(url, index) {
// Load buffer asynchronously
var request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
var loader = this;
request.onload = function() {
// Asynchronously decode the audio file data in request.response
loader.context.decodeAudioData(
request.response,
function(buffer) {
if (!buffer) {
alert('error decoding file data: ' + url);
return;
}
loader.bufferList[index] = buffer;
if (++loader.loadCount == loader.urlList.length)
loader.onload(loader.bufferList);
},
function(error) {
console.error('decodeAudioData error', error);
}
);
}
request.onerror = function() {
alert('BufferLoader: XHR error');
}
request.send();
};
BufferLoader.prototype.load = function() {
for (var i = 0; i < this.urlList.length; ++i)
this.loadBuffer(this.urlList[i], i);
};
var SongTracks = function() {
loadSounds(this, {
vocals: 'tracks/vocals.mp3',
guitar: 'tracks/guitar.mp3',
piano: 'tracks/piano.mp3'
});
};
var filter;
SongTracks.prototype.play = function() {
playSound(this.vocals, 0);
playSound(this.guitar, 0);
playSound(this.piano, 0);
///////////////////////////////////////////////////////////// OR HERE
var source1 = context.createBufferSource();
source1.buffer = this.buffer
source1 = bufferList[0];
var filter = context.createBiquadFilter();
filter.type = filter.LOWPASS;
filter.frequency.value = 5000;
source1.connect(filter);
filter.connect(context.destination);
this.filter = filter;
///////////////////////////////////////////////////////////////////// TO HERE?
};
SongTracks.prototype.stop = function() {
this.source.stop(0);
};
SongTracks.prototype.toggle = function() {
this.isPlaying ? this.stop() : this.play();
this.isPlaying = !this.isPlaying;
};
/* SongTracks.prototype.changeFrequency = function(element) {
var minValue = 40;
var maxValue = context.sampleRate / 2;
var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
this.filter.frequency.value = maxValue * multiplier;
}; */
playSound.prototype.changeFrequency = function(element) {
var minValue = 40;
var maxValue = context.sampleRate / 2;
var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
this.filter.frequency.value = maxValue * multiplier;
};
</script>
As you can see from my notes etc., I'm very confused and have kind of hit a brick wall.
I've seen code which differentiates the audio files, something like:
var source1 = context.createBufferSource();
var source2 = context.createBufferSource();
var source3 = context.createBufferSource();
var source4 = context.createBufferSource();
source1.buffer = bufferList[0];
source2.buffer = bufferList[1];
source3.buffer = bufferList[2];
source4.buffer = bufferList[3];
But I have no idea. Good luck.
You should probably simply pass the node to connect to into playSound, and then pass it the FilterNode.
Inside your playSound is the wrong place to create the BiquadFilter - you'll end up creating N of them, one for each playing sound, and you only want one.
You want something like:
HTML file the same, except:
<input type="range" id="freq1" min="0" max="1" step="0.01" value="1" onchange="changeFilterFrequency(this);" style="width:180px; background-color:#FFF;"> Frequency</p>
JS:
function playSound(buffer, outputNode, time) {
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(outputNode);
    source.start(time);
}
var globalFilter = null; // one global filter
SongTracks.prototype.play = function() {
    globalFilter = context.createBiquadFilter(); // no "var" here, so we assign the single global filter
    globalFilter.type = 'lowpass'; // modern browsers take a string; older WebKit exposed numeric constants
    globalFilter.frequency.value = 5000;
    globalFilter.connect(context.destination);
    playSound(this.vocals, globalFilter, 0);
    playSound(this.guitar, globalFilter, 0);
    playSound(this.piano, globalFilter, 0);
};
function changeFilterFrequency(element) {
    var minValue = 40;
    var maxValue = context.sampleRate / 2;
    var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
    var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
    globalFilter.frequency.value = maxValue * multiplier;
}
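To get the per-sound volume controls the question asks for, the same pattern extends naturally: give each sound its own GainNode and connect all of them into the one shared filter. A sketch along those lines (storing the gain nodes on the SongTracks instance so range inputs can reach them is one option):

function playSound(buffer, outputNode, time) {
    var source = context.createBufferSource();
    var gain = context.createGain(); // one gain node per sound
    source.buffer = buffer;
    source.connect(gain);
    gain.connect(outputNode); // all the gains meet at the shared lowpass filter
    source.start(time);
    return gain;
}

SongTracks.prototype.play = function() {
    globalFilter = context.createBiquadFilter();
    globalFilter.type = 'lowpass';
    globalFilter.frequency.value = 5000;
    globalFilter.connect(context.destination);
    this.vocalsGain = playSound(this.vocals, globalFilter, 0);
    this.guitarGain = playSound(this.guitar, globalFilter, 0);
    this.pianoGain = playSound(this.piano, globalFilter, 0);
};

// e.g. wired to a range input (0..1):
// <input type="range" min="0" max="1" step="0.01" oninput="tracks.vocalsGain.gain.value = this.value">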