I'm trying to build an amplifier using a ScriptProcessorNode (yes, I know it's deprecated, and yes, I'm fully aware of createGainNode). The reason is that createGainNode is better suited to attenuating gain than boosting it. I'm an audio engineer and I want to be able to clip my audio. I built a static version that handles a gain of at least 150 (as long as you limit the value of the floats after the boost). However, this new version, which is controlled by a range input, just cuts the volume off completely.
I'm not getting any errors in the console; why is the volume cutting out?
<body>
  <div id="button" style="background:#000;width:50px;height:50px;" onclick="connect()"></div>
  <input id="range" type="range" min="1" max="128" value="1" oninput="myFunction(value)"/>
  <script>
    var context,
        soundSource,
        scriptNode,
        buffer,
        value = 1;

    if (typeof AudioContext !== 'undefined') {
      context = new AudioContext();
    } else if (typeof webkitAudioContext !== 'undefined') {
      context = new webkitAudioContext();
    } else {
      throw new Error('AudioContext not supported. :(');
    }

    function xhr() {
      var request = new XMLHttpRequest();
      request.open("GET", 'bass.wav', true);
      request.responseType = 'arraybuffer';
      request.onload = function() {
        context.decodeAudioData(request.response, function onSuccess(decodedData) {
          buffer = decodedData;
        }, function onFailure() {
          alert("Decoding the audio buffer failed");
        });
      }
      request.send();
    }

    function connect() {
      scriptNode = context.createScriptProcessor(256, 2, 2);
      scriptNode.onaudioprocess = function(audioProcessingEvent) {
        var input = audioProcessingEvent.inputBuffer;
        var output = audioProcessingEvent.outputBuffer;
        for (var channel = 0; channel < datum.numberOfChannels; channel++) {
          var I = input.getChannelData(channel);
          var O = output.getChannelData(channel);
          for (var i = 0; i < input.length; i++) {
            O[i] = I[i];
            O[i] *= value;
          }
        }
      }
      soundSource = context.createBufferSource();
      soundSource.buffer = buffer;
      soundSource.connect(scriptNode);
      scriptNode.connect(context.destination);
      soundSource.start();
    }
    connect();

    function myFunction(val) {
      value = val;
      soundSource.disconnect();
      scriptNode.disconnect();
      soundSource.connect(scriptNode);
      scriptNode.connect(context.destination);
    }
  </script>
EDIT
Instead of trying to build my own amplifier, I should have just used the createGainNode because it works exactly as intended.
EDIT 2
After playing with createGainNode more, I figured out the original problem. In Chrome, createGainNode works fine, but in Firefox it doesn't hold up as well as my amplification code: it amplifies the quieter parts of the signal, but when the louder parts come in, the audio drops out. Is there any workaround for this? My amplification code:
function connect() {
  soundSource = context.createBufferSource();
  soundSource.buffer = buffer;
  volume = context.createGain();
  volume.gain.value = 1500;
  soundSource.connect(volume);
  volume.connect(context.destination);
  soundSource.start();
}

function amplify() {
  soundSource.stop();
  var left = buffer.getChannelData(0);
  var right = buffer.getChannelData(1);
  for (var i = 0; i < left.length; i++) {
    left[i] = left[i] * 1500;
    if (left[i] > 0) {
      left[i] = 128;
    }
    if (left[i] < 0) {
      left[i] = -128;
    }
    right[i] = right[i] * 1500;
    if (right[i] > 0) {
      right[i] = 128;
    }
    if (right[i] < 0) {
      right[i] = -128;
    }
  }
  buffer.copyToChannel(left, 0);
  buffer.copyToChannel(right, 1);
  var amp = context.createBufferSource();
  amp.buffer = buffer;
  amp.connect(context.destination);
  amp.start();
}
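One workaround I could try (sketch only, not code I've tested): do the clipping with a WaveShaperNode after the gain node instead of rewriting the buffer by hand. Within ±1 the three-point curve below is the identity, and anything outside that range is clamped to the curve's endpoints, so a signal boosted by 1500 hard-clips to ±1. The connectWithShaper name is just for illustration.

// Sketch: hard clipping via WaveShaperNode instead of editing the buffer in place.
function connectWithShaper() {
  var soundSource = context.createBufferSource();
  soundSource.buffer = buffer;

  var volume = context.createGain();
  volume.gain.value = 1500;                 // same boost as connect()

  var shaper = context.createWaveShaper();
  // Identity curve inside [-1, 1]; inputs outside that range clamp to ±1,
  // so after a 1500x boost nearly every sample hard-clips.
  shaper.curve = new Float32Array([-1, 0, 1]);

  soundSource.connect(volume);
  volume.connect(shaper);                   // clip after the boost
  shaper.connect(context.destination);
  soundSource.start();
}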
EDIT 3
I've opened a bug report on bugzilla here: https://bugzilla.mozilla.org/show_bug.cgi?id=1233821
Related
I have the following HTML:
<script type="text/javascript">
  window.onload = function() {
    var volumeBars = {
      mono: document.getElementById("monoFill")
    };
    document.getElementById("open-file").onchange = function(evt) {
      var file = evt.target.files[0];
      var reader = new FileReader();
      reader.onload = function(e) {
        playSound(e.target.result);
        console.log(e.target.result);
      }
      reader.readAsArrayBuffer(file);
    }
    var context = new AudioContext();
    function playSound(arraybuffer) {
      context.close();
      context = new AudioContext();
      var source = context.createBufferSource();
      context.decodeAudioData(arraybuffer, function(buffer) {
        source.buffer = buffer;
      });
      console.log(arraybuffer)
      var analyser = context.createAnalyser();
      analyser.smoothingTimeConstant = .9;
      analyser.fftSize = 1024;
      jsNode = context.createScriptProcessor(2048, 1, 1);
      jsNode.onaudioprocess = function() {
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        volumeBars.mono.style.height = Math.average(array) * 2 + "px";
        volumeBars.mono.innerHTML = Math.floor(Math.average(array));
        console.log(volumeBars.mono.innerHTML);
      }
      source.connect(analyser);
      source.connect(context.destination);
      jsNode.connect(context.destination);
      analyser.connect(jsNode);
      source.start();
    }
    Math.average = function(arguments) {
      var numbers;
      if (arguments[0] instanceof Array) {
        numbers = arguments[0];
      } else if (typeof arguments[0] == "number") {
        numbers = arguments;
      }
      var sum = 0;
      var average = 0;
      for (var i = 0; i < numbers.length; i++) {
        sum += numbers[i];
      }
      average = sum / numbers.length;
      return average;
    }
  }
</script>
<div id="container">
  <div class="bar" id="mono">
    <div class="fill" id="monoFill"></div>
  </div>
</div>
<input type="file" id="open-file" accept="audio/*">
The issue I am facing is that I want to replace the block
var file = evt.target.files[0];
with a local file that doesn't need to be loaded every time. I have been unable to find a method that encodes the file as an ArrayBuffer. I'm sure this is a very amateur question with an obvious answer, but any assistance is welcome.
Since what you want is to access a file on your server, you simply need to make an Ajax request with the response set to an ArrayBuffer; there's no need for a FileReader here.
Using the fetch API, it would look like:
const a_ctx = new (window.AudioContext || window.webkitAudioContext)();

fetch('https://dl.dropboxusercontent.com/s/1cdwpm3gca9mlo0/kick.mp3')
  .then(resp => resp.arrayBuffer()) // request as ArrayBuffer
  .then(buf => a_ctx.decodeAudioData(buf))
  .then(a_buf => {
    btn.onclick = e => {
      let node = a_ctx.createBufferSource();
      node.buffer = a_buf;
      node.connect(a_ctx.destination);
      node.start(0);
    };
    btn.disabled = false;
  });
<button id="btn" disabled>play</button>
<!-- Promising decodeAudioData for Safari https://github.com/mohayonao/promise-decode-audio-data/ [MIT] -->
<script src="https://cdn.rawgit.com/mohayonao/promise-decode-audio-data/eb4b1322/build/promise-decode-audio-data.min.js"></script>
And using the XMLHttpRequest API:
var a_ctx = new (window.AudioContext || window.webkitAudioContext)();
var xhr = new XMLHttpRequest();
xhr.open('get', 'https://dl.dropboxusercontent.com/s/1cdwpm3gca9mlo0/kick.mp3');
xhr.responseType = 'arraybuffer'; // request as ArrayBuffer
xhr.onload = function() {
  var buf = xhr.response;
  a_ctx.decodeAudioData(buf, function(a_buf) {
    btn.onclick = function(e) {
      let node = a_ctx.createBufferSource();
      node.buffer = a_buf;
      node.connect(a_ctx.destination);
      node.start(0);
    };
    btn.disabled = false;
  });
};
xhr.send();
<button id="btn" disabled>play</button>
The ScriptProcessorNode doesn't work with an OfflineAudioContext.
It works in Chrome and Firefox, but not in Edge 25 or Safari 10.
The issue is that the onaudioprocess event is called only once while the OfflineAudioContext is being rendered.
Example on jsfiddle without a BufferSource.
Example on jsfiddle based on the MDN example with a BufferSource.
console.clear();

var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var current = 0;
var buffer_size = 4096;
var buffer_length = buffer_size * 10;

var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var scriptNode = audioCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNode.onaudioprocess = whiteNoise;

function whiteNoise(audioProcessingEvent) {
  console.log('onaudioprocess', current);
  // The output buffer contains the samples that will be modified and played
  var outputBuffer = audioProcessingEvent.outputBuffer;
  // Loop through the output channels
  for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
    var outputData = outputBuffer.getChannelData(channel);
    for (var sample = 0; sample < buffer_size; sample++) {
      // add noise to each output sample
      outputData[sample] += ((Math.random() * 2) - 1);
    }
  }
  current += buffer_size;
  if (current > buffer_length)
    scriptNode.disconnect();
}

playButton.onclick = function() {
  current = 0;
  scriptNode.connect(audioCtx.destination);
}

playButtonOffline.onclick = function() {
  var offlineCtx = new (window.OfflineAudioContext || window.webkitOfflineAudioContext)(1, buffer_length, 48000);
  var scriptNodeOffline = offlineCtx.createScriptProcessor(buffer_size, 1, 1);
  scriptNodeOffline.onaudioprocess = whiteNoise;
  current = 0;
  offlineCtx.oncomplete = function(e) {
    console.log('rendered buffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
  }
  scriptNodeOffline.connect(offlineCtx.destination);
  offlineCtx.startRendering();
}
<button class="play">play</button>
<button class="play-offline">Render offline</button>
Update
Clicking Render offline many times in Chrome and Firefox produced the same output each time.
Clicking Render offline many times in Safari and Edge produced different output each time.
Example on jsfiddle.
// Create AudioContext and buffer source
console.clear();

var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var myBuffer = null;

var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source = audioCtx.createBufferSource();
// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);

// load in an audio track via XHR and decodeAudioData
function getData() {
  request = new XMLHttpRequest();
  request.open('GET', 'https://s3-ap-northeast-1.amazonaws.com/storage.cowrite.decodeapps.io/Materials/Media/Audio/59f2b85dd3aed-20171027-043853.mp3', true);
  request.responseType = 'arraybuffer';
  request.onload = function() {
    var audioData = request.response;
    audioCtx.decodeAudioData(audioData, function(buffer) {
      myBuffer = buffer;
      source.buffer = myBuffer;
    },
    function(e) {
      console.error("Error with decoding audio data" + e.err);
    });
  }
  request.send();
}

function addNoise(audioProcessingEvent) {
  console.log("onaudioprocess")
  // The input buffer is the song we loaded earlier
  var inputBuffer = audioProcessingEvent.inputBuffer;
  // The output buffer contains the samples that will be modified and played
  var outputBuffer = audioProcessingEvent.outputBuffer;
  // Loop through the output channels (in this case there is only one)
  for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
    var inputData = inputBuffer.getChannelData(channel);
    var outputData = outputBuffer.getChannelData(channel);
    // Loop through the 4096 samples
    for (var sample = 0; sample < inputBuffer.length; sample++) {
      // make output equal to the same as the input
      outputData[sample] = inputData[sample];
      // add noise to each output sample
      outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
    }
  }
}

// Give the node a function to process audio events
scriptNode.onaudioprocess = addNoise;
getData();

// wire up play button
playButton.onclick = function() {
  source.connect(scriptNode);
  scriptNode.connect(audioCtx.destination);
  source.start();
}

// When the buffer source stops playing, disconnect everything
source.onended = function() {
  source.disconnect(scriptNode);
  scriptNode.disconnect(audioCtx.destination);
}

// wire up the offline render button
playButtonOffline.onclick = function() {
  var offlineCtx = new (window.OfflineAudioContext || window.webkitOfflineAudioContext)(2, myBuffer.length, myBuffer.sampleRate);
  var scriptNodeOffline = offlineCtx.createScriptProcessor(4096, 1, 1);
  var sourceOffline = offlineCtx.createBufferSource();
  sourceOffline.buffer = myBuffer;
  sourceOffline.onended = function() {
    console.log('sourceOffline.onended');
    sourceOffline.disconnect(scriptNodeOffline);
    scriptNodeOffline.disconnect(offlineCtx.destination);
  }
  scriptNodeOffline.onaudioprocess = addNoise;
  sourceOffline.connect(scriptNodeOffline);
  scriptNodeOffline.connect(offlineCtx.destination);
  sourceOffline.start();
  offlineCtx.oncomplete = function(e) {
    console.log('renderedBuffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
    listenRendered(e.renderedBuffer);
  };
  offlineCtx.startRendering();
}

var _audioCtx = new (window.AudioContext || window.webkitAudioContext)();

function listenRendered(buffer) {
  var _source = _audioCtx.createBufferSource();
  _source.buffer = buffer;
  _source.connect(_audioCtx.destination);
  _source.start();
}
<button class="play">play</button>
<button class="play-offline">Render offline</button>
These are bugs in Safari and Edge. A ScriptProcessorNode should work fine in an offline context. File bugs with Safari and Edge.
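If you need something that works today, one possible workaround (a sketch, assuming your per-sample processing doesn't depend on graph state): skip the offline graph entirely and apply the same transform directly to the decoded AudioBuffer's channel data, which needs no ScriptProcessorNode at all.

// Sketch: apply the same noise as addNoise() straight to the decoded buffer,
// bypassing OfflineAudioContext (and thus the Safari/Edge bug).
function addNoiseOffline(buffer) {
  for (var channel = 0; channel < buffer.numberOfChannels; channel++) {
    var data = buffer.getChannelData(channel); // live Float32Array view of the channel
    for (var sample = 0; sample < data.length; sample++) {
      data[sample] += ((Math.random() * 2) - 1) * 0.2;
    }
  }
  return buffer;
}

// e.g. listenRendered(addNoiseOffline(myBuffer));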
I need to use ffmpeg in my JavaScript/HTML5 project, which lets the user select the format the audio is saved in. I don't know anything about ffmpeg, and despite lots of research I still don't know how to use it in my project. I found an example (https://github.com/sopel39/audioconverter.js), but the problem is how to add ffmpeg.js, which is 8 MB, to my project. If someone can help me, I'll be very thankful.
Here is my full code.
The JavaScript page:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;

if (!navigator.getUserMedia)
  navigator.getUserMedia = navigator.getUserMedia ||
                           navigator.webkitGetUserMedia ||
                           navigator.mozGetUserMedia ||
                           navigator.msGetUserMedia;

if (navigator.getUserMedia) {
  navigator.getUserMedia({audio: true}, success, function(e) {
    alert('Error capturing audio.');
  });
} else alert('getUserMedia not supported in this browser.');
function getVal(value) {
  // if R is pressed, we start recording
  if (value == "record") {
    recording = true;
    // reset the buffers for the new recording
    leftchannel.length = rightchannel.length = 0;
    recordingLength = 0;
    document.getElementById('output').innerHTML = "Recording now...";
  // if S is pressed, we stop the recording and package the WAV file
  } else if (value == "stop") {
    // we stop recording
    recording = false;
    document.getElementById('output').innerHTML = "Building wav file...";
    // we flatten the left and right channels down
    var leftBuffer = mergeBuffers(leftchannel, recordingLength);
    var rightBuffer = mergeBuffers(rightchannel, recordingLength);
    // we interleave both channels together
    var interleaved = interleave(leftBuffer, rightBuffer);
    var buffer = new ArrayBuffer(44 + interleaved.length * 2);
    var view = new DataView(buffer);
    // RIFF chunk descriptor
    writeUTFBytes(view, 0, 'RIFF');
    view.setUint32(4, 44 + interleaved.length * 2, true);
    writeUTFBytes(view, 8, 'WAVE');
    // FMT sub-chunk
    writeUTFBytes(view, 12, 'fmt ');
    view.setUint32(16, 16, true);
    view.setUint16(20, 1, true);
    // stereo (2 channels)
    view.setUint16(22, 2, true);
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, sampleRate * 4, true);
    view.setUint16(32, 4, true);
    view.setUint16(34, 16, true);
    // data sub-chunk
    writeUTFBytes(view, 36, 'data');
    view.setUint32(40, interleaved.length * 2, true);
    var lng = interleaved.length;
    var index = 44;
    var volume = 1;
    for (var i = 0; i < lng; i++) {
      view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
      index += 2;
    }
    var blob = new Blob([view], { type: 'audio/wav' });
    // let's save it locally
    document.getElementById('output').innerHTML = 'Handing off the file now...';
    var url = (window.URL || window.webkitURL).createObjectURL(blob);
    var li = document.createElement('li');
    var au = document.createElement('audio');
    var hf = document.createElement('a');
    au.controls = true;
    au.src = url;
    hf.href = url;
    hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
    hf.innerHTML = hf.download;
    li.appendChild(au);
    li.appendChild(hf);
    recordingList.appendChild(li);
  }
}
function success(e) {
  audioContext = window.AudioContext || window.webkitAudioContext;
  context = new audioContext();
  volume = context.createGain();
  // creates an audio node from the microphone incoming stream (source)
  source = context.createMediaStreamSource(e);
  // connect the stream (source) to the gain node
  source.connect(volume);
  var bufferSize = 2048;
  recorder = context.createScriptProcessor(bufferSize, 2, 2);
  // node for the visualizer
  analyser = context.createAnalyser();
  analyser.smoothingTimeConstant = 0.3;
  analyser.fftSize = 512;
  splitter = context.createChannelSplitter();
  // when recording happens
  recorder.onaudioprocess = function(e) {
    if (!recording) return;
    var left = e.inputBuffer.getChannelData(0);
    var right = e.inputBuffer.getChannelData(1);
    leftchannel.push(new Float32Array(left));
    rightchannel.push(new Float32Array(right));
    recordingLength += bufferSize;
    // get the average for the first channel
    var array = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(array);
    var c = document.getElementById("myCanvas");
    var ctx = c.getContext("2d");
    // clear the current state
    ctx.clearRect(0, 0, 1000, 325);
    var gradient = ctx.createLinearGradient(0, 0, 0, 300);
    gradient.addColorStop(1, '#000000');
    gradient.addColorStop(0.75, '#ff0000');
    gradient.addColorStop(0.25, '#ffff00');
    gradient.addColorStop(0, '#ffffff');
    // set the fill style
    ctx.fillStyle = gradient;
    drawSpectrum(array);
    function drawSpectrum(array) {
      for (var i = 0; i < array.length; i++) {
        var value = array[i];
        ctx.fillRect(i * 5, 325 - value, 3, 325);
      }
    }
  }

  function getAverageVolume(array) {
    var values = 0;
    var average;
    var length = array.length;
    // get all the frequency amplitudes
    for (var i = 0; i < length; i++) {
      values += array[i];
    }
    average = values / length;
    return average;
  }

  // we connect the recorder node to the destination (speakers)
  volume.connect(splitter);
  splitter.connect(analyser, 0, 0);
  analyser.connect(recorder);
  recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength) {
  var result = new Float32Array(recordingLength);
  var offset = 0;
  var lng = channelBuffer.length;
  for (var i = 0; i < lng; i++) {
    var buffer = channelBuffer[i];
    result.set(buffer, offset);
    offset += buffer.length;
  }
  return result;
}

function interleave(leftChannel, rightChannel) {
  var length = leftChannel.length + rightChannel.length;
  var result = new Float32Array(length);
  var inputIndex = 0;
  for (var index = 0; index < length;) {
    result[index++] = leftChannel[inputIndex];
    result[index++] = rightChannel[inputIndex];
    inputIndex++;
  }
  return result;
}

function writeUTFBytes(view, offset, string) {
  var lng = string.length;
  for (var i = 0; i < lng; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}
And here is the HTML code:
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
  <title>Simple Web Audio Recorder</title>
  <script src="js/functions.js"></script>
  <link href="css/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
  <input type="button" value="record" onclick="getVal(this.value)">
  <input type="button" value="stop" onclick="getVal(this.value)">
  <p id="output"></p>
  <ul id="recordingList"></ul>
  <canvas id="myCanvas" width="1000" height="325" style="display: block;"></canvas>
</body>
</html>
ffmpeg is a native library. Running it through Emscripten may not be practical in everyday web development yet; using Emscripten and the JavaScript it generates requires fairly advanced JavaScript skills.
Instead, I suggest you upload your audio files to the server side and convert them there:
Record the voice as a blob using getUserMedia()
Upload the recorded blob to the server (see the sketch after this list)
On the server side, pick your preferred web programming framework
The web programming framework accepts the upload and stores the file on the server
The web programming framework runs ffmpeg (command line) to process the file
The user can download the processed file
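A minimal sketch of the upload step, assuming a hypothetical /upload endpoint and the WAV blob built in your recording code (the 'audio' field name is also an assumption; match whatever your framework expects):

// Sketch only: POST the recorded blob to a hypothetical /upload endpoint.
var form = new FormData();
form.append('audio', blob, 'recording.wav'); // field name is an assumption
var upload = new XMLHttpRequest();
upload.open('POST', '/upload', true);
upload.onload = function() {
  console.log('upload finished; the server can now run ffmpeg on the file');
};
upload.send(form);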
Here is a Python example for converting uploaded MP3 files to 48 kbit AAC files:
def create_prelisten_aac(mp3, aac):
    """
    Run encode for a single file.
    Do 48 kbit files for prelisten.
    """
    cmdline = [FFMPEG, '-y', '-i', mp3, '-acodec', 'libfaac', '-ar', '22050', '-ac', '1', '-ab', '48000', aac]
    return subprocess.call(cmdline)
Here you can find the full source code of this application:
https://github.com/miohtama/LibertyMusicStore
I am trying to set up some web audio to load/play multiple sound sources simultaneously.
The sounds are loading for now, and playback is triggered through a button input.
My problem is that I want all the sounds to run through one BiquadFilter (in this case a LOWPASS filter, type 0).
I believe I have created the filter correctly (in two different places; I'm not sure which, so look at the attached code), but I cannot get a range input to control its frequency. Something isn't communicating properly and I'm completely lost.
Also, on the same topic, I want each individual sound to run through its own independent gainNode (volume control), again changed via a range input.
Basically there will be 6 audio files, each running through its own gainNode and then coming together to go through a LOWPASS filter before the destination (i.e. the speakers).
I'm also hoping to run them through individual pannerNodes, but right now I'm on the verge of giving up on the project altogether.
Below is my code (like I said before, the button triggers all the sounds, but the filter is a BIG problem):
HTML:
<body>
  <div id="startbtn">
    <p><input type="button" onClick="tracks.toggle();">PLAY!</p>
  </div> <!-- startbtn div -->
  <div id="frequency">
    <p><input type="range" id="freq1" min="0" max="1" step="0.01" value="1" onchange="sound.changeFrequency(this);" style="width:180px; background-color:#FFF;"> Frequency</p>
  </div>
  <script>
    var tracks = new SongTracks();
    var sound = new playSound();
  </script>
</body>
JAVASCRIPT:
var context = new webkitAudioContext();
var myAudioAnalyser;

function init() {
  if ('webkitAudioContext' in window) {
    myAudioContext = new webkitAudioContext();
    // an analyser is used for the spectrum
    myAudioAnalyser = myAudioContext.createAnalyser();
    myAudioAnalyser.smoothingTimeConstant = 0.85;
    myAudioAnalyser.connect(myAudioContext.destination);
    fetchSounds();
  };
};

// shim layer with setTimeout fallback
window.requestAnimFrame = (function() {
  return window.requestAnimationFrame ||
         window.webkitRequestAnimationFrame ||
         window.mozRequestAnimationFrame ||
         window.oRequestAnimationFrame ||
         window.msRequestAnimationFrame ||
         function(callback) {
           window.setTimeout(callback, 1000 / 60);
         };
})();

function playSound(buffer, time) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  var filter = context.createBiquadFilter(); ///////////////// HERE
  filter.type = filter.LOWPASS;
  filter.frequency.value = 5000;
  source.connect(filter);
  filter.connect(context.destination);
  source.start(time);
  this.filter = filter;
};
function loadSounds(obj, soundMap, callback) {
  var names = [];
  var paths = [];
  for (var name in soundMap) {
    var path = soundMap[name];
    names.push(name);
    paths.push(path);
  }
  bufferLoader = new BufferLoader(context, paths, function(bufferList) {
    for (var i = 0; i < bufferList.length; i++) {
      var buffer = bufferList[i];
      var name = names[i];
      obj[name] = buffer;
    }
    if (callback) {
      callback();
    }
  });
  bufferLoader.load();
};
function BufferLoader(context, urlList, callback) {
  this.context = context;
  this.urlList = urlList;
  this.onload = callback;
  this.bufferList = new Array();
  this.loadCount = 0;
}

BufferLoader.prototype.loadBuffer = function(url, index) {
  // Load buffer asynchronously
  var request = new XMLHttpRequest();
  request.open("GET", url, true);
  request.responseType = "arraybuffer";
  var loader = this;
  request.onload = function() {
    // Asynchronously decode the audio file data in request.response
    loader.context.decodeAudioData(
      request.response,
      function(buffer) {
        if (!buffer) {
          alert('error decoding file data: ' + url);
          return;
        }
        loader.bufferList[index] = buffer;
        if (++loader.loadCount == loader.urlList.length)
          loader.onload(loader.bufferList);
      },
      function(error) {
        console.error('decodeAudioData error', error);
      }
    );
  }
  request.onerror = function() {
    alert('BufferLoader: XHR error');
  }
  request.send();
};

BufferLoader.prototype.load = function() {
  for (var i = 0; i < this.urlList.length; ++i)
    this.loadBuffer(this.urlList[i], i);
};
var SongTracks = function() {
  loadSounds(this, {
    vocals: 'tracks/vocals.mp3',
    guitar: 'tracks/guitar.mp3',
    piano: 'tracks/piano.mp3'
  });
};

var filter;

SongTracks.prototype.play = function() {
  playSound(this.vocals, 0);
  playSound(this.guitar, 0);
  playSound(this.piano, 0);
  ///////////////////////////////////////////////////////////// OR HERE
  var source1 = context.createBufferSource();
  source1.buffer = this.buffer
  source1 = bufferList[0];
  var filter = context.createBiquadFilter();
  filter.type = filter.LOWPASS;
  filter.frequency.value = 5000;
  source1.connect(filter);
  filter.connect(context.destination);
  this.filter = filter;
  ///////////////////////////////////////////////////////////////////// TO HERE?
};

SongTracks.prototype.stop = function() {
  this.source.stop(0);
};

SongTracks.prototype.toggle = function() {
  this.isPlaying ? this.stop() : this.play();
  this.isPlaying = !this.isPlaying;
};

/* SongTracks.prototype.changeFrequency = function(element) {
  var minValue = 40;
  var maxValue = context.sampleRate / 2;
  var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
  var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
  this.filter.frequency.value = maxValue * multiplier;
}; */

playSound.prototype.changeFrequency = function(element) {
  var minValue = 40;
  var maxValue = context.sampleRate / 2;
  var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
  var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
  this.filter.frequency.value = maxValue * multiplier;
};
As you can see from my notes etc., I'm very confused and have kind of hit a brick wall.
I've seen code which differentiates the audio files, something like:
var source1 = context.createBufferSource();
var source2 = context.createBufferSource();
var source3 = context.createBufferSource();
var source4 = context.createBufferSource();
source1.buffer = bufferList[0];
source2.buffer = bufferList[1];
source3.buffer = bufferList[2];
source4.buffer = bufferList[3];
But I have no idea; good luck.
You should probably simply pass the node to connect to into playSound, and then pass it the FilterNode.
Inside your playSound is the wrong place to create the BiquadFilter - you'll end up creating N of them, one for each playing sound, and you only want one.
You want something like:
HTML file the same, except:
<input type="range" id="freq1" min="0" max="1" step="0.01" value="1" onchange="changeFilterFrequency(this);" style="width:180px; background-color:#FFF;"> Frequency</p>
JS:
function playSound(buffer, outputNode, time) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(outputNode);
  source.start(time);
}

var globalFilter = null; // one global filter

SongTracks.prototype.play = function() {
  globalFilter = context.createBiquadFilter(); // note: no "var" here - assign the global, don't shadow it
  globalFilter.type = globalFilter.LOWPASS;
  globalFilter.frequency.value = 5000;
  globalFilter.connect(context.destination);
  playSound(this.vocals, globalFilter, 0);
  playSound(this.guitar, globalFilter, 0);
  playSound(this.piano, globalFilter, 0);
};

function changeFilterFrequency(element) {
  var minValue = 40;
  var maxValue = context.sampleRate / 2;
  var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
  var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
  globalFilter.frequency.value = maxValue * multiplier;
}
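For your per-sound volume controls, the same pattern extends naturally: give each sound its own GainNode and connect all of them into the shared filter. A minimal sketch, assuming the same globalFilter as above (playSoundWithGain is a hypothetical helper, not part of the code above):

// Sketch only: one GainNode per sound, all feeding the shared lowpass filter.
function playSoundWithGain(buffer, outputNode, time) {
  var source = context.createBufferSource();
  var gain = context.createGain(); // this sound's personal volume control
  source.buffer = buffer;
  source.connect(gain);
  gain.connect(outputNode);        // every per-sound gain meets at the filter
  source.start(time);
  return gain;                     // keep the handle so a range input can drive it
}

// e.g. inside play():
//   var vocalsGain = playSoundWithGain(this.vocals, globalFilter, 0);
// then from a range input's onchange: vocalsGain.gain.value = element.value;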
I'm new to programming and I'm messing around with the Web Audio API. Right now, I have three samples (kick, clap, hihat) that make up a simple drumkit beat when a Play button is pressed.
I want to be able to illustrate this visually on the front end as a sequencer that plays through this drumkit. For instance, every time the "kick.wav" sample is played, I want to change the color of a div that is associated with it.
My questions are:
How do I associate every time a kick, clap or hihat are played with a div in the html?
How can I add this association to sequence through when the play button is hit?
HTML:
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<button id="play">play</button>
<script src="javascript/tween.js"></script>
<script src="javascript/shared.js"></script>
<script src="javascript/seq.js"></script>
</body>
Javascript:
function playSound(buffer, time) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(context.destination);
  source.start(time);
}

function loadSounds(obj, soundMap, callback) {
  // Array-ify
  var names = [];
  var paths = [];
  for (var name in soundMap) {
    var path = soundMap[name];
    names.push(name);
    paths.push(path);
  }
  bufferLoader = new BufferLoader(context, paths, function(bufferList) {
    for (var i = 0; i < bufferList.length; i++) {
      var buffer = bufferList[i];
      var name = names[i];
      obj[name] = buffer;
    }
    if (callback) {
      callback();
    }
  });
  bufferLoader.load();
}
function BufferLoader(context, urlList, callback) {
  this.context = context;
  this.urlList = urlList;
  this.onload = callback;
  this.bufferList = new Array();
  this.loadCount = 0;
}

BufferLoader.prototype.loadBuffer = function(url, index) {
  // Load buffer asynchronously
  var request = new XMLHttpRequest();
  request.open("GET", url, true);
  request.responseType = "arraybuffer";
  var loader = this;
  request.onload = function() {
    // Asynchronously decode the audio file data in request.response
    loader.context.decodeAudioData(
      request.response,
      function(buffer) {
        if (!buffer) {
          alert('error decoding file data: ' + url);
          return;
        }
        loader.bufferList[index] = buffer;
        if (++loader.loadCount == loader.urlList.length)
          loader.onload(loader.bufferList);
      },
      function(error) {
        console.error('decodeAudioData error', error);
      }
    );
  }
  request.onerror = function() {
    alert('BufferLoader: XHR error');
  }
  request.send();
};

BufferLoader.prototype.load = function() {
  for (var i = 0; i < this.urlList.length; ++i)
    this.loadBuffer(this.urlList[i], i);
};
var RhythmSample = function() {
  loadSounds(this, {
    kick: 'sounds/kick.wav',
    claps: 'sounds/claps.wav',
    hihat: 'sounds/hihat.wav'
  });
};

RhythmSample.prototype.play = function() {
  // We'll start playing the rhythm 100 milliseconds from "now"
  var startTime = context.currentTime + 0.100;
  var tempo = 120; // BPM (beats per minute)
  var eighthNoteTime = (60 / tempo) / 2;
  var allDivs = document.getElementsByName('colorchangingdivs[]');
  // Play 2 bars of the following:
  for (var bar = 0; bar < 2; bar++) {
    var time = startTime + bar * 8 * eighthNoteTime;
    // Play the bass (kick) drum on beats 1, 5
    playSound(this.kick, time);
    playSound(this.kick, time + 4 * eighthNoteTime);
    console.log("4")
    // Play the snare drum on beats 3, 7
    playSound(this.claps, time + 2 * eighthNoteTime);
    playSound(this.claps, time + 6 * eighthNoteTime);
    // Play the hi-hat every eighth note.
    for (var i = 0; i < 8; ++i) {
      playSound(this.hihat, time + i * eighthNoteTime);
    }
  }
};

var sample = new RhythmSample();

document.querySelector('#play').addEventListener('click', function() {
  sample.play();
});
THANKS SO MUCH!
Add another parameter to playSound() that holds the id of the div you want to color, plus logic to change the color when the sound plays, selecting the div by the id you've passed in.
function playSound(buffer, time, colorID) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(context.destination);
  source.start(time);
  document.getElementById(colorID).style.backgroundColor = 'blue'; // or a hex/rgb value
}
Then you just have to add the right parameter when you call playSound.
playSound(this.kick, time + 4 * eighthNoteTime,"your-kick-div-id");
If you need different colors for different sounds/divs, just add an if/else if statement where the background color is set.
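One caveat with the snippet above, and a hedged sketch of a fix: source.start(time) schedules the sound in the future, but the color changes immediately when playSound() is called. To flip the color when the sound actually plays, you could delay the DOM update until the scheduled time, for example:

// Sketch only: defer the color change until the scheduled audio time arrives.
function playSound(buffer, time, colorID) {
  var source = context.createBufferSource();
  source.buffer = buffer;
  source.connect(context.destination);
  source.start(time);
  // time is in AudioContext seconds; convert the gap to milliseconds.
  var delayMs = Math.max(0, (time - context.currentTime) * 1000);
  setTimeout(function() {
    document.getElementById(colorID).style.backgroundColor = 'blue';
  }, delayMs);
}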