Recording voice using HTML5 and processing it with ffmpeg - javascript

I need to use ffmpeg in my JavaScript/HTML5 project so that the user can choose the format the audio is saved in. I don't know anything about ffmpeg, and despite a lot of research I still don't know how to use it in my project. I found an example, https://github.com/sopel39/audioconverter.js, but the problem is: how can I add the ffmpeg.js file, which is about 8 MB, to my project? If someone can help me I'll be very thankful.
Here is my full code.
The JavaScript page:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function getVal(value)
{
// if R is pressed, we start recording
if ( value == "record"){
recording = true;
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
document.getElementById('output').innerHTML="Recording now...";
// if S is pressed, we stop the recording and package the WAV file
} else if ( value == "stop" ){
// we stop recording
recording = false;
document.getElementById('output').innerHTML="Building wav file...";
// we flat the left and right channels down
var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
// we interleave both channels together
var interleaved = interleave ( leftBuffer, rightBuffer );
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
var lng = interleaved.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++){
view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
index += 2;
}
var blob = new Blob ( [ view ], { type : 'audio/wav' } );
// let's save it locally
document.getElementById('output').innerHTML='Handing off the file now...';
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var li = document.createElement('li');
var au = document.createElement('audio');
var hf = document.createElement('a');
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(hf);
recordingList.appendChild(li);
}
}
function success(e){
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
volume = context.createGain();
// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);
// connect the stream(source) to the gain node
source.connect(volume);
var bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2);
//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function(e){
if (!recording) return;
var left = e.inputBuffer.getChannelData (0);
var right = e.inputBuffer.getChannelData (1);
leftchannel.push (new Float32Array (left));
rightchannel.push (new Float32Array (right));
recordingLength += bufferSize;
// get the average for the first channel
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var c=document.getElementById("myCanvas");
var ctx = c.getContext("2d");
// clear the current state
ctx.clearRect(0, 0, 1000, 325);
var gradient = ctx.createLinearGradient(0,0,0,300);
gradient.addColorStop(1,'#000000');
gradient.addColorStop(0.75,'#ff0000');
gradient.addColorStop(0.25,'#ffff00');
gradient.addColorStop(0,'#ffffff');
// set the fill style
ctx.fillStyle=gradient;
drawSpectrum(array);
function drawSpectrum(array) {
for ( var i = 0; i < (array.length); i++ ){
var value = array[i];
ctx.fillRect(i*5,325-value,3,325);
}
}
}
function getAverageVolume(array) {
var values = 0;
var average;
var length = array.length;
// get all the frequency amplitudes
for (var i = 0; i < length; i++) {
values += array[i];
}
average = values / length;
return average;
}
// connect the chain: volume -> splitter -> analyser -> recorder -> destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength){
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++){
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel){
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length; ){
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string){
var lng = string.length;
for (var i = 0; i < lng; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
And here is the HTML code:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Simple Web Audio Recorder</title>
<script src="js/functions.js"></script>
<link href="css/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<input type="button" value="record" onclick="getVal(this.value)">
<input type="button" value="stop" onclick="getVal(this.value)">
<p id="output"></p>
<ul id="recordingList"></ul>
<canvas id="myCanvas" width="1000" height="325" style="display: block;"></canvas>
</body>
</html>

ffmpeg is a native library. Running it through Emscripten is not really practical in everyday web development yet, and using Emscripten and the JavaScript it generates requires fairly advanced JavaScript skills.
Instead, I suggest you upload your audio files to the server side and convert them there:
1. Record the voice as a blob using getUserMedia()
2. Upload the recorded blob to the server (a minimal sketch follows this list)
3. On the server side, pick your preferred web programming framework
4. The web framework accepts the upload and stores the file on the server
5. The web framework runs ffmpeg (command line), which processes the file
6. The user can download the processed file
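Step 2 is plain browser code. A minimal sketch, assuming the WAV blob built in the stop handler above and a hypothetical /upload endpoint on your server:
// Hedged sketch: POST the recorded WAV blob to the server so it can be converted there.
// The '/upload' URL and the 'audio' field name are assumptions; adjust them to your backend.
function uploadRecording(blob) {
    var form = new FormData();
    form.append('audio', blob, 'recording.wav');
    var xhr = new XMLHttpRequest();
    xhr.open('POST', '/upload', true);
    xhr.onload = function() {
        if (xhr.status === 200) {
            document.getElementById('output').innerHTML = 'Upload finished, server is converting...';
        } else {
            document.getElementById('output').innerHTML = 'Upload failed: ' + xhr.status;
        }
    };
    xhr.send(form);
}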
Here is a Python example for converting uploaded MP3 files to 48 kbit/s AAC files:
import subprocess

# FFMPEG holds the path to the ffmpeg binary on the server
def create_prelisten_aac(mp3, aac):
    """
    Run encode for a single file.
    Produce 48 kbit files for prelisten.
    """
    cmdline = [FFMPEG, '-y', '-i', mp3, '-acodec', 'libfaac', '-ar', '22050', '-ac', '1', '-ab', '48000', aac]
    return subprocess.call(cmdline)
Here you can find the full source code of this application:
https://github.com/miohtama/LibertyMusicStore
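If you would rather stay in JavaScript on the server too, a minimal sketch of steps 4 to 6 using Node.js with Express and multer (my assumption, not how the linked project does it) might look like this:
// Hedged sketch: accept the uploaded blob, run the ffmpeg command line on it,
// and hand the converted file back to the user.
var express = require('express');
var multer = require('multer');
var execFile = require('child_process').execFile;
var app = express();
var upload = multer({ dest: 'uploads/' });          // assumption: local scratch directory

app.post('/upload', upload.single('audio'), function (req, res) {
    var converted = req.file.path + '.mp3';         // pick whatever format the user asked for
    execFile('ffmpeg', ['-y', '-i', req.file.path, converted], function (err) {
        if (err) {
            return res.status(500).send('ffmpeg failed');
        }
        res.download(converted);                    // step 6: the user downloads the processed file
    });
});

app.listen(3000);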

Related

How to send msg.payload Int16Array from node-red dashboard template

I am trying to send an Int16Array from a node-red dashboard template. In the template I have:
var i16Buff = new Int16Array(i16BuffSize);
//... fill with data
scope.send({payload: i16Buff});
The buffer comes through msg.payload and I can see the data in console.log as a JSON array. How do I send it from the node-red dashboard template so that it remains an Int16Array?
Because this was so painful to figure out, I thought I'd just put my entire code up as an example for others:
<!DOCTYPE html>
<video style="height: 0px; width: 0px;"></video>
<md-button ng-click="startStopRec()">{{ label }}</md-button>
<script>
(function(scope) {
// Setup cross browser compatibility
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
var video = document.querySelector('video');
var startedRecording = false;
var endRecording = false;
var scriptNode;
var audioCtx;
// Check if supported by browser
if (navigator.getUserMedia) {
console.log('getUserMedia supported.');
navigator.getUserMedia (
{
audio: true,
video: false
},
// Success callback
function(stream) {
var buffSize = 2048;
var buff = [];
video.src = (window.URL && window.URL.createObjectURL(stream)) || stream;
video.onloadedmetadata = function(e) {
video.muted = 'true';
};
audioCtx = new AudioContext();
var source = audioCtx.createMediaStreamSource(stream);
scriptNode = audioCtx.createScriptProcessor(buffSize, 1, 1);
scriptNode.onaudioprocess = function(APE) {
if(endRecording === true) {
// There is probably a better way to do this but it worked for me
scriptNode.disconnect();
startedRecording = false;
endRecording = false;
// This was key for creating appropriate buffer for msg.payload.
var rawBuffer = new ArrayBuffer(buff.length * buffSize * 2);
var rawView = new DataView(rawBuffer);
var index = 0;
for (var i = 0; i < buff.length; i++) {
// Convert multi array audio buffer to flat Int16
for (var j = 0; j < (buffSize); j++){
rawView.setInt16(index, buff[i][j] * 0x7FFF, true);
index += 2;
}
}
// Send msg
scope.send({payload: rawBuffer, sampleRate: audioCtx.sampleRate});
// Clear buffer for next time
buff = [];
} else {
// Collect audio buffers into array
console.log('Getting data');
buff.push(new Float32Array(APE.inputBuffer.getChannelData(0)));
}
}
source.connect(scriptNode);
},
// Error callback
function(err) {
console.log('The following gUM error occured: ' + err);
}
);
} else {
console.log('getUserMedia not supported on your browser!');
}
function writeUTFBytes(view, offset, string){
var lng = string.length;
for (var i = 0; i < lng; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
if(scope.label === undefined) {
scope.label = 'Record';
}
scope.startStopRec = function() {
if(scope.label === 'Record') {
scope.label = 'Send';
scriptNode.connect(audioCtx.destination);
video.play();
startedRecording = true;
} else {
scope.label = 'Record';
if(startedRecording === true) {
endRecording = true;
video.pause();
}
}
}
})(scope);
</script>
This template sends a buffer of raw mono audio data from the microphone (msg.payload) and the sample rate (msg.sampleRate) to the next node in the flow.
NOTE: You must use HTTPS for it to work (from what I've read/experienced).
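On the receiving end the payload arrives as a raw binary buffer. A minimal sketch of a downstream function node (my addition, not part of the template above) that turns it back into an Int16Array:
// Hedged sketch for a Node-RED function node wired after the dashboard template.
// In Node.js the binary payload normally arrives as a Buffer; create an Int16Array
// view over it instead of copying the samples one by one.
var buf = msg.payload;
var samples = new Int16Array(buf.buffer, buf.byteOffset, buf.length / 2);
node.log('received ' + samples.length + ' samples at ' + msg.sampleRate + ' Hz');
msg.payload = samples;
return msg;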

Error in recording audio on android using getUserMedia "js"

The code below is from a webpage that makes audio recordings. I have a problem I cannot solve; I have already tried several approaches and nothing works. When I run the code on a desktop it works normally and the audio comes out clean. The real problem happens when I do the same test on Android (6.0, 7.0): it completes the whole process, but the recorded voice comes out broken and stuttering, and I have not been able to fix it. Maybe someone with more experience can help me.
NOTE: I use "https" on the page.
var audioContext = null;
var context = null;
var volume = null;
var audioInput = null;
var recorder = null;
var recordingLength = 0;
var leftchannel = [];
var rightchannel = [];
var bufferSize = 16384;
var sampleRate = 8000;
var requestStreamReadPermission = {
audio: true,
video:false
};
$scope.canRecordAudio = false;
// -
navigator.getUserMedia = (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
// -
var writeUTFBytes = function(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
};
// -
var interleave = function(leftChannel, rightChannel) {
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
};
// -
var mergeBuffers = function(channelBuffer, recordingLength) {
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
};
// -
var errorGetUserMedia = function(error) {
console.log(error);
GeneralMessageService.clean();
GeneralMessageService.addMessage($filter('translate')('chat.mobile.checkorwaitforconnection'));
};
// -
var successGetUserMedia = function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
// retrieve the current sample rate to be used for the WAV packaging
// sampleRate = context.sampleRate;
volume = context.createGain();
audioInput = context.createMediaStreamSource(stream);
audioInput.connect(volume);
//recorder = context.createScriptProcessor(bufferSize, 2, 2);
var numberOfInputChannels = 2;
var numberOfOutputChannels = 2;
if (context.createScriptProcessor) {
recorder = context.createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels);
} else {
recorder = context.createJavaScriptNode(bufferSize, numberOfInputChannels, numberOfOutputChannels);
}
recorder.onaudioprocess = function(stream) {
if (!$scope.recordingAudio) {
return;
}
var left = stream.inputBuffer.getChannelData(0);
//var right = stream.inputBuffer.getChannelData(1);
leftchannel.push(new Float32Array(left));
//rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
};
volume.connect(recorder);
recorder.connect(context.destination);
$scope.canRecordAudio = true;
};
// -
if (!!navigator.getUserMedia) {
navigator.getUserMedia(
requestStreamReadPermission,
successGetUserMedia,
errorGetUserMedia
);
} else {
errorGetUserMedia('UserMedia is empty');
}
// -
var startRecording = function() {
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
};
// -
var stopRecording = function() {
var leftBuffer = mergeBuffers(leftchannel, recordingLength);
//var rightBuffer = mergeBuffers(rightchannel, recordingLength);
//var interleaved = interleave(leftBuffer, rightBuffer);
var buffer = new ArrayBuffer(44 + leftBuffer.length * 2);
var view = new DataView(buffer);
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + leftBuffer.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
//MONO
view.setUint16(22, 1, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 2, true);
view.setUint16(32, 2, true);
view.setUint16(34, 16, true);
//stereo
/*view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);*/
//end
writeUTFBytes(view, 36, 'data');
view.setUint32(40, leftBuffer.length * 2, true);
var lng = leftBuffer.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++) {
var s = Math.max(-1, Math.min(1, leftBuffer[i]));
view.setInt16(index, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
index += 2;
}
var blob = new Blob([view], {
type: 'audio/wav'
});
var url = (window.URL || window.webkitURL).createObjectURL(blob);
addAudioMessage(url);
//clear();
};

The ScriptProcessorNode doesn't work with OfflineContext

The ScriptProcessorNode doesn't work with an OfflineAudioContext.
It works in Chrome and Mozilla Firefox.
It doesn't work in Edge 25 or Safari 10.
The issue is that the onaudioprocess event is fired only once when the OfflineAudioContext is rendered.
Example on jsfiddle without BufferSource.
Example on jsfiddle based on MDN example with BufferSource.
console.clear();
var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var current = 0;
var buffer_size = 4096;
var buffer_length = buffer_size * 10;
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var scriptNode = audioCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNode.onaudioprocess = whiteNoise;
function whiteNoise(audioProcessingEvent) {
console.log('onaudioprocess', current);
// The output buffer contains the samples that will be modified and played
var outputBuffer = audioProcessingEvent.outputBuffer;
// Loop through the output channel
for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
var outputData = outputBuffer.getChannelData(channel);
for (var sample = 0; sample < buffer_size; sample++) {
// add noise to each output sample
outputData[sample] += ((Math.random() * 2) - 1);
}
}
current += buffer_size;
if (current > buffer_length)
scriptNode.disconnect();
}
playButton.onclick = function() {
current = 0;
scriptNode.connect(audioCtx.destination);
}
playButtonOffline.onclick = function() {
var offlineCtx = new(window.OfflineAudioContext || window.webkitOfflineAudioContext)(1, buffer_length, 48000);
var scriptNodeOffline = offlineCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNodeOffline.onaudioprocess = whiteNoise;
current = 0;
offlineCtx.oncomplete = function(e) {
console.log('rendered buffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
}
scriptNodeOffline.connect(offlineCtx.destination);
offlineCtx.startRendering();
}
<button class="play">
play
</button>
<button class="play-offline">
Render offline
</button>
Update
Clicking on Render offline many times in Chrome and Firefox produces the same output each time.
Clicking on Render offline many times in Safari and Edge produces different output each time.
Example on jsfiddle.
// Create AudioContext and buffer source
console.clear();
var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var myBuffer = null;
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var source = audioCtx.createBufferSource();
// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
// load in an audio track via XHR and decodeAudioData
function getData() {
request = new XMLHttpRequest();
request.open('GET', 'https://s3-ap-northeast-1.amazonaws.com/storage.cowrite.decodeapps.io/Materials/Media/Audio/59f2b85dd3aed-20171027-043853.mp3', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
myBuffer = buffer;
source.buffer = myBuffer;
},
function(e) {
"Error with decoding audio data" + e.err
});
}
request.send();
}
function addNoise(audioProcessingEvent) {
console.log("onaudioprocess")
// The input buffer is the song we loaded earlier
var inputBuffer = audioProcessingEvent.inputBuffer;
// The output buffer contains the samples that will be modified and played
var outputBuffer = audioProcessingEvent.outputBuffer;
// Loop through the output channels (in this case there is only one)
for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
var inputData = inputBuffer.getChannelData(channel);
var outputData = outputBuffer.getChannelData(channel);
// Loop through the 4096 samples
for (var sample = 0; sample < inputBuffer.length; sample++) {
// make output equal to the same as the input
outputData[sample] = inputData[sample];
// add noise to each output sample
outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
}
}
}
// Give the node a function to process audio events
scriptNode.onaudioprocess = addNoise;
getData();
// wire up play button
playButton.onclick = function() {
source.connect(scriptNode);
scriptNode.connect(audioCtx.destination);
source.start();
}
// When the buffer source stops playing, disconnect everything
source.onended = function() {
source.disconnect(scriptNode);
scriptNode.disconnect(audioCtx.destination);
}
// When the buffer source stops playing, disconnect everything
// wire up play button
playButtonOffline.onclick = function() {
var offlineCtx = new(window.OfflineAudioContext || window.webkitOfflineAudioContext)(2, myBuffer.length, myBuffer.sampleRate);
var scriptNodeOffline = offlineCtx.createScriptProcessor(4096, 1, 1);
var sourceOffline = offlineCtx.createBufferSource();
sourceOffline.buffer = myBuffer;
sourceOffline.onended = function() {
console.log('sourceOffline.onended');
sourceOffline.disconnect(scriptNodeOffline);
scriptNodeOffline.disconnect(offlineCtx.destination);
}
scriptNodeOffline.onaudioprocess = addNoise;
sourceOffline.connect(scriptNodeOffline);
scriptNodeOffline.connect(offlineCtx.destination);
sourceOffline.start();
offlineCtx.oncomplete = function(e) {
console.log('renderedBuffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
listenRendered(e.renderedBuffer);
};
offlineCtx.startRendering();
}
var _audioCtx = new(window.AudioContext || window.webkitAudioContext)();
function listenRendered(buffer) {
var _source = _audioCtx.createBufferSource();
_source.buffer = buffer;
_source.connect(_audioCtx.destination);
_source.start();
}
<button class="play">
play
</button>
<button class="play-offline">
Render offline
</button>
These are bugs in Safari and Edge. A ScriptProcessorNode should work fine in an offline context. File bugs with Safari and Edge.
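Until those bugs are fixed, one possible workaround for the white-noise case (a sketch of my own, not part of the answer) is to drop the ScriptProcessorNode from the offline graph and precompute the samples into an AudioBuffer before rendering:
// Hedged workaround sketch: fill an AudioBuffer up front so the offline graph
// only needs an AudioBufferSourceNode, which Safari and Edge render correctly.
function renderNoiseOffline(lengthInSamples, sampleRate) {
    var OfflineCtx = window.OfflineAudioContext || window.webkitOfflineAudioContext;
    var offlineCtx = new OfflineCtx(1, lengthInSamples, sampleRate);
    var noiseBuffer = offlineCtx.createBuffer(1, lengthInSamples, sampleRate);
    var data = noiseBuffer.getChannelData(0);
    for (var i = 0; i < lengthInSamples; i++) {
        data[i] = (Math.random() * 2) - 1; // same white noise as the onaudioprocess handler
    }
    var source = offlineCtx.createBufferSource();
    source.buffer = noiseBuffer;
    source.connect(offlineCtx.destination);
    source.start();
    // In current browsers startRendering() returns a promise; older WebKit builds
    // only fire offlineCtx.oncomplete instead.
    return offlineCtx.startRendering();
}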

Building a Web Audio Amplifier

I'm trying to build an amplifier using a ScriptProcessorNode (yes, I know it is deprecated, and yes, I'm fully aware of createGainNode). The reason is that createGainNode is better suited to attenuating gain than to boosting it. I'm an audio engineer and I want to be able to clip my audio. I built a static version that handles a gain of at least 150 (as long as you limit the value of the floats after the boost). However, this new version, which is controlled by a range input, just cuts the volume off completely.
I'm not getting any errors in the console; why is the volume cutting out?
<body>
<div id="button" style="background:#000;width:50px;height:50px;" onclick="connect()"></div>
<input id="range" type="range" min="1" max="128" value="1" oninput="myFunction(value)"/>
<script>
var context,
soundSource,
scriptNode,
buffer,
value = 1;
if(typeof AudioContext !== 'undefined') {
context = new AudioContext();
}else if(typeof webkitAudioContext !== 'undefined') {
context = new webkitAudioContext();
}else {
throw new Error('AudioContext not supported. :(');
}
function xhr() {
var request = new XMLHttpRequest();
request.open("GET", 'bass.wav', true);
request.responseType = 'arraybuffer';
request.onload = function() {
context.decodeAudioData(request.response, function onSuccess(decodedData) {
buffer = decodedData;
}, function onFailure() {
alert("Decoding the audio buffer failed");
});
}
request.send();
}
function connect() {
scriptNode = context.createScriptProcessor(256, 2, 2);
scriptNode.onaudioprocess = function(audioProcessingEvent) {
var input = audioProcessingEvent.inputBuffer;
var output = audioProcessingEvent.outputBuffer;
for(var channel = 0; channel < datum.numberOfChannels; channel++) {
var I = input.getChannelData(channel);
var O = output.getChannelData(channel);
for(var i = 0; i < input.length; i++) {
O[i] = I[i];
O[i] *= value;
}
}
}
soundSource = context.createBufferSource();
soundSource.buffer = buffer;
soundSource.connect(scriptNode);
scriptNode.connect(context.destination);
soundSource.start();
}
connect();
function myFunction(val) {
value = val;
soundSource.disconnect();
scriptNode.disconnect();
soundSource.connect(scriptNode);
scriptNode.connect(context.destination);
}
</script>
EDIT
Instead of trying to build my own amplifier, I should have just used the createGainNode because it works exactly as intended.
EDIT 2
After playing with createGainNode more, I figured out the original problem. In Chrome createGainNode works fine, but in Firefox it didn't hold up as well as my amplification code: it amplifies the quieter parts of the signal, but when the louder parts come in the audio drops. Is there any workaround for this? My amplification code:
function connect() {
soundSource = context.createBufferSource();
soundSource.buffer = buffer;
volume = context.createGain();
volume.gain.value = 1500;
soundSource.connect(volume);
volume.connect(context.destination);
soundSource.start();
}
function amplify() {
soundSource.stop();
var left = new Float32Array;
var right = new Float32Array;
left = buffer.getChannelData(0);
right = buffer.getChannelData(1);
for(var i = 0; i < left.length; i++) {
left[i] = left[i] * 1500;
if(left[i] > 0) {
left[i] = 128;
}
if(left[i] < 0) {
left[i] = -128;
}
right[i] = right[i] * 1500;
if(right[i] > 0) {
right[i] = 128;
}
if(right[i] < 0) {
right[i] = -128;
}
}
buffer.copyToChannel(left,0);
buffer.copyToChannel(right,1);
var amp = context.createBufferSource();
amp.buffer = buffer;
amp.connect(context.destination);
amp.start();
}
EDIT 3
I've opened a bug report on bugzilla here: https://bugzilla.mozilla.org/show_bug.cgi?id=1233821

Setting up Web Audio, multiple sounds, seperate gainNodes and global LOWPASS filter?

I am trying to set up some web audio to load/play multiple sound sources simultaneously.
The sounds are being loaded for now and play is triggered through a button input.
My problem is, I want all the sounds to run through one BiquadFilter (in this case type:0; // LOWPASS filter).
I believe I have created the filter correctly (in two different places, not sure which; see the attached code), but I cannot get a range input to control the frequency; something isn't communicating properly and I'm completely lost.
Also, around the same topic, I want each individual sound to run through their own independent gainNodes (volume controls), this again will be changed via a range input.
Basically there will be 6 audio files, running through their own gainNodes and then coming together to go through a LOWPASS filter before the destination (i.e. the speakers).
I'm also hoping to run each sound through its own pannerNode, but at the moment I'm close to giving up on the project altogether.
Below is my code (like I said before, the button triggers all the sounds, but the filter is a BIG problem):
HTML:
<body>
<div id="startbtn">
<p><input type="button" onClick="tracks.toggle();">PLAY!</p>
</div> <!-- startbtn div -->
<div id="frequency">
<p><input type="range" id="freq1" min="0" max="1" step="0.01" value="1" onchange="sound.changeFrequency(this);" style="width:180px; background-color:#FFF;"> Frequency</p>
</p>
</div>
<script>
var tracks = new SongTracks();
var sound = new playSound();
</script>
</body>
JAVASCRIPT:
var context = new webkitAudioContext();
var myAudioAnalyser;
function init() {
if('webkitAudioContext' in window) {
myAudioContext = new webkitAudioContext();
// an analyser is used for the spectrum
myAudioAnalyser = myAudioContext.createAnalyser();
myAudioAnalyser.smoothingTimeConstant = 0.85;
myAudioAnalyser.connect(myAudioContext.destination);
fetchSounds();
};
};
// shim layer with setTimeout fallback
window.requestAnimFrame = (function(){
return window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function( callback ){
window.setTimeout(callback, 1000 / 60);
};
})();
function playSound(buffer, time) {
var source = context.createBufferSource();
source.buffer = buffer;
var filter = context.createBiquadFilter(); ///////////////// HERE
filter.type = filter.LOWPASS;
filter.frequency.value = 5000;
source.connect(filter);
filter.connect(context.destination);
source.start(time);
this.filter = filter;
};
function loadSounds(obj, soundMap, callback) {
var names = []
var paths = []
for (var name in soundMap) {
var path = soundMap[name];
names.push(name);
paths.push(path);
}
bufferLoader = new BufferLoader(context, paths, function(bufferList) {
for (var i = 0; i < bufferList.length; i++) {
var buffer = bufferList[i];
var name = names[i];
obj[name] = buffer;
}
if (callback) {
callback();
}
});
bufferLoader.load();
};
function BufferLoader(context, urlList, callback) {
this.context = context;
this.urlList = urlList;
this.onload = callback;
this.bufferList = new Array();
this.loadCount = 0;
}
BufferLoader.prototype.loadBuffer = function(url, index) {
// Load buffer asynchronously
var request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
var loader = this;
request.onload = function() {
// Asynchronously decode the audio file data in request.response
loader.context.decodeAudioData(
request.response,
function(buffer) {
if (!buffer) {
alert('error decoding file data: ' + url);
return;
}
loader.bufferList[index] = buffer;
if (++loader.loadCount == loader.urlList.length)
loader.onload(loader.bufferList);
},
function(error) {
console.error('decodeAudioData error', error);
}
);
}
request.onerror = function() {
alert('BufferLoader: XHR error');
}
request.send();
};
BufferLoader.prototype.load = function() {
for (var i = 0; i < this.urlList.length; ++i)
this.loadBuffer(this.urlList[i], i);
};
var SongTracks = function() {
loadSounds(this, {
vocals: 'tracks/vocals.mp3',
guitar: 'tracks/guitar.mp3',
piano: 'tracks/piano.mp3'
});
};
var filter;
SongTracks.prototype.play = function() {
playSound(this.vocals, 0);
playSound(this.guitar, 0);
playSound(this.piano, 0);
///////////////////////////////////////////////////////////// OR HERE
var source1 = context.createBufferSource();
source1.buffer = this.buffer
source1 = bufferList[0];
var filter = context.createBiquadFilter();
filter.type = filter.LOWPASS;
filter.frequency.value = 5000;
source1.connect(filter);
filter.connect(context.destination);
this.filter = filter;
///////////////////////////////////////////////////////////////////// TO HERE?
};
SongTracks.prototype.stop = function() {
this.source.stop(0);
};
SongTracks.prototype.toggle = function() {
this.isPlaying ? this.stop() : this.play();
this.isPlaying = !this.isPlaying;
};
/* SongTracks.prototype.changeFrequency = function(element) {
var minValue = 40;
var maxValue = context.sampleRate / 2;
var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
this.filter.frequency.value = maxValue * multiplier;
}; */
playSound.prototype.changeFrequency = function(element) {
var minValue = 40;
var maxValue = context.sampleRate / 2;
var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
this.filter.frequency.value = maxValue * multiplier;
};
</script>
As you can see from my notes etc., I'm very confused and have kind of hit a brick wall.
I've seen code which differentiates the audio files, something like:
var source1 = context.createBufferSource();
var source2 = context.createBufferSource();
var source3 = context.createBufferSource();
var source4 = context.createBufferSource();
source1.buffer = bufferList[0];
source2.buffer = bufferList[1];
source3.buffer = bufferList[2];
source4.buffer = bufferList[3];
But I have no idea. Good luck.
You should probably simply pass the node to connect to into playSound, and then pass it the FilterNode.
Inside your playSound is the wrong place to create the BiquadFilter - you'll end up creating N of them, one for each playing sound, and you only want one.
You want something like:
HTML file the same, except:
<input type="range" id="freq1" min="0" max="1" step="0.01" value="1" onchange="changeFilterFrequency(this);" style="width:180px; background-color:#FFF;"> Frequency</p>
JS:
function playSound(buffer, outputNode, time) {
var source = context.createBufferSource();
source.buffer = buffer;
source.connect(outputNode);
source.start(time);
}
var globalFilter = null; // one global filter
SongTracks.prototype.play = function() {
globalFilter = context.createBiquadFilter(); // assign the global declared above; re-declaring it with var would leave changeFilterFrequency pointing at null
globalFilter.type = globalFilter.LOWPASS;
globalFilter.frequency.value = 5000;
globalFilter.connect(context.destination);
playSound(this.vocals, globalFilter, 0);
playSound(this.guitar, globalFilter, 0);
playSound(this.piano, globalFilter, 0);
};
function changeFilterFrequency(element) {
var minValue = 40;
var maxValue = context.sampleRate / 2;
var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
globalFilter.frequency.value = maxValue * multiplier;
}
