I am trying to send an Int16Array from a node-red dashboard template. In the template I have:
var i16Buff = new Int16Array(i16BuffSize);
//... fill with data
scope.send({payload: i16Buff});
The buffer comes through msg.payload and I can see the data in console.log as a JSON array. How do I send it from the node-red dashboard template so that it remains an Int16Array?
Because this was so painful to figure out, I thought I'd just put my entire code up as an example for others:
<!DOCTYPE html>
<video style="height: 0px; width: 0px;"></video>
<md-button ng-click="startStopRec()">{{ label }}</md-button>
<script>
(function(scope) {
// Setup cross browser compatibility
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
var video = document.querySelector('video');
var startedRecording = false;
var endRecording = false;
var scriptNode;
var audioCtx;
// Check if supported by browser
if (navigator.getUserMedia) {
console.log('getUserMedia supported.');
navigator.getUserMedia (
{
audio: true,
video: false
},
// Success callback
function(stream) {
var buffSize = 2048;
var buff = [];
// Attach the stream to the video element (srcObject where supported)
if ('srcObject' in video) {
    video.srcObject = stream;
} else {
    video.src = window.URL.createObjectURL(stream);
}
video.onloadedmetadata = function(e) {
    video.muted = true;
};
audioCtx = new AudioContext();
var source = audioCtx.createMediaStreamSource(stream);
scriptNode = audioCtx.createScriptProcessor(buffSize, 1, 1);
scriptNode.onaudioprocess = function(APE) {
if(endRecording === true) {
// There is probably a better way to do this but it worked for me
scriptNode.disconnect();
startedRecording = false;
endRecording = false;
// This was key for creating appropriate buffer for msg.payload.
var rawBuffer = new ArrayBuffer(buff.length * buffSize * 2);
var rawView = new DataView(rawBuffer);
var index = 0;
for (var i = 0; i < buff.length; i++) {
// Convert multi array audio buffer to flat Int16
for (var j = 0; j < (buffSize); j++){
rawView.setInt16(index, buff[i][j] * 0x7FFF, true);
index += 2;
}
}
// Send msg
scope.send({payload: rawBuffer, sampleRate: audioCtx.sampleRate});
// Clear buffer for next time
buff = [];
} else {
// Collect audio buffers into array
console.log('Getting data');
buff.push(new Float32Array(APE.inputBuffer.getChannelData(0)));
}
}
source.connect(scriptNode);
},
// Error callback
function(err) {
console.log('The following gUM error occurred: ' + err);
}
);
} else {
console.log('getUserMedia not supported on your browser!');
}
function writeUTFBytes(view, offset, string){
var lng = string.length;
for (var i = 0; i < lng; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
if(scope.label === undefined) {
scope.label = 'Record';
}
scope.startStopRec = function() {
if(scope.label === 'Record') {
scope.label = 'Send';
scriptNode.connect(audioCtx.destination);
video.play();
startedRecording = true;
} else {
scope.label = 'Record';
if(startedRecording === true) {
endRecording = true;
video.pause();
}
}
}
})(scope);
</script>
This template sends a buffer of raw mono audio data from the microphone (msg.payload) and the sample rate (msg.sampleRate) through msg to the next node in the flow.
NOTE: You must use HTTPS for it to work (from what I've read/experienced).
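On the receiving end, the binary payload typically arrives in a downstream function node as a Node.js Buffer rather than an Int16Array. A minimal sketch of rebuilding a typed view on it (the peak-amplitude part is only an illustration):
// In a Node-RED function node after the template.
// msg.payload usually arrives as a Node.js Buffer; build an Int16Array view on it.
// Assumes little-endian samples, matching the setInt16(..., true) calls above.
const buf = msg.payload;
const samples = new Int16Array(buf.buffer, buf.byteOffset, buf.length / 2);

// Illustrative use: find the peak amplitude of the recording.
let peak = 0;
for (let i = 0; i < samples.length; i++) {
    peak = Math.max(peak, Math.abs(samples[i]));
}
node.warn('peak: ' + peak + ' at ' + msg.sampleRate + ' Hz');
return msg;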
I have this record.js script to toggle a recording that is currently working as expected.
function Record_Current(config) {
config = config || {};
var self = this;
var audioInput;
var audioNode;
var bufferSize = config.bufferSize || 4096;
var recordedData = [];
var recording = false;
var recordingLength = 0;
var startDate;
var audioCtx;
var sampleRate; // was an implicit global, set in onMicrophoneCaptured
this.toggleRecording = function() {
if (recording) {
self.stop();
} else {
self.start();
}
};
this.start = function() {
// reset any previous data
recordedData = [];
recordingLength = 0;
// webkit audio context shim
audioCtx = new(window.AudioContext || window.webkitAudioContext)();
if (audioCtx.createJavaScriptNode) {
audioNode = audioCtx.createJavaScriptNode(bufferSize, 1, 1);
} else if (audioCtx.createScriptProcessor) {
audioNode = audioCtx.createScriptProcessor(bufferSize, 1, 1);
} else {
throw 'WebAudio not supported!';
}
audioNode.connect(audioCtx.destination);
navigator.mediaDevices.getUserMedia({ audio: true })
.then(onMicrophoneCaptured)
.catch(onMicrophoneError);
};
this.stop = function() {
stopRecording(function(blob) {
self.blob = blob;
config.onRecordingStop && config.onRecordingStop(blob);
});
};
this.upload = function(url, params, callback) {
var formData = new FormData();
formData.append("audio", self.blob, config.filename || 'recording.wav');
for (var i in params)
formData.append(i, params[i]);
var request = new XMLHttpRequest();
request.upload.addEventListener("progress", function(e) {
callback('progress', e, request);
});
request.upload.addEventListener("load", function(e) {
callback('load', e, request);
});
request.onreadystatechange = function(e) {
var status = 'loading';
if (request.readyState === 4) {
status = request.status === 200 ? 'done' : 'error';
}
callback(status, e, request);
};
request.open("POST", url);
request.send(formData);
};
function stopRecording(callback) {
// stop recording
recording = false;
// to make sure onaudioprocess stops firing
window.localStream.getTracks().forEach((track) => { track.stop(); });
audioInput.disconnect();
audioNode.disconnect();
exportWav({
sampleRate: sampleRate,
recordingLength: recordingLength,
data: recordedData
}, function(buffer, view) {
self.blob = new Blob([view], { type: 'audio/wav' });
callback && callback(self.blob);
});
}
function onMicrophoneCaptured(microphone) {
if (config.visualizer)
visualize(microphone);
// save the stream so we can disconnect it when we're done
window.localStream = microphone;
audioInput = audioCtx.createMediaStreamSource(microphone);
audioInput.connect(audioNode);
audioNode.onaudioprocess = onAudioProcess;
recording = true;
self.startDate = new Date();
config.onRecordingStart && config.onRecordingStart();
sampleRate = audioCtx.sampleRate;
}
function onMicrophoneError(e) {
console.log(e);
alert('Unable to access the microphone.');
}
function onAudioProcess(e) {
if (!recording) {
return;
}
recordedData.push(new Float32Array(e.inputBuffer.getChannelData(0)));
recordingLength += bufferSize;
self.recordingLength = recordingLength;
self.duration = new Date().getTime() - self.startDate.getTime();
config.onRecording && config.onRecording(self.duration);
}
function visualize(stream) {
var canvas = config.visualizer.element;
if (!canvas)
return;
var canvasCtx = canvas.getContext("2d");
var source = audioCtx.createMediaStreamSource(stream);
var analyser = audioCtx.createAnalyser();
analyser.fftSize = 2048;
var bufferLength = analyser.frequencyBinCount;
var dataArray = new Uint8Array(bufferLength);
source.connect(analyser);
function draw() {
// get the canvas dimensions
var width = canvas.width,
height = canvas.height;
// ask the browser to schedule a redraw before the next repaint
requestAnimationFrame(draw);
// clear the canvas
canvasCtx.fillStyle = config.visualizer.backcolor || '#fff';
canvasCtx.fillRect(0, 0, width, height);
if (!recording)
return;
canvasCtx.lineWidth = config.visualizer.linewidth || 2;
canvasCtx.strokeStyle = config.visualizer.forecolor || '#f00';
canvasCtx.beginPath();
var sliceWidth = width * 1.0 / bufferLength;
var x = 0;
analyser.getByteTimeDomainData(dataArray);
for (var i = 0; i < bufferLength; i++) {
var v = dataArray[i] / 128.0;
var y = v * height / 2;
i === 0 ? canvasCtx.moveTo(x, y) : canvasCtx.lineTo(x, y);
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
}
draw();
}
function exportWav(config, callback) {
function inlineWebWorker(config, cb) {
var data = config.data.slice(0);
var sampleRate = config.sampleRate;
data = joinBuffers(data, config.recordingLength);
console.log(data);
function joinBuffers(channelBuffer, count) {
var result = new Float64Array(count);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function writeUTFBytes(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
var dataLength = data.length;
// create wav file
var buffer = new ArrayBuffer(44 + dataLength * 2);
var view = new DataView(buffer);
writeUTFBytes(view, 0, 'RIFF'); // RIFF chunk descriptor/identifier
view.setUint32(4, 44 + dataLength * 2, true); // RIFF chunk length
writeUTFBytes(view, 8, 'WAVE'); // RIFF type
writeUTFBytes(view, 12, 'fmt '); // format chunk identifier, FMT sub-chunk
view.setUint32(16, 16, true); // format chunk length
view.setUint16(20, 1, true); // sample format (raw)
view.setUint16(22, 1, true); // mono (1 channel)
view.setUint32(24, sampleRate, true); // sample rate
view.setUint32(28, sampleRate * 2, true); // byte rate (sample rate * block align)
view.setUint16(32, 2, true); // block align (channel count * bytes per sample)
view.setUint16(34, 16, true); // bits per sample
writeUTFBytes(view, 36, 'data'); // data sub-chunk identifier
view.setUint32(40, dataLength * 2, true); // data chunk length
// write the PCM samples
var index = 44;
for (var i = 0; i < dataLength; i++) {
view.setInt16(index, data[i] * 0x7FFF, true);
index += 2;
}
if (cb) {
return cb({
buffer: buffer,
view: view
});
}
postMessage({
buffer: buffer,
view: view
});
}
var webWorker = processInWebWorker(inlineWebWorker);
webWorker.onmessage = function(event) {
callback(event.data.buffer, event.data.view);
// release memory
URL.revokeObjectURL(webWorker.workerURL);
};
webWorker.postMessage(config);
}
function processInWebWorker(_function) {
var workerURL = URL.createObjectURL(new Blob([_function.toString(),
';this.onmessage = function (e) {' + _function.name + '(e.data);}'
], {
type: 'application/javascript'
}));
var worker = new Worker(workerURL);
worker.workerURL = workerURL;
console.log(worker);
return worker;
}
function renderRecording(blob, list) {
    // Note: this helper expects the recording Blob, not a URL
    const blobUrl = URL.createObjectURL(blob);
    const li = document.createElement('li');
    const audio = document.createElement('audio');
    const anchor = document.createElement('a');
    anchor.setAttribute('href', blobUrl);
    const now = new Date();
    anchor.setAttribute(
        'download',
        `recording-${now.getFullYear()}-${(now.getMonth() + 1).toString().padStart(2, '0')}-${now.getDate().toString().padStart(2, '0')}--${now.getHours().toString().padStart(2, '0')}-${now.getMinutes().toString().padStart(2, '0')}-${now.getSeconds().toString().padStart(2, '0')}.wav`
    );
    anchor.innerText = 'Download';
    audio.setAttribute('src', blobUrl);
    audio.setAttribute('controls', 'controls');
    li.appendChild(audio);
    li.appendChild(anchor);
    list.appendChild(li);
}
}
and this Google Drive Script
var stream = require('stream');
module.exports.uploadFile = function(req) {
var file;
console.log("driveApi upload reached")
function blobToFile(req) {
file = req.body.blob
file.lastModifiedDate = new Date();
file.name = req.body.word;
return file;
}
blobToFile(req); // populate `file` from the request before streaming it
var bufStream = new stream.PassThrough();
bufStream.end(file);
var folderId = "Folder"; // Enter Folder Name
var fileMetadata = {
"name": req.body.word,
parents: [folderId]
}
var media = {
mimeType: "audio/mp3",
body: bufStream
}
drive.files.create({
auth: jwToken,
resource: fileMetadata,
media: media,
fields: "id"
}, function(err, file) {
if (err) {
console.error(err);
} else {
console.log("File Id: ", file.id);
}
console.log("driveApi upload accomplished")
});
}
I have tried a few different approaches to combining the two so that the .wav file is automatically saved to Google Drive, but it does not work.
I believe I am either not merging the two scripts together the right way or I am missing something. Do I need to use V3 of the Google API?
If anyone could provide some guidance on how to merge the two properly that would be greatly appreciated. Thank you!
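Not a drop-in answer, but here is a minimal sketch of one way the two could be wired together, assuming an Express server with the multer middleware. The '/upload' route, the folder ID, and the response handling are illustrative; drive, jwToken, and the 'audio' field name come from the scripts above:
// Client side: upload the wav blob once recording stops.
var rec = new Record_Current({
    onRecordingStop: function(blob) {
        rec.upload('/upload', { word: 'example' }, function(status) {
            console.log('upload status:', status);
        });
    }
});

// Server side (Express + multer; multer with no options keeps uploads in memory).
const express = require('express');
const multer = require('multer');
const stream = require('stream');
const upload = multer();
const app = express();

app.post('/upload', upload.single('audio'), function(req, res) {
    const bufStream = new stream.PassThrough();
    bufStream.end(req.file.buffer); // raw wav bytes sent by record.js
    drive.files.create({
        auth: jwToken,
        resource: { name: req.body.word, parents: ['YOUR_FOLDER_ID'] },
        media: { mimeType: 'audio/wav', body: bufStream },
        fields: 'id'
    }, function(err, file) {
        if (err) return res.status(500).send(err);
        // newer googleapis versions wrap the result in file.data
        res.send({ id: file.data ? file.data.id : file.id });
    });
});
The key point is that the audio has to reach drive.files.create as a Node.js stream or Buffer; the blobToFile approach above passes the browser Blob through req.body, which does not survive the trip.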
The code below is from a webpage that records audio. I have a problem I cannot solve and have already tried several approaches without success. When I run the code on a desktop it works fine and the audio comes out clean; the real problem is when I run the same test on Android (6.0, 7.0). It completes the whole process, but the recorded voice comes out distorted. I have tried to fix it but cannot. Maybe someone with more experience can help me.
NOTE: I use "https" on the page.
var audioContext = null;
var context = null;
var volume = null;
var audioInput = null;
var recorder = null;
var recordingLength = 0;
var leftchannel = [];
var rightchannel = [];
var bufferSize = 16384;
var sampleRate = 8000;
var requestStreamReadPermission = {
audio: true,
video:false
};
$scope.canRecordAudio = false;
// -
navigator.getUserMedia = (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
// -
var writeUTFBytes = function(view, offset, string) {
var lng = string.length;
for (var i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
};
// -
var interleave = function(leftChannel, rightChannel) {
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
};
// -
var mergeBuffers = function(channelBuffer, recordingLength) {
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++) {
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
};
// -
var errorGetUserMedia = function(error) {
console.log(error);
GeneralMessageService.clean();
GeneralMessageService.addMessage($filter('translate')('chat.mobile.checkorwaitforconnection'));
};
// -
var successGetUserMedia = function(stream) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
// retrieve the current sample rate to be used for the WAV packaging
// sampleRate = context.sampleRate;
volume = context.createGain();
audioInput = context.createMediaStreamSource(stream);
audioInput.connect(volume);
//recorder = context.createScriptProcessor(bufferSize, 2, 2);
var numberOfInputChannels = 2;
var numberOfOutputChannels = 2;
if (context.createScriptProcessor) {
recorder = context.createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels);
} else {
recorder = context.createJavaScriptNode(bufferSize, numberOfInputChannels, numberOfOutputChannels);
}
recorder.onaudioprocess = function(e) {
    if (!$scope.recordingAudio) {
        return;
    }
    var left = e.inputBuffer.getChannelData(0);
    //var right = e.inputBuffer.getChannelData(1);
    leftchannel.push(new Float32Array(left));
    //rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
};
volume.connect(recorder);
recorder.connect(context.destination);
$scope.canRecordAudio = true;
};
// -
if (!!navigator.getUserMedia) {
navigator.getUserMedia(
requestStreamReadPermission,
successGetUserMedia,
errorGetUserMedia
);
} else {
errorGetUserMedia('UserMedia is empty');
}
// -
var startRecording = function() {
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
};
// -
var stopRecording = function() {
var leftBuffer = mergeBuffers(leftchannel, recordingLength);
//var rightBuffer = mergeBuffers(rightchannel, recordingLength);
//var interleaved = interleave(leftBuffer, rightBuffer);
var buffer = new ArrayBuffer(44 + leftBuffer.length * 2);
var view = new DataView(buffer);
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + leftBuffer.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
//MONO
view.setUint16(22, 1, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 2, true);
view.setUint16(32, 2, true);
view.setUint16(34, 16, true);
//stereo
/*view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);*/
//end
writeUTFBytes(view, 36, 'data');
view.setUint32(40, leftBuffer.length * 2, true);
var lng = leftBuffer.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++) {
var s = Math.max(-1, Math.min(1, leftBuffer[i]));
view.setInt16(index, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
index += 2;
}
var blob = new Blob([view], {
type: 'audio/wav'
});
var url = (window.URL || window.webkitURL).createObjectURL(blob);
addAudioMessage(url);
//clear();
};
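One thing worth checking, hinted at by the commented-out line in successGetUserMedia: the WAV header is written with the hardcoded sampleRate = 8000, while getChannelData delivers samples at the AudioContext's native rate (often 44100 or 48000 on Android). A header rate that doesn't match the captured samples gives exactly this kind of garbled playback. A minimal sketch of the change:
var successGetUserMedia = function(stream) {
    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();
    // Write the WAV header with the rate the samples were actually captured at.
    sampleRate = context.sampleRate;
    // ... rest of the setup unchanged
};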
The ScriptProcessorNode doesn't work with an OfflineAudioContext.
It works in Chrome, Mozilla Firefox.
It doesn't work in Edge 25, Safari 10.
The issue is that the onaudioprocess event is called only once when the OfflineAudioContext is processed.
Example on jsfiddle without BufferSource.
Example on jsfiddle based on MDN example with BufferSource.
console.clear();
var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var current = 0;
var buffer_size = 4096;
var buffer_length = buffer_size * 10;
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var scriptNode = audioCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNode.onaudioprocess = whiteNoise;
function whiteNoise(audioProcessingEvent) {
console.log('onaudioprocess', current);
// The output buffer contains the samples that will be modified and played
var outputBuffer = audioProcessingEvent.outputBuffer;
// Loop through the output channel
for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
var outputData = outputBuffer.getChannelData(channel);
for (var sample = 0; sample < buffer_size; sample++) {
// add noise to each output sample
outputData[sample] += ((Math.random() * 2) - 1);
}
}
current += buffer_size;
if (current > buffer_length)
scriptNode.disconnect();
}
playButton.onclick = function() {
current = 0;
scriptNode.connect(audioCtx.destination);
}
playButtonOffline.onclick = function() {
var offlineCtx = new(window.OfflineAudioContext || window.webkitOfflineAudioContext)(1, buffer_length, 48000);
var scriptNodeOffline = offlineCtx.createScriptProcessor(buffer_size, 1, 1);
scriptNodeOffline.onaudioprocess = whiteNoise;
current = 0;
offlineCtx.oncomplete = function(e) {
console.log('rendered buffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
}
scriptNodeOffline.connect(offlineCtx.destination);
offlineCtx.startRendering();
}
<button class="play">
play
</button>
<button class="play-offline">
Render offline
</button>
Update
Clicking on Render offline many times in Chrome and Firefox produced the same output.
Clicking on Render offline many times in Safari and Edge produced different output each time.
Example on jsfiddle.
// Create AudioContext and buffer source
console.clear();
var playButton = document.querySelector('.play');
var playButtonOffline = document.querySelector('.play-offline');
var myBuffer = null;
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var source = audioCtx.createBufferSource();
// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
// load in an audio track via XHR and decodeAudioData
function getData() {
var request = new XMLHttpRequest();
request.open('GET', 'https://s3-ap-northeast-1.amazonaws.com/storage.cowrite.decodeapps.io/Materials/Media/Audio/59f2b85dd3aed-20171027-043853.mp3', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
myBuffer = buffer;
source.buffer = myBuffer;
},
function(e) {
    console.error("Error with decoding audio data " + e.err);
});
}
request.send();
}
function addNoise(audioProcessingEvent) {
console.log("onaudioprocess")
// The input buffer is the song we loaded earlier
var inputBuffer = audioProcessingEvent.inputBuffer;
// The output buffer contains the samples that will be modified and played
var outputBuffer = audioProcessingEvent.outputBuffer;
// Loop through the output channels (in this case there is only one)
for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
var inputData = inputBuffer.getChannelData(channel);
var outputData = outputBuffer.getChannelData(channel);
// Loop through the 4096 samples
for (var sample = 0; sample < inputBuffer.length; sample++) {
// make output equal to the same as the input
outputData[sample] = inputData[sample];
// add noise to each output sample
outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
}
}
}
// Give the node a function to process audio events
scriptNode.onaudioprocess = addNoise;
getData();
// wire up play button
playButton.onclick = function() {
source.connect(scriptNode);
scriptNode.connect(audioCtx.destination);
source.start();
}
// When the buffer source stops playing, disconnect everything
source.onended = function() {
source.disconnect(scriptNode);
scriptNode.disconnect(audioCtx.destination);
}
// When the buffer source stops playing, disconnect everything
// wire up play button
playButtonOffline.onclick = function() {
var offlineCtx = new(window.OfflineAudioContext || window.webkitOfflineAudioContext)(2, myBuffer.length, myBuffer.sampleRate);
var scriptNodeOffline = offlineCtx.createScriptProcessor(4096, 1, 1);
var sourceOffline = offlineCtx.createBufferSource();
sourceOffline.buffer = myBuffer;
sourceOffline.onended = function() {
console.log('sourceOffline.onended');
sourceOffline.disconnect(scriptNodeOffline);
scriptNodeOffline.disconnect(offlineCtx.destination);
}
scriptNodeOffline.onaudioprocess = addNoise;
sourceOffline.connect(scriptNodeOffline);
scriptNodeOffline.connect(offlineCtx.destination);
sourceOffline.start();
offlineCtx.oncomplete = function(e) {
console.log('renderedBuffer', e.renderedBuffer.getChannelData(0).filter(f => f != 0).length);
listenRendered(e.renderedBuffer);
};
offlineCtx.startRendering();
}
var _audioCtx = new(window.AudioContext || window.webkitAudioContext)();
function listenRendered(buffer) {
var _source = _audioCtx.createBufferSource();
_source.buffer = buffer;
_source.connect(_audioCtx.destination);
_source.start();
}
<button class="play">
play
</button>
<button class="play-offline">
Render offline
</button>
These are bugs in Safari and Edge. A ScriptProcessorNode should work fine in an offline context. File bugs with Safari and Edge.
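Beyond filing bugs, one possible interim workaround (my suggestion, not something the original answer proposes): in an offline context the whole signal is available up front, so the processing can be applied to the rendered AudioBuffer in plain JavaScript, with no ScriptProcessorNode in the graph at all:
// A minimal sketch: add noise to a decoded AudioBuffer deterministically in
// every browser. `myBuffer` is assumed to be an already-decoded AudioBuffer,
// as in the examples above; `done` receives the processed buffer.
function addNoiseOffline(myBuffer, done) {
    var offlineCtx = new (window.OfflineAudioContext || window.webkitOfflineAudioContext)(
        myBuffer.numberOfChannels, myBuffer.length, myBuffer.sampleRate);
    var source = offlineCtx.createBufferSource();
    source.buffer = myBuffer;
    source.connect(offlineCtx.destination);
    source.start();
    offlineCtx.oncomplete = function(e) {
        var rendered = e.renderedBuffer;
        // Post-process the rendered samples directly; no onaudioprocess involved.
        for (var ch = 0; ch < rendered.numberOfChannels; ch++) {
            var data = rendered.getChannelData(ch);
            for (var i = 0; i < data.length; i++) {
                data[i] += ((Math.random() * 2) - 1) * 0.2;
            }
        }
        done(rendered);
    };
    offlineCtx.startRendering();
}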
I'm trying to build an amplifier using a ScriptProcessorNode (yes, I know it is deprecated, and yes, I'm fully aware of the createGainNode). The reason is that the createGainNode is better suited to attenuating gain than to boosting it. I'm an audio engineer and I want to be able to clip my audio. I built a static version that handles a gain of at least 150 (as long as you limit the value of the floats after the boost). However, this new version, which is controlled by a range input, just cuts the volume off completely.
I'm not getting any errors in the console; why is the volume cutting out?
<body>
<div id="button" style="background:#000;width:50px;height:50px;" onclick="connect()"></div>
<input id="range" type="range" min="1" max="128" value="1" oninput="myFunction(value)"/>
<script>
var context,
soundSource,
scriptNode,
buffer,
value = 1;
if(typeof AudioContext !== 'undefined') {
context = new AudioContext();
}else if(typeof webkitAudioContext !== 'undefined') {
context = new webkitAudioContext();
}else {
throw new Error('AudioContext not supported. :(');
}
function xhr() {
var request = new XMLHttpRequest();
request.open("GET", 'bass.wav', true);
request.responseType = 'arraybuffer';
request.onload = function() {
context.decodeAudioData(request.response, function onSuccess(decodedData) {
buffer = decodedData;
}, function onFailure() {
alert("Decoding the audio buffer failed");
});
}
request.send();
}
function connect() {
scriptNode = context.createScriptProcessor(256, 2, 2);
scriptNode.onaudioprocess = function(audioProcessingEvent) {
var input = audioProcessingEvent.inputBuffer;
var output = audioProcessingEvent.outputBuffer;
for(var channel = 0; channel < datum.numberOfChannels; channel++) {
var I = input.getChannelData(channel);
var O = output.getChannelData(channel);
for(var i = 0; i < input.length; i++) {
O[i] = I[i];
O[i] *= value;
}
}
}
soundSource = context.createBufferSource();
soundSource.buffer = buffer;
soundSource.connect(scriptNode);
scriptNode.connect(context.destination);
soundSource.start();
}
connect();
function myFunction(val) {
value = val;
soundSource.disconnect();
scriptNode.disconnect();
soundSource.connect(scriptNode);
scriptNode.connect(context.destination);
}
</script>
EDIT
Instead of trying to build my own amplifier, I should have just used the createGainNode because it works exactly as intended.
EDIT 2
After playing with the createGainNode more, I figured out the original problem. In Chrome, the createGainNode works fine, but in Firefox it didn't hold up as well as my amplification code. It amplifies the quieter parts of the signal, but when the louder parts come in, the audio drops out. Is there any workaround for this? My amplification code:
function connect() {
soundSource = context.createBufferSource();
soundSource.buffer = buffer;
volume = context.createGain();
volume.gain.value = 1500;
soundSource.connect(volume);
volume.connect(context.destination);
soundSource.start();
}
function amplify() {
soundSource.stop();
var left = buffer.getChannelData(0);
var right = buffer.getChannelData(1);
for(var i = 0; i < left.length; i++) {
left[i] = left[i] * 1500;
if(left[i] > 0) {
left[i] = 128;
}
if(left[i] < 0) {
left[i] = -128;
}
right[i] = right[i] * 1500;
if(right[i] > 0) {
right[i] = 128;
}
if(right[i] < 0) {
right[i] = -128;
}
}
buffer.copyToChannel(left,0);
buffer.copyToChannel(right,1);
var amp = context.createBufferSource();
amp.buffer = buffer;
amp.connect(context.destination);
amp.start();
}
EDIT 3
I've opened a bug report on bugzilla here: https://bugzilla.mozilla.org/show_bug.cgi?id=1233821
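As a workaround sketch (my suggestion, not something from the thread): since the goal is amplification with deliberate hard clipping, a WaveShaperNode with a clipping curve keeps the processing inside native nodes. The curve resolution of 2048 points is an arbitrary choice:
// Amplify by `gain` and hard-clip at +/-1 using a WaveShaperNode,
// instead of editing the buffer's samples in place.
function createClipper(context, gain) {
    var shaper = context.createWaveShaper();
    var n = 2048; // curve resolution; arbitrary
    var curve = new Float32Array(n);
    for (var i = 0; i < n; i++) {
        var x = (i / (n - 1)) * 2 - 1;                  // map index to [-1, 1]
        curve[i] = Math.max(-1, Math.min(1, x * gain)); // amplify, then clip
    }
    shaper.curve = curve;
    return shaper;
}

// Usage: source -> clipper -> destination
// var clipper = createClipper(context, 1500);
// soundSource.connect(clipper);
// clipper.connect(context.destination);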
I need to use ffmpeg in my JavaScript/HTML5 project to let the user select the format the audio opens with. I don't know anything about ffmpeg, and despite a lot of research I don't know how to use it in my project. I found an example, https://github.com/sopel39/audioconverter.js, but the problem is how to add ffmpeg.js, which is 8 MB, to my project. If someone can help me I'll be very thankful.
here is my full code:
the javascript page:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function getVal(value)
{
// if R is pressed, we start recording
if ( value == "record"){
recording = true;
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
document.getElementById('output').innerHTML="Recording now...";
// if S is pressed, we stop the recording and package the WAV file
} else if ( value == "stop" ){
// we stop recording
recording = false;
document.getElementById('output').innerHTML="Building wav file...";
// we flat the left and right channels down
var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
// we interleave both channels together
var interleaved = interleave ( leftBuffer, rightBuffer );
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
var lng = interleaved.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++){
view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
index += 2;
}
var blob = new Blob ( [ view ], { type : 'audio/wav' } );
// let's save it locally
document.getElementById('output').innerHTML='Handing off the file now...';
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var li = document.createElement('li');
var au = document.createElement('audio');
var hf = document.createElement('a');
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(hf);
recordingList.appendChild(li);
}
}
function success(e){
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
volume = context.createGain();
// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);
// connect the stream(source) to the gain node
source.connect(volume);
var bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2);
//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function(e){
if (!recording) return;
var left = e.inputBuffer.getChannelData (0);
var right = e.inputBuffer.getChannelData (1);
leftchannel.push (new Float32Array (left));
rightchannel.push (new Float32Array (right));
recordingLength += bufferSize;
// get the average for the first channel
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var c=document.getElementById("myCanvas");
var ctx = c.getContext("2d");
// clear the current state
ctx.clearRect(0, 0, 1000, 325);
var gradient = ctx.createLinearGradient(0,0,0,300);
gradient.addColorStop(1,'#000000');
gradient.addColorStop(0.75,'#ff0000');
gradient.addColorStop(0.25,'#ffff00');
gradient.addColorStop(0,'#ffffff');
// set the fill style
ctx.fillStyle=gradient;
drawSpectrum(array);
function drawSpectrum(array) {
for ( var i = 0; i < (array.length); i++ ){
var value = array[i];
ctx.fillRect(i*5,325-value,3,325);
}
}
}
function getAverageVolume(array) {
var values = 0;
var average;
var length = array.length;
// get all the frequency amplitudes
for (var i = 0; i < length; i++) {
values += array[i];
}
average = values / length;
return average;
}
// we connect the recorder(node to destination(speakers))
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength){
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++){
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel){
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length; ){
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string){
var lng = string.length;
for (var i = 0; i < lng; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
and here is the html code:
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Simple Web Audio Recorder</title>
<script src="js/functions.js"></script>
<link href="css/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<input type="button" value="record" onclick="getVal(this.value)">
<input type="button" value="stop" onclick="getVal(this.value)">
<p id="output"></p>
<ul id="recordingList"></ul>
<canvas id="myCanvas" width="1000" height="325" style="display: block;"></canvas>
</body>
</html>
ffmpeg is a native library. Running it through Emscripten might not be practical in everyday web development yet; using Emscripten and the JavaScript it generates requires some advanced JavaScript skills.
Instead, I suggest you upload your audio files to the server side and convert them there.
Record the voice as blob using getUserMedia()
Upload the recorded blob to the server (a minimal sketch of this step follows this list)
On the server-side pick your preferable web programming framework
The web programming framework accepts the upload and stores the file on the server
The web programming framework runs a ffmpeg (command line) which processes the file
The user can download the processed file
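A minimal sketch of the upload step, assuming blob is the recorded audio/wav Blob from the recorder above; the '/convert' endpoint and the 'audio' field name are placeholders for whatever your framework exposes:
// Send the recorded blob to the server for conversion.
function uploadRecording(blob) {
    var formData = new FormData();
    formData.append('audio', blob, 'recording.wav');
    var request = new XMLHttpRequest();
    request.open('POST', '/convert');
    request.onreadystatechange = function() {
        if (request.readyState === 4) {
            console.log(request.status === 200 ? 'uploaded' : 'upload failed');
        }
    };
    request.send(formData);
}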
Here is a Python example for converting uploaded MP3 files to 48 kbit AAC files:
def create_prelisten_aac(mp3, aac):
"""
Run the encode for a single file.
Do 48 kbit files for prelisten.
"""
cmdline = [ FFMPEG, '-y', '-i', mp3, '-acodec', 'libfaac', '-ar', '22050', '-ac', '1', '-ab', '48000', aac ]
return subprocess.call(cmdline)
Here you can find the full source code of this application:
https://github.com/miohtama/LibertyMusicStore