AnalyserNode.getFloatFrequencyData always returns -Infinity

Alright, so I'm trying to determine the intensity (in dB) of samples of an audio file recorded in the user's browser.
I have been able to record it and play it through an HTML <audio> element.
But when I try to use this element as a source and connect it to an AnalyserNode, AnalyserNode.getFloatFrequencyData always returns an array full of -Infinity, getByteFrequencyData always returns zeroes, and getByteTimeDomainData is full of 128.
Here's my code:
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source;
var analyser = audioCtx.createAnalyser();
var bufferLength = analyser.frequencyBinCount;
var data = new Float32Array(bufferLength);
mediaRecorder.onstop = function(e) {
  var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
  chunks = [];
  var audioURL = window.URL.createObjectURL(blob);
  // audio is an HTML audio element
  audio.src = audioURL;
  audio.addEventListener("canplaythrough", function() {
    source = audioCtx.createMediaElementSource(audio);
    source.connect(analyser);
    analyser.connect(audioCtx.destination);
    analyser.getFloatFrequencyData(data);
    console.log(data);
  });
}
Any idea why the AnalyserNode behaves as if the source were empty/muted? I also tried using the stream as the source while recording, with the same result.

I ran into the same issue; thanks to some of your code snippets, I made it work on my end (the code below is TypeScript and will not run in the browser as-is at the moment of writing):
audioCtx.decodeAudioData(this.result as ArrayBuffer).then(function (buffer: AudioBuffer) {
  soundSource = audioCtx.createBufferSource();
  soundSource.buffer = buffer;
  //soundSource.connect(audioCtx.destination); //I do not need to play the sound
  soundSource.connect(analyser);
  soundSource.start(0);
  setInterval(() => {
    calc(); //In here, I will get the analyzed data with analyser.getFloatFrequencyData
  }, 300); //This can be changed to 0.
  // The interval helps with making sure the buffer has the data
});
Some explanation (I'm still a beginner when it comes to the Web Audio API, so my explanation might be wrong or incomplete):
An analyser needs to be able to analyse a specific part of your sound file. In this case I create an AudioBufferSourceNode that contains the buffer I got from decoding the audio data. I feed the buffer to the source, which eventually gets copied into the analyser. However, without the interval callback, the buffer never seems to be ready and the analysed data contains -Infinity (which I assume means the absence of any sound, as there is nothing to read) at every index of the array. That is why the interval is there: it analyses the data every 300 ms.
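For anyone who wants the same pattern in plain JavaScript, here is a minimal sketch (it assumes blob is the recorded Blob from the question and that decodeAudioData returns a Promise, which it does in current browsers):
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var analyser = audioCtx.createAnalyser();
var data = new Float32Array(analyser.frequencyBinCount);
var reader = new FileReader();
reader.onload = function() {
  audioCtx.decodeAudioData(reader.result).then(function(buffer) {
    var soundSource = audioCtx.createBufferSource();
    soundSource.buffer = buffer;
    soundSource.connect(analyser);             // analyse without playing the sound,
    // analyser.connect(audioCtx.destination); // or uncomment to hear it as well
    soundSource.start(0);
    setInterval(function() {
      analyser.getFloatFrequencyData(data);    // dB values, one per frequency bin
      console.log(data);
    }, 300);
  });
};
reader.readAsArrayBuffer(blob); // "blob" is the recorded Blob from the question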
Hope this helps someone!

You need to fetch the audio file and decode the audio buffer.
The URL of the audio source must also be on the same domain or be served with the correct CORS headers (as mentioned by Anthony).
Note: Replace <FILE-URI> with the path to your file in the example below.
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source;
var analyser = audioCtx.createAnalyser();
var button = document.querySelector('button');
var freqs;
var times;
button.addEventListener('click', (e) => {
  fetch("<FILE-URI>", {
    headers: new Headers({
      "Content-Type" : "audio/mpeg"
    })
  }).then(function(response){
    return response.arrayBuffer();
  }).then((ab) => {
    audioCtx.decodeAudioData(ab, (buffer) => {
      source = audioCtx.createBufferSource();
      source.connect(audioCtx.destination);
      source.connect(analyser);
      source.buffer = buffer;
      source.start(0);
      viewBufferData();
    });
  });
});
// Watch the changes in the audio buffer
function viewBufferData() {
  setInterval(function(){
    freqs = new Uint8Array(analyser.frequencyBinCount);
    times = new Uint8Array(analyser.frequencyBinCount);
    analyser.smoothingTimeConstant = 0.8;
    analyser.fftSize = 2048;
    analyser.getByteFrequencyData(freqs);
    analyser.getByteTimeDomainData(times);
    console.log(freqs);
    console.log(times);
  }, 1000);
}

Is the source file from a different domain? That would fail in createMediaElementSource.

Related

Why do AudioBufferSourceNodes stack on play?

Basically, I'm trying to build and play audio data from bytes that come over a WS socket.
Detailed:
I have a simple WS server written in Django-Channels that, on connect, sends me an audio file split into Blob chunks of 6144 bytes each. Next, I want to decode this blob data and turn it into sound:
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source;
var play = document.querySelector('#play');
var audioQueue = []
const chatSocket = new WebSocket(
  'ws://'
  + window.location.host
  + 'audio-stream-test'
  + '/'
);
chatSocket.onmessage = function(e) {
  e.data.arrayBuffer().then(buffer => {
    audioCtx.decodeAudioData(buffer, (x) => {
      source = audioCtx.createBufferSource();
      source.buffer = x;
      source.connect(audioCtx.destination);
      source.loop = false;
      audioQueue.push(source);
    });
  });
};
After the WS has sent all the data, it closes on the server side. The last thing is to play the queued buffers from the audioQueue array:
play.onclick = function() {
  var playOffset;
  for (let [bufferCount, buffer] of audioQueue.entries()) {
    if (bufferCount == 0) {
      playOffset = 0;
    } else {
      playOffset = audioQueue[bufferCount-1].buffer.duration;
    }
    buffer.start(when=playOffset);
  }
}
To clarify this line: playOffset = audioQueue[bufferCount-1].buffer.duration. I think I wrote it correctly, because I want to play each new buffer at the end of the old (already played) one.
To me, as a server-side developer, it seems like it should work fine.
But the main problem is: all buffers from the audioQueue array are played at once. I don't know what I'm doing wrong. Hoping for your help :)
You need to start each AudioBufferSourceNode in relation to the currentTime of the AudioContext.
play.onclick = function() {
  audioQueue.reduce((startTime, audioBufferSourceNode) => {
    audioBufferSourceNode.start(startTime);
    return startTime + audioBufferSourceNode.buffer.duration;
  }, audioCtx.currentTime);
};
The code above loops through all nodes in the audioQueue. It computes the startTime for each AudioBufferSourceNode by accumulating the durations of the previous nodes, starting from the currentTime of the AudioContext.
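If the reduce is hard to read, the same scheduling can be written as an explicit loop (this is only a rewrite of the snippet above, not a different technique):
play.onclick = function() {
  var startTime = audioCtx.currentTime;           // schedule relative to "now"
  for (var i = 0; i < audioQueue.length; i++) {
    audioQueue[i].start(startTime);               // start this chunk...
    startTime += audioQueue[i].buffer.duration;   // ...and schedule the next one right after it
  }
};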

Web Audio API getFloatFrequencyData function setting Float32Array argument data to array of -Infinity values

I'm currently playing around with the Web Audio API in Chrome (60.0.3112.90) to possibly build a sound wave of a given file via FileReader, AudioContext, createScriptProcessor, and createAnalyser. I have the following code:
const visualize = analyser => {
  analyser.fftSize = 256;
  let bufferLength = analyser.frequencyBinCount;
  let dataArray = new Float32Array(bufferLength);
  analyser.getFloatFrequencyData(dataArray);
}

loadAudio(file){
  // creating FileReader to convert audio file to an ArrayBuffer
  const fileReader = new FileReader();
  navigator.getUserMedia = (navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia ||
    navigator.msGetUserMedia);
  fileReader.addEventListener('loadend', () => {
    const fileArrayBuffer = fileReader.result;
    let audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    let processor = audioCtx.createScriptProcessor(4096, 1, 1);
    let analyser = audioCtx.createAnalyser();
    analyser.connect(processor);
    let data = new Float32Array(analyser.frequencyBinCount);
    let soundBuffer;
    let soundSource = audioCtx.createBufferSource();
    // loading audio track into buffer
    audioCtx.decodeAudioData(
      fileArrayBuffer,
      buffer => {
        soundBuffer = buffer;
        soundSource.buffer = soundBuffer;
        soundSource.connect(analyser);
        soundSource.connect(audioCtx.destination);
        processor.onaudioprocess = () => {
          // data becomes array of -Infinity values after call below
          analyser.getFloatFrequencyData(data);
        };
        visualize(analyser);
      },
      error => 'error with decoding audio data: ' + error.err
    );
  });
  fileReader.readAsArrayBuffer(file);
}
Upon loading a file, I get all the way to analyser.getFloatFrequencyData(data). Reading the Web Audio API docs, it says that the parameter is:
The Float32Array that the frequency domain data will be copied to.
For any sample which is silent, the value is -Infinity.
In my case, I have both an mp3 and a wav file I'm using to test this, and after invoking analyser.getFloatFrequencyData(data), both files end up giving me data which becomes an array of -Infinity values.
This may be due to my ignorance of the Web Audio API, but my question is: why are both files, which contain loud audio, giving me an array that represents silent samples?
The Web Audio AnalyserNode is only designed to work in realtime. (It used to be called RealtimeAnalyser.) Web Audio doesn't have the ability to do analysis on buffers; take a look at another library, like DSP.js.
Instead of:
soundSource.connect(analyser);
soundSource.connect(audioCtx.destination);
try:
soundSource.connect(analyser);
analyser.connect(audioCtx.destination);
Realising I should use a source => analyser => destination chain solved this problem when I encountered it.
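Putting it together, the corrected routing inside the question's decodeAudioData callback would look roughly like this (note that the source also has to be started, which the code in the question never does; without start() the graph stays silent and every bin reads -Infinity):
soundSource.buffer = soundBuffer;
soundSource.connect(analyser);            // source feeds the analyser...
analyser.connect(audioCtx.destination);   // ...and the analyser feeds the output
soundSource.start(0);                     // start playback so there is data to analyse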

How to draw analyzer from audio url?

I am getting the audio URL (http://api.server.com/uploads/files/ecae64b511b1266fa3930731ec379d2dcdcc7546.wav) from an API server. I want to draw this sound on a canvas. The following function works on a blob object (recorded from the user), but it does not work with the URL:
$window.AudioContext = $window.AudioContext || $window.webkitAudioContext;
vm.audioContext = new AudioContext();
function gotStream(stream) {
  vm.inputPoint = vm.audioContext.createGain();
  vm.realAudioInput = vm.audioContext.createMediaStreamSource(stream);
  vm.audioInput = vm.realAudioInput;
  vm.audioInput.connect(vm.inputPoint);
  // audioInput = convertToMono( input );
  vm.analyserNode = vm.audioContext.createAnalyser();
  vm.analyserNode.fftSize = 2048;
  vm.inputPoint.connect(vm.analyserNode);
  vm.audioRecorder = new Recorder(vm.inputPoint);
  var zeroGain = vm.audioContext.createGain();
  zeroGain.gain.value = 0.0;
  vm.inputPoint.connect(zeroGain);
  zeroGain.connect(vm.audioContext.destination);
  updateAnalysers();
}
This is probably a CORS issue if the file comes from a different origin than the code. If you don't fix this, the MediaStreamSource will probably just output zeroes.
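If the server does send the appropriate Access-Control-Allow-Origin header, one way to feed the same analyser/drawing code is to fetch and decode the file instead of going through a MediaStream. A minimal sketch (assuming decodeAudioData returns a Promise, as it does in current browsers; the URL is the one from the question):
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var analyserNode = audioCtx.createAnalyser();
analyserNode.fftSize = 2048;
fetch('http://api.server.com/uploads/files/ecae64b511b1266fa3930731ec379d2dcdcc7546.wav')
  .then(function(response) { return response.arrayBuffer(); })
  .then(function(arrayBuffer) { return audioCtx.decodeAudioData(arrayBuffer); })
  .then(function(buffer) {
    var source = audioCtx.createBufferSource();
    source.buffer = buffer;
    source.connect(analyserNode);             // analyserNode can now be polled for drawing
    analyserNode.connect(audioCtx.destination);
    source.start(0);
  });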

Read samples from wav-file

I'm trying to make a webpage in HTML5 that stores sample data from a WAV file in an array. Is there any way to get the sample data with JavaScript?
I'm using a file-input to select the wav-file.
In the javascript I already added:
document.getElementById('fileinput').addEventListener('change', readFile, false);
but I have no idea what to do in readFile.
EDIT:
I tried to get the file in an ArrayBuffer, pass it to the decodeAudioData method and get a typedArraybuffer out of it.
This is my code:
var openFile = function(event) {
  var input = event.target;
  var audioContext = new AudioContext();
  var reader = new FileReader();
  reader.onload = function(){
    var arrayBuffer = reader.result;
    console.log("arrayBuffer:");
    console.log(arrayBuffer);
    audioContext.decodeAudioData(arrayBuffer, decodedDone);
  };
  reader.readAsArrayBuffer(input.files[0]);
};
function decodedDone(decoded) {
  var typedArray = new Uint32Array(decoded, 1, decoded.length);
  console.log("decoded");
  console.log(decoded);
  console.log("typedArray");
  console.log(typedArray);
  for (i=0; i<10; i++) {
    console.log(typedArray[i]);
  }
}
The elements of typedArray are all 0. Is my way of creating the typedArray wrong, or did I do something else wrong?
EDIT:
I finally got it. This is my code:
var openFile = function(event) {
  var input = event.target;
  var audioContext = new AudioContext();
  var reader = new FileReader();
  reader.onload = function(){
    var arrayBuffer = reader.result;
    console.log("arrayBuffer:");
    console.log(arrayBuffer);
    audioContext.decodeAudioData(arrayBuffer, decodedDone);
  };
  reader.readAsArrayBuffer(input.files[0]);
};
function decodedDone(decoded) {
  var typedArray = new Float32Array(decoded.length);
  typedArray = decoded.getChannelData(0);
  console.log("typedArray:");
  console.log(typedArray);
}
Thanks for the answers!
You'll need to learn a lot about Web APIs to accomplish that, but in the end it's quite simple.
1. Get the file contents in an ArrayBuffer with the File API.
2. Pass it to the Web Audio API's decodeAudioData method.
3. Get a typed ArrayBuffer with the raw samples you wanted.
Edit: If you want to implement an equalizer, you're wasting your time; there's a native equalizer node in the Audio API. Depending on the length of your wave file, it might be better not to load it all into memory and instead just create a source that reads from it and connect that source to an equalizer node.
Here's a simple code example to get a Float32Array from a wav audio file in JavaScript:
let audioData = await fetch("https://example.com/test.wav").then(r => r.arrayBuffer());
let audioCtx = new AudioContext({sampleRate:44100});
let decodedData = await audioCtx.decodeAudioData(audioData); // audio is resampled to the AudioContext's sampling rate
console.log(decodedData.length, decodedData.duration, decodedData.sampleRate, decodedData.numberOfChannels);
let float32Data = decodedData.getChannelData(0); // Float32Array for channel 0
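As a quick sanity check that the samples were actually decoded, you can scan the channel data for its peak value (samples are floats in the -1.0 to 1.0 range):
let peak = 0;
for (let i = 0; i < float32Data.length; i++) {
  const v = Math.abs(float32Data[i]);
  if (v > peak) peak = v;               // track the largest absolute sample seen so far
}
console.log('peak amplitude:', peak);   // 0 would mean the channel is completely silent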

Firefox's MediaRecorder interface only providing new video data every two seconds

I'm trying to send a video stream obtained via WebRTC's getUserMedia() method to the server for additional processing. Latency is important, as I wish to detect changes in the video stream and update the client immediately. For this particular use, a Firefox-only solution is acceptable, and so I'm investigating the MediaRecorder interface.
I've put together a simple test case, included below. There are no errors, and the ondataavailable callback is called every 500ms, as expected. However, for three out of four of these calls, the size of the data provided is zero. This suggests to me that the data is being grouped into chunks of about two seconds (possibly due to constraints of the video encoding being used).
Is it possible to get MediaRecorder to provide data at a finer granularity? If not, what is the best way to get video data from the userMedia stream to the server with low latency? An interface specific to Chrome or Firefox would be fine, but one that worked in both would be even better.
<html>
  <body>
    <h1>MediaRecorder Test</h1>
    <video id="video" width="640" style="border: 1px solid black"></video>
  </body>
</html>
<script>
// The variable that holds the video stream
var mediastream = null;
// Start video capture (and provide a way to stop it)
navigator.mozGetUserMedia({ video: true, audio: false },
  function(stream_arg) {
    mediastream = stream_arg;
    var vendorURL = window.URL || window.webkitURL;
    video.src = vendorURL.createObjectURL(mediastream);
    video.play();
    recordStream();
  },
  function(err) { console.log("Error starting video stream: " + err); }
);
// Record the stream
var recorder = null;
function recordStream() {
  recorder = new MediaRecorder(mediastream);
  recorder.ondataavailable = function(ev) {
    console.log("Got: " + ev.data.size);
  };
  recorder.start(500);
}
</script>
The 500ms interval you're passing to MediaRecorder is advisory. The codec used may require larger chunks of data/time to work with. It's probably giving you data as fast as it can.
If you need low latency, MediaRecorder is the wrong tool for the job. A regular WebRTC call will use codec settings that optimize for latency over quality. I've heard of folks recording WebRTC server-side, but I do not know of anything open source to do this off the top of my head.
Perhaps some day in the future, the MediaRecorder API will allow us to choose codec parameters and this won't be an issue.
You may use another approach: every N milliseconds draw the video to an (optionally hidden) canvas, which lets you get a base64 representation of the current frame. Thus, you will get an array of base64 frames. Now you have 2 options:
send each frame to the server in base64 format. Since base64 is a regular string, this is the easiest way (a tiny sketch of this follows the list);
transform each base64 frame into a Blob and send it to the server via FormData. In my case uploading this way is twice as fast as the first one.
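For the first option, the upload itself can be as small as this (a rough sketch; the '/postFrame' URL and the 'frame' field name are only placeholders matching the example below, and base64Frame is one entry from the frames array):
var xhr = new XMLHttpRequest();
xhr.open('POST', '/postFrame', true);
xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
// base64 is plain text, so it can be sent as an ordinary form field
xhr.send('frame=' + encodeURIComponent(base64Frame));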
Below you can see my example (performing the second option). This example is quite big but every part of it is important.
index.html:
<!DOCTYPE html>
<html>
  <head>
    <script src="record-test.js"></script>
  </head>
  <body>
    <video id="video"></video>
    <canvas id="canvas" style="display:none;"></canvas>
    <input type="button" id="stopRecordBtn" value="Stop recording">
  </body>
</html>
record-test.js:
(function() {
  'use strict';

  //you can play with these settings
  var FRAME_INTERVAL_MS = 500; //take snapshot each 500 ms
  var FRAME_WIDTH = 320; //width and
  var FRAME_HEIGHT = 240; //height of resulting frame

  navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
  window.URL = window.URL || window.webkitURL;

  var video, canvas, ctx;
  var mediaStream;
  var videoRecordItv;
  var base64Frames = [];

  var init = function() {
    video = document.getElementById('video');
    canvas = document.getElementById('canvas'); //use canvas to capture a frame and convert it to base64 data
    canvas.width = FRAME_WIDTH;
    canvas.height = FRAME_HEIGHT;
    ctx = canvas.getContext('2d');
    var stopBtn = document.getElementById('stopRecordBtn');
    stopBtn.addEventListener('click', stopRecording);
    navigator.getUserMedia({video: true}, onGotStream, function(e) {console.log(e);});
  }

  var onGotStream = function(stream) {
    mediaStream = stream;
    video.src = URL.createObjectURL(mediaStream);
    video.play();
    videoRecordItv = setInterval(function() { //capture a frame each FRAME_INTERVAL_MS milliseconds
      var frame = getBase64FrameFromVideo();
      base64Frames.push(frame);
    }, FRAME_INTERVAL_MS);
  }

  var getBase64FrameFromVideo = function() {
    ctx.drawImage(video, 0, 0, FRAME_WIDTH, FRAME_HEIGHT);
    //a canvas snapshot looks like data:image/jpeg;base64,ACTUAL_DATA_HERE
    //we need to cut out first 22 characters:
    var base64PrefixLength = 'data:image/jpeg;base64,'.length;
    return canvas.toDataURL('image/jpeg').slice(base64PrefixLength);
  }

  var stopRecording = function() {
    mediaStream && mediaStream.stop && mediaStream.stop();
    mediaStream = null;
    clearInterval(videoRecordItv); //stop capturing video
    uploadFramesToServer();
  }

  var uploadFramesToServer = function() {
    var sid = Math.random(); //generate unique id
    var curFrameIdx = 0; //current frame index
    (function postFrame() {
      console.log('post frame #' + curFrameIdx);
      var base64Frame = base64Frames[curFrameIdx];
      var blobFrame = base64ToBlob(base64Frame, 'image/jpeg');
      var formData = new FormData;
      formData.append('frame', blobFrame, 'upload.jpg');
      formData.append('sid', sid);
      var xhr = new XMLHttpRequest();
      //post a single frame to /postFrame url with multipart/form-data enctype
      //on the server you get "sid" param and "frame" file as you would post a file with regular html form
      xhr.open('POST', '/postFrame', true);
      xhr.onload = function(e) {
        console.log(this.response);
        if (base64Frames[++curFrameIdx]) {
          postFrame(); //post next frame
        } else {
          //DONE!
          console.log('finish post frames');
        }
      };
      xhr.send(formData);
    })();
  }

  var base64ToBlob = function(base64Data, contentType, sliceSize) {
    contentType = contentType || '';
    sliceSize = sliceSize || 512;
    var byteCharacters = atob(base64Data);
    var byteArrays = [];
    for (var offset = 0; offset < byteCharacters.length; offset += sliceSize) {
      var slice = byteCharacters.slice(offset, offset + sliceSize);
      var byteNumbers = new Array(slice.length);
      for (var i = 0; i < slice.length; i++) {
        byteNumbers[i] = slice.charCodeAt(i);
      }
      var byteArray = new Uint8Array(byteNumbers);
      byteArrays.push(byteArray);
    }
    return new Blob(byteArrays, {type: contentType});
  }

  document.addEventListener('DOMContentLoaded', init);
})();
On the server side you still have to perform some actions, for example, creating a video from these frames with FFmpeg.
This approach works in both Chrome and Firefox.
Hope this helps. Sorry for my English and good luck!
