NodeJS: Capturing a stereo PCM wave stream into mono AudioBuffer - javascript

I'm recording audio from nodejs using node-microphone (which is just a javascript interface for arecord), and want to store the stream chunks in an AudioBuffer using web-audio-api (which is a nodejs implementation of the Web Audio API).
My audio source has two channels while my AudioBuffer has only one (on purpose).
This is my working configuration for recording audio with arecord through my USB sound card (I'm using a Raspberry pi 3 running on Raspbian buster):
arecord -D hw:1,0 -c 2 -f S16_LE -r 44100
Running this command with an output path and playing the resulting wav file with aplay works just fine. So node-microphone is able to record audio with these parameters, and in the end I get a Node.js readable stream of flowing wave data.
But
I'm struggling to bridge the stream chunks (Buffer instances) to the AudioBuffer. More precisely: I'm not sure of the format of the incoming data, not sure of the destination format, and not sure how to do the conversion between them:
The stream chunks are Buffers, so they are also Uint8Arrays. Given my configuration, I guess they are binary representations of 16-bit signed integers (little endian, though I don't know exactly what that means).
The AudioBuffer holds multiple buffers (one per channel, so only one in my case) that I can access as Float32Arrays by calling AudioBuffer.prototype.getChannelData(). MDN also says:
The buffer contains data in the following format: non-interleaved IEEE754 32-bit linear PCM with a nominal range between -1 and +1, that is, 32bits floating point buffer, with each samples between -1.0 and 1.0.
The point is to find what I have to extract from the incoming Buffers and how I should transform it so it's suitable for the Float32Array destination (and remains valid wave data), knowing that the audio source is stereo and the AudioBuffer isn't.
My best contender so far was the Buffer.prototype.readFloatLE() method whose name looks like it would solve my problem, but this wasn't a success (just noise).
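For reference, here is a minimal illustration of the two formats involved (the byte values are made up for the example):
const chunk = Buffer.from([0x30, 0xf8, 0x00, 0x40]) // one stereo frame: left then right, 2 bytes each
chunk.readInt16LE(0) // left sample as a 16-bit signed integer: 0xf830 -> -2000
chunk.readInt16LE(2) // right sample: 0x4000 -> 16384
// whereas each slot of audioBuffer.getChannelData(0) expects a float in [-1, 1]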
My first try (before doing research) was just to naively copy buffer data to Float32Array and interleaving indexes to handle stereo/mono conversion. Obviously it mostly produced noise but I could hear some of the sound I recorded (incredibly distorted but surely present) so I guess I should mention that.
This is a simplified version of my naive try (I'm aware this is not meant to work well, I just include it in my question as a base of discussion):
import { AudioBuffer } from 'web-audio-api'
import Microphone from 'node-microphone'

const rate = 44100
const channels = 2 // Number of source channels

const microphone = new Microphone({ // These parameters correspond to the arecord command above
  channels,
  rate,
  device: 'hw:1,0',
  bitwidth: 16,
  endian: 'little',
  encoding: 'signed-integer'
})

const audioBuffer = new AudioBuffer(
  1, // 1 channel
  30 * rate, // 30 seconds buffer
  rate
)

const chunks = []
const data = audioBuffer.getChannelData(0) // This is the Float32Array
const stream = microphone.startRecording()

setTimeout(() => microphone.stopRecording(), 5000) // Recording for 5 seconds

stream.on('data', chunk => chunks.push(chunk))

stream.on('close', () => {
  chunks.reduce((offset, chunk) => {
    for (var index = 0; index < chunk.length; index += channels) {
      let value = 0
      for (var channel = 0; channel < channels; channel++) {
        value += chunk[index + channel]
      }
      data[(offset + index) / channels] = value / channels // Average value from the two channels
    }
    return offset + chunk.length // Since data comes as chunks, this offsets AudioBuffer's index
  }, 0)
})
I would be really grateful if you could help :)

So the input stereo signal comes in as 16-bit signed integers with interleaved left and right channels, meaning that the corresponding Buffers (arrays of 8-bit unsigned integers) have this layout for a single stereo sample:
[LEFT ] 8 bits (LSB)
[LEFT ] 8 bits (MSB)
[RIGHT] 8 bits (LSB)
[RIGHT] 8 bits (MSB)
Since arecord is configured with little endian format, the Least Significant Byte (LSB) comes first, and the Most Significant Byte (MSB) comes next.
The AudioBuffer single channel buffer, represented by a Float32Array, expects values between -1 and 1 (one value per sample).
So to map values from the input Buffer to the destination Float32Array, I had to use the Buffer.prototype.readInt16LE(offset) method, incrementing the byte offset parameter by 4 for each sample (2 left bytes + 2 right bytes = 4 bytes), and scaling the input values from the range [-32768;+32767] (the 16-bit signed integer range) to the range [-1;+1]:
import { AudioBuffer } from 'web-audio-api'
import Microphone from 'node-microphone'

const rate = 44100
const channels = 2 // 2 input channels

const microphone = new Microphone({
  channels,
  rate,
  device: 'hw:1,0',
  bitwidth: 16,
  endian: 'little',
  encoding: 'signed-integer'
})

const audioBuffer = new AudioBuffer(
  1, // 1 channel
  30 * rate, // 30 seconds buffer
  rate
)

const chunks = []
const data = audioBuffer.getChannelData(0)
const stream = microphone.startRecording()

setTimeout(() => microphone.stopRecording(), 5000) // Recording for 5 seconds

stream.on('data', chunk => chunks.push(chunk))

stream.on('close', () => {
  chunks.reduce((offset, chunk) => {
    for (var index = 0; index < chunk.length; index += channels * 2) {
      let value = 0
      for (var channel = 0; channel < channels; channel++) {
        // Iterates through the input channels and sums their values
        // so we can compute the average later and reduce them
        // into a mono signal.
        // The channel index is multiplied by 2 because
        // there are 2 bytes per channel sample.
        value += chunk.readInt16LE(index + channel * 2)
      }
      // Scales the index according to the number of input channels
      // (and divides it by 2 because there are 2 bytes per channel sample),
      // then computes the average and maps it
      // from the range [-32768;+32767] to the range [-1;+1].
      data[(offset + index) / channels / 2] = value / channels / 32768
    }
    return offset + chunk.length
  }, 0)
})

Related

Remove high frequency sound from streaming audio node js

I have a small app that accepts incoming audio stream from the internet and I'm trying to find the frequency of a tone or continuous beep. At the time of the tone / beep it is the only thing that would be playing. The rest of the audio is either silence or talking. I'm using the node-pitchfinder npm module to find the tone and when I use a sample audio clip I made of 2,000Hz the app prints out the frequency within one or two Hz. When I pull the audio stream online I keep getting results like 17,000 Hz. My guess is that there is some "noise" in the audio signal and that's what the node-pitchfinder module is picking up.
Is there any way I can filter out that noise in real time to get an accurate frequency?
The streaming audio file is: http://relay.broadcastify.com/fq85hty701gnm4z.mp3
Code below:
const fs = require('fs');
const fsa = require('fs-extra');
const Lame = require('lame');
const Speaker = require('speaker');
const Volume = require('pcm-volume');
const Analyser = require('audio-analyser')
const request = require('request')
const Chunker = require('stream-chunker');
const { YIN } = require('node-pitchfinder')
const detectPitch = YIN({ sampleRate: 44100})
//const BUFSIZE = 64;
const BUFSIZE = 500;
var decoder = new Lame.Decoder();
decoder.on('format', function(format){onFormat(format)});
var chunker = Chunker(BUFSIZE);
chunker.pipe(decoder);
var options = {
  url: 'http://relay.broadcastify.com/fq85hty701gnm4z.mp3',
  headers: {
    "Upgrade-Insecure-Requests": 1,
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
  }
}
var audio_stream = request(options);
//var audio_stream = fs.createReadStream('./2000.mp3');
audio_stream.pipe(chunker);
function onFormat(format)
{
  //if (volume == "undefined")
  volume = 1.0;
  vol = new Volume(volume);
  speaker = new Speaker(format);
  analyser = createAnalyser(format);
  analyser.on('data', sample);
  console.log(format);
  vol.pipe(speaker);
  vol.pipe(analyser);
  decoder.pipe(vol);
  vol.setVolume(volume);
}
function createAnalyser(format)
{
  return new Analyser({
    fftSize: 8,
    bufferSize: BUFSIZE,
    'pcm-stream': {
      channels: format.channels,
      sampleRate: format.sampleRate,
      bitDepth: format.bitDepth
    }
  });
}
var logFile = 'log.txt';
var logOptions = {flag: 'a'};
function sample()
{
  if (analyser) {
    const frequency = detectPitch(analyser._data)
    console.log(frequency)
  }
}
My goal is to find the most dominant audio frequency in a chunk of data so I can figure out the tone.
I found some code that supposedly does this in Python:
def getFreq( pkt ):
    #Use FFT to determine the peak frequency of the last chunk
    thefreq = 0
    if len(pkt) == bufferSize*swidth:
        indata = np.array(wave.struct.unpack("%dh"%(len(pkt)/swidth), pkt))*window
        # filter out everything outside of our bandpass Hz
        bp = np.fft.rfft(indata)
        minFilterBin = (bandPass[0]/(sampleRate/bufferSize)) + 1
        maxFilterBin = (bandPass[1]/(sampleRate/bufferSize)) - 1
        for i in range(len(bp)):
            if i < minFilterBin:
                bp[i] = 0
            if i > maxFilterBin:
                bp[i] = 0
        # Take the fft and square each value
        fftData = abs(bp)**2
        # find the maximum
        which = fftData[1:].argmax() + 1
        # Compute the magnitude of the sample we found
        dB = 10*np.log10(1e-20+abs(bp[which]))
        #avgdB = 10*np.log10(1e-20+abs(bp[which - 10:which + 10].mean()))
        if dB >= minDbLevel:
            # use quadratic interpolation around the max
            if which != len(fftData)-1:
                warnings.simplefilter("error")
                try:
                    y0, y1, y2 = np.log(fftData[which-1:which+2:])
                    x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
                except RuntimeWarning:
                    return(-1)
                # find the frequency and output it
                warnings.simplefilter("always")
                thefreq = (which + x1) * sampleRate/bufferSize
            else:
                thefreq = which * sampleRate/bufferSize
        else:
            thefreq = -1
    return(thefreq)
Original answer:
I cannot provide you with a complete solution, but I can (hopefully) give you enough advice to solve the problem.
I would recommend that you save a part of the stream you want to analyze to a file and then take a look at the file with a spectrum analyzer (e.g. with Audacity). This allows you to determine if the 17kHz signal is present in the audio stream.
If the 17 kHz signal is present in the audio stream, then you can filter the stream with a low-pass filter (e.g. audio-biquad with type lowpass and a cutoff frequency somewhere above 2 kHz).
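If you prefer to apply the filtering directly to raw Float32 samples instead of wiring up the audio-biquad stream, a standard RBJ low-pass biquad is short to implement. This is only a sketch with arbitrary cutoff and Q values, not the audio-biquad package's API:
// Minimal RBJ-cookbook low-pass biquad over Float32 samples (sketch)
function makeLowPass(sampleRate, cutoffHz, q = Math.SQRT1_2) {
  const w0 = 2 * Math.PI * cutoffHz / sampleRate;
  const alpha = Math.sin(w0) / (2 * q);
  const cosw0 = Math.cos(w0);
  const a0 = 1 + alpha;
  // coefficients normalized by a0
  const b0 = (1 - cosw0) / 2 / a0;
  const b1 = (1 - cosw0) / a0;
  const b2 = (1 - cosw0) / 2 / a0;
  const a1 = (-2 * cosw0) / a0;
  const a2 = (1 - alpha) / a0;
  let x1 = 0, x2 = 0, y1 = 0, y2 = 0; // previous input/output samples (filter state)
  return function filterSample(x0) {
    const y0 = b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2;
    x2 = x1; x1 = x0;
    y2 = y1; y1 = y0;
    return y0;
  };
}
// Usage sketch: const lp = makeLowPass(44100, 2500);
// for (let i = 0; i < samples.length; i++) samples[i] = lp(samples[i]);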
If the 17 kHz signal is not present in the audio then you could try to increase the buffer size BUFSIZE (currently set to 500 in your code). In the example on node-pitchfinder's GitHub page they use a complete audio file for pitch detection. Depending on how the pitch detection algorithm is implemented the result might be different for larger chunks of audio data (i.e. a few seconds) compared to very short chunks (500 samples is around 11 ms at sample rate 44100). Start with a large value for BUFSIZE (e.g. 44100 -> 1 second) and see if it makes a difference.
Explanation of the Python code: it uses an FFT (fast Fourier transform) to find out which frequencies are present in the audio signal and then searches for the frequency with the highest value. This usually works well for simple signals like a 2 kHz sine wave. You could use dsp.js, which provides an FFT implementation, if you want to implement it in JavaScript. However, it is quite a challenge to get this right without some knowledge of digital signal processing theory.
As a side note: the YIN algorithm does not use FFT, it is based on autocorrelation.
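To give an idea of what autocorrelation-based detection looks like, here is a naive sketch (my own illustration, not the YIN implementation, which adds a difference function, thresholding and interpolation):
// Naive autocorrelation pitch estimate over a Float32Array of samples (sketch)
function estimatePitch(samples, sampleRate, minHz = 50, maxHz = 1000) {
  const minLag = Math.floor(sampleRate / maxHz);
  const maxLag = Math.floor(sampleRate / minHz);
  let bestLag = -1, bestCorr = 0;
  for (let lag = minLag; lag <= maxLag; lag++) {
    let corr = 0;
    for (let i = 0; i + lag < samples.length; i++) {
      corr += samples[i] * samples[i + lag]; // similarity of the signal with itself shifted by lag
    }
    if (corr > bestCorr) {
      bestCorr = corr;
      bestLag = lag;
    }
  }
  return bestLag > 0 ? sampleRate / bestLag : -1; // lag in samples -> frequency in Hz
}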
Update
The following script uses the fft data of audio-analyser and searches for the maximum frequency. This approach is very basic and only works well for signals where just one frequency is very dominant. The YIN algorithm is much better suited for pitch detection than this example.
const fs = require('fs');
const Lame = require('lame');
const Analyser = require('audio-analyser')
const Chunker = require('stream-chunker');

var analyser;
var fftSize = 4096;

var decoder = new Lame.Decoder();
decoder.on('format', format => {
  analyser = createAnalyser(format);
  decoder.pipe(analyser);
  analyser.on('data', processSamples);
  console.log(format);
});

var chunker = Chunker(fftSize);
var audio_stream = fs.createReadStream('./sine.mp3');
audio_stream.pipe(chunker);
chunker.pipe(decoder);

function createAnalyser(format) {
  return new Analyser({
    fftSize: fftSize,
    frequencyBinCount: fftSize / 2,
    sampleRate: format.sampleRate,
    channels: format.channels,
    bitDepth: format.bitDepth
  });
}

function processSamples() {
  if (analyser) {
    var fftData = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(fftData);
    var maxBin = fftData.indexOf(Math.max(...fftData));
    var thefreq = maxBin * analyser.sampleRate / analyser.fftSize;
    console.log(maxBin + " " + thefreq);
  }
}

How to specify bit depth and sample rate when recording microphone using mediaRecorder in javascript?

In general, a standard CD's duration is 74 min, its sample rate is 44.1 kHz, and it has left and right channels (stereo). Its capacity can be calculated using the following formula:
(44100 * 16 * 2) / 8 * (74 * 60) = 783216000 bytes
Does 16 mean the bit depth?
Below is the code snippet that I use to do the recording.
const options = {
  mimeType: 'audio/webm;codecs=pcm',
  audioBitsPerSecond: 128
};
const recordedChunks = []
const mediaRecorder = new MediaRecorder(stream, options)

mediaRecorder.addEventListener('dataavailable', function (e) {
  if (e.data.size > 0) {
    recordedChunks.push(e.data)
  }
})

mediaRecorder.addEventListener('stop', function () {
  fileReader.readAsDataURL(new Blob(recordedChunks))
})

mediaRecorder.start()
After searching the documentation, I didn't find the bit depth, channel and sample rate option. Any ideas?
You can specify the sample rate, channel count, and bit depth by building a MediaTrackConstraints object and applying it to a MediaStreamTrack using MediaStreamTrack.applyConstraints(constraints). The MediaStreamTrack should be accessible through the MediaStream going into your MediaRecorder.
(The 16 in your equation does refer to the bit-depth)
(44100 samples * 16 bits per sample * 2 channels of audio) / 8 bits per byte * (74 minutes * 60 seconds per minute) = 783216000 bytes
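A sketch of that approach (the constraint values here are illustrative; browsers may ignore or clamp constraints they cannot honor, so check track.getSettings() afterwards):
async function getConstrainedStream() {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const [track] = stream.getAudioTracks();
  await track.applyConstraints({
    sampleRate: 44100,  // samples per second
    sampleSize: 16,     // bit depth in bits
    channelCount: 2     // stereo
  });
  console.log(track.getSettings()); // what the browser actually applied
  return stream; // pass this stream to new MediaRecorder(stream, options)
}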

javascript: smallest JSON.stringify for Float32Array?

FireFox 46.0.1: I am using 3rd-party (easyrtc) software to send 15KB chunks of Float32Arrays between peers. Easyrtc insists that the data be JSON-able. Unfortunately, JSON.stringify yields a string more than twice as long as the original data: 16384 bytes of data becomes a string of length 35755. Below is my test code followed by the console output. What if anything can I do to reduce the stringify'd size? Is there a way to send the values only (no keys)? Can I use the 'replacer' argument to send only the values, and if so, don't I need to use a replacer on the corresponding JSON.parse on the receiving end?
var g_testBufferNBytes = 4096 * 4;
var g_testBuffer = new ArrayBuffer(g_testBufferNBytes);
var g_testBufferView = new Float32Array(g_testBuffer);
console.log("array byte length " + g_testBuffer.byteLength);
console.log("view byte length " + g_testBufferView.byteLength);
var j = JSON.stringify(g_testBufferView);
console.log("j length " + j.length);
var newBuf = JSON.parse(j);
console.log("newBuf length " + Object.keys(newBuf).length);
CONSOLE:
array byte length 16384
view byte length 16384
j length 35755
newBuf length 4096
Yes
ES6: Assume your data are in let f32 = g_testBufferView (a Float32Array). We can save it as JSON in at least 4 ways:
// encode
let f32json = JSON.stringify(f32);
let f32jsonArr = JSON.stringify(Array.from(f32));
let f32base64 = btoa(String.fromCharCode(...(new Uint8Array(f32.buffer))));
let f32base128 = ... // not trivial, look below
// decode
let df32json = new Float32Array(Object.values(JSON.parse(f32json)));
let df32jsonArr = new Float32Array(JSON.parse(f32jsonArr));
let df32base64 = new Float32Array(new Uint8Array([...atob(f32base64)].map(c => c.charCodeAt(0))).buffer);
let df32base128 = ... // not trivial, look below
Note that Object.values returns values sorted by numeric keys (look here).
Here is a working example. You could also use base128 encoding, but I don't use it in this example (to keep it simple) - more details here.
If your Float32Array f32 has 4096 elements all equal to 0.3, then:
f32 has 16384 bytes,
f32json (j from your question) has 109483 bytes (which is >6x bigger than f32)
f32jsonArr has 81921 bytes (which is >5x bigger than f32)
f32base64 has 21848 bytes (which is ~1.3x bigger than f32)
f32base128 has 18725 bytes (which is <1.15x bigger than f32) but chrome will send ~2x bigger request (depends on input data)
If your Float32Array f32 has 4096 elements equal to integers from 1 to 9, then:
f32 has 16384 bytes - CONST,
f32json (j from your question) has 35755 bytes (which is >2x bigger than f32)
f32jsonArr has 8193 bytes (which is 2x SMALLER (sic!) than f32)
f32base64 has 21848 bytes - CONST (which is ~1.3x bigger than f32)
f32base128 has 18725 bytes - CONST (which is <1.15x bigger than f32) but chrome will send ~2x bigger request (depends on input data)
Conclusion
The smallest result that does not depend on the array values (its size is constant) is f32base64, about 33% bigger than the input array. f32base128 contains valid JSON (a string) that is less than 15% bigger than the input, but Chrome will send a roughly 2x bigger request with it (look here - in the 'update' section). So use f32base64 - this is probably the smallest JSON you can get without more sophisticated methods.
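One practical caveat if you adopt the f32base64 approach: spreading a large Uint8Array into a single String.fromCharCode(...) call can hit the engine's argument-count limit. A chunked variant (a sketch, not part of the answer above) avoids that:
function float32ToBase64(f32) {
  const bytes = new Uint8Array(f32.buffer, f32.byteOffset, f32.byteLength);
  let binary = '';
  const CHUNK = 0x8000; // 32768 bytes per call, well under typical argument limits
  for (let i = 0; i < bytes.length; i += CHUNK) {
    binary += String.fromCharCode(...bytes.subarray(i, i + CHUNK));
  }
  return btoa(binary); // browser context, as in the answer's example
}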

Javascript Convert int value to octet stream Array

I want to convert a signed integer to a 32-bit (big endian) octet stream and pass the octet stream as an array value to the constructor of a Buffer object.
I can create it in the console for example for the value -2000:
buf = Buffer(4)
buf.writeInt32BE(-2000)
buf // is <Buffer ff ff f8 30>
buf1 = new Buffer([0xff, 0xff, 0xf8, 0x30])
For example, the value -3000 is: 0xff, 0xff, 0xf4, 0x48.
But the framework I use does not accept the writeInt32BE function and throws an exception.
How can I convert a signed 32-bit integer value to an octet array without writeInt32BE?
I need a function that takes a value and returns an array of octets.
Using a 4 byte array buffer, converted to a data view and calling setInt32 on the view seems to work. This approach supports specification of both little endian and big endian (the default) formats independent of machine architecture.
function bigEnd32(value) {
  var buf = new ArrayBuffer(4);
  var view = new DataView(buf);
  view.setInt32(0, value);
  return view;
}

// quick test (in a browser)
var n = prompt("Signed 32: ");
var view = bigEnd32(+n);
for(var i = 0; i < 4; ++i)
  console.log(view.getUint8(i));
Documentation was located searching for "MDN ArrayBuffer", "MDN DataView" etc. Check out DataView in detail for properties that access the underlying array buffer - you may be able to tweak the code to suit your application.
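If the framework really needs a plain array of octets rather than the DataView itself, the underlying bytes can be copied out. A small sketch building on bigEnd32() above (toOctetArray is a hypothetical helper name):
function toOctetArray(value) {
  const view = bigEnd32(value);                    // DataView from the answer above
  return Array.from(new Uint8Array(view.buffer));  // e.g. [255, 255, 248, 48] for -2000
}
// new Buffer(toOctetArray(-2000)) -> <Buffer ff ff f8 30>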

How to manipulate the contents of an audio tag and create derivative audio tags from it?

On my webpage, I have an audio file inside of an <audio> tag.
<!DOCTYPE html>
<html>
<audio src="myTrack.mp3" controls preload="auto"></audio>
</html>
I want to chop up this file stored in an <audio> tag into multiple 10 second audio files that I could then insert into the webpage as their own audio files in separate <audio> tags.
Is it possible to do this in javascript?
Yes, of course this is possible! :)
Make sure the audio fulfills CORS requirements so we can load it with AJAX (loading from the same origin as the page will of course fulfill this).
Load the file as ArrayBuffer and decode it with AudioContext
Calculate the number of segments and length of each (I use a time based length independent of channels below)
Split the main buffer into smaller buffers
Create a file-wrapper for the new buffer (below I made a simple WAVE wrapper for the demo)
Feed that as Blob via an Object-URL to a new instance of the Audio element
Keep track of the object-URLs so you can free them up when not needed anymore (revokeObjectURL()).
One drawback is of course that you would have to load the entire file into memory before processing it.
Example
Hopefully the file I'm using for the demo will be available through the current CDN that is used to allow CORS usage (I own the copyright, feel free to use it for testing, but only testing!! :) ). The loading and decoding can take some time depending on your system and connection, so please be patient...
Ideally you should use an asynchronous approach splitting the buffers, but the demo targets only the needed steps to make the buffer segments available as new file fragments.
Also note that I did not take into consideration the last segment to be shorter than the others (I use floor, you should use ceil for the segment count and cut the last block length short). I'll leave that as an exercise for the reader...
var actx = new(AudioContext || webkitAudioContext)(),
    url = "//dl.dropboxusercontent.com/s/7ttdz6xsoaqbzdl/war_demo.mp3";

// STEP 1: Load audio file using AJAX ----------------------------------
fetch(url).then(function(resp) {return resp.arrayBuffer()}).then(decode);

// STEP 2: Decode the audio file ---------------------------------------
function decode(buffer) {
  actx.decodeAudioData(buffer, split);
}

// STEP 3: Split the buffer --------------------------------------------
function split(abuffer) {
  // calc number of segments and segment length
  var channels = abuffer.numberOfChannels,
      duration = abuffer.duration,
      rate = abuffer.sampleRate,
      segmentLen = 10,
      count = Math.floor(duration / segmentLen),
      offset = 0,
      block = 10 * rate;

  while(count--) {
    var url = URL.createObjectURL(bufferToWave(abuffer, offset, block));
    var audio = new Audio(url);
    audio.controls = true;
    audio.volume = 0.75;
    document.body.appendChild(audio);
    offset += block;
  }
}
// Convert a audio-buffer segment to a Blob using WAVE representation
function bufferToWave(abuffer, offset, len) {

  var numOfChan = abuffer.numberOfChannels,
      length = len * numOfChan * 2 + 44,
      buffer = new ArrayBuffer(length),
      view = new DataView(buffer),
      channels = [], i, sample,
      pos = 0;

  // write WAVE header
  setUint32(0x46464952);                         // "RIFF"
  setUint32(length - 8);                         // file length - 8
  setUint32(0x45564157);                         // "WAVE"

  setUint32(0x20746d66);                         // "fmt " chunk
  setUint32(16);                                 // length = 16
  setUint16(1);                                  // PCM (uncompressed)
  setUint16(numOfChan);
  setUint32(abuffer.sampleRate);
  setUint32(abuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
  setUint16(numOfChan * 2);                      // block-align
  setUint16(16);                                 // 16-bit (hardcoded in this demo)

  setUint32(0x61746164);                         // "data" - chunk
  setUint32(length - pos - 4);                   // chunk length

  // write interleaved data
  for(i = 0; i < abuffer.numberOfChannels; i++)
    channels.push(abuffer.getChannelData(i));

  while(pos < length) {
    for(i = 0; i < numOfChan; i++) {             // interleave channels
      sample = Math.max(-1, Math.min(1, channels[i][offset])); // clamp
      sample = (0.5 + sample < 0 ? sample * 32768 : sample * 32767)|0; // scale to 16-bit signed int
      view.setInt16(pos, sample, true);          // update data chunk
      pos += 2;
    }
    offset++                                     // next source sample
  }

  // create Blob
  return new Blob([buffer], {type: "audio/wav"});

  function setUint16(data) {
    view.setUint16(pos, data, true);
    pos += 2;
  }

  function setUint32(data) {
    view.setUint32(pos, data, true);
    pos += 4;
  }
}
audio {display:block;margin-bottom:1px} /* CSS used by the demo to stack the generated players */
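The answer leaves the shorter last segment as an exercise; here is a minimal, untested sketch of the Math.ceil variant that shortens the last block instead of dropping it (it assumes the same bufferToWave() signature as above):
function splitAll(abuffer, segmentSeconds) {
  var rate = abuffer.sampleRate,
      block = segmentSeconds * rate,
      count = Math.ceil(abuffer.duration / segmentSeconds),
      urls = [];
  for (var i = 0; i < count; i++) {
    var offset = i * block;
    var len = Math.min(block, abuffer.length - offset); // last block may be shorter
    urls.push(URL.createObjectURL(bufferToWave(abuffer, offset, len)));
  }
  return urls; // feed each URL to a new Audio element as in split() above
}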
