Weird rhythmic static when streaming realtime audio with the MediaStream API - javascript

I have a small issue. The code below live-streams audio from one client to another in the same room: the first client sends audio data to the server, which forwards it to the other client. When the receiving client gets the incoming audio data, the ArrayBuffer is converted to a Float32Array and then into an AudioBuffer, which is connected to the output and played. The result is a realtime audio feed from one client to another. However, there is a static/click that seems to happen whenever one AudioBuffer stops playing and the next one starts, and when I increase the sample rate of the "audioC" AudioContext, the static appears more frequently.
Does anyone know why this is happening or how I can fix this?
const socket = io();
var AudioContext = window.AudioContext || window.webkitAudioContext;
const audioCtx = new AudioContext({
  latencyHint: 'interactive',
  sampleRate: 16384,
});
const audioC = new AudioContext()
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia
if (navigator.getUserMedia) {
  console.log('getUserMedia supported.')
  navigator.getUserMedia(
    { audio: true },
    stream => {
      const source = audioCtx.createMediaStreamSource(stream)
      const processor = audioCtx.createScriptProcessor(16384, 1, 1)
      source.connect(processor)
      processor.connect(audioCtx.destination)
      processor.onaudioprocess = e => {
        socket.emit('Voice', e.inputBuffer.getChannelData(0))
      }
      socket.on('Voice', msg => {
        var z = new Float32Array(msg, 0, 16384);
        console.log(z)
        const audioBuffer = new AudioBuffer({
          length: 16384,
          sampleRate: 16384
        });
        audioBuffer.copyToChannel(z, 0, 0);
        const source = audioC.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(audioC.destination);
        source.start();
      })
      audioCtx.resume()
    },
    err => console.error(err)
  )
}
The server side just sends data to another client upon receiving the data.
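For reference, a minimal relay like that might look something like the sketch below. Only the 'Voice' event name comes from the client code above; the port and the use of socket.broadcast are illustrative assumptions, and socket.to(roomName) could be used instead to restrict the relay to one room.
// Hypothetical Node.js relay: forward each audio chunk to the other clients.
const io = require('socket.io')(3000); // port is an arbitrary example

io.on('connection', socket => {
  socket.on('Voice', audioData => {
    // socket.broadcast excludes the sender; socket.to(roomName).emit(...)
    // would limit this to clients that joined a specific room.
    socket.broadcast.emit('Voice', audioData);
  });
});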

Related

How can I play an arrayBuffer as an audio file?

I am receiving an arrayBuffer via a socket.io event and want to be able to process and play the stream as an audio file.
I am receiving the buffer like so:
retrieveAudioStream = () => {
  this.socket.on('stream', (arrayBuffer) => {
    console.log('arrayBuffer', arrayBuffer)
  })
}
Is it possible to set the src attribute of an <audio/> element to a buffer? If not, how can I play the incoming buffer stream?
edit:
To show how I am getting my audio input and streaming it:
window.navigator.getUserMedia(constraints, this.initializeRecorder, this.handleError);

initializeRecorder = (stream) => {
  const audioContext = window.AudioContext;
  const context = new audioContext();
  const audioInput = context.createMediaStreamSource(stream);
  const bufferSize = 2048;
  // create a javascript node
  const recorder = context.createScriptProcessor(bufferSize, 1, 1);
  // specify the processing function
  recorder.onaudioprocess = this.recorderProcess;
  // connect stream to our recorder
  audioInput.connect(recorder);
  // connect our recorder to the previous destination
  recorder.connect(context.destination);
}
This is where I receive the inputBuffer event and stream via a socket.io event
recorderProcess = (e) => {
  const left = e.inputBuffer.getChannelData(0);
  this.socket.emit('stream', this.convertFloat32ToInt16(left))
}
EDIT 2:
Adding Raymond's suggestion:
retrieveAudioStream = () => {
  const audioContext = new window.AudioContext();
  this.socket.on('stream', (buffer) => {
    const b = audioContext.createBuffer(1, buffer.length, audioContext.sampleRate);
    b.copyToChannel(buffer, 0, 0)
    const s = audioContext.createBufferSource();
    s.buffer = b
  })
}
Getting error: NotSupportedError: Failed to execute 'createBuffer' on 'BaseAudioContext': The number of frames provided (0) is less than or equal to the minimum bound (0).
Based on a quick read of what initializeRecorder and recorderProcess do, it looks like you're converting the float32 samples to int16 in some way, and the result is what gets sent to retrieveAudioStream.
If this is correct, then the arrayBuffer is an array of int16 values. Convert them to float32 (most likely by dividing each value by 32768) and save them in a Float32Array. Then create an AudioBuffer of the same length and call copyToChannel(float32Array, 0, 0) to write the values to the AudioBuffer. Use an AudioBufferSourceNode with this buffer to play out the audio.
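A rough sketch of that suggestion, assuming socket.io hands retrieveAudioStream the Int16Array's underlying bytes as an ArrayBuffer:
const audioContext = new (window.AudioContext || window.webkitAudioContext)();

retrieveAudioStream = () => {
  this.socket.on('stream', (arrayBuffer) => {
    // The sender transmitted int16 samples, so view the received bytes as such.
    const int16 = new Int16Array(arrayBuffer);
    // Convert back to float32 in the range [-1, 1).
    const float32 = new Float32Array(int16.length);
    for (let i = 0; i < int16.length; i++) {
      float32[i] = int16[i] / 32768;
    }
    // Wrap the samples in an AudioBuffer and play it. Note: if the sender's
    // AudioContext ran at a different sample rate, pitch and speed will be off.
    const audioBuffer = audioContext.createBuffer(1, float32.length, audioContext.sampleRate);
    audioBuffer.copyToChannel(float32, 0, 0);
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(audioContext.destination);
    source.start();
  });
};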

Recording browser audio using navigator.mediaDevices.getUserMedia

I am recording browser audio input from the microphone, and sending it via websocket to a nodeJs service that writes the stream to a .wav file.
My problem is that the first recording comes out fine, but any subsequent recordings come out sounding very slow, about half the speed, and are therefore unusable.
If I refresh the browser the first recording works again, and subsequent recordings are slowed down which is why I am sure the problem is not in the nodeJs service.
My project is an Angular 5 project.
I have pasted the code I am trying below.
I am using binary.js ->
https://cdn.jsdelivr.net/binaryjs/0.2.1/binary.min.js
this.client = BinaryClient(`ws://localhost:9001`)

createStream() {
  window.Stream = this.client.createStream();
  window.navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
    this.success(stream);
  })
}

stopRecording() {
  this.recording = false;
  this.win.Stream.end();
}
success(e) {
  var audioContext = window.AudioContext || window.webkitAudioContext;
  var context = new audioContext();
  // the sample rate is in context.sampleRate
  var audioInput = context.createMediaStreamSource(e);
  var bufferSize = 2048;
  var recorder = context.createScriptProcessor(bufferSize, 1, 1);

  recorder.onaudioprocess = (e) => {
    if (!this.recording) return;
    console.log('recording');
    var left = e.inputBuffer.getChannelData(0);
    this.win.Stream.write(this.convertoFloat32ToInt16(left));
  }

  audioInput.connect(recorder)
  recorder.connect(context.destination);
}
convertoFloat32ToInt16(buffer) {
  var l = buffer.length;
  var buf = new Int16Array(l)
  while (l--) {
    buf[l] = buffer[l] * 0xFFFF; //convert to 16 bit
  }
  return buf.buffer
}
I am stumped as to what can be going wrong so if anyone has experience using this browser tech I would appreciate any help.
Thanks.
I've had this exact problem - your problem is the sample rate you are writing your WAV file with is incorrect.
You need to pass the sample rate used by the browser and the microphone to the node.js which writes the binary WAV file.
Client side:
After a successful navigator.mediaDevices.getUserMedia call (in your case, in the success function), get the sampleRate from the AudioContext:
var _sampleRate = context.sampleRate;
Then pass it to the node.js listener as a parameter. In my case I used:
binaryClient.createStream({ SampleRate: _sampleRate });
Server (Node.js) side:
Use the passed SampleRate to set the WAV file's sample rate. In my case this is the code:
fileWriter = new wav.FileWriter(wavPath, {
  channels: 1,
  sampleRate: meta.SampleRate,
  bitDepth: 16
});
This will prevent broken sounds, low-pitched sounds, and slow or fast WAV files.
Hope this helps.
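For completeness, the receiving side could look roughly like the sketch below, assuming binaryjs's BinaryServer 'stream' event (which delivers the stream together with the meta object passed to createStream) and the wav package already shown above; the output file name is just an example.
var BinaryServer = require('binaryjs').BinaryServer;
var wav = require('wav');

var binaryServer = new BinaryServer({ port: 9001 });

binaryServer.on('connection', function(client) {
  client.on('stream', function(stream, meta) {
    // meta is the object passed to createStream() in the browser
    var fileWriter = new wav.FileWriter('recording.wav', {
      channels: 1,
      sampleRate: meta.SampleRate, // the browser's actual AudioContext rate
      bitDepth: 16
    });
    stream.pipe(fileWriter);
    stream.on('end', function() {
      fileWriter.end();
    });
  });
});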

Web Audio API getFloatFrequencyData function setting Float32Array argument data to array of -Infinity values

I'm currently playing around with the Web Audio API in Chrome (60.0.3112.90) to possibly build a sound wave of a given file via FileReader, AudioContext, createScriptProcessor, and createAnalyser. I have the following code:
const visualize = analyser => {
  analyser.fftSize = 256;
  let bufferLength = analyser.frequencyBinCount;
  let dataArray = new Float32Array(bufferLength);
  analyser.getFloatFrequencyData(dataArray);
}
loadAudio(file){
  // creating FileReader to convert audio file to an ArrayBuffer
  const fileReader = new FileReader();
  navigator.getUserMedia = (navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia ||
    navigator.msGetUserMedia);
  fileReader.addEventListener('loadend', () => {
    const fileArrayBuffer = fileReader.result;
    let audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    let processor = audioCtx.createScriptProcessor(4096, 1, 1);
    let analyser = audioCtx.createAnalyser();
    analyser.connect(processor);
    let data = new Float32Array(analyser.frequencyBinCount);
    let soundBuffer;
    let soundSource = audioCtx.createBufferSource();
    // loading audio track into buffer
    audioCtx.decodeAudioData(
      fileArrayBuffer,
      buffer => {
        soundBuffer = buffer;
        soundSource.buffer = soundBuffer;
        soundSource.connect(analyser);
        soundSource.connect(audioCtx.destination);
        processor.onaudioprocess = () => {
          // data becomes array of -Infinity values after call below
          analyser.getFloatFrequencyData(data);
        };
        visualize(analyser);
      },
      error => 'error with decoding audio data: ' + error.err
    );
  });
  fileReader.readAsArrayBuffer(file);
}
Upon loading a file, I get all the way to analyser.getFloatFrequencyData(data). Reading the Web Audio API docs, it says that the parameter is:
The Float32Array that the frequency domain data will be copied to.
For any sample which is silent, the value is -Infinity.
In my case, I have both an mp3 and a wav file I'm using to test this, and after invoking analyser.getFloatFrequencyData(data), both files end up giving me data which becomes an array of '-Infinity' values.
This may be due to my ignorance with Web Audio's API, but my question is why are both files, which contain loud audio, giving me an array that represents silent samples?
The Web Audio AnalyserNode is only designed to work in realtime. (It used to be called RealtimeAnalyser.) Web Audio doesn't have the ability to do analysis on buffers; take a look at another library, like DSP.js.
Instead of:
soundSource.connect(analyser);
soundSource.connect(audioCtx.destination);
try:
soundSource.connect(analyser);
analyser.connect(audioCtx.destination);
Realising I should do a source ==> analyser ==> destination chain solved this problem when I encountered it.
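Put together, a rough sketch of the corrected chain plus a polling loop (fileArrayBuffer is the FileReader result from the question; the requestAnimationFrame loop and the explicit source.start() call are assumptions, not part of the original code):
const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
const analyser = audioCtx.createAnalyser();
analyser.fftSize = 256;
const data = new Float32Array(analyser.frequencyBinCount);

audioCtx.decodeAudioData(fileArrayBuffer, buffer => {
  const soundSource = audioCtx.createBufferSource();
  soundSource.buffer = buffer;
  soundSource.connect(analyser);
  analyser.connect(audioCtx.destination); // analyser sits in the audio path
  soundSource.start();

  const poll = () => {
    analyser.getFloatFrequencyData(data); // real dB values once audio flows
    // ...use `data` to draw the sound wave here...
    requestAnimationFrame(poll);
  };
  requestAnimationFrame(poll);
});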

HTML 5: AudioContext AudioBuffer

I need to understand how audio buffers work, and to do that I want to build the following chain: Microphone -> Auto -> Processor -> Manual -> Buffer -> Auto -> Speakers. "Auto" means automatic data transfer, and "Manual" means I do it myself via the code in processor.onaudioprocess. So I have the following code:
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

var audioContext;
var myAudioBuffer;
var microphone;
var speakers;
if (navigator.getUserMedia) {
  navigator.getUserMedia(
    {audio: true},
    function(stream) {
      audioContext = new AudioContext();
      //STEP 1 - we create buffer and its node
      speakers = audioContext.destination;
      myAudioBuffer = audioContext.createBuffer(1, 22050, 44100);
      var bufferNode = audioContext.createBufferSource();
      bufferNode.buffer = myAudioBuffer;
      bufferNode.connect(speakers);
      bufferNode.start();
      //STEP 2 - we create microphone and processor
      microphone = audioContext.createMediaStreamSource(stream);
      var processor = (microphone.context.createScriptProcessor ||
        microphone.context.createJavaScriptNode).call(microphone.context, 4096, 1, 1);
      processor.onaudioprocess = function(audioProcessingEvent) {
        var inputBuffer = audioProcessingEvent.inputBuffer;
        var inputData = inputBuffer.getChannelData(0); // we have only one channel
        var nowBuffering = myAudioBuffer.getChannelData(0);
        for (var sample = 0; sample < inputBuffer.length; sample++) {
          nowBuffering[sample] = inputData[sample];
        }
      }
      microphone.connect(processor);
    },
    function() {
      console.log("Error 003.")
    });
}
However, this code doesn't work. No errors, only silence. Where is my mistake?
EDIT
Since the OP definitely wants to use a buffer, I wrote some more code which you can try out on JSFiddle. The tricky part was definitely that you somehow have to pass the input from the microphone through to some "destination" to get it to process.
navigator.getUserMedia = navigator.getUserMedia ||
  navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if he doesn't
// support microphone input.
if (navigator.getUserMedia) {
  captureMicrophone();
}

// First Step - Capture microphone and process the input
function captureMicrophone() {
  // process input from microphone
  const processAudio = ev =>
    processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

  // setup media stream from microphone
  const microphoneStream = stream => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    // #1 If we don't pass through to speakers 'audioprocess' won't be triggered
    processor.connect(mute);
  };

  // TODO: Handle error properly (see todo above - but probably more specific)
  const userMediaError = err => console.error(err);

  // Second step - Process buffer and output to speakers
  const processBuffer = buffer => {
    audioBuffer.getChannelData(CHANNEL).set(buffer);
    // We could move this out but that would affect audio quality
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(speakers);
    source.start();
  }

  const audioContext = new AudioContext();
  const speakers = audioContext.destination;
  // We currently only operate on this channel we might need to add a couple
  // lines of code if this fact changes
  const CHANNEL = 0;
  const CHANNELS = 1;
  const BUFFER_SIZE = 4096;
  const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);
  const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

  // #2 Not needed we could directly pass through to speakers since there's no
  // data anyway but just to be sure that we don't output anything
  const mute = audioContext.createGain();
  mute.gain.value = 0;
  mute.connect(speakers);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}
The code I wrote up there looks quite dirty to me. But since you have a large project you can definitely structure it much more cleanly.
I've no clue what you're trying to achieve, but I definitely also recommend having a look at Recorder.js.
Previous answer
The main point you're missing is that the onaudioprocess event of the node created by createScriptProcessor already hands you an output buffer, so all the createBuffer stuff you do is unnecessary. Apart from that you're on the right track.
This would be a working solution. Try it out on JSFiddle!
navigator.getUserMedia = navigator.getUserMedia ||
  navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
  captureMicrophone();
}

function captureMicrophone() {
  const audioContext = new AudioContext();
  const speaker = audioContext.destination;
  const processor = audioContext.createScriptProcessor(4096, 1, 1);

  const processAudio = ev => {
    const CHANNEL = 0;
    const inputBuffer = ev.inputBuffer;
    const outputBuffer = ev.outputBuffer;
    const inputData = inputBuffer.getChannelData(CHANNEL);
    const outputData = outputBuffer.getChannelData(CHANNEL);
    // TODO: manually do something with the audio
    for (let i = 0; i < inputBuffer.length; ++i) {
      outputData[i] = inputData[i];
    }
  };

  const microphoneStream = stream => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    processor.connect(speaker);
  };

  // TODO: handle error properly
  const userMediaError = err => console.error(err);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}
Are you getting silence (i.e. your onprocess is getting called, but the buffers are empty) or nothing (i.e. your onprocess is never getting called)?
If the latter, try connecting the scriptprocessor to the context.destination. Even if you don't use the output, some implementations currently need that connection to pull data through.
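In code that amounts to something like the sketch below, using the variable names from the question; routing through a zero-gain node is optional and only keeps the dummy connection silent, mirroring the mute trick in the longer answer above.
// Keep the graph "pulled" without actually hearing the processor's output.
var silence = audioContext.createGain();
silence.gain.value = 0;

microphone.connect(processor);
processor.connect(silence);
silence.connect(audioContext.destination);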

Streaming microphone through sockets using socket IO and Node JS

I am making an application where I want users to use their microphones (on their phones) and be able to talk to each other in the game lobby. However, this has proven to be more difficult than expected.
I am using Node.js with socket.io and socket.io-stream.
On my client I am using the Web Audio API to capture the microphone input (I am not really worried about this part because I am going to make this a native iOS app):
navigator.getUserMedia = ( navigator.getUserMedia ||
  navigator.webkitGetUserMedia ||
  navigator.mozGetUserMedia ||
  navigator.msGetUserMedia);

if (navigator.getUserMedia) {
  navigator.getUserMedia (
    // constraints
    {
      video: false,
      audio: true
    },
    function(localMediaStream) {
      var video = document.querySelector('audio');
      video.src = window.URL.createObjectURL(localMediaStream);
      lcm = localMediaStream;
      var audioContext = window.AudioContext;
      var context = new audioContext();
      var audioInput = context.createMediaStreamSource(localMediaStream);
      var bufferSize = 2048;
      // create a javascript node
      var recorder = context.createScriptProcessor(bufferSize, 1, 1);
      // specify the processing function
      recorder.onaudioprocess = recorderProcess;
      // connect stream to our recorder
      audioInput.connect(recorder);
      // connect our recorder to the previous destination
      recorder.connect(context.destination);
    },
    // errorCallback
    function(err) {
      console.log("The following error occured: " + err);
      $("video").remove();
      alert("##");
    }
  );
} else {
  console.log("getUserMedia not supported");
}

function recorderProcess(e) {
  var left = e.inputBuffer.getChannelData(0);
  window.stream.write(convertFloat32ToInt16(left));
  //var f = $("#aud").attr("src");
  var src = window.URL.createObjectURL(lcm);
  ss(socket).emit('file', src, {size: src.size});
  ss.createBlobReadStream(src).pipe(window.stream);
  //ss.createReadStream(f).pipe(widnow.stream);
}

function convertFloat32ToInt16(buffer) {
  l = buffer.length;
  buf = new Int16Array(l);
  while (l--) {
    buf[l] = Math.min(1, buffer[l])*0x7FFF;
  }
  return buf.buffer;
}
});
ss(socket).on('back', function(stream, data) {
  //console.log(stream);
  var video = document.querySelector('audio');
  video.src = window.URL.createObjectURL(stream);
  console.log("getting mic data");
});
in which I can successfully listen to myself speak on the microphone. I am using the stream socket to create a blob to upload to my server...
index.ss(socket).on('file', function(stream, data) {
  console.log("getting stream");
  var filename = index.path.basename(data.name);
  //var myfs = index.fs.createWriteStream(filename);
  var fileWriter = new index.wav.FileWriter('demo.wav', {
    channels: 1,
    sampleRate: 48000,
    bitDepth: 16
  });
  var streams = index.ss.createStream();
  streams.pipe(fileWriter);
  index.ss(socket).emit('back', fileWriter, {size: fileWriter.size});
});
I cannot get the stream to write to a file, or even to a temporary buffer, and then stream it back to a client so I can play or "stream" the audio in real time. After a while the server crashes, saying that the pipe is not writable.
Has anyone else encountered this?
By using the SFMediaStream library you can use socket.io and a Node.js server to live-stream your microphone from a browser. But this library still needs some improvement before it's ready for production.
For the presenter
var mySocket = io("/", {transports: ['websocket']});

// Set latency to 100ms (Equal with streamer)
var presenterMedia = new ScarletsMediaPresenter({
  audio: {
    channelCount: 1,
    echoCancellation: false
  }
}, 100);

// Every new client streamer must receive this header buffer data
presenterMedia.onRecordingReady = function(packet){
  mySocket.emit('bufferHeader', packet);
}

// Send buffer to the server
presenterMedia.onBufferProcess = function(streamData){
  mySocket.emit('stream', streamData);
}

presenterMedia.startRecording();
For the streamer
var mySocket = io("/", {transports: ['websocket']});

// Set latency to 100ms (Equal with presenter)
var audioStreamer = new ScarletsAudioBufferStreamer(100);
audioStreamer.playStream();

// Buffer header must be received first
mySocket.on('bufferHeader', function(packet){
  audioStreamer.setBufferHeader(packet);
});

// Receive buffer and play it
mySocket.on('stream', function(packet){
  // audioStreamer.realtimeBufferPlay(packet);
  audioStreamer.receiveBuffer(packet);
});

// Request buffer header
mySocket.emit('requestBufferHeader', '');
Or you can test it from your localhost with this example
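The server side is not shown above; a minimal socket.io relay for the three events used by the presenter and streamer code might look roughly like this. This is an assumption, not part of SFMediaStream itself, and the port is arbitrary.
// Hypothetical Node.js relay for the 'bufferHeader', 'stream' and
// 'requestBufferHeader' events used by the presenter/streamer code above.
const io = require('socket.io')(3000);
let bufferHeader = null;

io.on('connection', socket => {
  socket.on('bufferHeader', packet => {
    bufferHeader = packet;                   // remember the latest header
    socket.broadcast.emit('bufferHeader', packet);
  });
  socket.on('stream', packet => {
    socket.broadcast.emit('stream', packet); // fan the audio chunks out
  });
  socket.on('requestBufferHeader', () => {
    if (bufferHeader) socket.emit('bufferHeader', bufferHeader);
  });
});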
