I'm working on a project and I need to send an audio stream to a Node.js server. I'm able to capture microphone sound with this function:
function micCapture(){
'use strict';
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
var constraints = {
audio: true,
video: false
};
var video = document.querySelector('video');
function successCallback(stream) {
window.stream = stream; // stream available to console
if (window.URL) {
video.src = window.URL.createObjectURL(stream);
} else {
video.src = stream;
}
//Send audio stream
//server.send(stream);
}
function errorCallback(error) {
console.log('navigator.getUserMedia error: ', error);
}
navigator.getUserMedia(constraints, successCallback, errorCallback);
}
As you can see, I'm able to capture audio and play it on the website.
Now I want to send that audio stream to a Node.js server and send it back to other clients, like a voice chat. I don't want to use WebRTC, because I need the stream on the server. How can I achieve this? Can I use socket.io-stream to do this? In the examples I saw, they recorded the audio and sent a file, but I need "live" audio.
I have recently done live audio upload using socket.io from the browser to a server. I am going to answer here in case someone else needs it.
var stream;
var input;
var socket = io();
var bufferSize = 1024 * 16;
var audioContext = new AudioContext();
// createScriptProcessor is deprecated. Let me know if anyone finds an alternative.
var processor = audioContext.createScriptProcessor(bufferSize, 1, 1);
processor.connect(audioContext.destination);
navigator.mediaDevices.getUserMedia({ video: false, audio: true }).then(handleMicStream).catch(err => {
console.log('error from getUserMedia', err);
});
handleMicStream will run when the user grants permission to use the microphone.
function handleMicStream(streamObj) {
// keep the stream (and the input source below) in globals so closeAll() can reach them
stream = streamObj;
input = audioContext.createMediaStreamSource(stream);
input.connect(processor);
processor.onaudioprocess = e => {
microphoneProcess(e); // receives data from microphone
};
}
function microphoneProcess(e) {
const left = e.inputBuffer.getChannelData(0); // get only one audio channel
const left16 = convertFloat32ToInt16(left); // skip if you don't need this
socket.emit('micBinaryStream', left16); // send to server via web socket
}
// Converts Float32 samples to 16-bit PCM, downsampling by a factor of 3 on the way
// (e.g. 48 kHz microphone input down to 16 kHz)
function convertFloat32ToInt16(buffer) {
let l = buffer.length;
const buf = new Int16Array(l / 3);
while (l--) {
if (l % 3 === 0) {
buf[l / 3] = buffer[l] * 0x7FFF;
}
}
return buf.buffer;
}
Have your socket.io server listen for micBinaryStream and you should get the data. I needed the data as 16-bit PCM for the Google speech API; if you do not need that, you can skip the call to convertFloat32ToInt16().
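For reference, a minimal sketch of the receiving side, assuming a plain Node.js socket.io server (the port and the broadcast step here are just placeholders):
const io = require('socket.io')(3000);

io.on('connection', (socket) => {
    socket.on('micBinaryStream', (chunk) => {
        // chunk is the 16-bit PCM data emitted by microphoneProcess() above
        // (it typically arrives as a Buffer on the Node side);
        // feed it to a speech API, write it to a file, or relay it to other clients.
        socket.broadcast.emit('micBinaryStream', chunk);
    });
});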
Important
When you need to stop listening you MUST disconnect the processor and end the stream. Run the closeAll() function below.
function closeAll() {
const tracks = stream ? stream.getTracks() : null;
const track = tracks ? tracks[0] : null;
if (track) {
track.stop();
}
if (processor) {
if (input) {
try {
input.disconnect(processor);
} catch (error) {
console.warn('Attempt to disconnect input failed.');
}
}
processor.disconnect(audioContext.destination);
}
if (audioContext) {
audioContext.close().then(() => {
input = null;
processor = null;
audioContext = null;
});
}
}
It's an old question, I see. I'm doing the same thing (except my server doesn't run Node.js and is written in C#) and stumbled upon this.
I don't know if anyone is still interested, but I've elaborated a bit. The current alternative to the deprecated createScriptProcessor is the AudioWorklet interface.
From: https://webaudio.github.io/web-audio-api/#audioworklet
1.32.1. Concepts
The AudioWorklet object allows developers to supply scripts (such as JavaScript or WebAssembly code) to process audio on the rendering thread, supporting custom AudioNodes. This processing mechanism ensures synchronous execution of the script code with other built-in AudioNodes in the audio graph.
You cannot implement interfaces in JavaScript as far as I know, but you can extend a class derived from one.
And the one we need is: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor
So I wrote a processor that just mirrors the input to the output and displays the values.
class CustomAudioProcessor extends AudioWorkletProcessor {
process (inputs, outputs, parameters) {
const input = inputs[0];
const output = outputs[0];
for (let channel = 0; channel < input.length; ++channel) {
for (let i = 0; i < input[channel].length; ++i) {
// Just copying all the data from input to output
output[channel][i] = input[channel][i];
// The next one will make the app crash but yeah, the values are there
// console.log(output[channel][i]);
}
}
// Return true to keep the processor alive.
return true;
}
}
The processor must then be placed into the audio pipeline, after the microphone and before the speakers.
function record() {
constraints = { audio: true };
navigator.mediaDevices.getUserMedia(constraints)
.then(function(stream) {
audioCtx = new AudioContext();
var source = audioCtx.createMediaStreamSource(stream);
audioCtx.audioWorklet.addModule("custom-audio-processor.js").then(() => {
customAudioProcessor = new AudioWorkletNode(audioCtx, "custom-audio-processor");
source.connect(customAudioProcessor);
customAudioProcessor.connect(audioCtx.destination);
})
// note: audio starts flowing as soon as the graph is connected;
// AudioDestinationNode has no play() method
});
}
Works! Good luck! :)
Related
I am pretty sure I did everything correctly, but when I try to play or download the file, nothing plays. I am using the Web Audio API to record audio from the microphone into WAV format, and this library to create the .wav file. It seems like nothing is being encoded.
navigator.mediaDevices.getUserMedia({
audio: true,video:false
})
.then((stream) => {
var data
context = new AudioContext()
var source = context.createMediaStreamSource(stream)
var scriptNode = context.createScriptProcessor(8192, 1, 1)
source.connect(scriptNode)
scriptNode.connect(context.destination)
encoder = new WavAudioEncoder(16000,1)
scriptNode.onaudioprocess = function(e){
data = e.inputBuffer.getChannelData('0')
console.log(data)
encoder.encode(data)
}
$('#stop').click(()=>{
source.disconnect()
scriptNode.disconnect()
blob = encoder.finish()
console.log(blob)
url = window.URL.createObjectURL(blob)
// audio source
$('#player').attr('src',url)
// audio control
$("#pw")[0].load()
})
})
I figured it out! To help anyone who needs to do the same thing: it uses the Web Audio API and this JavaScript library.
navigator.mediaDevices.getUserMedia({
audio: true,video:false
})
.then((stream) => {
context = new AudioContext()
var source = context.createMediaStreamSource(stream)
var rec = new Recorder(source)
rec.record()
$('#stop').click(()=>{
rec.stop()
rec.exportWAV(somefunction) // exportWAV() passes the WAV blob to your callback
})
})
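For completeness, the callback you pass to exportWAV() receives the WAV Blob. A small sketch of what somefunction might do, reusing the #player and #pw elements from the question (assumed to exist):
function somefunction(blob) {
    // turn the WAV blob into an object URL and hook it up to the audio element
    var url = window.URL.createObjectURL(blob);
    $('#player').attr('src', url);
    $('#pw')[0].load();
}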
Use RecordRTC for recording video and audio. I used it in my project and it works well. Here is the code to record audio using recordrtc.org:
startRecording(event) { // call this to start recording the Audio( or video or Both)
this.recording = true;
let mediaConstraints = {
audio: true
};
// Older browsers might not implement mediaDevices at all, so we set an empty object first
if (navigator.mediaDevices === undefined) {
navigator.mediaDevices = {};
}
// Some browsers partially implement mediaDevices. We can't just assign an object
// with getUserMedia as it would overwrite existing properties.
// Here, we will just add the getUserMedia property if it's missing.
if (navigator.mediaDevices.getUserMedia === undefined) {
navigator.mediaDevices.getUserMedia = function(constraints) {
// First get ahold of the legacy getUserMedia, if present
var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// Some browsers just don't implement it - return a rejected promise with an error
// to keep a consistent interface
if (!getUserMedia) {
return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
}
// Otherwise, wrap the call to the old navigator.getUserMedia with a Promise
return new Promise(function(resolve, reject) {
getUserMedia.call(navigator, constraints, resolve, reject);
});
}
}
navigator.mediaDevices.getUserMedia(mediaConstraints)
.then(successCallback.bind(this), errorCallback.bind(this));
}
successCallback(stream: MediaStream) {
var options = {
type: 'audio'
};
this.stream = stream;
this.recordRTC = RecordRTC(stream, options);
this.recordRTC.startRecording();
}
errorCallback(error: any) { // getUserMedia rejects with an error, not a stream
console.log(error);
}
stopRecording() { // call this to stop recording
this.recording = false;
this.converting = true;
let recordRTC = this.recordRTC;
if(!recordRTC) return;
recordRTC.stopRecording(this.processAudio.bind(this));
this.stream.getAudioTracks().forEach(track => track.stop());
}
processAudio(audioVideoWebMURL) {
let recordRTC = this.recordRTC;
var recordedBlob = recordRTC.getBlob(); // you can save the recorded media data in various formats, refer the link below.
console.log(recordedBlob)
this.recordRTC.save('audiorecording.wav');
let base64Data = '';
this.recordRTC.getDataURL((dataURL) => {
base64Data = dataURL.split('base64,')[1];
console.log(RecordRTC.getFromDisk('audio', function(dataURL,type) {
type == 'audio'
}));
console.log(dataURL);
})
}
Note that you cannot record audio/video on a live site in Google Chrome unless the site is served over HTTPS.
I'm using the new v2 Twilio Javascript SDK to make calls from the browser to other people.
This works fine but I've been asked to add volume controls for the incoming audio stream.
After some research it seems that I need to take the remote stream from the call and feed it through a gain node to reduce the volume.
Unfortunately the result from call.getRemoteStream is always null even when I can hear audio from the call.
I've tested this on latest Chrome and Edge and they have the same behavior.
Is there something else I need to do to access the remote stream?
Code:
async function makeCall(phoneNumber, token) // function named here so the declaration is valid syntax
{
console.log("isSecureContext: " + window.isSecureContext); //check we can get the stream
var options = {
edge: 'ashburn', // use the US (Ashburn) edge location
closeProtection: true // will warn user if you try to close browser window during an active call
};
var device = new Device(token, options);
const connectionParams = {
"phoneNumber": phoneNumber
};
var activeCall = await device.connect({ params: connectionParams });
//Setup gain (volume) control for incoming audio
//Note, getRemoteStream always returns null.
var remoteStream = activeCall.getRemoteStream();
if(remoteStream)
{
var audioCtx = new AudioContext();
var source = audioCtx.createMediaStreamSource(remoteStream);
var gainNode = audioCtx.createGain();
source.connect(gainNode)
gainNode.connect(audioCtx.destination);
}
else
{
console.log("No remote stream on call");
}
}
The log output is:
isSecureContext: true
then
No remote stream on call
Twilio support gave me the answer: you need to wait until you start receiving volume events before requesting the stream.
i.e.
call.on('volume', (inputVolume, outputVolume) => {
if(inputVolume > 0)
{
var remoteStream = activeCall.getRemoteStream();
....
}
});
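Putting that together with the gain-node wiring from the question, something like this (a sketch; call is the active call returned by device.connect(), and the 0.5 gain is just an example value):
let gainNode = null;

call.on('volume', (inputVolume, outputVolume) => {
    if (inputVolume > 0 && gainNode === null) {
        const remoteStream = call.getRemoteStream();
        if (remoteStream) {
            const audioCtx = new AudioContext();
            const source = audioCtx.createMediaStreamSource(remoteStream);
            gainNode = audioCtx.createGain();
            gainNode.gain.value = 0.5; // volume control for the incoming audio
            source.connect(gainNode);
            gainNode.connect(audioCtx.destination);
        }
    }
});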
I want to get the audio buffer while talking. I used this method to capture it, but I get a message that onaudioprocess is deprecated, and it is not fired. Is there any alternative to it, with an example?
audioContext = new AudioContext({ sampleRate: 16000 });
scriptNode = (audioContext.createScriptProcessor || audioContext.createJavaScriptNode).call(audioContext, 1024, 1, 1);
scriptNode.onaudioprocess = function (audioEvent) {
if (recording) {
input = audioEvent.inputBuffer.getChannelData(0);
// convert float audio data to 16-bit PCM
var buffer = new ArrayBuffer(input.length * 2);
var output = new DataView(buffer);
for (var i = 0, offset = 0; i < input.length; i++, offset += 2) {
var s = Math.max(-1, Math.min(1, input[i]));
output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
}
ws.send(buffer);
}
};
With the MediaStream Recording API and the MediaDevices.getUserMedia() method you're able to stream audio from your microphone and stream that into a recorder. The recorder can then send Blob objects through WebSockets whenever the ondataavailable event fires on the recorder.
The function below creates a stream and passes that to a MediaRecorder instance. That instance will record your microphone audio and is able to send that to your WebSocket. The instance of the MediaRecorder is returned to control the recorder.
async function streamMicrophoneAudioToSocket(ws) {
let stream;
const constraints = { video: false, audio: true };
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
} catch (error) {
throw new Error(`
MediaDevices.getUserMedia() threw an error.
Stream did not open.
${error.name} -
${error.message}
`);
}
const recorder = new MediaRecorder(stream);
recorder.addEventListener('dataavailable', ({ data }) => {
ws.send(data);
});
recorder.start();
return recorder;
}
That way you can also stop recording if you'd like by calling the stop() method on the recorder.
(async () => {
const ws = new WebSocket('ws://yoururl.com');
const recorder = await streamMicrophoneAudioToSocket(ws);
document.addEventListener('click', event => {
recorder.stop();
});
}());
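On the Node.js side, a receiver for those chunks could be as small as this (a sketch assuming the ws package; the port and the logging are placeholders):
// server.js
const { WebSocketServer } = require('ws');

const wss = new WebSocketServer({ port: 8080 });

wss.on('connection', (socket) => {
    socket.on('message', (chunk) => {
        // each message is a chunk of encoded audio (e.g. WebM/Opus) from the MediaRecorder;
        // append it to a file, transcode it, or relay it to other clients
        console.log(`received ${chunk.length} bytes`);
    });
});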
Sidenote: Although my earlier answer did help some people, it didn't provide an alternative to the deprecated onaudioprocess event and the ScriptProcessorNode interface. This answer should provide that alternative to the OP's question.
The answer is to use Audio Worklets, which enable us to create custom audio-processing nodes that can be used like a regular AudioNode.
The AudioWorkletNode interface of the Web Audio API represents a base class for a user-defined AudioNode, which can be connected to an audio routing graph along with other nodes. It has an associated AudioWorkletProcessor, which does the actual audio processing in a Web Audio rendering thread.
It works by extending the AudioWorkletProcessor class and providing the mandatory process method. The process method exposes the inputs, outputs and the parameters set in the static parameterDescriptors getter.
In there you can insert the same logic as in the onaudioprocess callback, but you do have to make some modifications for it to work properly.
One catch of using worklets is that the script has to be included as a separate file through the worklet interface. This means that dependencies, like the ws variable, are not available inside the worklet; instead, the processor posts its converted buffers back to the main thread over its MessagePort, and the main thread forwards them to the WebSocket.
Note: The process needs to return a boolean to let the browser know if the audio node should be kept alive or not.
registerProcessor('buffer-detector', class extends AudioWorkletProcessor {
    #isRecording = false;

    static get parameterDescriptors() {
        return [{
            name: 'Buffer Detector',
        }];
    }

    constructor() {
        super();
        // Dependencies like the WebSocket cannot be handed to the processor directly,
        // so the recording flag is toggled through the node's MessagePort instead.
        this.port.onmessage = ({ data }) => {
            if ('boolean' === typeof data.recording) {
                this.#isRecording = data.recording;
            }
        };
    }

    process(inputs, outputs, parameters) {
        const [input] = inputs; // first input; input[0] is the first (mono) channel

        if (this.#isRecording === true && input.length > 0) {
            const samples = input[0];
            const buffer = new ArrayBuffer(samples.length * 2);
            const output = new DataView(buffer);

            // Convert the float audio data to 16-bit PCM.
            for (let i = 0, offset = 0; i < samples.length; i++, offset += 2) {
                const s = Math.max(-1, Math.min(1, samples[i]));
                output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
            }

            // Post the buffer to the main thread, which owns the WebSocket.
            this.port.postMessage(buffer, [buffer]);
        }

        // Return true to keep the node alive.
        return true;
    }
});
Now all we have to do is include the worklet in your script and create an instance of the node. We can do this with the addModule method that exists on the BaseAudioContext.audioWorklet property.
Important: Adding the module only works in secure (HTTPS) contexts.
When the module has been added successfully, create the new node with the AudioWorkletNode constructor, hook its MessagePort up to the WebSocket, tell it to start recording, and you're good to go.
const ws = new WebSocket('ws://...');
const audioContext = new AudioContext();

const source = new MediaStreamAudioSourceNode(audioContext, {
  mediaStream: stream // Your stream here.
});

(async () => {
  try {
    // Register the worklet.
    await audioContext.audioWorklet.addModule('buffer-detector.js');

    // Create our custom node.
    const bufferDetectorNode = new AudioWorkletNode(audioContext, 'buffer-detector');

    // Forward each 16-bit PCM buffer posted by the processor to the WebSocket.
    bufferDetectorNode.port.onmessage = ({ data }) => ws.send(data);

    // Tell the processor to start recording.
    bufferDetectorNode.port.postMessage({ recording: true });

    // Connect the node.
    source.connect(bufferDetectorNode);
  } catch (error) {
    console.error(error);
  }
})();
I am recording browser audio input from the microphone and sending it via WebSocket to a Node.js service that writes the stream to a .wav file.
My problem is that the first recording comes out fine, but any subsequent recordings come out sounding very slow, about half the speed and are therefore unusable.
If I refresh the browser the first recording works again, and subsequent recordings are slowed down which is why I am sure the problem is not in the nodeJs service.
My project is an Angular 5 project.
I have pasted the code I am trying below.
I am using binary.js ->
https://cdn.jsdelivr.net/binaryjs/0.2.1/binary.min.js
this.client = BinaryClient(`ws://localhost:9001`)
createStream() {
window.Stream = this.client.createStream();
window.navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
this.success(stream);
})
}
stopRecording() {
this.recording = false;
this.win.Stream.end();
}
success(e) {
var audioContext = window.AudioContext || window.webkitAudioContext;
var context = new audioContext();
// the sample rate is in context.sampleRate
var audioInput = context.createMediaStreamSource(e);
var bufferSize = 2048;
var recorder = context.createScriptProcessor(bufferSize, 1, 1);
recorder.onaudioprocess = (e) => {
if (!this.recording) return;
console.log('recording');
var left = e.inputBuffer.getChannelData(0);
this.win.Stream.write(this.convertoFloat32ToInt16(left));
}
audioInput.connect(recorder)
recorder.connect(context.destination);
}
convertoFloat32ToInt16(buffer) {
var l = buffer.length;
var buf = new Int16Array(l)
while (l--) {
buf[l] = buffer[l] * 0xFFFF; //convert to 16 bit
}
return buf.buffer
}
I am stumped as to what can be going wrong so if anyone has experience using this browser tech I would appreciate any help.
Thanks.
I've had this exact problem - your problem is that the sample rate you are writing your WAV file with is incorrect.
You need to pass the sample rate used by the browser and the microphone to the node.js which writes the binary WAV file.
Client side:
After a successful navigator.mediaDevices.getUserMedia call (in your case, the success function), get the sampleRate value from the AudioContext:
var _sampleRate = context.sampleRate;
Then pass it to the Node.js listener as a parameter. In my case I used:
binaryClient.createStream({ SampleRate: _sampleRate });
Server (Node.js) side:
Use the passed SampleRate to set the WAV file's sample rate. In my case this is the code:
fileWriter = new wav.FileWriter(wavPath, {
channels: 1,
sampleRate: meta.SampleRate,
bitDepth: 16
});
This will prevent broken audio and WAV files that play back at the wrong pitch or speed (too slow or too fast).
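Tying the pieces together, the receiving side might be wired up roughly like this (a sketch assuming the binaryjs and wav npm packages; the port, file path and event wiring are placeholders, not the original project's code):
const BinaryServer = require('binaryjs').BinaryServer;
const wav = require('wav');

const server = BinaryServer({ port: 9001 });

server.on('connection', (client) => {
    client.on('stream', (stream, meta) => {
        // meta.SampleRate is the value sent from the browser in createStream()
        const fileWriter = new wav.FileWriter('recording.wav', {
            channels: 1,
            sampleRate: meta.SampleRate,
            bitDepth: 16
        });
        stream.pipe(fileWriter);
        stream.on('end', () => fileWriter.end());
    });
});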
Hope this helps.
Is it possible to access the microphone (built-in or auxiliary) from a browser using client-side JavaScript?
Ideally, it would store the recorded audio in the browser. Thanks!
Here we capture microphone audio as a Web Audio API event-loop buffer using getUserMedia(). Time-domain and frequency-domain snippets of each audio event-loop buffer are printed to the browser console (hit F12 or Ctrl+Shift+I to view them).
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone audio into buffer</title>
<script type="text/javascript">
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE = 16384;
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_fft_node = null,
analyserNode = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
function show_some_data(given_typed_array, num_row_to_display, label) {
var size_buffer = given_typed_array.length;
var index = 0;
var max_index = num_row_to_display;
console.log("__________ " + label);
for (; index < max_index && index < size_buffer; index += 1) {
console.log(given_typed_array[index]);
}
}
function process_microphone_buffer(event) { // invoked by event loop
var i, N, inp, microphone_output_buffer;
microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
// microphone_output_buffer <-- this buffer contains current gulp of data size BUFF_SIZE
show_some_data(microphone_output_buffer, 5, "from getChannelData");
}
function start_microphone(stream){
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination );
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
microphone_stream.connect(script_processor_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function() {
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
// --- setup FFT
script_processor_fft_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_fft_node.connect(gain_node);
analyserNode = audioContext.createAnalyser();
analyserNode.smoothingTimeConstant = 0;
analyserNode.fftSize = 2048;
microphone_stream.connect(analyserNode);
analyserNode.connect(script_processor_fft_node);
script_processor_fft_node.onaudioprocess = function() {
// get the average for the first channel
var array = new Uint8Array(analyserNode.frequencyBinCount);
analyserNode.getByteFrequencyData(array);
// draw the spectrogram
if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
show_some_data(array, 5, "from fft");
}
};
}
}(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
Since this code exposes the microphone data as a buffer, you could add the ability to stream it using WebSockets, or simply aggregate each event-loop buffer into a monster buffer and then download the monster to a file, as sketched below.
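A sketch of that second option, replacing the demo's process_microphone_buffer with a version that keeps every buffer, plus a hypothetical download helper (not part of the demo above):
var recorded_chunks = [];

function process_microphone_buffer(event) {
    // copy each event-loop buffer, since the underlying memory gets reused
    recorded_chunks.push(new Float32Array(event.inputBuffer.getChannelData(0)));
}

function download_recording() {
    // concatenate all chunks into one monster Float32Array and save it as a raw file
    var total_length = recorded_chunks.reduce(function(sum, chunk) { return sum + chunk.length; }, 0);
    var monster = new Float32Array(total_length);
    var offset = 0;
    recorded_chunks.forEach(function(chunk) {
        monster.set(chunk, offset);
        offset += chunk.length;
    });

    var blob = new Blob([monster.buffer], { type: 'application/octet-stream' });
    var link = document.createElement('a');
    link.href = window.URL.createObjectURL(blob);
    link.download = 'raw_microphone_audio.f32'; // raw 32-bit float samples at context.sampleRate
    link.click();
}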
Notice the call to
var audioContext = new AudioContext();
which indicates it's using the Web Audio API, which is baked into all modern browsers (including mobile browsers) to provide an extremely powerful audio platform of which tapping into the mic is but a tiny fragment ... NOTE: the CPU usage jumps up because this demo writes each event-loop buffer into the browser console log, which is for testing only, so actual use is far less resource intensive even when you modify this to stream audio elsewhere.
Links to some Web Audio API documentation
Basic concepts behind Web Audio API
SO wiki on Web Audio API
nice Web Audio API demos ... some with github links
Yes, you can.
Using the getUserMedia() API, you can capture raw audio input from your microphone.
You need a secure context to query the devices:
getUserMedia() is a powerful feature which can only be used in secure
contexts; in insecure contexts, navigator.mediaDevices is undefined,
preventing access to getUserMedia(). A secure context is, in short, a
page loaded using HTTPS or the file:/// URL scheme, or a page loaded
from localhost.
async function getMedia(constraints) {
let stream = null;
try {
stream = await navigator.mediaDevices.getUserMedia(constraints);
console.log(stream)
} catch(err) {
document.write(err)
}
}
getMedia({ audio: true, video: true })
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
This is a simple way:
//event:
const micButtonClicked = () => {
//check the access:
isMicrophoneAllowed(isAllowed => {
if(isAllowed)
record();
else
navigator.mediaDevices.getUserMedia({audio: true})
.then(stream => record())
.catch(err => alert('need permission to use microphone'));
});
}
//isMicrophoneAllowed:
const isMicrophoneAllowed = callback => {
    navigator.permissions.query({name: 'microphone'})
        .then(permissionStatus => callback(permissionStatus.state === 'granted'));
}
//record:
const record = () => {
// start recording...
}
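A possible implementation of record(), keeping the audio in the browser as the question asks (a sketch using MediaRecorder; the chunks array and the playback step are assumptions, not part of the snippet above):
let mediaRecorder = null;
let recordedChunks = [];

const record = () => {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
            recordedChunks = [];
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.ondataavailable = e => recordedChunks.push(e.data);
            mediaRecorder.onstop = () => {
                // the recording stays in the browser as a Blob; play it back via an object URL
                const blob = new Blob(recordedChunks, { type: mediaRecorder.mimeType });
                new Audio(URL.createObjectURL(blob)).play();
            };
            mediaRecorder.start();
        });
}

// stop the recording later (and trigger playback) with: mediaRecorder.stop();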