How can I publish audio over a WebSocket - JavaScript

I am developing an application that publishes an audio stream from the microphone over WebSockets. I am not able to play the WebSocket response in an audio control. Can anyone tell me how to play an audio buffer in an audio control?

I use the following code to play sounds created with a software synth.
The samples need to be in the range [-1.0, 1.0]. You should initialize context once in the page's init function.
var context = new (window.AudioContext || window.webkitAudioContext)();

function playSound(buffer, freq, vol) { // samples, sampleRate, volume 0-100
  var mBuffer = context.createBuffer(1, buffer.length, freq);
  var dataBuffer = mBuffer.getChannelData(0);
  for (var i = 0, n = buffer.length; i < n; i++) {
    dataBuffer[i] = buffer[i];
  }

  var node = context.createBufferSource();
  node.buffer = mBuffer;

  // AudioBufferSourceNode no longer exposes a gain property; use a GainNode.
  var gainNode = context.createGain();
  gainNode.gain.value = 0.5 * vol / 100.0;

  node.connect(gainNode);
  gainNode.connect(context.destination);
  node.start(0); // noteOn(0) in the old webkit-prefixed API
}
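The question also asks how to feed audio that arrives over a WebSocket into such a buffer. Here is a minimal sketch of that part, assuming (purely for illustration) that a server at ws://example.com/audio pushes binary frames of raw little-endian Float32 samples recorded at 44100 Hz:

// Minimal sketch: play raw Float32 PCM frames received over a WebSocket.
// The URL and the 44100 Hz sample rate are placeholders; adjust them to
// whatever your server actually sends.
var wsCtx = new (window.AudioContext || window.webkitAudioContext)();
var ws = new WebSocket('ws://example.com/audio');
ws.binaryType = 'arraybuffer';

var playTime = 0; // running schedule position so chunks play back to back

ws.onmessage = function (event) {
  var samples = new Float32Array(event.data);
  var chunk = wsCtx.createBuffer(1, samples.length, 44100);
  chunk.getChannelData(0).set(samples);

  var node = wsCtx.createBufferSource();
  node.buffer = chunk;
  node.connect(wsCtx.destination);

  // Schedule each chunk right after the previous one to avoid gaps.
  playTime = Math.max(playTime, wsCtx.currentTime);
  node.start(playTime);
  playTime += chunk.duration;
};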

Related

Record internal audio of a website via javascript

I made this web app to compose music, and I wanted to add a feature to download the composition as .mp3/.wav/whatever file format is possible. I've searched for how to do this many times and always gave up, as I couldn't find any examples; the only things I found were microphone recorders, but I want to record the final audio destination of the website.
I play audio in this way:
const a_ctx = new (window.AudioContext || window.webkitAudioContext)();

function playAudio(buf) {
  const source = a_ctx.createBufferSource();
  source.buffer = buf;
  source.playbackRate.value = pitchKey;
  // Other code to modify the audio, like adding reverb and changing volume
  source.start(0);
}
where buf is the AudioBuffer.
To sum up, I want to record the whole window's audio but can't come up with a way.
link to the whole website code on github
Maybe you could use the MediaStream Recording API (https://developer.mozilla.org/en-US/docs/Web/API/MediaStream_Recording_API):
The MediaStream Recording API, sometimes simply referred to as the Media Recording API or the MediaRecorder API, is closely affiliated with the Media Capture and Streams API and the WebRTC API. The MediaStream Recording API makes it possible to capture the data generated by a MediaStream or HTMLMediaElement object for analysis, processing, or saving to disk. It's also surprisingly easy to work with.
Also, you may take a look at this topic: "new MediaRecorder(stream[, options]) stream can living modify?". It seems to discuss a related issue and might give you some insights.
The following code generates some random noise, applies a transform, plays it, and creates an audio control that allows the noise to be downloaded from the context menu via "Save audio as..." (I needed to change the extension of the saved file to .wav in order to play it.)
<html>
<head>
  <script>
    const context = new (window.AudioContext || window.webkitAudioContext)();

    async function run() {
      var myArrayBuffer = context.createBuffer(2, context.sampleRate, context.sampleRate);

      // Fill the buffer with white noise;
      // just random values between -1.0 and 1.0
      for (var channel = 0; channel < myArrayBuffer.numberOfChannels; channel++) {
        // This gives us the actual array that contains the data
        var nowBuffering = myArrayBuffer.getChannelData(channel);
        for (var i = 0; i < myArrayBuffer.length; i++) {
          // audio needs to be in [-1.0; 1.0]
          nowBuffering[i] = Math.random() * 2 - 1;
        }
      }
      playAudio(myArrayBuffer);
    }

    function playAudio(buf) {
      const streamNode = context.createMediaStreamDestination();
      const stream = streamNode.stream;
      const recorder = new MediaRecorder(stream);
      const chunks = [];
      recorder.ondataavailable = evt => chunks.push(evt.data);
      recorder.onstop = evt => exportAudio(new Blob(chunks));

      const source = context.createBufferSource();
      source.onended = () => recorder.stop();
      source.buffer = buf;
      source.playbackRate.value = 0.2;
      source.connect(streamNode);
      source.connect(context.destination);
      source.start(0);
      recorder.start();
    }

    function exportAudio(blob) {
      const aud = new Audio(URL.createObjectURL(blob));
      aud.controls = true;
      document.body.prepend(aud);
    }
  </script>
</head>
<body onload="run()">
  <input type="button" onclick="context.resume()" value="play"/>
</body>
</html>
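An optional refinement, in case renaming the downloaded file is a nuisance: the relevant lines of playAudio above can declare the container explicitly, so the Blob (and thus the saved file) carries a concrete MIME type. A small sketch; support for specific mimeType values varies by browser, hence the isTypeSupported guard:

// Sketch: record with an explicit MIME type so the saved file's format
// is unambiguous. Reuses stream, chunks and exportAudio from above.
const mimeType = MediaRecorder.isTypeSupported('audio/webm;codecs=opus')
  ? 'audio/webm;codecs=opus'
  : '';
const recorder = new MediaRecorder(stream, mimeType ? { mimeType } : {});
recorder.ondataavailable = evt => chunks.push(evt.data);
recorder.onstop = evt => exportAudio(new Blob(chunks, { type: recorder.mimeType }));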
Is this what you were looking for?

Stream audio over websocket with low latency and no interruption

I'm working on a project which requires the ability to stream audio from a webpage to other clients. I'm already using WebSockets and would like to channel the data there.
My current approach uses MediaRecorder, but there is a problem with sampling which causes interruptions. It records 1 s of audio and then sends it to the server, which relays it to the other clients. Is there a way to capture a continuous audio stream and transform it to base64?
Maybe if there is a way to create base64 audio from a MediaStream without delay, it would solve the problem. What do you think?
I would like to keep using WebSockets; I know there is WebRTC.
Have you ever done something like this? Is this doable?
                                                              --> Device 1
MediaStream -> MediaRecorder -> base64 -> WebSocket -> Server --> Device ..
                                                              --> Device 18
Here is a demo of the current approach; you can try it here: https://jsfiddle.net/8qhvrcbz/
var sendAudio = function(b64) {
  var message = 'var audio = document.createElement(\'audio\');';
  message += 'audio.src = "' + b64 + '";';
  message += 'audio.play().catch(console.error);';
  eval(message);
  console.log(b64);
};

navigator.mediaDevices.getUserMedia({
  audio: true
}).then(function(stream) {
  setInterval(function() {
    var chunks = [];
    var recorder = new MediaRecorder(stream);
    recorder.ondataavailable = function(e) {
      chunks.push(e.data);
    };
    recorder.onstop = function(e) {
      var audioBlob = new Blob(chunks);
      var reader = new FileReader();
      reader.readAsDataURL(audioBlob);
      reader.onloadend = function() {
        var b64 = reader.result;
        b64 = b64.replace('application/octet-stream', 'audio/mpeg');
        sendAudio(b64);
      };
    };
    recorder.start();
    setTimeout(function() {
      recorder.stop();
    }, 1050);
  }, 1000);
});
WebSocket is not the best fit here. I solved it by using WebRTC instead of WebSockets.
The WebSocket-based workaround was to record 1050 ms instead of 1000 ms; that causes a bit of overlap, but it is still better than hearing gaps.
Although you have solved this through WebRTC, which is the industry-recommended approach, I'd like to share my answer as well.
The problem here is not WebSockets in general but rather the MediaRecorder API. Instead of using it, one can capture PCM audio and then submit the captured array buffers to a web worker or a WASM module for encoding into MP3 chunks or similar.
const context = new AudioContext();
let leftChannel = [];
let rightChannel = [];
let recordingLength = null;
let bufferSize = 512;
let sampleRate = context.sampleRate;

// audioStream is the MediaStream obtained from getUserMedia({ audio: true })
const audioSource = context.createMediaStreamSource(audioStream);
const scriptNode = context.createScriptProcessor(bufferSize, 1, 1);

audioSource.connect(scriptNode);
scriptNode.connect(context.destination);

scriptNode.onaudioprocess = function(e) {
  // Do something with the data, e.g. convert it to WAV or MP3
};
Based on my experiments, this gives you "real-time" audio. My theory about the MediaRecorder API is that it does some buffering before emitting anything, and that buffering causes the observable delay.
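As a minimal sketch of what that onaudioprocess body could do without MediaRecorder, the Float32 block can be converted to 16-bit PCM and pushed straight down a WebSocket (the ws:// endpoint is a placeholder, and any WAV/MP3 encoding in a worker is left out):

// Sketch of the onaudioprocess body: convert each Float32 block to
// 16-bit PCM and send it over an already-open WebSocket.
const ws = new WebSocket('ws://localhost:9001/audio'); // placeholder endpoint
ws.binaryType = 'arraybuffer';

scriptNode.onaudioprocess = function(e) {
  const input = e.inputBuffer.getChannelData(0);
  const pcm = new Int16Array(input.length);
  for (let i = 0; i < input.length; i++) {
    // Clamp to [-1, 1] and scale to the signed 16-bit range.
    const s = Math.max(-1, Math.min(1, input[i]));
    pcm[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
  }
  if (ws.readyState === WebSocket.OPEN) {
    ws.send(pcm.buffer);
  }
};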

Recording browser audio using navigator.mediaDevices.getUserMedia

I am recording browser audio input from the microphone and sending it via WebSocket to a Node.js service that writes the stream to a .wav file.
My problem is that the first recording comes out fine, but any subsequent recordings come out sounding very slow, at about half speed, and are therefore unusable.
If I refresh the browser, the first recording works again and subsequent recordings are slowed down, which is why I am sure the problem is not in the Node.js service.
My project is an Angular 5 project.
I have pasted the code I am trying below.
I am using binary.js ->
https://cdn.jsdelivr.net/binaryjs/0.2.1/binary.min.js
this.client = BinaryClient(`ws://localhost:9001`);

createStream() {
  window.Stream = this.client.createStream();
  window.navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
    this.success(stream);
  });
}

stopRecording() {
  this.recording = false;
  this.win.Stream.end();
}

success(e) {
  var audioContext = window.AudioContext || window.webkitAudioContext;
  var context = new audioContext();
  // the sample rate is in context.sampleRate
  var audioInput = context.createMediaStreamSource(e);
  var bufferSize = 2048;
  var recorder = context.createScriptProcessor(bufferSize, 1, 1);

  recorder.onaudioprocess = (e) => {
    if (!this.recording) return;
    console.log('recording');
    var left = e.inputBuffer.getChannelData(0);
    this.win.Stream.write(this.convertoFloat32ToInt16(left));
  };

  audioInput.connect(recorder);
  recorder.connect(context.destination);
}

convertoFloat32ToInt16(buffer) {
  var l = buffer.length;
  var buf = new Int16Array(l);
  while (l--) {
    buf[l] = buffer[l] * 0xFFFF; // convert to 16 bit
  }
  return buf.buffer;
}
I am stumped as to what could be going wrong, so if anyone has experience using this browser tech, I would appreciate any help.
Thanks.
I've had this exact problem: the sample rate you are writing your WAV file with is incorrect.
You need to pass the sample rate used by the browser and the microphone to the Node.js service that writes the binary WAV file.
Client side:
After a successful navigator.mediaDevices.getUserMedia call (in your case, in the success function), get the sampleRate value from the AudioContext:
var _sampleRate = context.sampleRate;
Then pass it to the Node.js listener as a parameter. In my case I used:
binaryClient.createStream({ SampleRate: _sampleRate });
Server (Node.js) side:
Use the passed SampleRate to set the WAV file's sample rate. In my case this is the code:
fileWriter = new wav.FileWriter(wavPath, {
  channels: 1,
  sampleRate: meta.SampleRate,
  bitDepth: 16
});
This will prevent broken audio and low-pitched, slow, or fast WAV files.
Hope this helps.
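For completeness, a sketch of what the Node.js side could look like with the binaryjs and wav packages implied by the snippets above (the file name and port are placeholders; your service may be structured differently):

// Sketch: receive the PCM stream plus its SampleRate meta and write a WAV
// file with the correct sample rate.
const BinaryServer = require('binaryjs').BinaryServer;
const wav = require('wav');

const server = new BinaryServer({ port: 9001 });

server.on('connection', (client) => {
  client.on('stream', (stream, meta) => {
    // Use the sample rate the browser reported instead of a hard-coded one.
    const fileWriter = new wav.FileWriter('recording.wav', {
      channels: 1,
      sampleRate: meta.SampleRate,
      bitDepth: 16
    });
    stream.pipe(fileWriter);
  });
});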

How to draw analyzer from audio url?

I am getting the audio URL (http://api.server.com/uploads/files/ecae64b511b1266fa3930731ec379d2dcdcc7546.wav) from an API server. I want to draw this sound on a canvas. The following function works on a blob object (recorded from the user), but it does not work with the URL:
$window.AudioContext = $window.AudioContext || $window.webkitAudioContext;
vm.audioContext = new AudioContext();

function gotStream(stream) {
  vm.inputPoint = vm.audioContext.createGain();
  vm.realAudioInput = vm.audioContext.createMediaStreamSource(stream);
  vm.audioInput = vm.realAudioInput;
  vm.audioInput.connect(vm.inputPoint);
  // audioInput = convertToMono( input );

  vm.analyserNode = vm.audioContext.createAnalyser();
  vm.analyserNode.fftSize = 2048;
  vm.inputPoint.connect(vm.analyserNode);

  vm.audioRecorder = new Recorder(vm.inputPoint);

  var zeroGain = vm.audioContext.createGain();
  zeroGain.gain.value = 0.0;
  vm.inputPoint.connect(zeroGain);
  zeroGain.connect(vm.audioContext.destination);

  updateAnalysers();
}
This is probably a CORS issue if the file comes from a different origin than the code. If you don't fix that, the MediaStreamSource will probably just output zeroes.
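Once the server sends the appropriate CORS headers (or the file is served from the same origin), one way to drive the same analyser from the URL instead of a microphone stream is sketched below, reusing vm.audioContext and updateAnalysers from the question's code:

// Sketch: fetch and decode the remote file, then feed it through an
// AnalyserNode. Requires CORS headers if the file is on another origin.
async function analyseFromUrl(url) {
  const response = await fetch(url, { mode: 'cors' });
  const encoded = await response.arrayBuffer();
  const audioBuffer = await vm.audioContext.decodeAudioData(encoded);

  const source = vm.audioContext.createBufferSource();
  source.buffer = audioBuffer;

  vm.analyserNode = vm.audioContext.createAnalyser();
  vm.analyserNode.fftSize = 2048;

  source.connect(vm.analyserNode);
  vm.analyserNode.connect(vm.audioContext.destination);
  source.start(0);

  updateAnalysers(); // reuse the existing drawing loop
}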

Web Audio API multiple scriptprocessor nodes

I've been searching for a solution to this problem for nearly two days now.
I have a Web Audio API app that captures the microphone input. In one script processor I'm windowing the signal with a Hanning window, which works fine when the audio chain looks like this:
source -> windowScriptProcessorNode -> audioContext.destination
Then I wanted to add another script processor to the chain, like this:
source -> windowScriptProcessorNode -> otherScriptProcessorNode -> audioContext.destination
But the inputBuffer of otherScriptProcessorNode contains just zeros instead of the signal from windowScriptProcessorNode.
Here is some code:
var audioContext = new AudioContext();

// get microphone input via getUserMedia
navigator.getUserMedia({ audio: true }, function(stream) {
  // set up source
  var audioSource = audioContext.createMediaStreamSource(stream);
  audioSource.buffer = stream;

  // set up hanning window script processor node
  var windowScriptProcessorNode = audioContext.createScriptProcessor(BLOCKLENGTH, 1, 1);
  windowScriptProcessorNode.onaudioprocess = function(e) {
    var windowNodeInput = e.inputBuffer.getChannelData(0);
    var windowNodeOutput = e.outputBuffer.getChannelData(0);
    if (windowfunction == true) {
      windowNodeOutput.set(calc.applyDspWindowFunction(windowNodeInput));
    } else {
      windowNodeOutput.set(windowNodeInput);
    }
  };

  // some other script processor node, just passing through the signal
  var otherScriptProcessorNode = audioContext.createScriptProcessor(BLOCKLENGTH, 1, 1);
  otherScriptProcessorNode.onaudioprocess = function(e) {
    var otherNodeInput = e.inputBuffer.getChannelData(0);
    var otherNodeOutput = e.outputBuffer.getChannelData(0);
    otherNodeOutput.set(otherNodeInput);
  };

  // this connection works fine!
  audioSource.connect(windowScriptProcessorNode);
  windowScriptProcessorNode.connect(audioContext.destination);

  /* // this connection does NOT work
  audioSource.connect(windowScriptProcessorNode);
  windowScriptProcessorNode.connect(otherScriptProcessorNode);
  otherScriptProcessorNode.connect(audioContext.destination);
  */
});
