Realtime microphone input mixing with music playback - javascript

I am trying to build an Internet Radio platform and have struggled a lot with the problem mentioned in the title.
To explain further, what I am trying to achieve is: 1) while recording input from the broadcaster's microphone, mix it with audio from music playback, and 2) at the same time be able to lower or raise the volume of the music playback (also in realtime, through the UI) so that the broadcaster's voice can blend with the music.
This imitates typical radio broadcaster behavior, where the music volume is lowered when the person wants to speak and raised back up when they finish talking. The second feature definitely comes after the first, but mentioning it helps explain both.
To conclude, I have already managed to write code that receives and reproduces microphone input (though it doesn't work perfectly). At this point I need to know whether there is code or a library that can help me do exactly what I am trying to do. All of this is in the hope that I won't need to use IceCast etc.
Below is my code for getting microphone input:
// getting microphone input and sending it to our server
var recordedChunks = [];
var mediaRecorder = null;
var sendInterval = null;
let _stream = null;

let slice = 100;               // how frequently we capture sound (ms)
const slices = 20;             // 20 slices => 2 seconds
let sendfreq = slice * slices; // how frequently we send it (ms)

/* get microphone button handle */
var microphoneButton = document.getElementById('console-toggle-microphone');
microphoneButton.setAttribute('on', 'no');

/* initialise mic streaming capability */
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
    .then(stream => {
        _stream = stream;
    })
    .catch(function(err) {
        show_error('Error: Microphone access has probably been denied!', err);
    });

function toggle_mic() {
    if (microphoneButton.getAttribute('on') == 'yes') {
        clearInterval(sendInterval); // stop producing new recordings
        microphoneButton.setAttribute('on', 'no');
        microphoneButton.innerHTML = 'start mic';
    }
    else if (microphoneButton.getAttribute('on') == 'no') {
        microphoneButton.setAttribute('on', 'yes');
        microphoneButton.innerHTML = 'stop mic';

        function record_and_send() {
            const recorder = new MediaRecorder(_stream);
            const chunks = [];
            recorder.ondataavailable = e => chunks.push(e.data);
            recorder.onstop = e => socket.emit('console-mic-chunks', chunks);
            setTimeout(() => recorder.stop(), sendfreq); // we'll have a 2 s media file
            recorder.start();
        }

        // generate a new file every 2 s
        sendInterval = setInterval(record_and_send, sendfreq);
    }
}
Thanks a lot!

If the audio track from the microphone doesn't need to be synchronized with the audio playback (and I don't see any reason why it would in your case), then you can simply play two separate audio instances and change the volume of the one that needs it (the music playback in your case).
In short, you don't have to mix audio tracks or do anything complex to solve this task.
Draft example:
<input type="range" id="myRange" value="20" oninput="changeVol(this.value)" onchange="changeVol(this.value)">
// Audio playback
const audioPlayback = new Audio();
const audioPlaybackSrc = document.createElement("source");
audioPlaybackSrc.type = "audio/mpeg";
audioPlaybackSrc.src = "path/to/audio.mp3";
audioPlayback.appendChild(audioPlaybackSrc);
audioPlayback.play();

// Change volume for audio playback on the fly
function changeVol(newVolumeValue) {
    // the range input goes from 0 to 100, but volume expects a value from 0 to 1
    audioPlayback.volume = newVolumeValue / 100;
}
// Dealing with the microphone
navigator.mediaDevices.getUserMedia({
    audio: true
})
.then(stream => {
    // Start recording the audio
    const mediaRecorder = new MediaRecorder(stream);
    mediaRecorder.start();

    // While recording, store the audio data chunks
    const audioChunks = [];
    mediaRecorder.addEventListener("dataavailable", event => {
        audioChunks.push(event.data);
    });

    // Play the audio after stop
    mediaRecorder.addEventListener("stop", () => {
        const audioBlob = new Blob(audioChunks);
        const audioUrl = URL.createObjectURL(audioBlob);
        const audio = new Audio(audioUrl);
        audio.play();
    });

    // Stop recording the audio
    setTimeout(() => {
        mediaRecorder.stop();
    }, 3000);
});
See also:
Play multiple audio files simultaneously
Change audio volume with JS
How to record and play audio in JavaScript
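The answer above avoids mixing altogether, which is fine for local monitoring. If you do want a single mixed stream on the broadcaster's side (for example, to record it or send it to a server), a minimal Web Audio API sketch could look like the following; musicElement and micStream are assumptions here (an <audio> element and the stream returned by getUserMedia), not names from the question:
// Mixing sketch (assumptions: musicElement is an <audio> element,
// micStream is the MediaStream returned by getUserMedia)
const ctx = new AudioContext();

const micSource = ctx.createMediaStreamSource(micStream);
const musicSource = ctx.createMediaElementSource(musicElement);

const micGain = ctx.createGain();
const musicGain = ctx.createGain();
micSource.connect(micGain);
musicSource.connect(musicGain);

// Mix both into a MediaStreamDestinationNode whose stream can be recorded or sent
const dest = ctx.createMediaStreamDestination();
micGain.connect(dest);
musicGain.connect(dest);

// Keep the music audible locally as well (optional)
musicGain.connect(ctx.destination);

// "Duck" the music while speaking, e.g. from a UI slider (value in 0..1)
function setMusicVolume(value) {
    musicGain.gain.setValueAtTime(value, ctx.currentTime);
}

// The mixed stream can then be fed to a MediaRecorder and sent to the server
const mixedRecorder = new MediaRecorder(dest.stream);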

Related

Using events to deliver audio blobs to audio player (js)

The following JavaScript code records sound and generates a blob of audio every 0.5 seconds.
After recording has stopped, the program plays the first blob, data[0].
I need the audio player to fire an event after data[0] has played, and the event handler will deliver the next portion to the audio player: data[1] (then data[2], data[3], etc.).
How can I modify the code, and which objects should I use to do this?
I know that I could pass the whole data[] array to the audio player, but I need a mechanism that allows the audio player to request the next portions using events.
navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function onSuccess(stream) {
        const recorder = new MediaRecorder(stream);
        const data = [];
        recorder.ondataavailable = (e) => {
            data.push(e.data);
        };
        recorder.start(500); // will fire the 'dataavailable' event every 0.5 second
        recorder.onstop = (e) => {
            const audio = document.createElement('audio');
            audio.src = window.URL.createObjectURL(new Blob( data[0] ));
        }
        setTimeout(() => {
            rec.stop();
        }, 5000);
    })
    .catch(function onError(error) {
        console.log(error.message);
    });
I guess that's what you're looking for?
navigator.mediaDevices
    .getUserMedia({ audio: true })
    .then(function onSuccess(stream) {
        // create the audio element and play the live stream
        const audio = document.createElement('audio');
        audio.srcObject = stream; // Pass the audio stream
        audio.controls = true;
        audio.play();
        document.body.appendChild(audio);

        const recorder = new MediaRecorder(stream);
        const data = [];

        // Set event listener
        // ondataavailable will fire when you call stop() or requestData(),
        // or after each timeslice you pass to the start function.
        recorder.ondataavailable = e => data.push(e.data);

        // Start recording
        // Will generate a blob every 500 ms
        recorder.start(500);
    })
    .catch(function onError(error) {
        console.log(error.message);
    });
You had some mistakes to correct:
When you call start() with a timeslice parameter, that by itself will not fire the ondataavailable event; you need to stop the recorder to fire the event and create the blob.
You made a mistake with the recorder variable's name and with the time in the setTimeout call.
You recreate an audio player every time the recorder stops and never append it to the DOM.
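To get the event-driven chaining the question asks for, one minimal sketch is to listen for the audio element's ended event and feed it the next portion; it assumes data already holds the recorded blobs, as in the snippets above:
// Event-driven chaining sketch (assumes `data` holds the recorded blobs)
const player = document.createElement('audio');
document.body.appendChild(player);

let index = 0;

function playNext() {
    if (index >= data.length) return;   // nothing left to play
    player.src = URL.createObjectURL(new Blob([data[index]]));
    index++;
    player.play();
}

// When one portion finishes, the player "requests" the next one
player.addEventListener('ended', playNext);

// Kick off with data[0] once recording has stopped
playNext();
Keep in mind that blobs produced by start(timeslice) after the first one are generally not independently decodable (they lack the WebM header), so chunk-by-chunk playback like this may need the chunks concatenated or re-encoded first.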

AudioTrack created from AudioStreamDestinationNode does not stop when all source audio tracks are stopped

I merged AudioTracks from getDisplayMedia (system audio) and getUserMedia (microphone) and created a new audio track from them using an AudioContext. When I stop each of the source audio tracks, the new audio track from the MediaStreamDestinationNode (the outcome of merging the two source audio tracks) does not get stopped.
My questions are:
I wonder if it is possible to bubble up the ended event of the source audio tracks to the new audio track merged by the AudioContext.
Just curious whether there is a purpose in keeping the MediaStreamDestinationNode's audio track alive when all source audio tracks are stopped.
var screen = navigator.mediaDevices.getDisplayMedia({
    video: {
        width: { ideal: 1280, max: 1280 },
        height: { ideal: 720, max: 720 }
    },
    audio: {
        echoCancellation: false,
        noiseSuppression: true,
        sampleRate: 44100,
    }
}).then(async (stream) => {
    const ctx = new AudioContext();

    mediaStream_01 = await navigator.mediaDevices.getUserMedia({ audio: { deviceId: "default" } });
    mediaStream_01.getAudioTracks()[0].onended = function () { console.log('mic audio stream ended') };
    stream.getAudioTracks()[0].onended = function () { console.log('screenshare audio stream ended') };
    stream.getVideoTracks()[0].onended = function () { console.log('screenshare video stream ended') };
    mediaStream_02 = stream;

    audioIn_01 = ctx.createMediaStreamSource(mediaStream_01);
    audioIn_02 = ctx.createMediaStreamSource(mediaStream_02);

    dest = ctx.createMediaStreamDestination();
    audioIn_01.connect(dest);
    audioIn_02.connect(dest);

    newStream = new MediaStream([...stream.getVideoTracks(), ...dest.stream.getAudioTracks()]);
    newStream.getAudioTracks()[0].onended = function () { console.log('merged audio track ended') };

    var recorder = new MediaRecorder(newStream);
});
When I manually stop the screen share and the default microphone (disconnect), the onended events fire for the source audio tracks, but the onended event does not fire for the MediaStreamDestinationNode's audio track. Because the new audio track does not get stopped, the MediaRecorder keeps recording an empty stream and has to be stopped manually.
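As far as I can tell there is no automatic bubbling: the destination node's track stays live (producing silence) as long as the node exists, regardless of its inputs. A minimal workaround sketch, reusing the variable names from the snippet above, is to watch the source tracks yourself and stop the merged track and the recorder once they have all ended:
// Workaround sketch: stop the merged track and the recorder manually
// once every source audio track has ended.
const sourceTracks = [
    mediaStream_01.getAudioTracks()[0],
    mediaStream_02.getAudioTracks()[0],
];

let endedCount = 0;
sourceTracks.forEach(track => {
    track.addEventListener('ended', () => {
        endedCount++;
        if (endedCount === sourceTracks.length) {
            // stop the destination node's track ourselves...
            dest.stream.getAudioTracks().forEach(t => t.stop());
            // ...and stop the recorder so it doesn't keep recording silence
            if (recorder.state !== 'inactive') recorder.stop();
        }
    });
});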

Record (and stop) an already running Webcam/Media Stream

I’ve created a minimal WebRTC test site that is able to request the user’s webcam/audio stream, to record it, and to play back the recording after it has been stopped.
Demo: https://output.jsbin.com/tabosipefo/
Edit1: https://jsbin.com/tabosipefo/edit?html,console,output
Since this all happens within one navigator.mediaDevices.getUserMedia() promise, I was wondering if it is actually possible to detect an ongoing stream and (a) record it, and (b) stop and save it.
1 WebRTC does not work in jsbin when in edit view for some reason...
If you use no framework and want vanilla JS, your best bet is to attach the stream object to the global window object.
Preview stream
const showWebcamStream = () => {
    navigator.mediaDevices
        .getUserMedia({ audio: true, video: true })
        .then(stream => {
            window.localStream = stream; // ⭠ tack it to the window object

            // grab the <video> object
            const video = document.querySelector("#video-preview");
            video.srcObject = stream;

            // Display stream
            video.onloadedmetadata = () => video.play();
        })
        .catch(err => console.log(err.name, err.message));
};
Now the video will be displayed within the video element (id: #video-preview).
Stop Stream(s)
const hideWebcamStream = () => localStream.getTracks().forEach(track => track.stop());
You should put the mediaRecorder in the window object in order to stop it later.
Record Stream
const startWebcamRecorder = () => {
    // check if localStream is in window and if it is active
    if ("localStream" in window && localStream.active) {
        // save the mediaRecorder to window as well, so it can be stopped independently
        window.mediaRecorder = new MediaRecorder(localStream);
        window.dataChunks = [];

        mediaRecorder.start();
        console.log(mediaRecorder.state);

        mediaRecorder.ondataavailable = e => dataChunks.push(e.data);
    }
};
Stop Recording and Preview the recording
You need another video element to play back your recording (#video-playback).
const stopWebcamRecorder = () => {
    if ("mediaRecorder" in window && mediaRecorder.state === "recording") {
        mediaRecorder.stop();
        console.log(mediaRecorder.state);

        mediaRecorder.onstop = () => {
            let blob = new Blob(dataChunks, { type: "video/mp4;" });
            dataChunks = [];

            let videoURL = window.URL.createObjectURL(blob);
            const videoPlayback = document.getElementById("video-playback");
            videoPlayback.src = videoURL;
        };
    }
};
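For reference, a minimal markup sketch these snippets assume; the element ids match the code above, while the button wiring is just one possible setup, not part of the original answer:
<!-- ids match the querySelector/getElementById calls above -->
<video id="video-preview" autoplay muted playsinline></video>
<video id="video-playback" controls playsinline></video>

<button onclick="showWebcamStream()">Start preview</button>
<button onclick="startWebcamRecorder()">Record</button>
<button onclick="stopWebcamRecorder()">Stop recording</button>
<button onclick="hideWebcamStream()">Stop stream</button>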

starting/stopping MediaRecorder API causes Chrome to crash

I am implementing the MediaRecorder API as a way to record webm blobs for use as segments in a livestream. I have gotten the functionality I need but ran into a problem with Chrome crashing when calling MediaRecorder.stop() and MediaRecorder.start() multiple times at regular intervals.
Here is the recording code:
let Recorder = null;
let segmentBuffer = [];
let recordInterval = null;
let times = 0; // limiter to avoid the crash

function startRecording() {
    Recorder = new MediaRecorder(LocalStream, {
        mimeType: 'video/webm;codecs=opus, vp8',
        audioBitsPerSecond: 50000,
        videoBitsPerSecond: 1000000,
    });

    // error event
    Recorder.onerror = (evt) => {
        console.error(evt.error);
    };

    // push blob data to the segment buffer
    Recorder.ondataavailable = (evt) => {
        segmentBuffer.push(evt.data);
    };

    // start initial recording
    Recorder.start();

    // set stop/start delivery interval every 5 seconds
    recordInterval = setInterval(() => {
        // stop recording
        Recorder.stop();

        // here to prevent the crash
        if (times > 5) {
            clearInterval(recordInterval);
            Recorder = null;
            console.log('end');
            return;
        }
        times++;

        // check if we have segments
        if (segmentBuffer.length) {
            // produce a segment; it is playable (not just a byte stream) thanks to the start/stop
            let webm = segmentBuffer.reduce((a, b) => new Blob([a, b], { type: "video/webm;codecs=opus, vp8" }));
            // reset the buffer
            segmentBuffer = [];
            // handle the blob, i.e. send it to the server
            handleBlob(webm);
        }

        // restart the recorder
        Recorder.start();
    }, 5000);
}
I've also looked at the performance profile and discovered that a new audio and video encoder thread is started for each start/stop. I think this is the main problem, since setting the interval to 10 s instead of 5 s creates fewer encoding threads. The build-up of multiple encoding threads causes Chrome to lag and finally crash after a few passes.
How do I prevent multiple encoding threads from occurring while still being able to start/stop the MediaRecorder? (Start/stop is the only way I have found to get webm files that are playable separately; otherwise each subsequent blob is missing the webm header part.)
It appears that this is a bug in Chrome:
https://bugs.chromium.org/p/chromium/issues/detail?id=1012378&q=mediaRecorder%20thread&can=2
I'm not sure there is anything you can do to fix it.

Stream audio over websocket with low latency and no interruption

I'm working on a project which requires the ability to stream audio from a webpage to other clients. I'm already using websockets and would like to channel the data there.
My current approach uses MediaRecorder, but there is a problem with sampling which causes interruptions. It records 1 s of audio and then sends it to the server, which relays it to the other clients. Is there a way to capture a continuous audio stream and transform it to base64?
Maybe if there were a way to create base64 audio from a MediaStream without delay, it would solve the problem. What do you think?
I would like to keep using websockets; I know there is WebRTC.
Have you ever done something like this? Is this doable?
MediaStream -> MediaRecorder -> base64 -> WebSocket -> Server --> Device 1
                                                              --> Device ..
                                                              --> Device 18
Here is a demo of the current approach; you can try it here: https://jsfiddle.net/8qhvrcbz/
var sendAudio = function(b64) {
    var message = 'var audio = document.createElement(\'audio\');';
    message += 'audio.src = "' + b64 + '";';
    message += 'audio.play().catch(console.error);';
    eval(message);
    console.log(b64);
};

navigator.mediaDevices.getUserMedia({
    audio: true
}).then(function(stream) {
    setInterval(function() {
        var chunks = [];
        var recorder = new MediaRecorder(stream);

        recorder.ondataavailable = function(e) {
            chunks.push(e.data);
        };

        recorder.onstop = function(e) {
            var audioBlob = new Blob(chunks);
            var reader = new FileReader();
            reader.readAsDataURL(audioBlob);
            reader.onloadend = function() {
                var b64 = reader.result;
                b64 = b64.replace('application/octet-stream', 'audio/mpeg');
                sendAudio(b64);
            };
        };

        recorder.start();
        setTimeout(function() {
            recorder.stop();
        }, 1050);
    }, 1000);
});
Websockets are not the best fit here; I solved it by using WebRTC instead.
The websocket solution was obtained by recording 1050 ms instead of 1000 ms; this causes a bit of overlap, but it is still better than hearing gaps.
Although you have solved this through WebRTC, which is the industry-recommended approach, I'd like to share my answer as well.
The problem here is not websockets in general but rather the MediaRecorder API. Instead of using it, one can capture PCM audio and submit the captured array buffers to a web worker or WASM for encoding into MP3 chunks or similar.
const context = new AudioContext();

let leftChannel = [];
let rightChannel = [];
let recordingLength = null;
let bufferSize = 512;
let sampleRate = context.sampleRate;

// audioStream is the MediaStream obtained from getUserMedia({ audio: true })
const audioSource = context.createMediaStreamSource(audioStream);
const scriptNode = context.createScriptProcessor(bufferSize, 1, 1);

audioSource.connect(scriptNode);
scriptNode.connect(context.destination);

scriptNode.onaudioprocess = function(e) {
    // Do something with the data, e.g. convert it to WAV or MP3
};
Based on my experiments, this would give you "real-time" audio. My theory about the MediaRecorder API is that it does some buffering before emitting anything, which causes the observable delay.
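To illustrate the "do something with the data" part, here is a minimal sketch that converts each Float32 buffer to 16-bit PCM and pushes it over a WebSocket; the ws connection, mono capture, and raw-PCM transport are assumptions, not part of the original answer:
// Sketch only: convert the Float32 samples to 16-bit PCM and stream them out.
// Assumes `ws` is an open WebSocket and the server knows the sample rate.
scriptNode.onaudioprocess = function(e) {
    const input = e.inputBuffer.getChannelData(0);      // mono Float32Array
    const pcm = new Int16Array(input.length);
    for (let i = 0; i < input.length; i++) {
        const s = Math.max(-1, Math.min(1, input[i]));  // clamp to [-1, 1]
        pcm[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;       // scale to 16-bit range
    }
    ws.send(pcm.buffer); // one raw PCM chunk (~11.6 ms at 44.1 kHz with bufferSize 512)
};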
