I'm trying to create a simple application that plays stereo/mono music over all 5.1 channels. After studying the Web Audio specification I spent four hours trying to code it, but got nowhere: the audio plays over only two channels. If I set merger.channelCountMode = "explicit" it plays only from the center channel. If I set merger.channelInterpretation = "discrete" it plays only from the left channel. What am I doing wrong? Thank you very much.
My code:
var audio;

function PlaySurround() {
  var context = new AudioContext();
  audio = new Audio();
  audio.src = "a.mp3";
  var source = context.createMediaElementSource(audio);
  context.destination.channelCount = 6;
  audio.currentTime = Math.random() * 200;

  // Create a splitter to separate the audio data into six channels.
  var splitter = context.createChannelSplitter(6);
  splitter.channelCount = 6;
  //splitter.channelInterpretation = "discrete";
  //splitter.channelCountMode = "explicit";
  console.log(splitter);

  // Connect the source to the splitter (usually this is done with the last audio node before the context destination).
  source.connect(splitter);

  // Create one gain node per output channel.
  var panLeft = context.createGain();
  var panRight = context.createGain();
  var panLeftSurround = context.createGain();
  var panRightSurround = context.createGain();
  var panCenter = context.createGain();
  var panSubwoofer = context.createGain();

  // Connect the splitter channels to the gain nodes we've just created.
  splitter.connect(panLeft, 0);
  splitter.connect(panRight, 1);
  splitter.connect(panCenter, 2);
  splitter.connect(panSubwoofer, 3);
  splitter.connect(panLeftSurround, 4);
  splitter.connect(panRightSurround, 5);

  panLeft.gain.value = 1;
  panRight.gain.value = 1;
  panLeftSurround.gain.value = 1;
  panRightSurround.gain.value = 1;
  panCenter.gain.value = 1;
  panSubwoofer.gain.value = 1;

  // Create a merger node to bring the signals back together.
  var merger = context.createChannelMerger(6);
  merger.channelCount = 6;
  //merger.channelInterpretation = "discrete";
  merger.channelCountMode = "explicit";
  console.log(merger);

  panLeft.connect(merger, 0, 0);
  panRight.connect(merger, 0, 1);
  panCenter.connect(merger, 0, 2);
  panSubwoofer.connect(merger, 0, 3);
  panLeftSurround.connect(merger, 0, 4);
  panRightSurround.connect(merger, 0, 5);

  // Connect the merger node to the final audio destination (your speakers).
  merger.connect(context.destination);

  source.mediaElement.play();
}
I'm working on a project in which I'd like to:
Load a video with JS and display it on the canvas.
Use filters to alter the appearance of the canvas (and therefore the video).
Use the MediaStream captureStream() method and a MediaRecorder object to record the surface of the canvas and the audio of the original video.
Play the stream of both the canvas and the audio in an HTML video element.
I've been able to display the canvas recording in a video element by tweaking this WebRTC demo code: https://webrtc.github.io/samples/src/content/capture/canvas-record/
That said, I can't figure out how to record the video's audio alongside the canvas. Is it possible to create a MediaStream containing MediaStreamTrack instances from two different sources/elements?
According to the MediaStream API's specs there should theoretically be some way to accomplish this:
https://w3c.github.io/mediacapture-main/#introduction
"The two main components in the MediaStream API are the MediaStreamTrack and MediaStream interfaces. The MediaStreamTrack object represents media of a single type that originates from one media source in the User Agent, e.g. video produced by a web camera. A MediaStream is used to group several MediaStreamTrack objects into one unit that can be recorded or rendered in a media element."
Is it possible to create a MediaStream containing MediaStreamTrack instances from two different sources/elements?
Yes, you can do it using the MediaStream.addTrack() method, or new MediaStream([track1, track2]).
The OP already knows how to do all of this, but here is a reminder for future readers:
To get a video stream track from a <canvas>, you can call the canvas.captureStream(framerate) method.
To get an audio stream track from a <video> element, you can use the Web Audio API and its createMediaStreamDestination method.
This will return a MediaStreamAudioDestinationNode (dest) containing our audio stream. You'll then have to connect a MediaElementAudioSourceNode, created from your <video> element, to this dest.
If you need to add more audio tracks to this stream, you should connect all these sources to dest.
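For instance, here is a minimal sketch of the wiring just described (videoElement stands in for your <video> element; the complete example further down does the same thing):
var audioCtx = new AudioContext();
var dest = audioCtx.createMediaStreamDestination();
var sourceNode = audioCtx.createMediaElementSource(videoElement);
sourceNode.connect(dest); // route the element's audio into dest.stream
sourceNode.connect(audioCtx.destination); // optional: keep hearing the audio through the speakers
var audioStream = dest.stream; // a MediaStream holding the audio track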
Now that we've got two streams, one for the <canvas> video and one for the audio, we can either add the audio track to the canvas stream before we initialize the recorder:
canvasStream.addTrack(audioStream.getAudioTracks()[0]);
const recorder = new MediaRecorder(canvasStream)
or we can create a third MediaStream object from these two tracks:
const [videoTrack] = canvasStream.getVideoTracks();
const [audioTrack] = audioStream.getAudioTracks();
const recordedStream = new MediaStream([videoTrack, audioTrack]);
const recorder = new MediaRecorder(recordedStream);
Here is a complete example:
var btn = document.querySelector("button"),
  canvas,
  cStream,
  aStream,
  vid,
  recorder,
  analyser,
  dataArray,
  bufferLength,
  chunks = [];
function clickHandler() {
  btn.textContent = 'stop recording';
  if (!aStream) {
    initAudioStream();
  }
  cStream = canvas.captureStream(30);
  cStream.addTrack(aStream.getAudioTracks()[0]);
  recorder = new MediaRecorder(cStream);
  recorder.start();
  recorder.ondataavailable = saveChunks;
  recorder.onstop = exportStream;
  btn.onclick = stopRecording;
}
function exportStream(e) {
  if (chunks.length) {
    var blob = new Blob(chunks, { type: chunks[0].type });
    var vidURL = URL.createObjectURL(blob);
    var vid = document.createElement('video');
    vid.controls = true;
    vid.src = vidURL;
    vid.onended = function() {
      URL.revokeObjectURL(vidURL);
    };
    document.body.insertBefore(vid, canvas);
  } else {
    document.body.insertBefore(document.createTextNode('no data saved'), canvas);
  }
}
function saveChunks(e) {
  e.data.size && chunks.push(e.data);
}
function stopRecording() {
  vid.pause();
  btn.remove();
  recorder.stop();
}
function initAudioStream() {
  var audioCtx = new AudioContext();
  // create a stream from our AudioContext
  var dest = audioCtx.createMediaStreamDestination();
  aStream = dest.stream;
  // connect our video element's output to the stream
  var sourceNode = audioCtx.createMediaElementSource(vid);
  sourceNode.connect(dest);
  // start the video
  vid.play();
  // just for the fancy canvas drawings
  analyser = audioCtx.createAnalyser();
  sourceNode.connect(analyser);
  analyser.fftSize = 2048;
  bufferLength = analyser.frequencyBinCount;
  dataArray = new Uint8Array(bufferLength);
  analyser.getByteTimeDomainData(dataArray);
  // output to our headphones
  sourceNode.connect(audioCtx.destination);
  startCanvasAnim();
}
function enableButton() {
  vid.oncanplay = null;
  btn.onclick = clickHandler;
  btn.disabled = false;
}
var loadVideo = function() {
  vid = document.createElement('video');
  vid.crossOrigin = 'anonymous';
  vid.oncanplay = enableButton;
  vid.src = 'https://dl.dropboxusercontent.com/s/bch2j17v6ny4ako/movie720p.mp4';
};
function startCanvasAnim() {
  // from MDN https://developer.mozilla.org/en/docs/Web/API/AnalyserNode#Examples
  canvas = Object.assign(document.createElement("canvas"), { width: 500, height: 200 });
  document.body.prepend(canvas);
  var canvasCtx = canvas.getContext('2d');
  canvasCtx.fillStyle = 'rgb(200, 200, 200)';
  canvasCtx.lineWidth = 2;
  canvasCtx.strokeStyle = 'rgb(0, 0, 0)';
  var draw = function() {
    requestAnimationFrame(draw);
    analyser.getByteTimeDomainData(dataArray);
    canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
    canvasCtx.beginPath();
    var sliceWidth = canvas.width * 1.0 / bufferLength;
    var x = 0;
    for (var i = 0; i < bufferLength; i++) {
      var v = dataArray[i] / 128.0;
      var y = v * canvas.height / 2;
      if (i === 0) {
        canvasCtx.moveTo(x, y);
      } else {
        canvasCtx.lineTo(x, y);
      }
      x += sliceWidth;
    }
    canvasCtx.lineTo(canvas.width, canvas.height / 2);
    canvasCtx.stroke();
  };
  draw();
}
loadVideo();
button { vertical-align: top }
<button disabled>record</button>
Kaiido's demo is brilliant. For those just looking for the tl;dr code to add an audio stream to their existing canvas stream:
let videoOrAudioElement = /* your audio source element */;
// get the audio track:
let ctx = new AudioContext();
let dest = ctx.createMediaStreamDestination();
let sourceNode = ctx.createMediaElementSource(videoOrAudioElement);
sourceNode.connect(dest);
sourceNode.connect(ctx.destination);
let audioTrack = dest.stream.getAudioTracks()[0];
// add it to your canvas stream:
canvasStream.addTrack(audioTrack);
// use your canvas stream like you would normally:
let recorder = new MediaRecorder(canvasStream);
// ...
I want to get the frequencies of an audio file with JS in non-real-time, e.g. before the file is played, and store them in an array.
I used this code:
var context = new AudioContext();
src = context.createMediaElementSource(source);
analyser = context.createAnalyser();
var listen = context.createGain();
src.connect(listen);
listen.connect(analyser);
analyser.connect(context.destination);
analyser.fftSize = 2 ** 12;
var frequencyBins = analyser.fftSize / 2;
var bufferLength = analyser.frequencyBinCount;
console.log(bufferLength);
dataArray = new Float32Array(bufferLength);
// dataArray = new Uint8Array(bufferLength);
var scale = bufferLength/WIDTH;
And then in an animation frame,
dataArraya = new Float32Array(2048);
analyser.getFloatFrequencyData(dataArraya)
This works great in real time, but I'd like to get the audio data beforehand. How can I do that?
Let's say I have a source node that's connected to the destination node.
Even if the audio is mono, I want to be able to control each ear's volume independently, just as I can when I have stereo audio with splitter and merger nodes.
I already tried using splitter and merger nodes on the mono source node, but the right channel comes out empty.
Example for stereo:
var audioCtx = new AudioContext();
var source = audioCtx.createMediaElementSource(myAudio);
var gainNodeL = audioCtx.createGain();
var gainNodeR = audioCtx.createGain();
var splitter = audioCtx.createChannelSplitter(2);
var merger = audioCtx.createChannelMerger(2);
source.connect(splitter);
splitter.connect(gainNodeL, 0);
splitter.connect(gainNodeR, 1);
gainNodeL.connect(merger, 0, 0);
gainNodeR.connect(merger, 0, 1);
merger.connect(audioCtx.createMediaStreamDestination());
When I do this with mono audio, the right channel comes out empty.
If a signal is mono (in other words, its channelCount is 1), a ChannelSplitterNode is not necessary. I modified the example a bit; it now splits the mono signal of an oscillator.
var audioCtx = new AudioContext();
var oscillator = audioCtx.createOscillator();
var gainNodeL = audioCtx.createGain();
var gainNodeR = audioCtx.createGain();
var merger = audioCtx.createChannelMerger(2);
oscillator.connect(gainNodeL);
oscillator.connect(gainNodeR);
gainNodeL.connect(merger, 0, 0);
gainNodeR.connect(merger, 0, 1);
merger.connect(audioCtx.destination);
oscillator.start();
function left() {
  gainNodeL.gain.value = 1;
  gainNodeR.gain.value = 0;
}
function right() {
  gainNodeL.gain.value = 0;
  gainNodeR.gain.value = 1;
}
function center() {
  gainNodeL.gain.value = 1;
  gainNodeR.gain.value = 1;
}
I'm trying to convolve a mono impulse with a stereo audio file using the Web Audio API. The problem is that instead of getting true stereo output, I'm getting what looks and sounds like the same track duplicated on both channels. Here's my code:
var context = new AudioContext();
var source = context.createBufferSource();
source.buffer = BUFFERS.user; // stereo file
var splitter = context.createChannelSplitter(2);
var convolverL = context.createConvolver();
convolverL.normalize = false;
convolverL.buffer = BUFFERS.impulse; // mono impulse
var convolverR = context.createConvolver();
convolverR.normalize = false;
convolverR.buffer = BUFFERS.impulse; // same mono impulse
var merger = context.createChannelMerger(2);
var gain = context.createGain();
gain.gain.value = 0.75;
// make connections
source.connect(splitter);
splitter.connect(convolverL, 0);
splitter.connect(convolverR, 1);
convolverL.connect(merger, 0, 0);
convolverR.connect(merger, 0, 1);
merger.connect(gain);
gain.connect(context.destination);
source.start(0);