I'm using the RTCMultiConnection v3 plugin and I can't figure out how to get the current stream re-appended.
Scenario:
UserA opens his cam and the stream is automatically appended to $("#webcam"). The stream is open.
UserA wants to join UserB and does connection.join("UserB") (assuming UserB also has a cam stream). The new stream is now written via html() into $("#webcam"), so UserA's $("#webcam") is overridden by UserB's stream, but UserA's stream is still live. So far so good.
Now I want to re-append UserA's stream, as if UserA had done connection.join("UserA") on his own stream.
I hope someone knows how to do this?
I don't want to reopen the whole stream.
The person who opens a room must NOT join the same room.
You can access the videos (media streams) using the following methods:
1) If you know the "video-id", commonly named "stream-id":
var streamEvent = connection.streamEvents['stream-id'];
var mediaStreamObject = streamEvent.stream;
var videoFreshURL = URL.createObjectURL(mediaStreamObject);
yourVideo.src = videoFreshURL;
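// Note: current browsers have removed URL.createObjectURL(MediaStream);
// if the line above throws, assign the stream directly instead:
yourVideo.srcObject = mediaStreamObject;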
2) If you know the remote user's unique ID, commonly named "userid":
var userPeer = connection.peers['user-id'].peer;
var allIncomingVideos = userPeer.getRemoteStreams();
var firstIncomingVideo = allIncomingVideos[0];
var firstVideoFreshURL = URL.createObjectURL(firstIncomingVideo);
yourVideo.src = firstVideoFreshURL;
3) Store the event's video in your own object:
window.yourOwnGlobalObject = {};
connection.onstream = function(event) {
    window.yourOwnGlobalObject[event.userid] = event;
    document.body.appendChild(event.mediaElement);
};
function getUserXYZVideo(userid) {
    var userVideo = window.yourOwnGlobalObject[userid].mediaElement;
    // var userStream = window.yourOwnGlobalObject[userid].stream;
    return userVideo;
}
I'm using the new v2 Twilio JavaScript SDK to make calls from the browser to other people.
This works fine but I've been asked to add volume controls for the incoming audio stream.
After some research it seems that I need to take the remote stream from the call and feed it through a gain node to reduce the volume.
Unfortunately the result from call.getRemoteStream is always null even when I can hear audio from the call.
I've tested this on latest Chrome and Edge and they have the same behavior.
Is there something else I need to do to access the remote stream?
Code:
// assumes Device is imported from the v2 SDK package,
// e.g. import { Device } from '@twilio/voice-sdk';
async function makeCall(phoneNumber, token)
{
    console.log("isSecureContext: " + window.isSecureContext); // check we can get the stream
    var options = {
        edge: 'ashburn', // use US endpoint
        closeProtection: true // warns the user if they try to close the browser window during an active call
    };
    var device = new Device(token, options);
    const connectionParams = {
        "phoneNumber": phoneNumber
    };
    var activeCall = await device.connect({ params: connectionParams });

    // Set up gain (volume) control for the incoming audio.
    // Note: getRemoteStream always returns null here.
    var remoteStream = activeCall.getRemoteStream();
    if (remoteStream)
    {
        var audioCtx = new AudioContext();
        var source = audioCtx.createMediaStreamSource(remoteStream);
        var gainNode = audioCtx.createGain();
        source.connect(gainNode);
        gainNode.connect(audioCtx.destination);
    }
    else
    {
        console.log("No remote stream on call");
    }
}
The log output is:
isSecureContext: true
then
No remote stream on call
Twilio support gave me the answer: you need to wait until you start receiving volume events before requesting the stream, i.e.:
activeCall.on('volume', (inputVolume, outputVolume) => {
    if (inputVolume > 0)
    {
        var remoteStream = activeCall.getRemoteStream();
        ....
    }
});
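Putting the two pieces together with the gain-node code from the question, a minimal sketch (the guard is there because 'volume' fires repeatedly and the graph should only be wired once; the 0.5 gain value is just an example):
var wired = false;
activeCall.on('volume', (inputVolume, outputVolume) => {
    if (inputVolume > 0 && !wired)
    {
        wired = true;
        var remoteStream = activeCall.getRemoteStream();
        var audioCtx = new AudioContext();
        var source = audioCtx.createMediaStreamSource(remoteStream);
        var gainNode = audioCtx.createGain();
        gainNode.gain.value = 0.5; // e.g. halve the incoming volume
        source.connect(gainNode);
        gainNode.connect(audioCtx.destination);
        // note: the SDK also plays the call audio itself, so if you hear
        // both paths you may need to mute its own output element
    }
});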
MediaRecorder only lets you record one track per media type. So I'm using jQuery to get a list of all audio elements and connect them to the same audio context destination, in order to mix all the audio tracks into one audio stream to later be recorded by MediaRecorder. What I have so far works, but it only captures the first track and none of the others.
Any idea why only one track comes through?
my code:
function gettracks(stream) {
    var i = 0;
    var audioTrack;
    var audioctx = new AudioContext();
    var SourceNode = [];
    var dest = audioctx.createMediaStreamDestination();
    $('audio').each(function() {
        // the audio element id
        var afid = $(this).attr('id');
        var audio = $('#' + afid)[0];
        console.log('audio id ' + afid + ' Audio= ' + audio);
        SourceNode[i] = audioctx.createMediaElementSource(audio);
        // don't forget to connect the wires!
        SourceNode[i].connect(audioctx.destination);
        SourceNode[i].connect(dest);
        audioTrack = dest.stream.getAudioTracks()[0];
        stream.addTrack(audioTrack);
        i++;
    });
}
// from a mousedown event I call:
stream = canvas.captureStream();
video.srcObject = stream;
gettracks(stream);
startRecording();
function startRecording() {
    recorder = new MediaRecorder(stream, {
        mimeType: 'video/webm'
    });
    recorder.start();
}
I would do it like this:
var ac = new AudioContext();
var mediaStreamDestination = new MediaStreamAudioDestinationNode(ac);
document.querySelectorAll("audio").forEach((e) => {
    var mediaElementSource = new MediaElementAudioSourceNode(ac, { mediaElement: e });
    mediaElementSource.connect(mediaStreamDestination);
});
console.log(mediaStreamDestination.stream.getAudioTracks()[0]); // give this to MediaRecorder
Breaking down what the above does:
var ac = new AudioContext();: create an AudioContext, to be able to route audio somewhere other than the default audio output.
var mediaStreamDestination = new MediaStreamAudioDestinationNode(ac);: from this AudioContext, get a special type of destination node that, instead of sending the output of the AudioContext to the audio output device, sends it to a MediaStream holding a single audio track.
document.querySelectorAll("audio").forEach((e) => {: get all the <audio> elements and iterate over them.
var mediaElementSource = new MediaElementAudioSourceNode(ac, { mediaElement: e });: for each of those media elements, capture its output and route it into the AudioContext. This gives you an AudioNode.
mediaElementSource.connect(mediaStreamDestination);: connect the AudioNode carrying the media element's output to the destination that feeds the MediaStream.
mediaStreamDestination.stream.getAudioTracks()[0]: get the first audio MediaStreamTrack from this MediaStream. It only has one anyway.
Now, I suppose you can do something like stream.addTrack(mediaStreamDestination.stream.getAudioTracks()[0]), passing in the audio track above.
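Tying that back to the recording setup in the question, a short sketch (reusing the question's canvas and MediaRecorder flow):
var stream = canvas.captureStream();
stream.addTrack(mediaStreamDestination.stream.getAudioTracks()[0]);
var recorder = new MediaRecorder(stream, { mimeType: 'video/webm' });
recorder.start();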
What if you create a gain node and connect your source nodes to that:
var gain = audioctx.createGain();
gain.connect(dest);
and in the loop
SourceNode[i].connect(gain);
Then your sources flow into a single gain node, which flows to your destination.
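Applied to the question's code, a minimal sketch of the corrected gettracks() (note the merged track is added to the stream once, after the loop, rather than once per element):
function gettracks(stream) {
    var audioctx = new AudioContext();
    var dest = audioctx.createMediaStreamDestination();
    var gain = audioctx.createGain();
    gain.connect(dest);
    // also connect to the speakers if you still want to hear playback:
    // gain.connect(audioctx.destination);
    $('audio').each(function() {
        // route every element into the shared gain node
        audioctx.createMediaElementSource(this).connect(gain);
    });
    // one mixed track for all sources, added once
    stream.addTrack(dest.stream.getAudioTracks()[0]);
}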
I have this code
var englishSubtitle = new chrome.cast.media.Track(2,chrome.cast.media.TrackType.TEXT);
englishSubtitle.trackContentId = 'english.vtt';
englishSubtitle.trackContentType = 'text/vtt';
englishSubtitle.subtype = chrome.cast.media.TextTrackType.CAPTIONS;
englishSubtitle.name = 'English';
englishSubtitle.language = 'en-US';
englishSubtitle.customData = null;
var tracks = englishSubtitle;
var mediaInfo = new chrome.cast.media.MediaInfo(app.streamState_.manifest);
mediaInfo.contentType = app.streamState_.type;
mediaInfo.metadata = new chrome.cast.media.GenericMediaMetadata();
mediaInfo.customData = null;
mediaInfo.streamType = chrome.cast.media.StreamType.BUFFERED;
mediaInfo.textTrackStyle = new chrome.cast.media.TextTrackStyle();
mediaInfo.tracks = tracks;
mediaInfo.metadata.metadataType = chrome.cast.media.MetadataType.GENERIC;
var activeTrackIds = [2];
var request = new chrome.cast.media.LoadRequest(mediaInfo);
request.autoplay = true;
request.currentTime = 0;
request.activeTrackIds = activeTrackIds;
session.loadMedia(request,onMediaDiscovered.bind( this, 'loadedMedia'), onMediaError);
I want to show subtitles on the Chromecast. When I set the active tracks on the request, I receive an error:
Object {code: "session_error", description: "INVALID_PARAMS", details: Object}
The subtitle doesn't show and the video doesn't play at all, because of that error.
Am I doing something wrong?
tracks should be an array when you set
mediaInfo.tracks = tracks;
In your case, you should try
var tracks = [englishSubtitle];
and as was said earlier, use SUBTITLES instead of CAPTIONS. Finally, make sure CORS headers are present on your web server, even if you are using MP4.
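Applied to the code in the question, the corrected portion would look something like this (a sketch; only the changed lines are shown):
englishSubtitle.subtype = chrome.cast.media.TextTrackType.SUBTITLES; // SUBTITLES instead of CAPTIONS
var tracks = [englishSubtitle]; // wrap the track in an array
mediaInfo.tracks = tracks;
var request = new chrome.cast.media.LoadRequest(mediaInfo);
request.activeTrackIds = [2]; // must match the trackId passed to the Track constructor
session.loadMedia(request, onMediaDiscovered.bind(this, 'loadedMedia'), onMediaError);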
tracks should be stored inside an array
https://developers.google.com/cast/docs/reference/chrome/chrome.cast.media.MediaInfo#tracks
Array of non-null chrome.cast.media.Track
Array of Track objects.
mediaInfo.tracks = [englishSubtitle, frenchSubtitle, germanSubtitle]
I've created a simple javascript wrapper for the chromecast SDK:
https://github.com/Fenny/ChromecastJS
It might be worth checking out if you stumble upon more problems. Good luck!
I am attempting to use a ChannelSplitterNode to send an audio signal both into a ChannelMergerNode and to the destination, and then to use the ChannelMergerNode to merge two different audio signals (one from the split source, one from the microphone via getUserMedia) into a recorder using Recorder.js.
I keep getting the following error: "Uncaught SyntaxError: An invalid or illegal string was specified."
The error is at the following line of code:
audioSource.splitter.connect(merger);
Where audioSource is an instance of ThreeAudio.Source from the library ThreeAudio.js, splitter is a channel splitter I instantiated myself by modifying the prototype, and merger is my merger node. The code that precedes it is:
merger = context.createChannelMerger(2);
userInput.connect(merger);
Where userInput is the stream from the user's microphone. That one connects without throwing an error. Sound is getting from the audioSource to the destination (I can hear it), so it doesn't seem like the splitter is necessarily wrong - I just can't seem to connect it.
Does anyone have any insight?
I was struggling to understand the ChannelSplitterNode and ChannelMergerNode API. I finally found the missing part: the 2nd and 3rd optional parameters of the connect() method, the output and input channel indices.
connect(destinationNode: AudioNode, output?: number, input?: number): AudioNode;
When using the connect() method with splitter or merger nodes, specify the output/input channel index. This is how you split and merge the audio data.
You can see in this example how I load audio data, split it into 2 channels, and control the left/right output. Notice the 2nd and 3rd parameter of the connect() method:
const audioUrl = "https://s3-us-west-2.amazonaws.com/s.cdpn.io/858/outfoxing.mp3";
const audioElement = new Audio(audioUrl);
audioElement.crossOrigin = "anonymous"; // cross-origin - if file is stored on remote server
const audioContext = new AudioContext();
const audioSource = audioContext.createMediaElementSource(audioElement);
const volumeNodeL = new GainNode(audioContext);
const volumeNodeR = new GainNode(audioContext);
volumeNodeL.gain.value = 2;
volumeNodeR.gain.value = 2;
const channelsCount = 2; // or read from: 'audioSource.channelCount'
const splitterNode = new ChannelSplitterNode(audioContext, { numberOfOutputs: channelsCount });
const mergerNode = new ChannelMergerNode(audioContext, { numberOfInputs: channelsCount });
audioSource.connect(splitterNode);
splitterNode.connect(volumeNodeL, 0); // connect OUTPUT channel 0
splitterNode.connect(volumeNodeR, 1); // connect OUTPUT channel 1
volumeNodeL.connect(mergerNode, 0, 0); // connect INPUT channel 0
volumeNodeR.connect(mergerNode, 0, 1); // connect INPUT channel 1
mergerNode.connect(audioContext.destination);
let isPlaying;
function playPause() {
    // check if context is in suspended state (autoplay policy)
    if (audioContext.state === 'suspended') {
        audioContext.resume();
    }
    isPlaying = !isPlaying;
    if (isPlaying) {
        audioElement.play();
    } else {
        audioElement.pause();
    }
}
function setBalance(val) {
    volumeNodeL.gain.value = 1 - val;
    volumeNodeR.gain.value = 1 + val;
}
<h3>Try using headphones</h3>
<button onclick="playPause()">play/pause</button>
<br><br>
<button onclick="setBalance(-1)">Left</button>
<button onclick="setBalance(0)">Center</button>
<button onclick="setBalance(+1)">Right</button>
P.S.: The audio track isn't a real stereo track, but a left and right copy of the same mono playback. You can try this example with a real stereo track for a real balance effect.
Here's some working splitter/merger code that creates a ping-pong delay - that is, it sets up separate delays on the L and R channels of a stereo signal, and crosses over the feedback. This is from my input effects demo on webaudiodemos.appspot.com (code on github).
var merger = context.createChannelMerger(2);
var leftDelay = context.createDelay();
var rightDelay = context.createDelay();
var leftFeedback = context.createGain();
var rightFeedback = context.createGain();
var splitter = context.createChannelSplitter(2);
// Split the stereo signal.
splitter.connect( leftDelay, 0 );
// If the signal is dual copies of a mono signal, we don't want the right channel -
// it will just sound like a mono delay. If it was a real stereo signal, we do want
// it to just mirror the channels.
if (isTrueStereo)
splitter.connect( rightDelay, 1 );
leftDelay.delayTime.value = delayTime;
rightDelay.delayTime.value = delayTime;
leftFeedback.gain.value = feedback;
rightFeedback.gain.value = feedback;
// Connect the routing - left bounces to right, right bounces to left.
leftDelay.connect(leftFeedback);
leftFeedback.connect(rightDelay);
rightDelay.connect(rightFeedback);
rightFeedback.connect(leftDelay);
// Re-merge the two delay channels into stereo L/R
leftFeedback.connect(merger, 0, 0);
rightFeedback.connect(merger, 0, 1);
// Now connect your input to "splitter", and connect "merger" to your output destination.
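// For example (a sketch; `input` stands for whatever node feeds the effect,
// such as a MediaStreamAudioSourceNode for live input):
input.connect(splitter);
merger.connect(context.destination);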
I've been searching for a solution to this problem for nearly two days now.
I have a Web Audio API app that captures the microphone input. In one ScriptProcessorNode I'm windowing the signal with a Hanning window, which works fine when the audio chain looks like this:
source -> windowScriptProcessorNode -> audioContext.destination
Then I wanted to add another script processor to the chain, like this:
source -> windowScriptProcessorNode -> otherScriptProcessorNode -> audioContext.destination
but at the inputBuffer of the otherScriptProcessorNode there are just zeros instead of the signal from the windowScriptProcessorNode.
Here is some code:
var audioContext = new AudioContext();
// get microphone input via getUserMedia
navigator.getUserMedia({audio: true}, function(stream) {
    // set up source
    var audioSource = audioContext.createMediaStreamSource(stream);
    audioSource.buffer = stream;

    // set up Hanning-window script processor node
    var windowScriptProcessorNode = audioContext.createScriptProcessor(BLOCKLENGTH, 1, 1);
    windowScriptProcessorNode.onaudioprocess = function(e) {
        var windowNodeInput = e.inputBuffer.getChannelData(0);
        var windowNodeOutput = e.outputBuffer.getChannelData(0);
        if (windowfunction == true) {
            windowNodeOutput.set(calc.applyDspWindowFunction(windowNodeInput));
        } else {
            windowNodeOutput.set(windowNodeInput);
        }
    };

    // some other script processor node, just passing the signal through
    var otherScriptProcessorNode = audioContext.createScriptProcessor(BLOCKLENGTH, 1, 1);
    otherScriptProcessorNode.onaudioprocess = function(e) {
        var otherNodeInput = e.inputBuffer.getChannelData(0);
        var otherNodeOutput = e.outputBuffer.getChannelData(0);
        otherNodeOutput.set(otherNodeInput);
    };

    // this connection works fine!
    audioSource.connect(windowScriptProcessorNode);
    windowScriptProcessorNode.connect(audioContext.destination);

    /* // this connection does NOT work
    audioSource.connect(windowScriptProcessorNode);
    windowScriptProcessorNode.connect(otherScriptProcessorNode);
    otherScriptProcessorNode.connect(audioContext.destination);
    */
}, function(err) {
    // getUserMedia requires an error callback
    console.error(err);
});