WebRTC Multiple Peer connection video is not working - javascript

I found a way to create multiple peer connections, but I can't get the video to work, even though no error is shown...
UPDATE: DEMO - but this demo doesn't allow localStream to work, so try index.html in your own browser.
First, let's say we have this HTML file:
<!-- This container is for multiple videos -->
<div class="item-videos">
  <!-- The first video is the starter video -->
  <video id="video1" playsinline autoplay muted></video>
  <!-- Joined videos get appended here -->
</div>
<div>
  <button id="start"> Start </button>
  <button id="join"> Join </button>
  <button id="hangup"> Hang Up </button>
</div>
First, I take the initial inputs in script.js:
let containers = document.querySelector('.item-videos');
const startButton = document.querySelector('#start');
const joinButton = document.querySelector('#join');
const video1 = document.querySelector('video#video1');

let localStream;

// These are the arrays of RTCPeerConnections.
let pcLocals = [];
let pcRemotes = [];

const offerOptions = {
  offerToReceiveAudio: 1,
  offerToReceiveVideo: 1
};

const servers = {
  iceServers: [
    {
      urls: ['stun:stun1.l.google.com:19302', 'stun:stun2.l.google.com:19302'],
    },
  ],
  iceCandidatePoolSize: 10,
};
Then, let's say we start the call. When the Start button is clicked, the following code runs:
...
function gotStream(stream) {
  console.log('Received local stream');
  video1.srcObject = stream;
  localStream = stream;
  joinButton.disabled = false;
}

function start() {
  console.log('Requesting local stream');
  startButton.disabled = true;
  navigator.mediaDevices
    .getUserMedia({ audio: true, video: true })
    .then(gotStream)
    .catch(e => console.log('getUserMedia() error: ', e));
}

startButton.addEventListener('click', start);
Now for the Join button: let's say I have let count = 0 and I create a new video element each time the button is clicked. So the code for the join button click is:
let count = 0;

joinButton.addEventListener('click', () => {
  count += 1;

  // Creating the video element
  const addVideo = document.createElement('video');
  addVideo.setAttribute('id', `video${count + 1}`);
  addVideo.setAttribute('class', `try-${count + 1}`);
  // I believe this part is my error - the video may not be set up yet for the RTCPeerConnection functions.
  containers.appendChild(addVideo);
  const videoCall = containers.querySelectorAll('video')[count];

  // Create the RTCPeerConnections and push them into pcLocals and pcRemotes.
  const init_localStreams = new RTCPeerConnection(servers);
  const init_remoteStreams = new RTCPeerConnection(servers);
  pcLocals.push(init_localStreams);
  pcRemotes.push(init_remoteStreams);
  console.log(pcLocals);
  console.log(pcRemotes);

  // Here I'm passing the stream videos into the RTCPeerConnection arrays...
  pcRemotes[count - 1].ontrack = (ev) => {
    function gotRemoteStream(e, video, idx) {
      if (video.srcObject !== e.streams[0]) {
        video.srcObject = e.streams[0];
        console.log(`pc${idx + 1}: received remote stream`);
      }
    }
    gotRemoteStream(ev, videoCall, count - 1);
  };

  // Here I'm passing the tracks of the local stream to each local connection
  localStream.getTracks().forEach((track) => {
    pcLocals[count - 1].addTrack(track, localStream);
  });

  function onAddIceCandidateSuccess() {
    console.log('AddIceCandidate success.');
  }

  function onAddIceCandidateError(error) {
    console.log(`Failed to add ICE candidate: ${error.toString()}`);
  }

  function handleCandidate(candidate, dest, prefix, type) {
    dest.addIceCandidate(candidate)
      .then(onAddIceCandidateSuccess, onAddIceCandidateError);
    console.log(`${prefix}New ${type} ICE candidate: ${candidate ? candidate.candidate : '(null)'}`);
  }

  function iceCallbackRemote(e, local_) {
    handleCandidate(e.candidate, local_, `pc${count}: `, 'remote');
  }

  function iceCallbackLocal(e, remote_) {
    handleCandidate(e.candidate, remote_, `pc${count}: `, 'local');
  }

  pcLocals[count - 1].onicecandidate = (ev) => {
    iceCallbackRemote(ev, pcLocals[count - 1]);
  };

  pcRemotes[count - 1].onicecandidate = (ev) => {
    iceCallbackLocal(ev, pcRemotes[count - 1]);
  };

  function gotDescriptionRemote(desc) {
    pcRemotes[count - 1].setLocalDescription(desc);
    // console.log(`Answer from pc1Remote\n${desc.sdp}`);
    pcLocals[count - 1].setRemoteDescription(desc);
  }

  function gotDescriptionLocal(desc) {
    pcLocals[count - 1].setLocalDescription(desc);
    // console.log(`Answer from pc1Remote\n${desc.sdp}`);
    pcRemotes[count - 1].setRemoteDescription(desc);
    pcRemotes[count - 1].createAnswer().then(gotDescriptionRemote, onCreateSessionDescriptionError);
  }

  function onCreateSessionDescriptionError(error) {
    console.log(`Failed to create session description: ${error.toString()}`);
  }

  pcLocals[count - 1]
    .createOffer(offerOptions)
    .then(gotDescriptionLocal, onCreateSessionDescriptionError);
});
I somewhat doubt whether my video is set up before the RTCPeerConnection operations happen... I don't know where my errors are. I just tried to make multiple peer connections the way it is documented in this WEBRTC TUTORIAL.

I've looked at your CodeSandbox code and found two issues:
The playback of your created video is never started
Your ICE candidates are set on the wrong peer connection
To address the first problem, you need to start playback either with autoplay or by calling addVideo.play(). For the autoplay solution, you can simply add:
addVideo.setAttribute("autoplay", true);
addVideo.setAttribute("playsinline", true);
To address the second problem, you need to change the peer connections passed in the onicecandidate event handlers:
pcLocals[count - 1].onicecandidate = (ev) => {
  // old: iceCallbackRemote(ev, pcLocals[count - 1]);
  // new:
  iceCallbackRemote(ev, pcRemotes[count - 1]);
};
pcRemotes[count - 1].onicecandidate = (ev) => {
  // old: iceCallbackLocal(ev, pcRemotes[count - 1]);
  // new:
  iceCallbackLocal(ev, pcLocals[count - 1]);
};
The ICE candidates need to be exchanged: candidates gathered by the local connection must be passed to the remote one, and vice versa. Before, you added the ICE candidates from the local connection back to the local connection itself, which is why it didn't work. This is also why the connectionState stayed at "connecting" and never changed to "connected": the connection was never fully established and was still waiting for the ICE candidate exchange.
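Put differently, each connection's candidates must always land on the other connection. A minimal sketch of that symmetric exchange (same variables and callbacks as the question; the null check is an addition, since onicecandidate fires one last time with a null candidate to signal end-of-candidates):
function wireCandidates(source, destination) {
  source.onicecandidate = (ev) => {
    if (ev.candidate) {
      // candidates gathered by `source` are added to `destination`
      destination.addIceCandidate(ev.candidate)
        .then(onAddIceCandidateSuccess, onAddIceCandidateError);
    }
  };
}

wireCandidates(pcLocals[count - 1], pcRemotes[count - 1]);
wireCandidates(pcRemotes[count - 1], pcLocals[count - 1]);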

Related

MediaRecorder API gets undefined after X amount of time -- Javascript/ReactJs

I'm trying to record the user's voice in ReactJS using the MediaRecorder browser API.
The declaration of audioRecording is outside of startRecodingAudio() because I need the user to choose when to stop recording, so stopRecordingAudio() must be able to reach audioRecording.stop().
Everything works fine for audio between 00:00 and 00:59 seconds, but when I leave it running for a long time, like 3-6 minutes, audioRecording suddenly becomes undefined.
Here's the code:
// Audio recording
const audioData = [];
let audioRecording;

function startRecodingAudio() {
  // Start recording
  navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
      // Defining the recorder
      audioRecording = new MediaRecorder(stream);
      // Pushing the data to the array as it arrives
      audioRecording.ondataavailable = e => {
        audioData.push(e.data);
      };
      // Start recording
      audioRecording.start();
      console.log('Recording...');
      // When recording stops, save the audio file
      audioRecording.onstop = () => {
        console.log('Stopped recording. Saving audio...');
        setIsRecording(false);
        const audioBlob = new Blob(audioData, { type: 'audio/wav' });
        const audioReader = new FileReader();
        audioReader.readAsDataURL(audioBlob);
        audioReader.onloadend = () => {
          setAudioFile(audioReader.result);
          console.log('Audio set');
        };
      };
    }).catch(err => {
      console.log(err);
    });
}

// Stop audio recording
function stopRecordingAudio() {
  audioRecording.stop();
}
I'm starting and stopping it this way:
<div>
  <div>
    Play/Stop
  </div>
  <div>
    <button type='button' onClick={startRecodingAudio}>Record</button>
    <button type='button' onClick={stopRecordingAudio}>Cancel</button>
    <button type='button' onClick={() => console.log(audioRecording)}>Check State</button>
  </div>
</div>
I started the recording and manually logged the audioRecording variable:
[Screenshot: console output showing the MediaRecorder suddenly becoming undefined.]
This problem also occurs if I set some state after starting the recording, but in that case it doesn't even last one second. Like this:
function startRecodingAudio() {
  // Start recording
  navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
      // Defining the recorder
      audioRecording = new MediaRecorder(stream);
      // Pushing the data to the array as it arrives
      audioRecording.ondataavailable = e => {
        audioData.push(e.data);
      };
      // Start recording
      audioRecording.start();
      setIsRecording(true);
      console.log('Recording...');
      // When recording stops, save the audio file
      audioRecording.onstop = () => {
        console.log('Stopped recording. Saving audio...');
        setIsRecording(false);
        const audioBlob = new Blob(audioData, { type: 'audio/wav' });
        const audioReader = new FileReader();
        audioReader.readAsDataURL(audioBlob);
        audioReader.onloadend = () => {
          setAudioFile(audioReader.result);
          console.log('Audio set');
        };
      };
    }).catch(err => {
      console.log(err);
    });
}
Does anyone have any idea what this could be?
I would really appreciate it.
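One possibility worth checking (an assumption, since the surrounding component isn't shown): if audioData and audioRecording are declared in the body of a React function component, every re-render - for example the one triggered by setIsRecording - re-runs that body and resets audioRecording to undefined. A minimal sketch of keeping the recorder in a ref, which survives re-renders (the component name and handlers here are illustrative, not the question's exact code):
import React, { useRef } from 'react';

function Recorder() {
  // Refs survive re-renders; a plain `let` in the component body is
  // re-initialized every time the component re-renders (e.g. on setState).
  const audioRecordingRef = useRef(null);
  const audioDataRef = useRef([]);

  function startRecordingAudio() {
    navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
      const recorder = new MediaRecorder(stream);
      recorder.ondataavailable = e => audioDataRef.current.push(e.data);
      recorder.start();
      audioRecordingRef.current = recorder; // persists across re-renders
    });
  }

  function stopRecordingAudio() {
    audioRecordingRef.current?.stop();
  }

  return (
    <div>
      <button type='button' onClick={startRecordingAudio}>Record</button>
      <button type='button' onClick={stopRecordingAudio}>Stop</button>
    </div>
  );
}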

webRTC datachannel's onopen event is not firing even with twilio TURN server

I am using a Twilio TURN server for a WebRTC peer connection between two browsers located on different sides of the world, and still the connection does not open.
The log shows that the local and remote descriptions are set on both sides. Audio/video tracks are also pushed and received, but the onopen method on either side's data channel never fires. Below is the code extract.
Create offer code:
async createOffer() {
  this.initiated = true;
  this.conn = new RTCPeerConnection(this.servers);
  if (this.conn) {
    this.conn.ontrack = e => {
      e.streams[0].getTracks().forEach(track => {
        this.calleeStream?.addTrack(track);
        this.logs.push('received track:' + track.label);
      });
    };
    if (this.callerStream) {
      const s = this.callerStream;
      this.callerStream.getTracks().forEach(track => {
        this.conn?.addTrack(track, s);
        this.logs.push('pushed track:' + track.label);
      });
    }
  }
  this.channel = this.conn.createDataChannel('channelX');
  this.channel.onmessage = e => this.logs.push('received =>' + e.data);
  this.channel.onopen = e => {
    this.logs.push('connection OPENED!!!');
    this.enabletestmessage = true;
  };
  this.conn.onicecandidate = async e => {
    if (e.candidate === null && !this.iceCandiSent) {
      this.iceCandiSent = true;
      this.logs.push('new ICE candidate received - reprinting SDP' + JSON.stringify(this.conn?.localDescription));
      await this.dataService.createOffer(this.data.callerid, this.data.calleeid, JSON.stringify(this.conn?.localDescription));
      this.logs.push('offer set in db');
      this.logs.push('waiting for answer...');
    }
  };
  const offer = await this.conn.createOffer();
  await this.conn?.setLocalDescription(offer);
  this.logs.push('local description (offer) set');
}
Create answer code:
async createAnswer(offerSDP: string) {
  this.initiated = true;
  this.conn = new RTCPeerConnection(this.servers);
  if (this.conn) {
    this.conn.ontrack = e => {
      e.streams[0].getTracks().forEach(track => {
        this.callerStream?.addTrack(track);
        this.logs.push('received track:' + track.label);
      });
    };
    if (this.calleeStream) {
      const s = this.calleeStream;
      this.calleeStream.getTracks().forEach(track => {
        this.conn?.addTrack(track, s);
        this.logs.push('pushed track:' + track.label);
      });
    }
  }
  await this.conn.setRemoteDescription(JSON.parse(offerSDP));
  this.logs.push('remote description (offer) set');
  this.conn.onicecandidate = async e => {
    if (e.candidate === null && !this.iceCandiSent) {
      this.iceCandiSent = true;
      this.logs.push('new ICE candidate received - reprinting SDP' + JSON.stringify(this.conn?.localDescription));
      await this.dataService.updateAnswer(this.data.callerid, this.data.calleeid, JSON.stringify(this.conn?.localDescription));
      this.logs.push('answer set in db');
    }
  };
  this.conn.ondatachannel = e => {
    this.channel = e.channel;
    this.channel.onmessage = e => this.logs.push('received =>' + e.data);
    this.channel.onopen = e => {
      this.logs.push('connection RECEIVED!!!');
      this.enabletestmessage = true;
    };
  };
  const answer = await this.conn.createAnswer();
  await this.conn.setLocalDescription(answer);
  this.logs.push('local description (answer) set');
}
Server-side code for retrieving ICE servers from Twilio:
const twilio = require('twilio');
const client = twilio(<MY ACCOUNT SID>,<MY AUTH TOKEN>);
const result = await client.tokens.create();
return result.iceServers; //this is set to this.servers in the code above
Everything works when I run it in two browser windows on my local machine. However, even after implementing TURN, it doesn't work between browsers in Nepal and the USA. The onopen event handlers on the data channels do not fire, even though the local and remote descriptions are set on both sides. What am I missing?
NOTE: signaling is done inside the onicecandidate event handler (the lines that call the dataService createOffer/updateAnswer methods)
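Not a definitive diagnosis, but one thing to examine: the code above uses "vanilla" ICE - it waits for e.candidate === null and ships the whole SDP once. A trickle-ICE variant sends each candidate as it is gathered and adds remote candidates as they arrive, which tends to be more robust across restrictive networks. A minimal sketch, where dataService.sendCandidate and dataService.onRemoteCandidate are hypothetical signaling helpers, not part of the question's code:
this.conn.onicecandidate = e => {
  if (e.candidate) {
    // ship each candidate as soon as it is gathered (trickle ICE)
    this.dataService.sendCandidate( // hypothetical helper
      this.data.callerid, this.data.calleeid, JSON.stringify(e.candidate));
  }
};

// on the receiving side, as candidates arrive over signaling:
this.dataService.onRemoteCandidate(async json => { // hypothetical subscription
  try {
    await this.conn?.addIceCandidate(JSON.parse(json));
  } catch (err) {
    this.logs.push('addIceCandidate failed: ' + err);
  }
});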

WebRTC manual signal DOMException unknown ufrag

I'm trying to learn about WebRTC, and I have some code that is meant to be run in two different Firefox windows on my laptop, with me copying the relevant info between the developer consoles. But I run into the error DOMException: "Unknown ufrag (..fragsnippet..)" and I can't tell what I'm messing up. Does anyone know what causes this?
I know that signaling servers can be set up to do what I'm doing, but I wanted to break the steps down to understand the order and inspect the result of each step. I've included the source so anyone can see if there are any clear issues. For explanation, each window will be called pc1 or pc2.
Steps:
1. pc1 calls initConnection, which creates the variable localcon of type RTCPeerConnection, using no STUN servers because the example is supposed to use local ICE candidates.
2. Then we copy the locOffer variable, which already contains ICE candidate entries.
3. In window pc2 I execute reactConnection, which makes a localcon for this window, and then I call ingestOffer, providing the copied locOffer from the pc1 window. This sets the remoteDescription for pc2's localcon and then creates an answer that I would like to copy back to pc1.
4. The steps end there, because I run into the DOMException and pc2's localcon changes to iceConnectionState: failed.
Any ideas about how to fix this would be very helpful!
/* New process:
   create a single pc, set up binding of candidate stuff and offer stuff,
   and trigger other parts of the process to continue.
   Hopefully using no servers is going to work. */
'use strict'

var localcon, remotecon, datachannel, pcconstraint, dataconstraint, recchannel, config

export function setup() {
  var servers = null
  pcconstraint = null
  dataconstraint = null
  config = {
    iceServers: [],
    iceTransportPolicy: "all",
    iceCandidatePoolSize: "0"
  }
  window.ingestOffer = ingestOffer
  window.handleAnswer = handleAnswer
  window.desc1 = desc1
  window.setAnswer = setAnswer
  window.initConnection = initConnection
  window.reactConnection = reactConnection
  // answer will be created in desc1
}

function reactConnection() {
  window.localcon = localcon = new RTCPeerConnection(config)
  localcon.onicecandidate = iceCB2
  // bind the different channel events
  localcon.ondatachannel = handleDC
}

function initConnection() {
  window.localcon = localcon = new RTCPeerConnection(config)
  window.datachannel = datachannel = localcon.createDataChannel('sendDataChannel', dataconstraint)
  // bind the ice events and attach the datachannel events
  datachannel.onopen = openDC
  datachannel.onclose = closeDC
  localcon.onicecandidate = iceCB1
  localcon.oniceconnectionstatechange = handleConnectionChange
  window.locCanInfo = []
  // start the offers
  localcon.createOffer().then(desc1, descriptionErr)
}

function handleConnectionChange(event) {
  console.log("connection change", event);
  console.log("peer connection", event.target.iceConnectionState);
}

function openDC() {
  console.log("opening first data channel");
}

function closeDC() {
  console.log("closing first data channel");
}

function handleDC(event) {
  console.log("handling rec channel connect")
  recchannel = event.channel
  recchannel.onmessage = recmessage
  recchannel.onopen = recopen
  recchannel.onclose = recclose
}

function recmessage(event) {
  console.log("rec channel got", event.data);
}

function recopen() {
  console.log("rec channel open");
}

function recclose() {
  console.log("rec channel close");
}

function setAnswer(desc) {
  localcon.setRemoteDescription(new RTCSessionDescription(desc)).then(() => console.log("success"))
}

function ingestCandidate(msg) {
  console.log("ingesting candidate")
  let candidate = new RTCIceCandidate({
    sdpMLineIndex: msg.label,
    candidate: msg.candidate
  })
  localcon.addIceCandidate(candidate)
}

function ingestOffer(desc, ocand) {
  // ?? what part of desc should be provided?
  window.theirOffer = desc
  console.log("setting remote desc")
  localcon.setRemoteDescription(new RTCSessionDescription(desc)).then(() => console.log("ingest success")).catch(e => console.log("ingest error", e))
  // set window.otherCands and use them at add time
  window.otherCands = ocand
  // create an answer
  localcon.createAnswer().then(handleAnswer)
}

function handleAnswer(desc) {
  window.locAnswer = desc
  localcon.setLocalDescription(desc).then(() => console.log("handle answer success")).catch(e => console.log("handle answer error", e))
  // now copy and use in the other window
}

function descriptionErr(e) {
  console.log("error ", e)
}

// no sendData func, because we will do this from the window
function closeChannels() {
  console.log("closing send");
  datachannel.close()
  console.log("closing rec");
  recchannel.close()
  localcon.close()
  remotecon.close()
}

function desc1(desc) {
  // assign desc as the local description and publish it for copying
  localcon.setLocalDescription(desc)
  console.log("local offer", desc.sdp)
  // put the offer in text
  window.locOffer = desc
}

function desc2(desc) {
  // this is using the answer
  console.log("answer text", desc.sdp)
  remotecon.setLocalDescription(desc)
  localcon.setRemoteDescription(desc)
}

function iceCB1(event) {
  // this is the local side dealing with an ice candidate
  console.log("local ice callback", event.candidate);
  if (event.candidate != null && event.candidate.candidate != "") {
    window.locCanInfo.push({
      type: 'candidate',
      label: event.candidate.sdpMLineIndex,
      id: event.candidate.sdpMid,
      candidate: event.candidate.candidate
    })
    //localcon.addIceCandidate(event.candidate)
  }
}

function iceCB2(event) {
  console.log("remote ice callback");
  if (event.candidate) {
    localcon.addIceCandidate(event.candidate).then(iceSuccess, iceFail)
    console.log("remote ice candidate", event.candidate, "and", event.candidate.candidate)
  }
}

function iceSuccess() {
  console.log("addice success");
}

function iceFail(e) {
  console.log("ice fail", e);
}

function onChannelStateChange() {
  let readystate = datachannel.readyState
  if (readystate === "open") {
    console.log("og channel open");
  } else {
    console.log("og channel is not open");
  }
}

function onRecChannelStateChange() {
  console.log("rec channel state is", recchannel.readyState);
}
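One detail that stands out (an observation, not a confirmed diagnosis): in iceCB2, pc2 feeds its own freshly gathered candidates back into itself with localcon.addIceCandidate(event.candidate). addIceCandidate is meant for candidates received from the other window, and a candidate whose ufrag doesn't match the current remote description is exactly the kind of thing that produces Firefox's "Unknown ufrag" DOMException. A sketch of iceCB2 collecting pc2's candidates for manual copying instead, mirroring iceCB1:
function iceCB2(event) {
  console.log("remote ice callback")
  if (event.candidate != null && event.candidate.candidate != "") {
    // collect pc2's own candidates so they can be copied to pc1 by hand;
    // only candidates pasted over *from pc1* should go through addIceCandidate
    window.locCanInfo.push({
      type: 'candidate',
      label: event.candidate.sdpMLineIndex,
      id: event.candidate.sdpMid,
      candidate: event.candidate.candidate
    })
  }
}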

How can I add predefined length to audio recorded from MediaRecorder in Chrome?

I am in the process of replacing RecordRTC with the built-in MediaRecorder for recording audio in Chrome. The recorded audio is then played in the program with the audio API. I am having trouble getting the audio.duration property to work. The docs say:
If the video (audio) is streamed and has no predefined length, "Inf" (Infinity) is returned.
With RecordRTC, I had to use ffmpeg_asm.js to convert the audio from WAV to Ogg. My guess is that somewhere in the process RecordRTC sets the predefined audio length. Is there any way to set the predefined length using MediaRecorder?
This is a Chrome bug.
FF does expose the duration of the recorded media, and if you set the currentTime of the recorded media to more than its actual duration, then the property becomes available in Chrome...
var recorder,
  chunks = [],
  ctx = new AudioContext(),
  aud = document.getElementById('aud');

function exportAudio() {
  var blob = new Blob(chunks);
  aud.src = URL.createObjectURL(blob);
  aud.onloadedmetadata = function() {
    // it should already be available here
    log.textContent = ' duration: ' + aud.duration;
    // handle chrome's bug
    if (aud.duration === Infinity) {
      // set it to bigger than the actual duration
      aud.currentTime = 1e101;
      aud.ontimeupdate = function() {
        this.ontimeupdate = () => {
          return;
        };
        log.textContent += ' after workaround: ' + aud.duration;
        aud.currentTime = 0;
      };
    }
  };
}

function getData() {
  var request = new XMLHttpRequest();
  request.open('GET', 'https://upload.wikimedia.org/wikipedia/commons/4/4b/011229beowulf_grendel.ogg', true);
  request.responseType = 'arraybuffer';
  request.onload = decodeAudio;
  request.send();
}

function decodeAudio(evt) {
  var audioData = this.response;
  ctx.decodeAudioData(audioData, startRecording);
}

function startRecording(buffer) {
  var source = ctx.createBufferSource();
  source.buffer = buffer;
  var dest = ctx.createMediaStreamDestination();
  source.connect(dest);
  recorder = new MediaRecorder(dest.stream);
  recorder.ondataavailable = saveChunks;
  recorder.onstop = exportAudio;
  source.start(0);
  recorder.start();
  log.innerHTML = 'recording...';
  // record only 5 seconds
  setTimeout(function() {
    recorder.stop();
  }, 5000);
}

function saveChunks(evt) {
  if (evt.data.size > 0) {
    chunks.push(evt.data);
  }
}

// we need user-activation
document.getElementById('button').onclick = function(evt) {
  getData();
  this.remove();
};

<button id="button">start</button>
<audio id="aud" controls></audio><span id="log"></span>
So the advice here would be to star the bug report so that Chromium's team takes some time to fix it, even if this workaround can do the trick...
Thanks to @Kaiido for identifying the bug and offering the working fix.
I prepared an npm package called get-blob-duration that you can install to get a nice Promise-wrapped function to do the dirty work.
Usage is as follows:
// Returns Promise<Number>
getBlobDuration(blob).then(function(duration) {
  console.log(duration + ' seconds');
});
Or ECMAScript 6:
// yada yada async
const duration = await getBlobDuration(blob)
console.log(duration + ' seconds')
A bug in Chrome, detected in 2016 but still open today (March 2019), is the root cause of this behavior. Under certain scenarios audioElement.duration will return Infinity.
Chrome bug information here and here
The following code provides a workaround to avoid the bug.
Usage: create your audioElement and call this function a single time, providing a reference to your audioElement. When the returned promise resolves, the audioElement.duration property should contain the right value. (It also fixes the same problem with videoElements.)
/**
 * calculateMediaDuration()
 * Force media element duration calculation.
 * Returns a promise that resolves when the duration is calculated.
 **/
function calculateMediaDuration(media) {
  return new Promise((resolve, reject) => {
    media.onloadedmetadata = function() {
      // set the mediaElement.currentTime to a high value beyond its real duration
      media.currentTime = Number.MAX_SAFE_INTEGER;
      // listen to the time position change
      media.ontimeupdate = function() {
        media.ontimeupdate = function() {};
        // setting the player's currentTime back to 0 can be buggy too, so set it first to .1 sec
        media.currentTime = 0.1;
        media.currentTime = 0;
        // media.duration should now have its correct value; return it
        resolve(media.duration);
      };
    };
  });
}

// USAGE EXAMPLE:
calculateMediaDuration(yourAudioElement).then(() => {
  console.log(yourAudioElement.duration);
});
Thanks @colxi for the actual solution; I've added some validation steps (the solution was working fine but had problems with long audio files).
It took me about 4 hours to get it to work with long audio files; it turns out validation was the fix.
function fixInfinity(media) {
  return new Promise((resolve, reject) => {
    // Wait for the media to load its metadata
    media.onloadedmetadata = () => {
      // Change the current time to trigger ontimeupdate
      media.currentTime = Number.MAX_SAFE_INTEGER;
      // Check if the duration is Infinity, NaN or undefined
      if (ifNull(media)) {
        media.ontimeupdate = () => {
          // If it is no longer null, resolve the promise and send the duration
          if (!ifNull(media)) {
            resolve(media.duration);
          }
          // The second ontimeupdate is a fallback in case the first one fails
          media.ontimeupdate = () => {
            if (!ifNull(media)) {
              resolve(media.duration);
            }
          };
        };
      } else {
        // If the media duration was never Infinity, return it directly
        resolve(media.duration);
      }
    };
  });
}

// Check if the duration is Infinity, NaN or undefined
// (note: `duration === NaN` is always false, so use Number.isNaN)
function ifNull(media) {
  return media.duration === Infinity || Number.isNaN(media.duration) || media.duration === undefined;
}

// USAGE EXAMPLE
// Get the audio player from the HTML
const AudioPlayer = document.getElementById('audio');
const getInfinity = async () => {
  // Await the promise
  await fixInfinity(AudioPlayer).then(val => {
    // Reset the audio's current time
    AudioPlayer.currentTime = 0;
    // Log the duration
    console.log(val);
  });
};
I wrapped the webm-duration-fix package to solve the WebM length problem. It can be used in Node.js and web browsers, and supports video files over 2GB without excessive memory usage.
Usage is as follows:
import fixWebmDuration from 'webm-duration-fix';

const mimeType = 'video/webm;codecs=vp9';
let blobSlice: BlobPart[] = [];

mediaRecorder = new MediaRecorder(stream, { mimeType });

mediaRecorder.ondataavailable = (event: BlobEvent) => {
  blobSlice.push(event.data);
};

mediaRecorder.onstop = async () => {
  // fix the blob; supports fixing webm files larger than 2GB
  const fixBlob = await fixWebmDuration(new Blob([...blobSlice], { type: mimeType }));
  // to write locally, fs.createWriteStream is recommended to reduce memory usage
  const fileWriteStream = fs.createWriteStream(inputPath);
  const blobReadstream = fixBlob.stream();
  const blobReader = blobReadstream.getReader();
  while (true) {
    let { done, value } = await blobReader.read();
    if (done) {
      console.log('write done.');
      fileWriteStream.close();
      break;
    }
    fileWriteStream.write(value);
    value = null;
  }
  blobSlice = [];
};
If you want to modify the video file completely, you can use the webmFixDuration package (the other methods are applied at the display level, only on the video tag; with this method, the complete video file is modified).
webmFixDuration GitHub example:
mediaRecorder.onstop = async () => {
  const duration = Date.now() - startTime;
  const buggyBlob = new Blob(mediaParts, { type: 'video/webm' });
  const fixedBlob = await webmFixDuration(buggyBlob, duration);
  displayResult(fixedBlob);
};

RTCDataChannel for signaling?

I've been reading this article for a signaling solution. The author mentions signaling over RTCDataChannel once connections are established:
Using RTCDataChannel for signaling
A signaling service is required to initiate a WebRTC session.
However, once a connection has been established between two peers, RTCDataChannel could, in theory, take over as the signaling channel. This might reduce latency for signaling — since messages fly direct — and help reduce signaling server bandwidth and processing costs. We don't have a demo, but watch this space!
Why is signaling needed since connections are already established?
Each side initially declares which audio and/or video tracks it is going to send, so that the right number of ports can be opened, and resolutions and formats that work for both peers can be determined. A signaling channel is needed to send the resulting SDP offer/answer, as well as trickle ICE candidates for each of the ports, to the other side.
Once connected, if you leave this setup alone - basically never add tracks to the connection, remove any, or alter track attributes significantly - then you won't need the signaling server again.
If you do change any of those things, however, then a re-negotiation is needed, which is just what it sounds like: another round over the signaling channel, much like the first one.
Reasons to add a track may be a second camera, another video-source (from another participant perhaps), or maybe screen-sharing, something like that.
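As a rough sketch of that re-negotiation round running over a data channel - using today's unprefixed APIs rather than the moz-prefixed ones in the demo below, and assuming a signaling channel sc was opened during the initial negotiation:
// Renegotiate over the already-open data channel whenever tracks change.
pc.onnegotiationneeded = async () => {
  await pc.setLocalDescription(await pc.createOffer());
  sc.send(JSON.stringify({ sdp: pc.localDescription }));
};

// Trickle ICE candidates over the same channel.
pc.onicecandidate = ({ candidate }) => {
  if (candidate) sc.send(JSON.stringify({ candidate }));
};

// Handle offers, answers, and candidates arriving from the other side.
sc.onmessage = async ({ data }) => {
  const msg = JSON.parse(data);
  if (msg.sdp) {
    await pc.setRemoteDescription(msg.sdp);
    if (msg.sdp.type === 'offer') {
      await pc.setLocalDescription(await pc.createAnswer());
      sc.send(JSON.stringify({ sdp: pc.localDescription }));
    }
  } else if (msg.candidate) {
    await pc.addIceCandidate(msg.candidate);
  }
};
Note this sketch ignores glare (both sides offering at once), which a real application would have to resolve.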
The article is correct that a data channel may be used. Here's a demo! (Firefox only for now.)
The article is wrong about a signaling service being required - provided you have another means of discovery - as this demo lamely proves.
The initial connection is chat-only, but either side can push to add video to the mix. The re-negotiation for this is done over a data channel (since there's no signaling server!)
Instructions for using the fiddle:
1. There is no server (since it's a fiddle), so press the Offer button and copy the offer.
2. Paste the offer in the same spot in the same fiddle in another tab or on another machine.
3. Press ENTER, then copy the answer you get and paste it back in the first fiddle.
4. Press ENTER again (not addTrack yet!)
5. You are now connected with two data channels: one for chat and another for signaling.
6. Now press addTrack on either end, and video should show up on the other end.
7. Press addTrack in the other direction, and you should have video going both ways.
var dc = null, sc = null, pc = new mozRTCPeerConnection(), live = false;

pc.onaddstream = e => v2.mozSrcObject = e.stream;
pc.ondatachannel = e => dc ? scInit(sc = e.channel) : dcInit(dc = e.channel);
v2.onloadedmetadata = e => { log("Face time!"); };

function addTrack() {
  navigator.mediaDevices.getUserMedia({ video: true, audio: true })
    .then(stream => pc.addStream(v1.mozSrcObject = stream));
}

pc.onnegotiationneeded = e => {
  pc.createOffer().then(d => pc.setLocalDescription(d)).then(() => {
    if (live) sc.send(JSON.stringify({ "sdp": pc.localDescription }));
  }).catch(failed);
};

function scInit() {
  sc.onmessage = e => {
    var msg = JSON.parse(e.data);
    if (msg.sdp) {
      var desc = new mozRTCSessionDescription(JSON.parse(e.data).sdp);
      if (desc.type == "offer") {
        pc.setRemoteDescription(desc).then(() => pc.createAnswer())
          .then(answer => pc.setLocalDescription(answer)).then(() => {
            sc.send(JSON.stringify({ "sdp": pc.localDescription }));
          }).catch(failed);
      } else {
        pc.setRemoteDescription(desc).catch(failed);
      }
    } else if (msg.candidate) {
      pc.addIceCandidate(new mozRTCIceCandidate(msg.candidate)).catch(failed);
    }
  };
}

function dcInit() {
  dc.onopen = () => { live = true; log("Chat!"); };
  dc.onmessage = e => log(e.data);
}

function createOffer() {
  button.disabled = true;
  dcInit(dc = pc.createDataChannel("chat"));
  scInit(sc = pc.createDataChannel("signaling"));
  pc.createOffer().then(d => pc.setLocalDescription(d)).catch(failed);
  pc.onicecandidate = e => {
    if (e.candidate) return;
    if (!live) {
      offer.value = pc.localDescription.sdp;
      offer.select();
      answer.placeholder = "Paste answer here";
    } else {
      sc.send(JSON.stringify({ "candidate": e.candidate }));
    }
  };
};

offer.onkeypress = e => {
  if (e.keyCode != 13 || pc.signalingState != "stable") return;
  button.disabled = offer.disabled = true;
  var obj = { type: "offer", sdp: offer.value };
  pc.setRemoteDescription(new mozRTCSessionDescription(obj))
    .then(() => pc.createAnswer()).then(d => pc.setLocalDescription(d))
    .catch(failed);
  pc.onicecandidate = e => {
    if (e.candidate) return;
    if (!live) {
      answer.focus();
      answer.value = pc.localDescription.sdp;
      answer.select();
    } else {
      sc.send(JSON.stringify({ "candidate": e.candidate }));
    }
  };
};

answer.onkeypress = e => {
  if (e.keyCode != 13 || pc.signalingState != "have-local-offer") return;
  answer.disabled = true;
  var obj = { type: "answer", sdp: answer.value };
  pc.setRemoteDescription(new mozRTCSessionDescription(obj)).catch(failed);
};

chat.onkeypress = e => {
  if (e.keyCode != 13) return;
  dc.send(chat.value);
  log(chat.value);
  chat.value = "";
};

var log = msg => div.innerHTML += "<p>" + msg + "</p>";
var failed = e => log(e.name + ": " + e.message + " line " + e.lineNumber);

<video id="v1" height="120" width="160" autoplay muted></video>
<video id="v2" height="120" width="160" autoplay></video><br>
<button id="button" onclick="createOffer()">Offer:</button>
<textarea id="offer" placeholder="Paste offer here"></textarea><br>
Answer: <textarea id="answer"></textarea><br>
<button id="button2" onclick="addTrack()">AddTrack</button>
<div id="div"></div><br>
Chat: <input id="chat"><br>
