unmute-mute and stop video in videocall - javascript

This code is not working correctly: when I mute the video/audio, it mutes on user1's screen, but on user2's screen it does not stop. The same problem occurs with the mic. Please help me out.
I tried it out on the localhost only. Please tell if this problem occurs on localhost only or will occur in deployed web applications also.
// --- Socket / DOM / PeerJS setup -------------------------------------------
// Socket.IO connection to the signalling server that served this page.
const socket = io("/");
// Chat UI elements.
const chatInputBox = document.getElementById("chat_message");
const all_messages = document.getElementById("all_messages");
const main__chat__window = document.getElementById("main__chat__window");
// Container that holds every participant's <video> element.
const videotable = document.getElementById("video-table");
// Local preview element for our own camera.
const myVideo = document.createElement("video");
myVideo.muted = true;// mute local playback so the user does not hear their own voice
// PeerJS client; the peer broker is mounted at /peerjs on port 3000.
// NOTE(review): the hard-coded port will need to change for a deployed app.
var peer = new Peer(undefined, {
path: "/peerjs",
host: "/",
port: "3000",
});
// The local camera/mic stream; assigned once getUserMedia resolves below.
let myVideoStream;
// Map of remote peer id -> outgoing MediaConnection, used on disconnect.
const peers = {};
// Legacy prefixed getUserMedia shim (used by the fallback "call" handler).
var getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
//setting initial controls of video and passing them as constraints
const constraints = {
'video': true,
'audio': true
}
// Acquire the local camera + microphone, then wire up everything that
// depends on having a live local stream.
navigator.mediaDevices
.getUserMedia(constraints)
.then((stream) => {
myVideoStream = stream;
addVideoStream(myVideo,myVideoStream);//show our own preview tile
//answering to calls
// NOTE(review): a second "call" handler is also registered at top level
// further down; both fire for every incoming call and each answers once.
peer.on("call", (call) => {
call.answer(myVideoStream);
const video = document.createElement("video");
call.on("stream", (userVideoStream) => {
addVideoStream(video, userVideoStream);// Show stream in some video/canvas element.
});
});
socket.on("user_connected", (userId) => {//receiving the new user's id
connectToNewUser(userId, stream);//call function with id and stream
});
//adding event for messages of chat
// 13 = Enter key
document.addEventListener("keydown", (e) => {
if (e.which === 13 && chatInputBox.value != "") {
socket.emit("message", chatInputBox.value);
chatInputBox.value = "";
}
});
//adding text to chat window
socket.on("createMessage", (msg) => {
//console.log(msg);
let li = document.createElement("li");
li.innerHTML = msg;
all_messages.append(li);
main__chat__window.scrollTop = main__chat__window.scrollHeight;//scrolled to latest message
});
});
//For disconnecting user
// Tear down our outgoing call to a peer that left the room.
socket.on('user_disconnected', (userId) => {
  const connection = peers[userId];
  if (connection) {
    connection.close();
  }
});
// Fallback answer handler (registered before getUserMedia resolves).
//
// BUG FIX: this handler used to answer every incoming call with a brand-new
// stream obtained from the legacy getUserMedia shim. The mute / stop-video
// buttons toggle track.enabled on `myVideoStream`, so the remote peer kept
// receiving the untouched second stream and never saw the mute take effect.
// Answer with `myVideoStream` whenever it is available, and only fall back
// to acquiring a fresh stream when a call arrives before our camera is ready.
peer.on("call", function (call) {
  const answerWith = (stream) => {
    call.answer(stream); // Answer the call with an A/V stream.
    const video = document.createElement("video");
    call.on("stream", function (remoteStream) {
      addVideoStream(video, remoteStream); // Show stream in some video/canvas element.
    });
  };
  if (myVideoStream) {
    answerWith(myVideoStream);
    return;
  }
  getUserMedia(constraints,
    answerWith,
    function (err) {
      console.log("Failed to get local stream", err);
    }
  );
});
// Fired once the PeerServer connection is up and we have our peer id;
// announce ourselves to the room so other clients can call us.
peer.on("open", function (id) {
  socket.emit("join_room", ROOM_ID, id);
});
/**
 * Enumerate media devices and keep only those matching `type`
 * (e.g. 'videoinput', 'audioinput', 'audiooutput').
 */
async function getConnectedDevices(type) {
  const all = await navigator.mediaDevices.enumerateDevices();
  const matching = [];
  for (const device of all) {
    if (device.kind === type) {
      matching.push(device);
    }
  }
  return matching;
}
// Open the requested camera (by deviceId) with echoCancellation for better audio.
async function openCamera(cameraId) {
  // BUG FIX: the original ignored `cameraId` entirely and requested audio
  // only, so this "openCamera" helper never actually opened a camera.
  const constraints = {
    'video': cameraId ? { 'deviceId': { 'exact': cameraId } } : true,
    'audio': { 'echoCancellation': true }
  }
  return navigator.mediaDevices.getUserMedia(constraints);
}
// BUG FIX: getConnectedDevices() and openCamera() are async, so the original
// code checked `Promise.length` (always undefined) and never opened a camera.
// Await both inside an async IIFE so the checks run on real values.
(async () => {
  const cameras = await getConnectedDevices('videoinput');
  if (cameras && cameras.length > 0) {
    const stream = await openCamera(cameras[0].deviceId);
  }
})().catch((err) => {
  console.error("Unable to open default camera:", err);
});
/**
 * Place an outgoing call to a newly joined user and wire up its lifecycle:
 * render the remote stream when it arrives, drop the element when the call
 * closes, and remember the connection so it can be shut down on disconnect.
 */
function connectToNewUser(userId, streams) {
  const outgoingCall = peer.call(userId, streams);
  const remoteVideo = document.createElement("video");
  outgoingCall.on("stream", (remoteStream) => {
    addVideoStream(remoteVideo, remoteStream);
  });
  outgoingCall.on("close", () => {
    // Remove the video element when the call ends.
    remoteVideo.remove();
  });
  peers[userId] = outgoingCall;
};
/**
 * Attach a MediaStream to a <video> element, start playback once metadata is
 * ready, add it to the grid, and resize every tile to an equal width share.
 */
const addVideoStream = (videoEl, stream) => {
  videoEl.srcObject = stream;
  videoEl.addEventListener("loadedmetadata", () => videoEl.play());
  videotable.append(videoEl); // adding video to the front-end grid
  const allVideos = document.getElementsByTagName("video");
  const totalUsers = allVideos.length;
  if (totalUsers > 1) {
    for (const el of allVideos) {
      el.style.width = 100 / totalUsers + "%";
    }
  }
};
// Toggle the local camera track on/off and update the button label to match.
const playStop = () => {
  const videoTrack = myVideoStream.getVideoTracks()[0];
  videoTrack.enabled = !videoTrack.enabled;
  if (videoTrack.enabled) {
    setStopVideo();   // camera is live again -> offer "Pause Video"
  } else {
    setPlayVideo();   // camera is off -> offer "Resume Video"
  }
};
// Toggle the local microphone track and update the button label to match.
const muteUnmute = () => {
  const audioTrack = myVideoStream.getAudioTracks()[0];
  audioTrack.enabled = !audioTrack.enabled;
  if (audioTrack.enabled) {
    setMuteButton();    // mic is live -> offer "Mute"
  } else {
    setUnmuteButton();  // mic is muted -> offer "Unmute"
  }
};
// Button-label helpers: swap the icon + caption inside the control buttons so
// they always describe the *next* action available to the user.

// Camera is currently off -> button offers to resume it.
const setPlayVideo = () => {
  document.getElementById("playPauseVideo").innerHTML = `<i class="unmute fa fa-pause-circle"></i>
<span class="unmute">Resume Video</span>`;
};
// Camera is currently on -> button offers to pause it.
const setStopVideo = () => {
  document.getElementById("playPauseVideo").innerHTML = `<i class=" fa fa-video-camera"></i>
<span class="">Pause Video</span>`;
};
// Mic is currently muted -> button offers to unmute.
const setUnmuteButton = () => {
  document.getElementById("muteButton").innerHTML = `<i class="unmute fa fa-microphone-slash"></i>
<span class="unmute">Unmute</span>`;
};
// Mic is currently live -> button offers to mute.
const setMuteButton = () => {
  document.getElementById("muteButton").innerHTML = `<i class="fa fa-microphone"></i>
<span>Mute</span>`;
};
This code is not working as the video audio muted on the user1 screen but on the next user2 screen it's unable to stop. The same problem is with the mic. Please help me out.
I tried it out on the localhost only. Please tell if this problem occurs on localhost only or will occur in deployed web applications also.

Related

Can't receive video on webrtc when I toggle camera

I'm trying to write a method to toggle camera for turn on/off the camera.
Call Peer
// Place a call to `peerId` with our stream and track it in `myPeer.calls`
// (a custom array bolted onto the Peer instance -- presumably initialized
// elsewhere; TODO confirm), replacing any previous call to the same peer so
// the list holds at most one connection per peer.
const callPeer = (myPeer, peerId, myVideoStream, addToCalls = true) => {
const call = myPeer.call(peerId, myVideoStream);
if (addToCalls) {
let existingCall = myPeer.calls.find((existingCall) => existingCall.peer == peerId);
if (existingCall)
myPeer.calls.splice(myPeer.calls.indexOf(existingCall), 1);
myPeer.calls.push(call);
// Render the remote stream into the pre-existing element tagged with this
// peer's id. Note the handler is only attached when addToCalls is true.
call.on('stream', peerVideoStream => {
let userVideoElement = $("video[data-peer='" + peerId + "']")[0];
userVideoElement.srcObject = peerVideoStream;
});
}
}
Answer Call
// Answer an incoming call with our stream, de-duplicate it into
// `myPeer.calls`, and render the caller's stream into the element tagged
// with their peer id. Uses the outer `myVideoStream`/`myPeer` globals.
const answerCall = (call, addToCalls = true) => {
call.answer(myVideoStream);
if (addToCalls) {
let existingCall = myPeer.calls.find((existingCall) => existingCall.peer == call.peer);
if (existingCall)
myPeer.calls.splice(myPeer.calls.indexOf(existingCall), 1);
myPeer.calls.push(call);
}
call.on('stream', userVideoStream => {
let userVideoElement = $("video[data-peer='" + call.peer + "']")[0];
userVideoElement.srcObject = userVideoStream;
});
}
Toggle Camera
// Replace the current local stream with a fresh one (video on/off per
// `isOpened`), swap it into every tracked peer connection, update our own
// tile, toggle the button state, and notify the room over the WebSocket.
const toggleCamera = async (isOpened, peerId, isMuted) => {
let oldStream = myVideoStream;
myVideoStream = await navigator.mediaDevices.getUserMedia({
video: !isOpened,
audio: true
});
// NOTE(review): removeStream/addStream are legacy RTCPeerConnection APIs and
// swapping streams this way may require renegotiation before the remote side
// sees the change -- consider RTCRtpSender.replaceTrack() instead; verify.
myPeer.calls.forEach((call) => {
call.peerConnection.removeStream(oldStream);
call.peerConnection.addStream(myVideoStream);
});
// Release the old devices so the camera light turns off.
oldStream.getTracks().forEach((track) => {
track.stop();
});
let myVideoElement = document.querySelector("video[data-peer='" + peerId + "']");
myVideoElement.srcObject = myVideoStream;
myVideoElement.muted = true;
if (isOpened)
$("#video").removeClass("bg-primary").attr("data-video", "closed");
else
$("#video").addClass("bg-primary").attr("data-video", "opened");
ws.send(JSON.stringify({
type: isOpened ? "video-off" : "video-on"
}));
// Preserve mute state across the stream swap.
if (isMuted)
myVideoStream.getAudioTracks()[0].enabled = false;
};
When I print the local stream at the peer connection, it seems the local stream is switched, but when I print the remote stream from the other client, it is not changing — so the stream event is not running.
I also tried writing a handler for onaddstream, but that did not work either.

How can I differentiate the sound level of an audio track in a mediastream in JavaScript? [duplicate]

I'm using the Google Cloud API for Speech-to-text, with a NodeJS back-end.
The app needs to be able to listen for voice commands, and transmit them to the back-end as a buffer. For this, I need to send the buffer of the preceding audio when silence is detected.
Any help would be appreciated. Including the js code below
// Legacy prefixed getUserMedia shim, then request microphone access and hand
// the stream to success(). Recording is gated by the `recording` flag that
// the window-level start/stop helpers flip.
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, success, function (e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
var recording = false;
window.startRecording = function () {
recording = true;
};
window.stopRecording = function () {
recording = false;
// window.Stream.end();
};
// Wire the mic stream into a ScriptProcessorNode so raw PCM frames can be
// inspected while `recording` is true.
// NOTE(review): audioContext/context/audioInput/recorder are implicit
// globals, and ScriptProcessorNode is deprecated in favor of AudioWorklet;
// `convertoFloat32ToInt16` is defined elsewhere -- TODO confirm.
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
// the sample rate is in context.sampleRate
audioInput = context.createMediaStreamSource(e);
var bufferSize = 4096;
recorder = context.createScriptProcessor(bufferSize, 1, 1);
recorder.onaudioprocess = function (e) {
if (!recording) return;
console.log('recording');
// Mono channel 0 samples as Float32.
var left = e.inputBuffer.getChannelData(0);
console.log(convertoFloat32ToInt16(left));
};
audioInput.connect(recorder);
recorder.connect(context.destination);
}
I'm not too sure as to what exactly is being asked in the question, so this answer is only intended to give a way to detect silences in an AudioStream.
To detect silence in an AudioStream, you can use an AudioAnalyser node, on which you will call the getByteFrequencyData method at regular intervals, and check whether there were sounds higher than than your expected level for a given time.
You can set the threshold level directly with the minDecibels property of the AnalyserNode.
/**
 * Watch an audio MediaStream and invoke callbacks on silence/sound
 * transitions.
 * @param stream        audio MediaStream to monitor
 * @param onSoundEnd    called once each time quiet has lasted `silence_delay` ms
 * @param onSoundStart  called when sound resumes after a silence
 * @param silence_delay ms of quiet required before declaring silence
 * @param min_decibels  analyser threshold; quieter input reads as all zeros
 */
function detectSilence(
stream,
onSoundEnd = _=>{},
onSoundStart = _=>{},
silence_delay = 500,
min_decibels = -80
) {
const ctx = new AudioContext();
const analyser = ctx.createAnalyser();
const streamNode = ctx.createMediaStreamSource(stream);
streamNode.connect(analyser);
analyser.minDecibels = min_decibels;
const data = new Uint8Array(analyser.frequencyBinCount); // will hold our data
let silence_start = performance.now();
let triggered = false; // trigger only once per silence event
function loop(time) {
requestAnimationFrame(loop); // we'll loop every 60th of a second to check
analyser.getByteFrequencyData(data); // get current data
if (data.some(v => v)) { // if there is data above the given db limit
if(triggered){
triggered = false;
onSoundStart();
}
silence_start = time; // set it to now
}
if (!triggered && time - silence_start > silence_delay) {
onSoundEnd();
triggered = true;
}
}
loop();
}
// Demo callback: fired once the stream has been quiet long enough.
function onSilence() {
  console.log('silence');
}
// Demo callback: fired when sound resumes after a silence.
function onSpeak() {
  console.log('speaking');
}
// Grab the microphone and start monitoring it for silence/sound transitions.
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
detectSilence(stream, onSilence, onSpeak);
// do something else with the stream
})
.catch(console.error);
And as a fiddle since stackSnippets may block gUM.
You can use SpeechRecognition result event to determine when a word or phrase has been recognized, for example, ls, cd, pwd or other commands, pass the .transcript of SpeechRecognitionAlternative to speechSynthesis.speak() where at attached start and end event of SpeechSynthesisUtterance call .start() or .resume() on MediaRecorder object where MediaStream is passed; convert the Blob at dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().
We could alternatively use audiostart or soundstart with audioend or soundend events of SpeechRecognition to record the users' actual voice, though the ends may not be fired consistently in relation to the actual start and end of audio captured by only a standard system microphone.
<!DOCTYPE html>
<html>
<head>
<title>Speech Recognition Recording</title>
</head>
<body>
<input type="button" value="Stop speech command recognition" id="stop">
<script>
// Voice-command capture pipeline: SpeechRecognition transcribes continuously;
// each final transcript is echoed via speechSynthesis, and the utterance's
// start/end events gate a MediaRecorder so only the spoken span is recorded.
// Each recorded chunk is converted to an ArrayBuffer via a Blob URL fetch.
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
const recorder = new MediaRecorder(stream);
const recognition = new webkitSpeechRecognition();
const synthesis = new SpeechSynthesisUtterance();
const handleResult = e => {
// Detach until the current chunk is flushed (re-attached in ondataavailable).
recognition.onresult = null;
console.log(e.results);
const result = e.results[e.results.length - 1];
if (result.isFinal) {
const [{transcript}] = result;
console.log(transcript);
synthesis.text = transcript;
window.speechSynthesis.speak(synthesis);
}
}
// Speaking the transcript starts (or resumes) the recorder...
synthesis.onstart = () => {
if (recorder.state === "inactive") {
recorder.start()
} else {
if (recorder.state === "paused") {
recorder.resume();
}
}
}
// ...and finishing pauses it and flushes the buffered data.
synthesis.onend = () => {
recorder.pause();
recorder.requestData();
}
recorder.ondataavailable = async(e) => {
if (stream.active) {
try {
const blobURL = URL.createObjectURL(e.data);
const request = await fetch(blobURL);
const ab = await request.arrayBuffer();
console.log(blobURL, ab);
recognition.onresult = handleResult;
// URL.revokeObjectURL(blobURL);
} catch (err) {
throw err
}
}
}
recorder.onpause = e => {
console.log("recorder " + recorder.state);
}
recognition.continuous = true;
recognition.interimResults = false;
recognition.maxAlternatives = 1;
recognition.start();
recognition.onend = e => {
console.log("recognition ended, stream.active", stream.active);
if (stream.active) {
console.log(e);
// the service disconnects after a period of time
recognition.start();
}
}
recognition.onresult = handleResult;
stream.oninactive = () => {
console.log("stream ended");
}
// Stop button: abort recognition, stop the recorder, release the mic.
document.getElementById("stop")
.onclick = () => {
console.log("stream.active:", stream.active);
if (stream && stream.active && recognition) {
recognition.abort();
recorder.stop();
for (let track of stream.getTracks()) {
track.stop();
}
console.log("stream.active:", stream.active);
}
}
})
.catch(err => {
console.error(err)
});
</script>
</body>
</html>
plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview
The simplest approach would be to use .pause() and .resume(), .stop() methods of MediaRecorder() to allow user to start, pause, and stop recording audio captured utilizing navigator.mediaDevices.getUserMedia() and convert the resulting Blob to an ArrayBuffer, if that is what the api is expecting to be POSTed to server
<!DOCTYPE html>
<html>
<head>
<title>User Media Recording</title>
</head>
<body>
<input type="button" value="Start/resume recording audio" id="start">
<input type="button" value="Pause recording audio" id="pause">
<input type="button" value="Stop recording audio" id="stop">
<script>
// Minimal record / pause / stop UI around MediaRecorder. Each pause flushes
// the buffered audio (requestData) and converts it to an ArrayBuffer via a
// Blob URL fetch -- the shape a POST-to-server API would consume.
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
const recorder = new MediaRecorder(stream);
recorder.ondataavailable = async(e) => {
if (stream.active) {
try {
const blobURL = URL.createObjectURL(e.data);
const request = await fetch(blobURL);
const ab = await request.arrayBuffer();
// do stuff with `ArrayBuffer` of recorded audio
console.log(blobURL, ab);
// we do not need the `Blob URL`, we can revoke the object
// URL.revokeObjectURL(blobURL);
} catch (err) {
throw err
}
}
}
recorder.onpause = e => {
console.log("recorder " + recorder.state);
recorder.requestData();
}
stream.oninactive = () => {
console.log("stream ended");
}
document.getElementById("start")
.onclick = () => {
if (recorder.state === "inactive") {
recorder.start();
} else {
recorder.resume();
}
console.log("recorder.state:", recorder.state);
}
document.getElementById("pause")
.onclick = () => {
if (recorder.state === "recording") {
recorder.pause();
}
console.log("recorder.state:", recorder.state);
}
// Stop: end the recording, release the mic, and disable start/pause.
document.getElementById("stop")
.onclick = () => {
if (recorder.state === "recording" || recorder.state === "paused") {
recorder.stop();
}
for (let track of stream.getTracks()) {
track.stop();
}
document.getElementById("start").onclick = null;
document.getElementById("pause").onclick = null;
console.log("recorder.state:", recorder.state
, "stream.active", stream.active);
}
})
.catch(err => {
console.error(err)
});
</script>
</body>
</html>
plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview

Webrtc video streaming working properly in localhost but not working in production (using google turn server)

On the local server, video streaming works properly on both ends, but when the network changes, one peer's video is not displayed to the other peer.
I am using google stun server for connection of peer with another network, but then video streaming of one user is not visible
here is my code
I am not able to understand what I am doing wrong here.I am novice in webrtc
<!DOCTYPE html>
<html lang="en">
<body>
{####### variable Initialization #######}
<script>
let webSocket;
let mapPeers = {};
let username = 'user_{{ request.user.id }}';
</script>
<script>
// ICE servers used for every RTCPeerConnection. A STUN server alone only
// discovers public addresses; traversal across restrictive NATs/firewalls
// may additionally require a TURN relay.
const iceConfiguration = {
iceServers: [
{
urls: 'stun:stun.l.google.com:19302',
}
]
}
// Dispatch one signalling message from the room WebSocket. Messages we sent
// ourselves are ignored; 'new-peer' triggers an offer, 'new-offer'/'new-answer'
// complete the SDP exchange, and video-on/off toggles the remote tile.
function webSocketOnMessage(event){
let parsedData = JSON.parse(event.data);
console.log(parsedData);
let peerUserName = parsedData['peer'];
let action = parsedData['action'];
console.log(parsedData);
// Ignore our own broadcasts.
if(username === peerUserName){
return;
}
let receiver_channel_name = parsedData['message']['receiver_channel_name'];
if(action === 'new-peer'){
if (!(peerUserName in mapPeers)){
createOfferer(peerUserName, receiver_channel_name);
return;
}
else {
// Retry after 1s -- presumably to handle a race with teardown of a
// stale entry for the same peer; TODO confirm.
setTimeout(() => {
if (!(peerUserName in mapPeers)){
createOfferer(peerUserName, receiver_channel_name);
return
}
}, 1000);
}
}
if(action === 'new-offer'){
let offer = parsedData['message']['sdp'];
createAnswerer(offer, peerUserName, receiver_channel_name);
return;
}
if(action === 'new-answer'){
let answer = parsedData['message']['sdp'];
let peer = mapPeers[peerUserName][0];
peer.setRemoteDescription(answer);
return;
}
if(action === 'video-on'){
manageVideoEl('on', peerUserName)
return;
}
if(action === 'video-off'){
manageVideoEl('off', peerUserName)
return;
}
}
</script>
<script>
// Page bootstrap: open the signalling WebSocket for this room and announce
// ourselves as a new peer once the connection is up.
$(document).ready(function(){
let loc = window.location;
let wsStart = 'ws://';
if(loc.protocol === 'https:'){
wsStart = 'wss://';
}
let endPoint = wsStart + loc.host + '/ws/video/meet/'+ '{{ group_name }}/';
webSocket = new WebSocket(endPoint);
webSocket.addEventListener('open', (e)=>{
sendSignal('new-peer', {'receiver_channel_name': '{{ group_name }}'});
});
webSocket.addEventListener('message', webSocketOnMessage);
webSocket.addEventListener('close', (e)=>{
});
webSocket.addEventListener('error', (e)=>{
});
{% if is_group_creator %}
sendNotificationOnMessage();
{% endif %}
});
// Local media setup: grab cam+mic, show the self-preview (muted so we don't
// hear ourselves), and wire the audio/video toggle buttons to flip
// track.enabled on the captured tracks.
let localStream = new MediaStream();
const constraints = {
'video': true,
'audio': true
}
const localVideo = document.querySelector('#local-video');
const btnToggleAudio = document.querySelector('#btn-toggle-audio');
const btnToggleVideo = document.querySelector('#btn-toggle-video');
let userMedia = navigator.mediaDevices.getUserMedia(constraints)
.then(stream => {
localStream = stream;
localVideo.srcObject = localStream;
localVideo.muted = true;
let audioTracks = stream.getAudioTracks();
let videoTracks = stream.getVideoTracks();
audioTracks[0].enabled = true;
videoTracks[0].enabled = true;
btnToggleAudio.addEventListener('click', ()=>{
audioTracks[0].enabled = !audioTracks[0].enabled;
if(audioTracks[0].enabled){
btnToggleAudio.classList.replace('mic-off', 'mic-on')
return;
}
btnToggleAudio.classList.replace('mic-on','mic-off');
});
btnToggleVideo.addEventListener('click', ()=>{
videoTracks[0].enabled = !videoTracks[0].enabled;
if(videoTracks[0].enabled){
btnToggleVideo.classList.replace('camera-off','camera-on');
sendSignal('video-on', {})
localVideo.srcObject = localStream;
return;
}
// Detach the preview and tell the room our camera is off.
sendSignal('video-off', {})
localVideo.srcObject = null;
btnToggleVideo.classList.replace('camera-on','camera-off');
});
})
.catch(error =>{
{#console.log('Error accessing media devices', error);#}
});
</script>
<script>
/**
 * Serialize one signalling event and broadcast it over the room WebSocket.
 * @param {string} action  e.g. 'new-peer', 'new-offer', 'new-answer'
 * @param {Object} message action-specific payload
 */
function sendSignal(action, message){
  console.log("Sending message to other end");
  const payload = JSON.stringify({
    'peer': username,
    'action': action,
    'message': message
  });
  console.log(payload);
  webSocket.send(payload);
}
// Create an RTCPeerConnection for `peerUserName`, attach our local tracks,
// and drive the offer side of the SDP handshake. The offer is sent only once
// ICE gathering finishes (the null-candidate event), so it already contains
// all candidates (no trickle ICE).
function createOfferer(peerUserName, receiver_channel_name){
console.log("creating offer");
let peer = new RTCPeerConnection(iceConfiguration);
addLocalTracks(peer);
peer.addEventListener("icegatheringstatechange", ev => {
switch(peer.iceGatheringState) {
case "new":
console.log("gathering is either just starting or has been reset");
break;
case "gathering":
console.log("gathering has begun or is ongoing");
break;
case "complete":
console.log("gathering has ended");
break;
}
});
peer.addEventListener('icecandidate', (event)=>{
// A non-null candidate means gathering is still in progress.
if(event.candidate){
console.log('new ice candidate', JSON.stringify(peer.localDescription));
return;
}
sendSignal('new-offer', {
'sdp':peer.localDescription,
'receiver_channel_name': receiver_channel_name
});
// to notify video status of other users when new users join
if(!localStream.getVideoTracks()[0].enabled){
sendSignal('video-off', {})
}
});
let dc = peer.createDataChannel('channel');
dc.addEventListener('open', ()=>{
console.log("dc connection opened");
});
dc.addEventListener('message', dcOnMessage);
console.log("Creating video");
let remoteVideo = createVideo(peerUserName);
console.log("video created, setting track");
setOnTrack(peer, remoteVideo);
console.log("track setted");
mapPeers[peerUserName] = [peer, dc];
// Clean up the map and the remote tile when the connection dies.
peer.addEventListener('iceconnectionstatechange', ()=>{
let iceconnectionState = peer.iceConnectionState;
if(iceconnectionState === 'failed' || iceconnectionState === 'disconnected' || iceconnectionState === 'closed'){
delete mapPeers[peerUserName];
if(iceconnectionState !== 'closed'){
peer.close();
}
removeVideo(remoteVideo);
}
});
peer.createOffer()
.then(o => peer.setLocalDescription(o))
.then(() => {
{#console.log("local description set successfully");#}
});
}
// Answer side of the handshake: build a peer for the incoming offer, attach
// local tracks, accept the remote data channel, and send our answer once ICE
// gathering completes (null-candidate event).
function createAnswerer(offer, peerUserName, receiver_channel_name){
let peer = new RTCPeerConnection(iceConfiguration);
addLocalTracks(peer);
let remoteVideo = createVideo(peerUserName);
setOnTrack(peer, remoteVideo);
peer.addEventListener('datachannel', e=>{
peer.dc = e.channel;
peer.dc.addEventListener('open', ()=>{
{#console.log("dc connection opened");#}
});
peer.dc.addEventListener('message', dcOnMessage);
mapPeers[peerUserName] = [peer, peer.dc];
});
// Clean up the map and the remote tile when the connection dies.
peer.addEventListener('iceconnectionstatechange', ()=>{
let iceconnectionState = peer.iceConnectionState;
if(iceconnectionState === 'failed' || iceconnectionState === 'disconnected' || iceconnectionState === 'closed'){
delete mapPeers[peerUserName];
if(iceconnectionState !== 'closed'){
peer.close();
}
removeVideo(remoteVideo);
}
});
peer.addEventListener('icecandidate', (event)=>{
if(event.candidate){
{#console.log('new ice candidate', JSON.stringify(peer.localDescription));#}
return;
}
sendSignal('new-answer', {
'sdp':peer.localDescription,
'receiver_channel_name': receiver_channel_name
});
});
peer.setRemoteDescription(offer)
.then(() => {
{#console.log('Remote description set successfully for %s', peerUserName);#}
return peer.createAnswer();
})
.then(a => {
{#console.log('Answer created');#}
peer.setLocalDescription(a);
})
}
// Attach every local audio/video track to the given RTCPeerConnection.
function addLocalTracks(peer){
  for (const track of localStream.getTracks()) {
    peer.addTrack(track, localStream);
  }
}
/**
 * Build the <video> element for a remote participant and mount it in the DOM.
 * `peerUserName` has the form 'user_<id>'.
 * @returns {HTMLVideoElement} the element (not yet attached to a stream).
 */
function createVideo(peerUserName){
  // BUG FIX: `userId` was assigned without a declaration, creating an
  // implicit global (and a ReferenceError in strict mode).
  const userId = peerUserName.split('_')[1]
  // Video element Creation
  let remoteVideo = document.createElement('video');
  remoteVideo.id = peerUserName + '-video';
  remoteVideo.autoplay = true;
  remoteVideo.playsInline = true;
  remoteVideo.classList.add('custom-video');
  remoteVideo.setAttribute('data-id', userId);
  addVideoToDOM(remoteVideo, userId)
  return remoteVideo;
}
// Wrap the <video> in a participant card (name tag, actions, profile-image
// background) using data fetched from the participant endpoint, then mount
// it in #video-container and add the user to the member list.
function addVideoToDOM(video, userId){
let videoContainer = document.querySelector('#video-container');
$.getJSON(`/chat/call/participant/${userId}`, function(data){
// Styling Elements
video.style.backgroundImage = `url('${data.profile_image_url}')` // if video off then show this bg
let nameTag = document.createElement('span');
nameTag.classList.add('name-tag');
nameTag.innerText = data.username;
let participantActionsEl = document.createElement('div');
participantActionsEl.classList.add('participant-actions');
let videoParticipantEl = document.createElement('div');
videoParticipantEl.classList.add('video-participant');
videoParticipantEl.appendChild(participantActionsEl);
videoParticipantEl.appendChild(nameTag);
videoParticipantEl.appendChild(video);
videoParticipantEl.setAttribute('data-delete', 'true') // For removing element
videoParticipantEl.setAttribute('data-id', userId) // For showing feature
videoContainer.appendChild(videoParticipantEl);
addMemberToList(data)
})
}
// Show or hide a remote participant's video tile in response to their
// video-on / video-off signal. The saved srcObject is stashed in slot [2] of
// the peer's mapPeers entry so it can be restored on 'on'.
function manageVideoEl(status, peerUserName) {
const userId = peerUserName.split('_')[1]
// After element in DOM update video element
// (the tile is created asynchronously, hence the fixed 1s delay --
// NOTE(review): a race remains if creation takes longer; confirm)
setTimeout(() => {
let videoEl = document.querySelector(`video[data-id="${userId}"]`)
if (videoEl !== null){
// Saving source
if (mapPeers[peerUserName]){
if (videoEl.srcObject != null){
mapPeers[peerUserName][2] = videoEl.srcObject;
}
if (status==='on'){
videoEl.srcObject = mapPeers[peerUserName][2] || null;
} else if (status==='off'){
videoEl.srcObject = null;
}
}
}
}, 1000);
}
// Collect every incoming remote track into a single MediaStream that the
// participant's <video> element is attached to up front.
function setOnTrack(peer, remoteVideo){
  let remoteStream = new MediaStream();
  remoteVideo.srcObject = remoteStream;
  peer.addEventListener('track', async (event)=>{
    console.log(remoteStream);
    // FIX: MediaStream.addTrack() takes a single argument; the stray second
    // argument in the original call was ignored by the browser.
    remoteStream.addTrack(event.track);
  });
}
// Drop a participant: remove them from the member list, then delete the
// whole wrapper element (marked data-delete='true') that contains the video.
function removeVideo(video){
  removeMemberFromList(video.dataset.id);
  const wrapper = video.closest(`[data-delete='true']`);
  wrapper.remove();
}
</script>
{######### script to talk with group consumer #######}
<script>
// Group-chat notification: when the call starter opens the page, push a
// "join now" message with the meeting link into the group's chat channel.
// NOTE(review): the URL is hard-coded to ws:// -- it will fail on https
// pages; mirror the wss:// switch used for the signalling socket.
let group_chatSocket_chat;
let receiver_group = "{{group.id }}";
{% if is_call_starter and send_notifications %}
function sendNotificationOnMessage()
{
group_chatSocket_chat = new ReconnectingWebSocket(
'ws://' + window.location.host +
'/ws/chat/group/' + "{{ group.name }}" + '/'+ "{{ request.user.id }}" +'/');
group_chatSocket_chat.onopen = function(e){
{#console.log("Connection open");#}
let url = `${location.protocol + '//' + location.host}/chat/video/{{ join_url }}`;
group_chatSocket_chat.send(JSON.stringify({
'message': `New Video call is started! Join Now Link: ${url}`,
'receiver_id': receiver_group,
'command': 'new_message',
'bucket_id': 0,
}));
}
}
{% endif %}
</script>
</body>
as you move out side of the network the webrtc ICE candidate gathering process may fail because of Routers NAT and firewalls therefore you must have a turn server in your configuration that will relay the traffic if the direct p2p connection establishment fails
I think your network firewall not support STUN ( udp hole punching )
so you have to add turn server address to your iceConfiguration.
you can get turn server address https://www.twilio.com/stun-turn

What to do to store recording stream into the mobile browser using Media Recorder API?

I have a project requirement to record and store the currently running video stream. I have used webRTC for video streaming, and to record the video streaming, I have used MediaRecorder API. It is completely working fine in the desktop system. But it is not working in the mobile browser.
Any idea why it is not working in the mobile browser?
Following is the code snippet:
// Start screen+audio recording as soon as the component mounts: capture the
// display, mix local+remote mic audio into one track set, and feed the
// combined stream to a MediaRecorder.
// NOTE(review): getDisplayMedia is unavailable in most mobile browsers --
// likely why recording works on desktop but not mobile; verify against the
// target browsers. `options`, `recordingStream`, `captureStream` and
// `mediaRecorder` are module-level variables defined elsewhere -- TODO confirm.
componentDidMount = async () => {
recordingStream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true,
});
const mergedAudio = await this.mergeAudioStreams();
console.log("audio track length... ", mergedAudio.length);
const tracks = [
...recordingStream.getVideoTracks(),
...mergedAudio,
];
captureStream = new MediaStream(tracks);
mediaRecorder = new MediaRecorder(captureStream, options);
mediaRecorder.ondataavailable = this.handleDataAvailable;
mediaRecorder.start();
}
// MediaRecorder `dataavailable` handler: buffer non-empty chunks and kick
// off the download/upload step.
handleDataAvailable = async event => {
  console.log("data-available", event);
  if (event.data.size > 0) {
    recordedChunks.push(event.data);
    this.download();
    return;
  }
  console.log("in else");
};
// Package the buffered chunks into a single Blob for upload.
// BUG FIX: `new Blob(...)` is synchronous -- `await`ing it was a no-op.
// NOTE(review): MediaRecorder output is typically webm unless `options`
// requested mp4 -- confirm "video/mp4" matches the recorded data.
download = async () => {
  console.log("in download fn");
  const blob = new Blob(recordedChunks, {
    type: "video/mp4",
  });
  //called the API to store the recorded video
}
// Mix the local mic and the selected remote participant's audio into a single
// destination stream and return its audio tracks for recording.
mergeAudioStreams = async () => {
  console.log("recordScreen fn called");
  const ctx = new AudioContext();
  const dest = ctx.createMediaStreamDestination();
  let localSource, remoteSource;
  if (this.state.localStream.getAudioTracks().length > 0) {
    localSource = ctx.createMediaStreamSource(this.state.localStream);
  }
  if (this.state.selectedVideo.stream.getAudioTracks().length > 0) {
    remoteSource = ctx.createMediaStreamSource(
      this.state.selectedVideo.stream
    );
  }
  const localGain = ctx.createGain();
  const remoteGain = ctx.createGain();
  localGain.gain.value = 0.7;
  remoteGain.gain.value = 0.7;
  // BUG FIX: a stream with no audio tracks leaves its source undefined;
  // connecting unconditionally threw a TypeError in that case.
  if (localSource) localSource.connect(localGain).connect(dest);
  if (remoteSource) remoteSource.connect(remoteGain).connect(dest);
  console.log("combine tracks..", dest.stream.getAudioTracks());
  return dest.stream.getAudioTracks();
};

Audio Record Problem in JavaScript some of the recording is missing

Hello everyone. I am trying to record audio. After recording, I play it back; when I hit the Cut function, it takes the second I am currently at in the player, cuts from there to the end, and starts recording again, overwriting the chunks array from that point onward with the new recording. To sum up: if I record 5 seconds and then want to overwrite the last 3 seconds, I remove the last 3 elements of the array and push the new chunks onto the original array.
When I do that and send the new chunks to the audio player, it plays the first time as intended, but if I press play again it plays only the newly recorded part, without the first 2 seconds that I preserved from the old recording.
// Chunk buffer shared between recordings; mediaRecorder.start(1000) below
// pushes roughly one chunk per second into it.
let audioChunks = [];
let Recorder = null;
// Build a recorder session around a fresh microphone stream. Resolves to
// { start, stop }; stop() itself resolves to { audioBlob, audioUrl, play }
// and loads the combined audio into the #AudioRecodingPlayer element.
const recordAudio = () => {
return new Promise(resolve => {
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const mediaRecorder = new MediaRecorder(stream);
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
const start = () => {
// 1000ms timeslice -> one chunk per second of audio.
mediaRecorder.start(1000);
};
const stop = () => {
return new Promise(resolve => {
mediaRecorder.addEventListener("stop", () => {
const audioBlob = new Blob(audioChunks,{type: 'audio/mpeg-3'});
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
document.getElementById("AudioRecodingPlayer").src = audio.src;
document.getElementById("AudioRecodingPlayer").currentTime = 0;
const play = () => {
audio.play();
};
resolve({ audioBlob, audioUrl, play });
});
mediaRecorder.stop();
});
};
resolve({ start, stop });
});
});
};
// Create a recorder session and begin capturing immediately.
async function StartRecording(){
  const session = await recordAudio();
  Recorder = session;
  session.start();
}
// Stop the active session and hand back its result (audioBlob/audioUrl/play).
// BUG FIX: the original stored the promise in an unused local without
// awaiting it, so callers had no way to know when the recording was ready.
async function StopRecording(){
  return await Recorder.stop();
}
// Rewind-and-overwrite: keep only the chunks up to the playhead second
// (chunks are ~1s each because the recorder uses a 1000ms timeslice -- TODO
// confirm), then resume recording so new audio is appended after them.
function Cut(){
var Audio = document.getElementById("AudioRecodingPlayer");
var CurrenTime = Math.round(Audio.currentTime);
// Truncates the buffered chunk array to the first CurrenTime entries.
audioChunks.length = CurrenTime;
StartRecording();
}
On the Cut() function, you are not actually removing the elements of the array, nor changing the index or window. You are only changing the value of length.
What you would have to do is overwrite the audio chunks, or remove the chunks from that point forward.
// Chunk buffer shared between recordings; mediaRecorder.start(1000) below
// pushes roughly one chunk per second into it.
let audioChunks = [];
let Recorder = null;
// Build a recorder session around a fresh microphone stream. Resolves to
// { start, stop }; stop() itself resolves to { audioBlob, audioUrl, play }
// and loads the combined audio into the #AudioRecodingPlayer element.
const recordAudio = () => {
return new Promise(resolve => {
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const mediaRecorder = new MediaRecorder(stream);
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
const start = () => {
// 1000ms timeslice -> one chunk per second of audio.
mediaRecorder.start(1000);
};
const stop = () => {
return new Promise(resolve => {
mediaRecorder.addEventListener("stop", () => {
const audioBlob = new Blob(audioChunks,{type: 'audio/mpeg-3'});
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
document.getElementById("AudioRecodingPlayer").src = audio.src;
document.getElementById("AudioRecodingPlayer").currentTime = 0;
const play = () => {
audio.play();
};
resolve({ audioBlob, audioUrl, play });
});
mediaRecorder.stop();
});
};
resolve({ start, stop });
});
});
};
// Create a recorder session and begin capturing immediately.
async function StartRecording(){
  const session = await recordAudio();
  Recorder = session;
  session.start();
}
// Stop the active session and hand back its result (audioBlob/audioUrl/play).
// BUG FIX: the original stored the promise in an unused local without
// awaiting it, so callers had no way to know when the recording was ready.
async function StopRecording(){
  return await Recorder.stop();
}
// Truncate the buffered chunks at the playhead and resume recording so the
// new audio overwrites everything after the current second.
function Cut(){
  var Audio = document.getElementById("AudioRecodingPlayer");
  // BUG FIX: the variable was declared as `CurrenTime` but spliced with the
  // misspelled `CurrentTime`, which raised a ReferenceError at runtime.
  var currentChunkIndex = Math.round(Audio.currentTime);
  audioChunks.splice(currentChunkIndex); // this will actually remove the elements after the "currentChunkIndex" index
  StartRecording();
}

Categories