Access variable outside stream callback function - JavaScript, WebRTC

I'm streaming a webcam and want a method to stop it. I'm trying to stop it by calling stop() on the streaming track, although I can't reach the track variable outside the callback function. How do I access track outside the callback function? Basically like this:
var track;
function successCallback(stream) {
  localstream = stream; // stream available to console
  track = stream.getTracks()[0];
  if (window.URL) {
    video.src = window.URL.createObjectURL(stream);
  } else {
    video.src = stream;
  }
}
track.stop();
The complete instance looks like this:
function media(i) {
  navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
  var constraints = {
    audio: false,
    video: true
  };
  var video = document.querySelectorAll("video")[i];
  var localstream;
  function successCallback(stream) {
    localstream = stream; // stream available to console
    var track = stream.getTracks()[0];
    if (window.URL) {
      video.src = window.URL.createObjectURL(stream);
    } else {
      video.src = stream;
    }
    track.stop();
  }
  function errorCallback(error) {
    console.log('navigator.getUserMedia error: ', error);
  }
  navigator.getUserMedia(constraints, successCallback, errorCallback);
}

Define track outside the scope of the media function. Also, consider revoking the Blob URL set at video.src when track.stop() is called.
window.onload = function() {
  let track, url;
  function media(i) {
    navigator.getUserMedia = navigator.getUserMedia ||
      navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    var constraints = {
      audio: false,
      video: true
    };
    var video = document.querySelectorAll("video")[i];
    var localstream;
    function successCallback(stream) {
      localstream = stream; // stream available to console
      track = stream.getTracks()[0];
      if (window.URL) {
        url = window.URL.createObjectURL(stream);
        video.src = url;
      } else {
        video.src = stream;
      }
      // track.stop();
    }
    function errorCallback(error) {
      console.log('navigator.getUserMedia error: ', error);
    }
    navigator.getUserMedia(constraints, successCallback, errorCallback);
  }
  var button = document.querySelector("button");
  button.onclick = function() {
    track.stop();
    URL.revokeObjectURL(url);
    // document.querySelectorAll("video")[i].src = "";
  }
  media(0)
}
plnkr https://plnkr.co/edit/FJhEBOCTRcE5NUUaEI7B?p=preview
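For reference, a minimal modern sketch of the same idea (not part of the original answer): current browsers have removed URL.createObjectURL(MediaStream), so the stream is assigned to video.srcObject instead, and the camera is released by stopping every track rather than a single one.
// Minimal modern sketch, not from the original answer; assumes a <video> element is passed in.
let localStream;

async function startWebcam(video) {
  // getUserMedia returns a Promise<MediaStream> and may reject if permission is denied
  localStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
  video.srcObject = localStream; // no Blob URL involved, so nothing to revoke
}

function stopWebcam(video) {
  if (localStream) {
    localStream.getTracks().forEach(track => track.stop()); // releases the camera
    localStream = null;
  }
  video.srcObject = null;
}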

Related

How can I differentiate the sound level of an audio track in a mediastream in JavaScript? [duplicate]

I'm using the Google Cloud API for Speech-to-text, with a NodeJS back-end.
The app needs to be able to listen for voice commands, and transmit them to the back-end as a buffer. For this, I need to send the buffer of the preceding audio when silence is detected.
Any help would be appreciated. I've included the JS code below:
if (!navigator.getUserMedia)
  navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
  navigator.getUserMedia({audio: true}, success, function (e) {
    alert('Error capturing audio.');
  });
} else alert('getUserMedia not supported in this browser.');
var recording = false;
window.startRecording = function () {
  recording = true;
};
window.stopRecording = function () {
  recording = false;
  // window.Stream.end();
};
function success(e) {
  audioContext = window.AudioContext || window.webkitAudioContext;
  context = new audioContext();
  // the sample rate is in context.sampleRate
  audioInput = context.createMediaStreamSource(e);
  var bufferSize = 4096;
  recorder = context.createScriptProcessor(bufferSize, 1, 1);
  recorder.onaudioprocess = function (e) {
    if (!recording) return;
    console.log('recording');
    var left = e.inputBuffer.getChannelData(0);
    console.log(convertoFloat32ToInt16(left));
  };
  audioInput.connect(recorder);
  recorder.connect(context.destination);
}
I'm not too sure what exactly is being asked in the question, so this answer is only intended to give a way to detect silences in an AudioStream.
To detect silence in an AudioStream, you can use an AnalyserNode, on which you call the getByteFrequencyData method at regular intervals and check whether there were sounds higher than your expected level for a given time.
You can set the threshold level directly with the minDecibels property of the AnalyserNode.
function detectSilence(
  stream,
  onSoundEnd = _=>{},
  onSoundStart = _=>{},
  silence_delay = 500,
  min_decibels = -80
) {
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  const streamNode = ctx.createMediaStreamSource(stream);
  streamNode.connect(analyser);
  analyser.minDecibels = min_decibels;
  const data = new Uint8Array(analyser.frequencyBinCount); // will hold our data
  let silence_start = performance.now();
  let triggered = false; // trigger only once per silence event
  function loop(time) {
    requestAnimationFrame(loop); // we'll loop every 60th of a second to check
    analyser.getByteFrequencyData(data); // get current data
    if (data.some(v => v)) { // if there is data above the given db limit
      if (triggered) {
        triggered = false;
        onSoundStart();
      }
      silence_start = time; // set it to now
    }
    if (!triggered && time - silence_start > silence_delay) {
      onSoundEnd();
      triggered = true;
    }
  }
  loop();
}
function onSilence() {
  console.log('silence');
}
function onSpeak() {
  console.log('speaking');
}
navigator.mediaDevices.getUserMedia({
    audio: true
  })
  .then(stream => {
    detectSilence(stream, onSilence, onSpeak);
    // do something else with the stream
  })
  .catch(console.error);
And as a fiddle, since Stack Snippets may block gUM.
You can use the SpeechRecognition result event to determine when a word or phrase has been recognized (for example, ls, cd, pwd or other commands), pass the .transcript of the SpeechRecognitionAlternative to speechSynthesis.speak(), and in the attached start and end events of the SpeechSynthesisUtterance call .start() or .resume() on a MediaRecorder object to which the MediaStream has been passed; then convert the Blob at the dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().
We could alternatively use the audiostart or soundstart and audioend or soundend events of SpeechRecognition to record the user's actual voice, though the end events may not fire consistently relative to the actual start and end of audio captured by a standard system microphone.
<!DOCTYPE html>
<html>
<head>
  <title>Speech Recognition Recording</title>
</head>
<body>
  <input type="button" value="Stop speech command recognition" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        const recorder = new MediaRecorder(stream);
        const recognition = new webkitSpeechRecognition();
        const synthesis = new SpeechSynthesisUtterance();
        const handleResult = e => {
          recognition.onresult = null;
          console.log(e.results);
          const result = e.results[e.results.length - 1];
          if (result.isFinal) {
            const [{transcript}] = result;
            console.log(transcript);
            synthesis.text = transcript;
            window.speechSynthesis.speak(synthesis);
          }
        }
        synthesis.onstart = () => {
          if (recorder.state === "inactive") {
            recorder.start()
          } else {
            if (recorder.state === "paused") {
              recorder.resume();
            }
          }
        }
        synthesis.onend = () => {
          recorder.pause();
          recorder.requestData();
        }
        recorder.ondataavailable = async(e) => {
          if (stream.active) {
            try {
              const blobURL = URL.createObjectURL(e.data);
              const request = await fetch(blobURL);
              const ab = await request.arrayBuffer();
              console.log(blobURL, ab);
              recognition.onresult = handleResult;
              // URL.revokeObjectURL(blobURL);
            } catch (err) {
              throw err
            }
          }
        }
        recorder.onpause = e => {
          console.log("recorder " + recorder.state);
        }
        recognition.continuous = true;
        recognition.interimResults = false;
        recognition.maxAlternatives = 1;
        recognition.start();
        recognition.onend = e => {
          console.log("recognition ended, stream.active", stream.active);
          if (stream.active) {
            console.log(e);
            // the service disconnects after a period of time
            recognition.start();
          }
        }
        recognition.onresult = handleResult;
        stream.oninactive = () => {
          console.log("stream ended");
        }
        document.getElementById("stop")
          .onclick = () => {
            console.log("stream.active:", stream.active);
            if (stream && stream.active && recognition) {
              recognition.abort();
              recorder.stop();
              for (let track of stream.getTracks()) {
                track.stop();
              }
              console.log("stream.active:", stream.active);
            }
          }
      })
      .catch(err => {
        console.error(err)
      });
  </script>
</body>
</html>
plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview
The simplest approach would be to use the .pause(), .resume(), and .stop() methods of MediaRecorder() to let the user start, pause, and stop recording the audio captured with navigator.mediaDevices.getUserMedia(), and to convert the resulting Blob to an ArrayBuffer, if that is what the API expects to be POSTed to the server.
<!DOCTYPE html>
<html>
<head>
  <title>User Media Recording</title>
</head>
<body>
  <input type="button" value="Start/resume recording audio" id="start">
  <input type="button" value="Pause recording audio" id="pause">
  <input type="button" value="Stop recording audio" id="stop">
  <script>
    navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => {
        const recorder = new MediaRecorder(stream);
        recorder.ondataavailable = async(e) => {
          if (stream.active) {
            try {
              const blobURL = URL.createObjectURL(e.data);
              const request = await fetch(blobURL);
              const ab = await request.arrayBuffer();
              // do stuff with `ArrayBuffer` of recorded audio
              console.log(blobURL, ab);
              // we do not need the `Blob URL`, we can revoke the object
              // URL.revokeObjectURL(blobURL);
            } catch (err) {
              throw err
            }
          }
        }
        recorder.onpause = e => {
          console.log("recorder " + recorder.state);
          recorder.requestData();
        }
        stream.oninactive = () => {
          console.log("stream ended");
        }
        document.getElementById("start")
          .onclick = () => {
            if (recorder.state === "inactive") {
              recorder.start();
            } else {
              recorder.resume();
            }
            console.log("recorder.state:", recorder.state);
          }
        document.getElementById("pause")
          .onclick = () => {
            if (recorder.state === "recording") {
              recorder.pause();
            }
            console.log("recorder.state:", recorder.state);
          }
        document.getElementById("stop")
          .onclick = () => {
            if (recorder.state === "recording" || recorder.state === "paused") {
              recorder.stop();
            }
            for (let track of stream.getTracks()) {
              track.stop();
            }
            document.getElementById("start").onclick = null;
            document.getElementById("pause").onclick = null;
            console.log("recorder.state:", recorder.state
              , "stream.active", stream.active);
          }
      })
      .catch(err => {
        console.error(err)
      });
  </script>
</body>
</html>
plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview
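Since the question mentions transmitting the recording to a NodeJS back-end, here is a minimal sketch of uploading the recorded data; the /upload endpoint and content type are assumptions on my part, not something from the thread.
// Hypothetical upload helper; "/upload" is an assumed endpoint, adjust to the real API.
async function uploadRecording(blob) {
  const response = await fetch("/upload", {
    method: "POST",
    headers: { "Content-Type": blob.type || "application/octet-stream" },
    body: blob // fetch can send the Blob directly; the server receives the raw bytes
  });
  if (!response.ok) {
    throw new Error("Upload failed: " + response.status);
  }
  return response;
}
// e.g. inside recorder.ondataavailable: uploadRecording(e.data).catch(console.error);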

How to console.log each frame in the video as a byte array?

The following is the code that I use to stream from my webcam onto a video element.
I want each frame to be logged as a byte array in the console; basically, I want to get each frame as a byte array. I'm a newbie, so please provide a detailed explanation of how I can approach this.
var video = document.querySelector("#video");
// Basic settings for the video to get from Webcam
const constraints = {
audio: false,
video: {
width: 475,
height: 475,
},
};
// This condition will ask permission to user for Webcam access
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices
.getUserMedia(constraints)
.then(function (stream) {
video.srcObject = stream;
})
.catch(function (err0r) {
console.log("Something went wrong!");
});
}
//console.log(context.drawImage(video, 0, 0));
let a = document.addEventListener("keydown", function (e) {
if (e.key === "Escape") {
stop(e);
}
});
function stop(e) {
console.log("video stopped!");
var stream = video.srcObject;
var tracks = stream.getTracks();
for (var i = 0; i < tracks.length; i++) {
var track = tracks[i];
track.stop();
}
video.srcObject = null;
}
Thanks.
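No answer to this question is included here, but a common approach, sketched below as an assumption rather than something from the thread, is to draw the current video frame onto a canvas and read the pixels back with getImageData, which yields a Uint8ClampedArray of RGBA bytes.
// Sketch: log the current frame of the question's <video> element as an RGBA byte array.
const frameCanvas = document.createElement("canvas");
const frameCtx = frameCanvas.getContext("2d");

function logFrameBytes() {
  frameCanvas.width = video.videoWidth;
  frameCanvas.height = video.videoHeight;
  frameCtx.drawImage(video, 0, 0);
  // data is a Uint8ClampedArray with 4 bytes (R, G, B, A) per pixel
  const { data } = frameCtx.getImageData(0, 0, frameCanvas.width, frameCanvas.height);
  console.log(data);
}

// Log roughly one frame per display refresh while the stream is live.
function frameLoop() {
  if (video.srcObject) {
    logFrameBytes();
    requestAnimationFrame(frameLoop);
  }
}
video.addEventListener("playing", frameLoop);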

Choose webcams via getUserMedia

I am a beginner with getUserMedia; I just got some code from Google and I'm able to work with it. But I have to show options in my web app from which a user can select a webcam: the primary one (laptop) or a secondary one (connected via USB).
I tried the following, which works for the primary (laptop) webcam, but when I add a USB webcam it automatically selects the USB webcam.
var canvas = document.getElementById("canvas"),
  context = canvas.getContext("2d"),
  video = document.getElementById("video"),
  imagegrid = document.getElementById("imagegrid"),
  videoObj = { "video": true },
  errBack = function(error) {
    console.log("Video capture error: ", error.code);
  };
var video = document.querySelector("#video");
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia || navigator.oGetUserMedia;
if (navigator.getUserMedia) {
  navigator.getUserMedia({video: true}, handleVideo, videoError);
}
function handleVideo(stream) {
  video.src = window.URL.createObjectURL(stream);
}
function videoError(e) {
  // do something
}
// Trigger photo take
document.getElementById("video").addEventListener("click", function() {
  draw(video, canvas, imagegrid);
});
Is it possible to show options for both webcams?
Thanks
The function navigator.getUserMedia() will only give you the default camera (with the exception of Firefox, which gives you an option of which camera to share with the web application).
To avoid this problem you should use navigator.mediaDevices.enumerateDevices() and then navigator.mediaDevices.getUserMedia(constraints).
Example:
navigator.mediaDevices.enumerateDevices()
  .then(gotDevices)
  .catch(errorCallback);
...
function gotDevices(deviceInfos) {
  ...
  for (var i = 0; i !== deviceInfos.length; ++i) {
    var deviceInfo = deviceInfos[i];
    var option = document.createElement('option');
    option.value = deviceInfo.deviceId;
    if (deviceInfo.kind === 'audioinput') {
      option.text = deviceInfo.label ||
        'Microphone ' + (audioInputSelect.length + 1);
      audioInputSelect.appendChild(option);
    } else if (deviceInfo.kind === 'audiooutput') {
      option.text = deviceInfo.label || 'Speaker ' +
        (audioOutputSelect.length + 1);
      audioOutputSelect.appendChild(option);
    } else if (deviceInfo.kind === 'videoinput') {
      option.text = deviceInfo.label || 'Camera ' +
        (videoSelect.length + 1);
      videoSelect.appendChild(option);
    }
    ...
  }
}
navigator.mediaDevices.getUserMedia(constraints)
  .then(function(stream) {
    var videoTracks = stream.getVideoTracks();
    console.log('Got stream with constraints:', constraints);
    console.log('Using video device: ' + videoTracks[0].label);
    stream.onended = function() {
      console.log('Stream ended');
    };
    window.stream = stream; // make variable available to console
    video.srcObject = stream;
  })
  .catch(function(error) {
    // ...
  });
The above functions use promises and require a more complex approach than yours, so you need to do some reading in order to adapt this method. Have a look at the link below for some examples:
https://developers.google.com/web/updates/2015/10/media-devices
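To then open the camera the user picked, the selected deviceId can be fed back into getUserMedia as a constraint. A minimal sketch, assuming videoSelect and video are the elements from the example above:
// Sketch only: videoSelect is the <select> populated by gotDevices, video is the <video> element.
function startSelectedCamera() {
  const constraints = {
    audio: false,
    video: videoSelect.value ? { deviceId: { exact: videoSelect.value } } : true
  };
  return navigator.mediaDevices.getUserMedia(constraints)
    .then(function(stream) {
      video.srcObject = stream;
      return stream;
    });
}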

WebRTC: Firefox unable to receive video as callee

I'm new to WebRTC and am trying to create a simple test page that allows for 1-to-1 video conferences.
So far what I have below works for the following:
Chrome -> Chrome
Firefox -> Chrome
It does not work in the following situations:
Firefox -> Firefox
Chrome -> Firefox
Basically, Firefox is unable to receive the remote video stream when it is the callee. The onaddstream handler is never called, but I do not know why.
The only difference I have found between Firefox and Chrome is that Firefox does not trickle ICE candidates; they are included in the initial offer/answer, so the onicecandidate handler is never called with a non-null candidate. Chrome, on the other hand, does trickle candidates, which are sent to the signaling server as separate messages. I'm not sure whether this is the problem or not.
I am testing with Firefox Nightly 34 and Chrome 36. The signaling server is hosted in my LAN and both browsers are running on the same computer.
The following code was based off of http://www.html5rocks.com/en/tutorials/webrtc/basics/.
var localVideo;
var remoteVideo;
var peerConnection;
var peerConnectionConfig = {'iceServers': [{'url': 'stun:stun.services.mozilla.com'}, {'url': 'stun:stun.l.google.com:19302'}]};
var isCaller;
navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
window.RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
window.RTCIceCandidate = window.RTCIceCandidate || window.mozRTCIceCandidate || window.webkitRTCIceCandidate;
window.RTCSessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;
function pageReady() {
  localVideo = document.getElementById('localVideo');
  remoteVideo = document.getElementById('remoteVideo');
  serverConnection = new WebSocket('ws://myserver:3434');
  serverConnection.onmessage = gotMessageFromServer;
}
function start(_isCaller) {
  isCaller = _isCaller
  peerConnection = new RTCPeerConnection(peerConnectionConfig);
  peerConnection.onicecandidate = gotIceCandidate;
  peerConnection.onaddstream = gotRemoteStream;
  var constraints = {
    video: true,
    audio: true,
  };
  if(navigator.getUserMedia) {
    navigator.getUserMedia(constraints, getUserMediaSuccess, getUserMediaError);
  } else {
    alert('Your browser does not support getUserMedia API');
  }
}
function getUserMediaSuccess(stream) {
  localStream = stream;
  localVideo.src = window.URL.createObjectURL(stream);
  peerConnection.addStream(stream);
  if(isCaller) {
    peerConnection.createOffer(gotDescription, createOfferError);
  } else {
    peerConnection.createAnswer(gotDescription, createAnswerError);
  }
}
function gotMessageFromServer(message) {
  if(!peerConnection) start(false);
  var signal = JSON.parse(message.data);
  if(signal.sdp) {
    peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp));
  } else if(signal.ice) {
    peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice));
  }
}
function gotIceCandidate(event) {
  if(event.candidate != null) {
    serverConnection.send(JSON.stringify({'ice': event.candidate}));
  }
}
function gotDescription(description) {
  peerConnection.setLocalDescription(description);
  serverConnection.send(JSON.stringify({'sdp': description}));
}
function gotRemoteStream(event) {
  console.log("got remote stream");
  remoteVideo.src = window.URL.createObjectURL(event.stream);
}
// Error functions....
function getUserMediaError(error) {
  console.log(error);
}
function createOfferError(error) {
  console.log(error);
}
function createAnswerError(error) {
  console.log(error);
}
If it's useful, here is my signaling server. It's a very basic Node.js script that uses the ws WebSocket library to simply broadcast received messages to all connected clients, which in this case is just the one other client.
var WebSocketServer = require('ws').Server;
var wss = new WebSocketServer({port: 3434});
wss.broadcast = function(data) {
  for(var i in this.clients) {
    this.clients[i].send(data);
  }
};
wss.on('connection', function(ws) {
  ws.on('message', function(message) {
    console.log('received: %s', message);
    wss.broadcast(message);
  });
});
Any help is greatly appreciated!
It turns out I had a race condition between the call to setRemoteDescription() and createAnswer(). Firefox asks for permission to use the webcam on every reload of the page, whereas Chrome only asks on the first page load. This meant that when I went to call createAnswer() in Firefox, the video stream did not exist yet because the user had not yet allowed access to the webcam. The solution was to move the call to getUserMedia() to when the page is ready. This way, when an incoming call occurs the user has already allowed access to the webcam and the video stream can be shared immediately. It's not perfect, but it allows my little test page to work, which is all I'm aiming for right now.
Here's the updated code if it helps anyone in the future:
var localVideo;
var remoteVideo;
var peerConnection;
var peerConnectionConfig = {'iceServers': [{'url': 'stun:stun.services.mozilla.com'}, {'url': 'stun:stun.l.google.com:19302'}]};
navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
window.RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
window.RTCIceCandidate = window.RTCIceCandidate || window.mozRTCIceCandidate || window.webkitRTCIceCandidate;
window.RTCSessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;
function pageReady() {
  localVideo = document.getElementById('localVideo');
  remoteVideo = document.getElementById('remoteVideo');
  serverConnection = new WebSocket('ws://sagan:3434');
  serverConnection.onmessage = gotMessageFromServer;
  var constraints = {
    video: true,
    audio: true,
  };
  if(navigator.getUserMedia) {
    navigator.getUserMedia(constraints, getUserMediaSuccess, getUserMediaError);
  } else {
    alert('Your browser does not support getUserMedia API');
  }
}
function getUserMediaSuccess(stream) {
  localStream = stream;
  localVideo.src = window.URL.createObjectURL(stream);
}
function start(isCaller) {
  peerConnection = new RTCPeerConnection(peerConnectionConfig);
  peerConnection.onicecandidate = gotIceCandidate;
  peerConnection.onaddstream = gotRemoteStream;
  peerConnection.addStream(localStream);
  if(isCaller) {
    peerConnection.createOffer(gotDescription, createOfferError);
  }
}
function gotMessageFromServer(message) {
  if(!peerConnection) start(false);
  var signal = JSON.parse(message.data);
  if(signal.sdp) {
    peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp), function() {
      peerConnection.createAnswer(gotDescription, createAnswerError);
    });
  } else if(signal.ice) {
    peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice));
  }
}
function gotIceCandidate(event) {
  if(event.candidate != null) {
    serverConnection.send(JSON.stringify({'ice': event.candidate}));
  }
}
function gotDescription(description) {
  console.log('got description');
  peerConnection.setLocalDescription(description, function () {
    serverConnection.send(JSON.stringify({'sdp': description}));
  }, function() {console.log('set description error')});
}
function gotRemoteStream(event) {
  console.log("got remote stream");
  remoteVideo.src = window.URL.createObjectURL(event.stream);
}
// Error functions....
function getUserMediaError(error) {
  console.log(error);
}
function createOfferError(error) {
  console.log(error);
}
function createAnswerError(error) {
  console.log(error);
}
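For comparison, and not part of the original answer, the same callee-side ordering can be expressed with the promise-based API that current browsers ship, which avoids nesting the success callbacks:
// Sketch of the answer flow with promises; assumes peerConnection and serverConnection exist as above.
async function handleRemoteSdp(sdp) {
  await peerConnection.setRemoteDescription(new RTCSessionDescription(sdp));
  if (sdp.type === "offer") {
    const answer = await peerConnection.createAnswer();
    await peerConnection.setLocalDescription(answer);
    serverConnection.send(JSON.stringify({ sdp: peerConnection.localDescription }));
  }
}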

Specify webcam with sourceId in getUserMedia

I have two webcams and am trying to specify which one to show in my video tag. My HTML is simply <video autoplay></video>. Here is my JavaScript:
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
var constraints = {
  video: {
    optional: [{
      sourceId: "64-character-alphanumeric-source-id-here"
    }]
  }
};
var video = document.querySelector("video");
function successCallback(stream) {
  window.stream = stream; // stream available to console
  if (window.URL) {
    video.src = window.URL.createObjectURL(stream);
  } else {
    video.src = stream;
  }
  video.play();
}
function errorCallback(error) {
  console.log("navigator.getUserMedia error: ", error);
}
navigator.getUserMedia(constraints, successCallback, errorCallback);
However, even when I change the sourceId to my second webcam, I can't get it to display that webcam. js.fiddle code
This code is working for me in mobile Chrome; it tries to detect the back-facing video stream.
MediaStreamTrack.getSources(function(sourceInfos) {
  var videoSourceId;
  for (var i = 0; i != sourceInfos.length; ++i) {
    var sourceInfo = sourceInfos[i];
    if(sourceInfo.kind == "video" && sourceInfo.facing == "environment") {
      videoSourceId = sourceInfo.id;
    }
  }
  var constraints = {
    audio: false,
    video: {
      optional: [{sourceId: videoSourceId}]
    }
  };
  ....
});
Note that there is no fallback in this code.
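As an aside, MediaStreamTrack.getSources() has since been removed from browsers; a rough modern equivalent of the same back-camera preference, sketched here as an assumption rather than a drop-in replacement:
// Sketch: ask for the environment-facing (back) camera where the browser supports facingMode.
navigator.mediaDevices.getUserMedia({
    audio: false,
    video: { facingMode: { ideal: "environment" } }
  })
  .then(stream => {
    // use the stream, e.g. videoElement.srcObject = stream;
  })
  .catch(console.error);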
