How to restart a track in WebRTC video calling - JavaScript

I implemented video calling with WebRTC using the https://github.com/webtutsplus/videoChat-WebFrontend library, but when the sender switches cameras, the sender's video stops on the receiver's end. To switch cameras between the two users I followed the approach from "Unable to change camera / stream for WebRTC call", but after the switch the receiver no longer gets the sender's video. Can anyone suggest a solution? Thanks.
This is the code to switch the camera.
$(".btn_rear_camera").click(function() {
    if (cameratype == "user") {
        capture('environment');
    } else {
        capture('user');
    }
});

function capture(facingMode) {
    cameratype = facingMode;
    localStream.getTracks().forEach(function(track) {
        track.stop();
    });
    var constraints = {
        video: {
            deviceId: devicesIds[1]
        },
        audio: true
    };
    navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
        replaceTracks(stream);
    }).catch(function(error) {
        console.error(error); // surface getUserMedia failures instead of swallowing them
    });
}

function replaceTracks(newStream) {
    var elementId = "localVideo";
    detachMediaStream(elementId);
    newStream.getTracks().forEach(function(track) {
        localStream.addTrack(track);
    });
    attachMediaStream(elementId, newStream);
    // optionally, if you have active peer connections:
    _replaceTracksForPeer(peerConnection);

    function _replaceTracksForPeer(peer) {
        peer.getSenders().map(function(sender) {
            sender.replaceTrack(newStream.getTracks().find(function(track) {
                return track.kind === sender.track.kind;
            }));
        });
    }
}

function detachMediaStream(id) {
    var elem = document.getElementById(id);
    if (elem) {
        elem.pause();
        if (typeof elem.srcObject === 'object') {
            elem.srcObject = null;
        } else {
            elem.src = '';
        }
    }
}

function attachMediaStream(id, stream) {
    var elem = document.getElementById(id);
    if (elem) {
        if (typeof elem.srcObject === 'object') {
            elem.srcObject = stream;
        } else {
            elem.src = window.URL.createObjectURL(stream);
        }
        elem.onloadedmetadata = function(e) {
            elem.play();
        };
    } else {
        throw new Error('Unable to attach media stream');
    }
}
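The likely culprit is the order of operations: the code stops the outgoing tracks before the replacement stream exists, and it always requests devicesIds[1] regardless of the requested facing mode. A minimal sketch of the usual fix, assuming the same peerConnection, localStream, and attachMediaStream() as above: acquire the new stream first, swap it into each RTCRtpSender with replaceTrack() (which needs no renegotiation, so the receiver keeps getting video), and only then stop the old tracks.
// Sketch (not the library's API): switch cameras without breaking the remote video.
async function switchCamera(facingMode) {
    cameratype = facingMode;
    // 1. Get the new camera stream BEFORE stopping the old one.
    var newStream = await navigator.mediaDevices.getUserMedia({
        video: { facingMode: facingMode }, // 'user' or 'environment'
        audio: true
    });
    // 2. Swap each outgoing track in place; replaceTrack() does not
    //    require renegotiation, so the receiver keeps getting media.
    for (var sender of peerConnection.getSenders()) {
        var newTrack = newStream.getTracks().find(function(track) {
            return sender.track && track.kind === sender.track.kind;
        });
        if (newTrack) await sender.replaceTrack(newTrack);
    }
    // 3. Only now stop and discard the old tracks.
    localStream.getTracks().forEach(function(track) { track.stop(); });
    localStream = newStream;
    attachMediaStream("localVideo", newStream);
}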

Related

Webrtc - Switch Camera in realtime

I need to switch cameras in WebRTC while two users are connected in a call. I'm having a problem trying to change my camera in real time: it works for the local video, but the remote person cannot see the new camera and still sees the old one. I tried stopping the stream and initializing it again, but it still doesn't work. This is just some of my code (the same switch-camera code shown in the question above). I have searched everywhere and I can't find a solution. Can someone help me out?

Convert audio buffer to play as audio element

I'm using the SoundCloud API to stream audio from SoundCloud. However, I'm trying to get the track's BPM programmatically, so I have the following:
Soundcloud.prototype.play = function (options) {
    options = options || {};
    var src;
    if (options.streamUrl) {
        src = options.streamUrl;
    } else if (this._playlist) {
        var length = this._playlist.tracks.length;
        if (length) {
            if (options.playlistIndex === undefined) {
                this._playlistIndex = this._playlistIndex || 0;
            } else {
                this._playlistIndex = options.playlistIndex;
            }
            // be silent if index is out of range
            if (this._playlistIndex >= length || this._playlistIndex < 0) {
                this._playlistIndex = 0;
                return;
            }
            src = this._playlist.tracks[this._playlistIndex].stream_url;
        }
    } else if (this._track) {
        src = this._track.stream_url;
    }
    if (!src) {
        throw new Error('There are no tracks to play; use the `streamUrl` option or the `load` method');
    }
    if (this._clientId) {
        src = _appendQueryParam(src, 'client_id', this._clientId);
    }
    if (src !== this.audio.src) {
        pulse.loadBufferFromURI(src, (event, pulse) => {
            this.trackBPM = pulse.beat.bpm;
            this.audio.src = window.URL.createObjectURL(pulse.renderedBuffer);
            this.playing = src;
            console.log('trackBPM', pulse.beat.bpm);
        });
    } else return this.audio.play();
};
Now
pulse.loadBufferFromURI(src, (event, pulse) => {
    this.trackBPM = pulse.beat.bpm;
    this.audio.src = window.URL.createObjectURL(pulse.renderedBuffer);
    this.playing = src;
    console.log('trackBPM', pulse.beat.bpm);
})
attempts to load the stream_url as an AudioBuffer to get the BPM. The question is: can I convert the buffer into a source compatible enough to play using the <audio> element? The reason being that I want to take advantage of the event listeners...
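One option that should work (a sketch, untested against the pulse library; I'm assuming pulse.renderedBuffer is a standard AudioBuffer): play the buffer through a MediaStreamAudioDestinationNode and hand the resulting MediaStream to the <audio> element, which keeps all of the element's normal event listeners usable.
// Sketch: route an AudioBuffer into an <audio> element via a MediaStream
// so the element's events (play, pause, ended, timeupdate, ...) still fire.
// Assumes `renderedBuffer` is a standard AudioBuffer, e.g. pulse.renderedBuffer.
function playBufferThroughAudioElement(renderedBuffer, audioElem) {
    var ctx = new (window.AudioContext || window.webkitAudioContext)();
    var source = ctx.createBufferSource();
    source.buffer = renderedBuffer;
    // MediaStreamAudioDestinationNode exposes the graph's output as a MediaStream
    var dest = ctx.createMediaStreamDestination();
    source.connect(dest);
    audioElem.srcObject = dest.stream; // instead of createObjectURL on the buffer
    source.start(0);
    return audioElem.play();
}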

getUserMedia - facingMode

I am currently using an Android tablet and getUserMedia to take pictures in my program.
Apparently, the default camera used by getUserMedia is the front camera. How do I use the rear camera as the default?
Here's my code for getUserMedia:
navigator.getUserMedia({
    "audio": false,
    "video": {
        mandatory: {
            minWidth: this.params.dest_width,
            minHeight: this.params.dest_height,
            //facingMode: "environment",
        },
    }
},
function(stream) {
    // got access, attach stream to video
    video.src = window.URL.createObjectURL(stream) || stream;
    Webcam.stream = stream;
    Webcam.loaded = true;
    Webcam.live = true;
    Webcam.dispatch('load');
    Webcam.dispatch('live');
    Webcam.flip();
},
function(err) {
    return self.dispatch('error', "Could not access webcam.");
});
I inserted facingMode in the "mandatory" part, but it didn't work. Please help.
Update: facingMode is now available in Chrome for Android through the adapter.js polyfill!
facingMode is not yet implemented natively in Chrome for Android, but it works natively in Firefox for Android.
You must use standard constraints, however (use an https fiddle for Chrome):
var gum = mode =>
    navigator.mediaDevices.getUserMedia({video: {facingMode: {exact: mode}}})
        .then(stream => (video.srcObject = stream))
        .catch(e => log(e));
var stop = () => video.srcObject && video.srcObject.getTracks().forEach(t => t.stop());
var log = msg => div.innerHTML += msg + "<br>";
<button onclick="stop();gum('user')">Front</button>
<button onclick="stop();gum('environment')">Back</button>
<div id="div"></div><br>
<video id="video" height="320" autoplay></video>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
The { exact: } syntax means the constraint is required, and things fail if the user doesn't have the right camera. If you leave it out then the constraint is optional, which in Firefox for Android means it only changes the default in the camera chooser in the permission prompt.
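For comparison, the optional form is the same call without the { exact } wrapper; a minimal sketch reusing the video and log helpers from the snippet above:
// Optional (non-required) facingMode: the browser may fall back to another
// camera instead of failing if the requested facing mode is unavailable.
navigator.mediaDevices.getUserMedia({video: {facingMode: "environment"}})
    .then(stream => (video.srcObject = stream))
    .catch(e => log(e));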
Using Peter's code (https://stackoverflow.com/a/41618462/7723861) I came up with this solution to get the rear camera:
function handleSuccess(stream) {
    window.stream = stream; // make stream available to browser console
    video.srcObject = stream;
}

function handleError(error) {
    console.log('navigator.getUserMedia error: ', error);
}

var DEVICES = [];
var final = null;

navigator.mediaDevices.enumerateDevices()
    .then(function(devices) {
        var arrayLength = devices.length;
        for (var i = 0; i < arrayLength; i++) {
            var tempDevice = devices[i];
            // FOR EACH DEVICE, PUSH TO DEVICES LIST THOSE OF KIND VIDEOINPUT (cameras)
            // AND IF THE CAMERA HAS THE RIGHT FACINGMODE, ASSIGN IT TO "final"
            if (tempDevice.kind == "videoinput") {
                DEVICES.push(tempDevice);
                if (tempDevice.facingMode == "environment" || tempDevice.label.indexOf("facing back") >= 0) {
                    final = tempDevice;
                }
            }
        }
        var totalCameras = DEVICES.length;
        // If we couldn't find a suitable camera, pick the last one... change to what works for you
        if (final == null) {
            //console.log("no suitable camera, getting the last one");
            final = DEVICES[totalCameras - 1];
        }
        // Set the constraints and call getUserMedia
        var constraints = {
            audio: false,
            video: {
                deviceId: {exact: final.deviceId}
            }
        };
        navigator.mediaDevices.getUserMedia(constraints)
            .then(handleSuccess).catch(handleError);
    })
    .catch(function(err) {
        console.log(err.name + ": " + err.message);
    });
Deploying our web app to Android through Cordova, I tried multiple solutions to access the rear camera. The solution that worked for me was:
constraints = {
    audio: false,
    video: {
        width: 400,
        height: 300,
        deviceId: deviceId ? {exact: deviceId} : undefined
    }
};
Retrieving the deviceId through:
navigator.mediaDevices.enumerateDevices()
    .then(function(devices) {
        // devices is an array of accessible audio and video inputs;
        // deviceId is the property I used to switch cameras
    })
    .catch(function(err) {
        console.log(err.name + ": " + err.message);
    });
I chose not to use a Cordova plugin so that if we choose to move away from Cordova, there would not be such a hefty migration.
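Filling in that callback, collecting the camera ids might look like the following sketch (videoDeviceIds is a name made up for illustration):
// Sketch: collect every camera's deviceId so the app can switch between them.
var videoDeviceIds = []; // hypothetical holder for the camera ids
navigator.mediaDevices.enumerateDevices()
    .then(function(devices) {
        devices.forEach(function(d) {
            if (d.kind === 'videoinput') videoDeviceIds.push(d.deviceId);
        });
    })
    .catch(function(err) {
        console.log(err.name + ": " + err.message);
    });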
A pretty dandy snippet you can use is:
var front = false;
document.getElementById('flip-button').onclick = function() { front = !front; };
var constraints = { video: { facingMode: (front ? "user" : "environment") } };
This should work for you hopefully.
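To actually apply the toggled constraint you still have to stop the current tracks and request a new stream; a minimal sketch, assuming a <video id="video"> element:
// Sketch: re-acquire the stream each time the user flips the camera.
async function flipCamera() {
    front = !front;
    var video = document.getElementById('video');
    if (video.srcObject) {
        // stop the old tracks first so the other camera can be opened
        video.srcObject.getTracks().forEach(function(t) { t.stop(); });
    }
    video.srcObject = await navigator.mediaDevices.getUserMedia({
        video: { facingMode: front ? "user" : "environment" }
    });
}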
In newer versions of Chrome (after v52) the adapter.js solutions no longer seem to work, so I solved the problem by enumerating devices first. Here is my solution. I am not sure if there is a better way to flip the camera and show the video on the screen, but I have to stop the track first and then get a new stream.
let Video = function() {
    let cameras = [];
    let currCameraIndex = 0;
    let constraints = {
        audio: true,
        video: {
            deviceId: { exact: "" }
        }
    };
    let videoCanvas = $('video#gum');

    this.initialize = function() {
        return enumerateDevices()
            .then(startVideo);
    };

    this.flipCamera = function() {
        currCameraIndex += 1;
        if (currCameraIndex >= cameras.length) {
            currCameraIndex = 0;
        }
        if (window.stream) {
            window.stream.getVideoTracks()[0].stop();
        }
        return startVideo();
    };

    function enumerateDevices() {
        return navigator.mediaDevices.enumerateDevices()
            .then(function(devices) {
                devices.forEach(function(device) {
                    console.log(device);
                    if (device.kind === "videoinput") {
                        cameras.push(device.deviceId);
                    }
                });
                console.log(cameras);
            });
    }

    function startVideo() {
        constraints.video.deviceId.exact = cameras[currCameraIndex];
        return navigator.mediaDevices.getUserMedia(constraints)
            .then(handleSuccess).catch(handleError);
    }

    function handleSuccess(stream) {
        videoCanvas[0].srcObject = stream;
        window.stream = stream;
    }

    function handleError(error) {
        alert(error);
    }
};
This would work for the rear camera:
navigator.mediaDevices.getUserMedia({
    video: {
        facingMode: "environment",
        height: {<heightValueHere>},
        width: {<widthValueHere>}
    }
})
.then(function(stream) {
    window.stream = stream;
    video.srcObject = stream;
})
I use the following with webcam.js; call LoadWebcam():
async function LoadWebcam() {
    var i;
    var frontdev;
    var backdev;
    const tempStream = await navigator.mediaDevices.getUserMedia({video: true});
    const devices = await navigator.mediaDevices.enumerateDevices();
    // Check all the video inputs and find the back camera
    devices.forEach(function(device) {
        if (device.kind === 'videoinput') {
            if (device.label && device.label.length > 0) {
                if (device.label.toLowerCase().indexOf('back') >= 0)
                    backdev = device.deviceId;
                else if (device.label.toLowerCase().indexOf('front') >= 0)
                    frontdev = device.deviceId;
            }
        }
    });
    // Stop the temporary stream
    const tracks = tempStream.getTracks();
    if (tracks)
        for (let t = 0; t < tracks.length; t++) tracks[t].stop();
    // Load the webcam
    Webcam.set({
        width: 320,
        height: 240,
        image_format: 'png',
        jpeg_quality: 90,
        flip_horiz: true,
        constraints: {
            video: true,
            deviceId: {exact: backdev}
        }
    });
    Webcam.attach('#my_camera');
}

WebRTC: Firefox unable to receive video as callee

I'm new to WebRTC and am trying to create a simple test page that allows for 1-to-1 video conferences.
So far what I have below works for the following:
Chrome -> Chrome
Firefox -> Chrome
It does not work in the following situations:
Firefox -> Firefox
Chrome -> Firefox
Basically, Firefox is unable to receive the remote video stream when it is the callee. The onaddstream handler is never called, but I do not know why.
The only difference I have found between Firefox and Chrome is that Firefox does not trickle ICE candidates; they are included in the initial offer/answer, so the onicecandidate handler is never called with a non-null candidate. Chrome, on the other hand, does trickle candidates, which are sent to the signaling server as separate messages. I'm not sure whether this is a potential problem or not.
I am testing with Firefox Nightly 34 and Chrome 36. The signaling server is hosted in my LAN and both browsers are running on the same computer.
The following code was based off of http://www.html5rocks.com/en/tutorials/webrtc/basics/.
var localVideo;
var remoteVideo;
var peerConnection;
var peerConnectionConfig = {'iceServers': [{'url': 'stun:stun.services.mozilla.com'}, {'url': 'stun:stun.l.google.com:19302'}]};
var isCaller;

navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
window.RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
window.RTCIceCandidate = window.RTCIceCandidate || window.mozRTCIceCandidate || window.webkitRTCIceCandidate;
window.RTCSessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;

function pageReady() {
    localVideo = document.getElementById('localVideo');
    remoteVideo = document.getElementById('remoteVideo');
    serverConnection = new WebSocket('ws://myserver:3434');
    serverConnection.onmessage = gotMessageFromServer;
}

function start(_isCaller) {
    isCaller = _isCaller;
    peerConnection = new RTCPeerConnection(peerConnectionConfig);
    peerConnection.onicecandidate = gotIceCandidate;
    peerConnection.onaddstream = gotRemoteStream;
    var constraints = {
        video: true,
        audio: true,
    };
    if (navigator.getUserMedia) {
        navigator.getUserMedia(constraints, getUserMediaSuccess, getUserMediaError);
    } else {
        alert('Your browser does not support getUserMedia API');
    }
}

function getUserMediaSuccess(stream) {
    localStream = stream;
    localVideo.src = window.URL.createObjectURL(stream);
    peerConnection.addStream(stream);
    if (isCaller) {
        peerConnection.createOffer(gotDescription, createOfferError);
    } else {
        peerConnection.createAnswer(gotDescription, createAnswerError);
    }
}

function gotMessageFromServer(message) {
    if (!peerConnection) start(false);
    var signal = JSON.parse(message.data);
    if (signal.sdp) {
        peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp));
    } else if (signal.ice) {
        peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice));
    }
}

function gotIceCandidate(event) {
    if (event.candidate != null) {
        serverConnection.send(JSON.stringify({'ice': event.candidate}));
    }
}

function gotDescription(description) {
    peerConnection.setLocalDescription(description);
    serverConnection.send(JSON.stringify({'sdp': description}));
}
function gotRemoteStream(event) {
    console.log("got remote stream");
    remoteVideo.src = window.URL.createObjectURL(event.stream);
}

// Error functions....
function getUserMediaError(error) {
    console.log(error);
}

function createOfferError(error) {
    console.log(error);
}

function createAnswerError(error) {
    console.log(error);
}
If it's useful, here is my signaling server. It's a very basic Node.js script that uses the ws WebSocket library to broadcast received messages to all connected clients, which in this case is just the one other client.
var WebSocketServer = require('ws').Server;
var wss = new WebSocketServer({port: 3434});

wss.broadcast = function(data) {
    for (var i in this.clients) {
        this.clients[i].send(data);
    }
};

wss.on('connection', function(ws) {
    ws.on('message', function(message) {
        console.log('received: %s', message);
        wss.broadcast(message);
    });
});
Any help is greatly appreciated!
Turns out I had a race condition between the call to setRemoteDescription() and createAnswer(). Firefox asks for permission to use the webcam on every reload of the page, whereas Chrome only asks on the first page load. This meant that when I went to call createAnswer() in Firefox, the video stream did not exist yet because the user had not yet allowed access to the webcam. The solution was to move the call to getUserMedia() to when the page is ready. This way, when an incoming call occurs, the user has already allowed access to the webcam and the video stream can be shared immediately. It's not perfect, but it allows my little test page to work, which is all I'm going for right now.
Here's the updated code if it helps anyone in the future:
var localVideo;
var remoteVideo;
var peerConnection;
var peerConnectionConfig = {'iceServers': [{'url': 'stun:stun.services.mozilla.com'}, {'url': 'stun:stun.l.google.com:19302'}]};

navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
window.RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
window.RTCIceCandidate = window.RTCIceCandidate || window.mozRTCIceCandidate || window.webkitRTCIceCandidate;
window.RTCSessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;

function pageReady() {
    localVideo = document.getElementById('localVideo');
    remoteVideo = document.getElementById('remoteVideo');
    serverConnection = new WebSocket('ws://sagan:3434');
    serverConnection.onmessage = gotMessageFromServer;
    var constraints = {
        video: true,
        audio: true,
    };
    if (navigator.getUserMedia) {
        navigator.getUserMedia(constraints, getUserMediaSuccess, getUserMediaError);
    } else {
        alert('Your browser does not support getUserMedia API');
    }
}

function getUserMediaSuccess(stream) {
    localStream = stream;
    localVideo.src = window.URL.createObjectURL(stream);
}

function start(isCaller) {
    peerConnection = new RTCPeerConnection(peerConnectionConfig);
    peerConnection.onicecandidate = gotIceCandidate;
    peerConnection.onaddstream = gotRemoteStream;
    peerConnection.addStream(localStream);
    if (isCaller) {
        peerConnection.createOffer(gotDescription, createOfferError);
    }
}

function gotMessageFromServer(message) {
    if (!peerConnection) start(false);
    var signal = JSON.parse(message.data);
    if (signal.sdp) {
        peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp), function() {
            peerConnection.createAnswer(gotDescription, createAnswerError);
        });
    } else if (signal.ice) {
        peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice));
    }
}

function gotIceCandidate(event) {
    if (event.candidate != null) {
        serverConnection.send(JSON.stringify({'ice': event.candidate}));
    }
}

function gotDescription(description) {
    console.log('got description');
    peerConnection.setLocalDescription(description, function() {
        serverConnection.send(JSON.stringify({'sdp': description}));
    }, function() { console.log('set description error'); });
}

function gotRemoteStream(event) {
    console.log("got remote stream");
    remoteVideo.src = window.URL.createObjectURL(event.stream);
}

// Error functions....
function getUserMediaError(error) {
    console.log(error);
}

function createOfferError(error) {
    console.log(error);
}

function createAnswerError(error) {
    console.log(error);
}

WebRTC select camera to send [duplicate]

I am studying a WebRTC application.
My reference is this software:
apprtc
https://code.google.com/p/webrtc/source/browse/trunk/samples/js/apprtc/
demo
https://apprtc.appspot.com/
My computer has a built-in video device, and apprtc uses it. However, I want to use a USB video camera instead. I am searching for a way to change the input video device, but I couldn't find any clue in the source files. Does anyone have information?
On Chrome:
chrome://settings/content/camera
chrome://settings/content/microphone
On Firefox: media.navigator.permission.disabled=false
Try this demo, which captures all audio/video input devices:
https://www.webrtc-experiment.com/demos/MediaStreamTrack.getSources.html
You can capture any "specific" device using the same API.
Edited March 01, 2014:
MediaStreamTrack.getSources(function (media_sources) {
    for (var i = 0; i < media_sources.length; i++) {
        var media_source = media_sources[i];
        var constraints = {};
        // if audio device
        if (media_source.kind == 'audio') {
            constraints.audio = {
                optional: [{
                    sourceId: media_source.id
                }]
            };
        }
        // if video device
        if (media_source.kind == 'video') {
            constraints.video = {
                optional: [{
                    sourceId: media_source.id
                }]
            };
        }
        // invoke getUserMedia to capture this device
        navigator.webkitGetUserMedia(constraints, function (stream) {
            console.log(stream.id, stream);
        }, console.error);
    }
});
Updated Sep 05, 2015:
Microsoft Edge, Chrome 44+, and Firefox 38+ all support the navigator.mediaDevices.enumerateDevices API.
Here is a reusable script that provides a cross-browser shim for all these media-source APIs. It works even in old Chrome (43 and older), and even on Android devices:
if (navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
    // Firefox 38+, Microsoft Edge, and Chrome 44+ support enumerateDevices
    navigator.enumerateDevices = function(callback) {
        navigator.mediaDevices.enumerateDevices().then(callback);
    };
}

function getAllAudioVideoDevices(successCallback, failureCallback) {
    if (!navigator.enumerateDevices && window.MediaStreamTrack && window.MediaStreamTrack.getSources) {
        navigator.enumerateDevices = window.MediaStreamTrack.getSources.bind(window.MediaStreamTrack);
    }
    if (!navigator.enumerateDevices && navigator.mediaDevices.enumerateDevices) {
        navigator.enumerateDevices = navigator.mediaDevices.enumerateDevices.bind(navigator);
    }
    if (!navigator.enumerateDevices) {
        failureCallback(null, 'Neither navigator.mediaDevices.enumerateDevices NOR MediaStreamTrack.getSources is available.');
        return;
    }
    var allMediaDevices = [];
    var allAudioDevices = [];
    var allVideoDevices = [];
    var audioInputDevices = [];
    var audioOutputDevices = [];
    var videoInputDevices = [];
    var videoOutputDevices = [];
    navigator.enumerateDevices(function(devices) {
        devices.forEach(function(_device) {
            var device = {};
            for (var d in _device) {
                device[d] = _device[d];
            }
            // make sure that we are not fetching duplicate devices
            var skip;
            allMediaDevices.forEach(function(d) {
                if (d.id === device.id) {
                    skip = true;
                }
            });
            if (skip) {
                return;
            }
            // if it is MediaStreamTrack.getSources
            if (device.kind === 'audio') {
                device.kind = 'audioinput';
            }
            if (device.kind === 'video') {
                device.kind = 'videoinput';
            }
            if (!device.deviceId) {
                device.deviceId = device.id;
            }
            if (!device.id) {
                device.id = device.deviceId;
            }
            if (!device.label) {
                device.label = 'Please invoke getUserMedia once.';
            }
            if (device.kind === 'audioinput' || device.kind === 'audio') {
                audioInputDevices.push(device);
            }
            if (device.kind === 'audiooutput') {
                audioOutputDevices.push(device);
            }
            if (device.kind === 'videoinput' || device.kind === 'video') {
                videoInputDevices.push(device);
            }
            if (device.kind.indexOf('audio') !== -1) {
                allAudioDevices.push(device);
            }
            if (device.kind.indexOf('video') !== -1) {
                allVideoDevices.push(device);
            }
            // there is no 'videooutput' in the spec,
            // so videoOutputDevices will always be empty
            allMediaDevices.push(device);
        });
        if (successCallback) {
            successCallback({
                allMediaDevices: allMediaDevices,
                allVideoDevices: allVideoDevices,
                allAudioDevices: allAudioDevices,
                videoInputDevices: videoInputDevices,
                audioInputDevices: audioInputDevices,
                audioOutputDevices: audioOutputDevices
            });
        }
    });
}
Here is how to use the above reusable cross-browser shim:
getAllAudioVideoDevices(function(result) {
    if (result.allMediaDevices.length) {
        console.debug('Number of audio/video devices available:', result.allMediaDevices.length);
    }
    if (result.allVideoDevices.length) {
        console.debug('Number of video devices available:', result.allVideoDevices.length);
    }
    if (result.allAudioDevices.length) {
        console.debug('Number of audio devices available:', result.allAudioDevices.length);
    }
    if (result.videoInputDevices.length) {
        console.debug('Number of video-input devices available:', result.videoInputDevices.length);
    }
    if (result.audioInputDevices.length) {
        console.debug('Number of audio-input devices available:', result.audioInputDevices.length);
    }
    if (result.audioOutputDevices.length) {
        console.debug('Number of audio-output devices available:', result.audioOutputDevices.length);
    }
    if (result.allMediaDevices.length && result.allMediaDevices[0].label === 'Please invoke getUserMedia once.') {
        console.warn('It seems you did not invoke getUserMedia before using these APIs.');
    }
    console.info('All audio input devices:');
    result.audioInputDevices.forEach(function(device) {
        console.log('Audio input device id:', device.id, 'Device label:', device.label);
    });
    console.info('All audio output devices:');
    result.audioOutputDevices.forEach(function(device) {
        console.log('Audio output device id:', device.id, 'Device label:', device.label);
    });
    console.info('All video input devices:');
    result.videoInputDevices.forEach(function(device) {
        console.log('Video input device id:', device.id, 'Device label:', device.label);
    });
}, function(error) {
    alert(error);
});
It turns out that Chrome does support the MediaStreamTrack API, which allows you to do this. In Firefox this API is still experimental. Here is the Chrome implementation:
if (typeof MediaStreamTrack === 'undefined') {
    alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
} else {
    MediaStreamTrack.getSources(onSourcesAcquired);
}

function onSourcesAcquired(sources) {
    for (var i = 0; i != sources.length; ++i) {
        var source = sources[i];
        // source.id -> DEVICE ID
        // source.label -> DEVICE NAME
        // source.kind = "audio" OR "video"
        // TODO: add this to some data structure of yours or a selection dialog
    }
}
....
And then when calling getUserMedia, specify the id in the constraints:
var constraints = {
    audio: {
        optional: [{sourceId: selected_audio_source_id}]
    },
    video: {
        optional: [{sourceId: selected_video_source_id}]
    }
};
navigator.getUserMedia(constraints, onSuccessCallback, onErrorCallback);
It sounds to me like you are looking for facingMode. You can check it out in this document:
http://www.w3.org/TR/2013/WD-mediacapture-streams-20130516/#idl-def-AllVideoCapabilities
I'm not sure how well it is supported yet, though.
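For reference, with the now-standard API the same device selection needs no shim at all. A minimal sketch (startWithCamera is a helper name made up for illustration; device labels only become visible after one successful getUserMedia call):
// Sketch: pick a specific camera (e.g. a USB camera) by matching its label.
// Labels are only populated after the user has granted camera permission once.
async function startWithCamera(labelFragment) {
    const devices = await navigator.mediaDevices.enumerateDevices();
    const cam = devices.find(d =>
        d.kind === 'videoinput' &&
        d.label.toLowerCase().includes(labelFragment.toLowerCase()));
    const stream = await navigator.mediaDevices.getUserMedia({
        video: cam ? {deviceId: {exact: cam.deviceId}} : true
    });
    document.querySelector('video').srcObject = stream;
}
// usage: startWithCamera('usb');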
