I am trying to set the getUserMedia video constraints (min/max frame rates, resolutions, etc.) in my PeerJS WebRTC application, which is a simple peer-to-peer chat application. I have been trying to integrate the constraints, but they seem to break the app, and other online tutorials look different from my setup. Any help would be greatly appreciated. Function step1 below is where I have been trying to set the constraints; the video just doesn't show anymore. Is this the correct place?
Also, will these constraints work on a video file playing instead of the webcam? I am using the Google Chrome flag that plays a video file in place of a camera.
navigator.getWebcam = (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
// PeerJS object ** FOR PRODUCTION, GET YOUR OWN KEY at http://peerjs.com/peerserver **
var peer = new Peer({
key: 'XXXXXXXXXXXXXXXX',
debug: 3,
config: {
'iceServers': [{
url: 'stun:stun.l.google.com:19302'
}, {
url: 'stun:stun1.l.google.com:19302'
}, {
url: 'turn:numb.viagenie.ca',
username: "XXXXXXXXXXXXXXXXXXXXXXXXX",
credential: "XXXXXXXXXXXXXXXXX"
}]
}
});
// On open, display our peer ID as text
peer.on('open', function(){
$('#my-id').text(peer.id);
});
peer.on('call', function(call) {
// Answer automatically for demo
call.answer(window.localStream);
step3(call);
});
// Click handlers setup
$(function() {
$('#make-call').click(function() {
//Initiate a call!
var call = peer.call($('#callto-id').val(), window.localStream);
step3(call);
});
$('#end-call').click(function() {
window.existingCall.close();
step2();
});
// Retry if getUserMedia fails
$('#step1-retry').click(function() {
$('#step1-error').hide();
step1();
});
// Get things started
step1();
});
function step1() {
//Get audio/video stream
navigator.getWebcam({audio: true, video: true}, function(stream){
// Display the video stream in the video object
$('#my-video').prop('src', URL.createObjectURL(stream));
// Keep a global reference to the stream for making/answering calls
window.localStream = stream;
step2();
}, function(){ $('#step1-error').show(); });
}
function step2() { //Adjust the UI
$('#step1, #step3').hide();
$('#step2').show();
}
function step3(call) {
// Hang up on an existing call if present
if (window.existingCall) {
window.existingCall.close();
}
// Wait for stream on the call, then setup peer video
call.on('stream', function(stream) {
$('#their-video').prop('src', URL.createObjectURL(stream));
});
$('#step1, #step2').hide();
$('#step3').show();
}
Your JavaScript looks invalid: you can't declare a var inside a function's argument list. Did you paste it incorrectly? Try:
var constraints = {
audio: false,
video: { mandatory: { minWidth: 1280, minHeight: 720 } }
};
navigator.getWebcam(constraints, function(stream){ /* etc. */ }, function(){ /* error handler */ });
Now it's valid JavaScript at least. I'm not familiar with PeerJS, but the constraints you're using look like the Chrome ones, so if you're on Chrome then hopefully they'll work, unless PeerJS does it differently for some reason.
Since your subject says "WebRTC Camera constraints", I should mention that the Chrome constraints are non-standard. See this answer for an explanation.
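For reference, here's a minimal sketch of what those Chrome-style (mandatory) constraints might look like plugged into the asker's step1. The min/max values below are illustrative assumptions, not values from the question:
function step1() {
    // Chrome-specific "mandatory" constraints (non-standard syntax):
    // example resolution and frame-rate bounds, chosen arbitrarily.
    var constraints = {
        audio: true,
        video: {
            mandatory: {
                minWidth: 640,
                maxWidth: 1280,
                minHeight: 360,
                maxHeight: 720,
                minFrameRate: 15,
                maxFrameRate: 30
            }
        }
    };
    navigator.getWebcam(constraints, function(stream) {
        $('#my-video').prop('src', URL.createObjectURL(stream));
        window.localStream = stream;
        step2();
    }, function() {
        $('#step1-error').show();
    });
}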
When sending a message using WebRTC's sendDirectlyToAll, the message is never received the first time, but it is every time after that.
I've stripped the code down to a very simple state, but the behavior is still the same. Anyone got a clue about why this is happening?
Here is the code:
var webrtc = new SimpleWebRTC({
localVideoEl: 'localVideo',
remoteVideosEl: 'remoteVideos',
autoRequestMedia: false,
media: {
video: true,
audio: false
},
localVideo: {
autoplay: true,
mirror: true,
muted: true
}
});
$("#chat-send-button").on("click", function (e) {
sendMessage();
});
function sendMessage() {
console.log("sendMessage");
const chatMessage = $("#chat-message-input");
webrtc.sendDirectlyToAll(
"chat",
"info", {
"chatmessage": chatMessage.val()
}
)
chatMessage.val("");
}
webrtc.on("channelMessage", function (peer, channel, data) {
console.log(peer);
console.log(channel);
console.log("data", data);
$("#chat-message-container").text(data.payload.chatmessage);
});
You probably need the WebRTC connection to be established before allowing the user to send a message. Do you make use of the readyToCall event described in the documentation? https://github.com/SimpleWebRTC/SimpleWebRTC#3-tell-it-to-join-a-room-when-ready
(A link to an editable, runnable code snippet might help.)
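As a minimal sketch of that idea (untested): join the room once readyToCall fires, and keep the send button disabled until a data channel is actually open. readyToCall and joinRoom are from the SimpleWebRTC docs; the channelOpen event name is my assumption about the library's channel events:
// Block sends until at least one data channel is open.
$("#chat-send-button").prop("disabled", true);

webrtc.on("readyToCall", function () {
    webrtc.joinRoom("my-chat-room"); // hypothetical room name
});

// Assumption: SimpleWebRTC surfaces the RTCDataChannel 'open' event as 'channelOpen'.
webrtc.on("channelOpen", function () {
    $("#chat-send-button").prop("disabled", false);
});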
I have downloaded this library and put it into the "lib" folder of my project.
Then I add it to the controller of my view, where I want to call it on a button click, as described in the documentation:
sap.ui.define([
"sap/ui/core/mvc/Controller",
"Test_ScreenRecordingTest_ScreenRecording/lib/RecordRTC"
], function(Controller, RecordRTC) {
"use strict";
return Controller.extend("Test_ScreenRecordingTest_ScreenRecording.controller.View1", {
onStartRecording: function(){
debugger;
var mediaConstraints = { video: true, audio: true };
navigator.mediaDevices.getUserMedia(mediaConstraints).then(this.successCallback.bind(this)).catch(this.errorCallback);
},
successCallback: function(stream) {
// RecordRTC usage goes here
var options = {
mimeType: 'video/webm', // or video/webm\;codecs=h264 or video/webm\;codecs=vp9
audioBitsPerSecond: 128000,
videoBitsPerSecond: 128000,
bitsPerSecond: 128000 // if this line is provided, skip above two
};
//jQuery.sap.require("Test_ScreenRecordingTest_ScreenRecording.lib.RecordRTC");
this.recordRTC = RecordRTC(stream, options);
this.recordRTC.startRecording();
},
errorCallback: function(error) {
console.log(error)
debugger;
},
onStopRecording: function(){
var that = this; // 'this' is not the controller inside the callbacks below
this.recordRTC.stopRecording(function (audioVideoWebMURL) {
video.src = audioVideoWebMURL; // assumes a global 'video' element
var recordedBlob = that.recordRTC.getBlob();
debugger;
that.recordRTC.getDataURL(function(dataURL) {
debugger;
});
});
}
});
});
If I don't use the RecordRTC variable, I can see it in the debugger. If I use it, it appears as "undefined", so I can never call it.
Could you please help?
EDIT 09-Feb-2018: Solved by declaring a new variable in the Controller extension:
return Controller.extend("Test_ScreenRecordingTest_ScreenRecording.controller.View1", {
//this line solved the issue
RecordRTC: RecordRTC,
onStartRecording: function(){
debugger;
var mediaConstraints = { video: true, audio: true };
navigator.mediaDevices.getUserMedia(mediaConstraints).then(this.successCallback.bind(this)).catch(this.errorCallback);
},
Thank you in advance
The dependency string in your code looks strange:
"Test_ScreenRecordingTest_ScreenRecording/lib/RecordRTC".
Can it be a typo?
Anyway, the dependency path should be like this: "<app ID from manifest.json>/lib/RecordRTC".
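As a hypothetical illustration (the app ID and namespace below are invented, not taken from the question): if manifest.json declares "sap.app": { "id": "my.company.recorder" } and RecordRTC.js sits under webapp/lib, the dependency is declared with the ID's dots turned into slashes:
sap.ui.define([
    "sap/ui/core/mvc/Controller",
    "my/company/recorder/lib/RecordRTC" // <app ID with slashes>/lib/RecordRTC
], function (Controller, RecordRTC) {
    "use strict";
    return Controller.extend("my.company.recorder.controller.View1", {
        onStartRecording: function () {
            // RecordRTC is the module export and can be used here directly
        }
    });
});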
I've come across the SimpleWebRTC package. I'm trying to get it to work, but I can't seem to get the remote stream coming through. I'm also using Pusher for signalling rather than the default that comes with SimpleWebRTC.
I've set up my own connection:
var myConnection = {
pusher: new Pusher('mypusherkey', { cluster: 'ap1' } ),
channel: null,
on: function (event, callback) {
this.pusher.bind (event, callback);
},
emit: function () {
if (arguments.length == 1) {
if (arguments[0] === "join") {
this.channel = this.pusher.subscribe(arguments[1]);
}
}
else
this.channel.trigger(arguments);
},
getSessionId: function() {
return this.pusher.connection.socket_id;
},
disconnect: function() {
this.pusher.disconnect();
}
};
Then I have the SimpleWebRTC initialisation:
var webrtc = new SimpleWebRTC({
// the id/element dom element that will hold "our" video
localVideoEl: 'localVideo',
// the id/element dom element that will hold remote videos
remoteVideosEl: 'remotesVideos',
// immediately ask for camera access
autoRequestMedia: true,
debug: true,
connection: myConnection
});
// we have to wait until it's ready
webrtc.on('readyToCall', function () {
console.log('ready to join');
// you can name it anything
webrtc.joinRoom('test-video-chat');
});
Doing a simple test between two PCs, it's not setting up the remote stream. In the dev console, apart from the initial event hook-ups, I'm not seeing any other activity; in particular, SimpleWebRTC's "readyToCall" event is not firing.
You probably need to emit a 'connect' signal from your socket adapter to trigger this code.
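For instance, here is a minimal sketch (untested) of how the custom adapter could surface that event, assuming SimpleWebRTC waits for 'connect' from its connection object; pusher.connection.bind('connected', ...) is from the pusher-js docs:
var myConnection = {
    pusher: new Pusher('mypusherkey', { cluster: 'ap1' }),
    channel: null,
    on: function (event, callback) {
        if (event === 'connect') {
            // Map the 'connect' event SimpleWebRTC expects onto
            // Pusher's own connection lifecycle.
            this.pusher.connection.bind('connected', callback);
            return;
        }
        this.pusher.bind(event, callback);
    }
    // emit, getSessionId and disconnect as in the original adapter ...
};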
So I'm trying to capture web audio from a tab and pass it into another script that works with DOM elements on the page.
EXTENSION SCRIPT
In background.js, I use the following script (constraints and tabID are defined elsewhere):
chrome.tabCapture.capture(constraints, function(stream) {
console.log("\ngot stream");
console.log(stream);
chrome.tabs.sendMessage(tabID, {
"message": "stream",
"stream": stream
});
});
The developer tools show me that the created object is indeed a MediaStream object (which is what I want, and it appears to be working fine).
EXTENSION CONSOLE:
MediaStream {onremovetrack: null, onaddtrack: null, onended: null, ended: false, id: "c0jm4lYJus3XCwQgesUGT9lpyPQiWlGKHb7q"…}
CONTENT SCRIPT
I use an injected content script on the page itself to then pull the JSON-serialized object back out:
chrome.runtime.onMessage.addListener(function(request, sender, sendResponse) {
if (request.message === "stream") {
var thisStream = request.stream;
console.log(thisStream);
if (!thisStream) {
console.log("stream is null");
return;
}
loadStream(thisStream);
}
else if (request.message === "statusChanged") {
console.log("statusChanged");
}
});
PAGE CONSOLE
Unfortunately, because of JSON serialization, the object type is lost:
Object {onremovetrack: null, onaddtrack: null, onended: null, ended: false, id: "c0jm4lYJus3XCwQgesUGT9lpyPQiWlGKHb7q"…}
I need the object recast as a MediaStream object and have tried the following things which all failed:
Attempt 1: FAILED
var stream = new webkitMediaStream;
function loadStream(thisStream) {
stream = thisStream;
}
Attempt 2: FAILED
var stream;
function loadStream(thisStream) {
stream = new webkitMediaStream(thisStream);
}
Attempt 3: FAILED
var stream;
function loadStream(thisStream) {
stream = Object.create(webkitMediaStream, thisStream);
}
NOTE:
The constructor for the MediaStream object IS webkitMediaStream.
I need either a better method for passing the object from the extension script (the only place the chrome.tabCapture.capture() method works from) to the content script (the only place that has access to and can modify the DOM elements of the page),
OR
I need a way of recasting the JSON serialized object back into a fully functional MediaStream object.
Thanks in advance!
JRad the Bad
Extension messages are always JSON-serialized, so it's indeed obvious that you cannot send a MediaStream from the background page to the web page. The question is, do you really need to send the MediaStream from the background to the content script?
If you only need to display the video, for example, then you can use URL.createObjectURL to get a blob: URL for the stream and assign it to video.src to see the video. The URL created by URL.createObjectURL can only be used by a page at the same origin, so you need to create the <video> tag in a chrome-extension:// page: either in a tab, or in a frame. If you want to do this in a frame, make sure that the page is listed in web_accessible_resources.
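A minimal sketch of that first option (the viewer.html page name and the hash-passing scheme are my own assumptions, not from the answer): create the blob: URL where the stream lives and hand the URL string to an extension page that hosts the <video>:
// background.js (MV2): capture the active tab after a user gesture,
// then open an extension page to display it.
chrome.browserAction.onClicked.addListener(function() {
    chrome.tabCapture.capture({audio: false, video: true}, function(stream) {
        var url = URL.createObjectURL(stream); // same origin as extension pages
        chrome.tabs.create({url: 'viewer.html#' + encodeURIComponent(url)});
    });
});

// viewer.html would contain <video autoplay></video> and a script like:
document.querySelector('video').src = decodeURIComponent(location.hash.slice(1));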
If you DO really need a MediaStream object of the tab inside the tab itself, then RTCPeerConnection can be used to send the stream. This WebRTC API is normally used to exchange media streams between peers in a network, but it can also be used to send streams from one page to another page in another tab or browser.
Here's a full example. Visit any web page, and click on the extension button. Then the extension will insert a video in the page showing the current tab.
background.js
function sendStreamToTab(tabId, stream) {
var pc = new webkitRTCPeerConnection({iceServers:[]});
pc.addStream(stream);
pc.createOffer(function(offer) {
pc.setLocalDescription(offer, function() {
// Use chrome.tabs.connect instead of sendMessage
// to make sure that the lifetime of the stream
// is tied to the lifetime of the consumer (tab).
var port = chrome.tabs.connect(tabId, {name: 'tabCaptureSDP'});
port.onDisconnect.addListener(function() {
stopStream(stream);
});
port.onMessage.addListener(function(sdp) {
pc.setRemoteDescription(new RTCSessionDescription(sdp));
});
port.postMessage(pc.localDescription);
});
});
}
function stopStream(stream) {
// Stop all tracks so the capture ends and Chrome's recording indicator goes away.
var tracks = stream.getTracks();
for (var i = 0; i < tracks.length; ++i) {
tracks[i].stop();
}
}
function captureTab(tabId) {
// Note: this method must be invoked by the user as defined
// in https://crbug.com/489258, e.g. chrome.browserAction.onClicked.
chrome.tabCapture.capture({
audio: true,
video: true,
audioConstraints: {
mandatory: {
chromeMediaSource: 'tab',
},
},
videoConstraints: {
mandatory: {
chromeMediaSource: 'tab',
},
},
}, function(stream) {
if (!stream) {
alert('Stream creation failed: ' + chrome.runtime.lastError.message);
return;
}
chrome.tabs.executeScript(tabId, {file: 'contentscript.js'}, function() {
if (chrome.runtime.lastError) {
stopStream(stream);
alert('Script injection failed:' + chrome.runtime.lastError.message);
} else {
sendStreamToTab(tabId, stream);
}
});
});
}
chrome.browserAction.onClicked.addListener(function(tab) {
captureTab(tab.id);
});
contentscript.js
function onReceiveStream(stream) {
// Just to show that we can receive streams:
var video = document.createElement('video');
video.style.border = '1px solid black';
video.src = URL.createObjectURL(stream);
document.body.insertBefore(video, document.body.firstChild);
}
function onReceiveOfferSDP(sdp, sendResponse) {
var pc = new webkitRTCPeerConnection({iceServers:[]});
pc.onaddstream = function(event) {
onReceiveStream(event.stream);
};
pc.setRemoteDescription(new RTCSessionDescription(sdp), function() {
pc.createAnswer(function(answer) {
pc.setLocalDescription(answer);
sendResponse(pc.localDescription);
});
});
}
// Run once to prevent the message from being handled twice when
// executeScript is called multiple times.
if (!window.hasRun) {
window.hasRun = 1;
chrome.runtime.onConnect.addListener(function(port) {
if (port.name === 'tabCaptureSDP') {
port.onMessage.addListener(function(remoteDescription) {
onReceiveOfferSDP(remoteDescription, function(sdp) {
port.postMessage(sdp);
});
});
}
});
}
manifest.json
{
"name": "tabCapture to tab",
"version": "1",
"manifest_version": 2,
"background": {
"scripts": ["background.js"],
"persistent": false
},
"browser_action": {
"default_title": "Capture tab"
},
"permissions": [
"activeTab",
"tabCapture"
]
}
I found a very good project on GitHub, but I cannot understand all of it.
I installed a signaling server (socket.io) and a TURN server. I'm trying to make an app for iOS, and I'm using code like this:
<video height="300" id="localVideo"></video>
<video id="remotesVideos"></video>
<script type="text/javascript">
document.addEventListener("deviceready", onDeviceReady, false);
function onDeviceReady() {
var phonertc = cordova.require('com.dooble.phonertc.PhoneRTC');
var socket = io('http://mysait.com:3000');
socket.on("connect", function() {
socket.emit("join", "myroom");
socket.on("message", function(message) {
console.log("GOT MESSAGE:");
message.payload.sdp = message.payload.sdp.replace(/(\r\n|\n|\r)/gm,"");
// when a message is received from the signaling server,
// notify the PhoneRTC plugin.
phonertc.receiveMessage(message.payload);
});
});
socket.on('connect',function() {
alert ('is connect!');
});
phonertc.call({
isInitator: true, // Caller or callee?
turn: {
host: 'turn:mysait.com:3478',
username: 'test',
password: 'test'
},
sendMessageCallback: function (data) {
// PhoneRTC wants to send a message to your target, use
// your signaling server here to send the message.
console.log(data);
socket.emit("message", data);
},
answerCallback: function () {
alert('Callee answered!');
},
disconnectCallback: function () {
alert('Call disconnected!');
},
video: { // Remove this property if you don't want video chat
localVideo: document.getElementById('localVideo'),
remoteVideo: document.getElementById('remoteVideo')
}
});
}
</script>
I get an alert when the connection to the server is made, but I don't see local or remote video. Can someone suggest what could be wrong? Can you send me an example of the client side? I can't find it here.
It looks like this is a very old version of the demo app. The correct link is:
https://github.com/alongubkin/phonertc/tree/master/demo