I've come across the SimpleWebRTC package. Trying to get it to work, but can't seem to get the remote stream coming through. I'm also using Pusher for signalling, rather than the default that comes with SimpleWebRTC.
I've set up my own connection:
// Custom SimpleWebRTC connection adapter backed by Pusher.
// NOTE(review): SimpleWebRTC waits for a 'connect' event from its connection
// before firing 'readyToCall' — Pusher's "connected" state change must be
// surfaced through on('connect', ...) for that to happen; confirm the event
// names your SimpleWebRTC version expects.
var myConnection = {
pusher: new Pusher('mypusherkey', { cluster: 'ap1' } ),
channel: null,
// Register a handler for a signalling event.
on: function (event, callback) {
this.pusher.bind (event, callback);
},
// Mirror socket.io's emit(eventName, data, ...).
// BUG FIX: SimpleWebRTC emits ('join', roomName) — TWO arguments — so the
// original `arguments.length == 1` test never matched and the channel was
// never subscribed; it also passed the raw Arguments object to trigger().
emit: function (event) {
if (event === "join") {
// Subscribe to the Pusher channel named after the room.
this.channel = this.pusher.subscribe(arguments[1]);
}
else if (this.channel) {
// Forward (eventName, data, ...) to the channel. `arguments` is not a
// real array, so spread it via apply.
// NOTE(review): Pusher client events must be named "client-*" on a
// private/presence channel — verify your event naming and channel type.
this.channel.trigger.apply(this.channel, arguments);
}
},
// SimpleWebRTC uses this as the local peer/session identifier.
getSessionId: function() {
return this.pusher.connection.socket_id;
},
disconnect: function() {
this.pusher.disconnect();
}
};
Then I have the SimpleWebRTC initialisation:
// SimpleWebRTC instance wired to the custom Pusher-backed `myConnection`
// adapter instead of the default signalling socket.
var webrtc = new SimpleWebRTC({
// the id/element dom element that will hold "our" video
localVideoEl: 'localVideo',
// the id/element dom element that will hold remote videos
remoteVideosEl: 'remotesVideos',
// immediately ask for camera access
autoRequestMedia: true,
// verbose logging of signalling and peer events to the console
debug: true,
// custom signalling adapter defined above
connection: myConnection
});
// we have to wait until it's ready
webrtc.on('readyToCall', function () {
console.log('ready to join');
// you can name it anything
webrtc.joinRoom('test-video-chat');
});
Doing a simple test between 2 PCs, it's not setting up the remote stream. In the dev console, apart from the initial event hook-ups, I'm not seeing any other activity happening — in particular, SimpleWebRTC's "readyToCall" event is not firing.
You probably need to emit a 'connect' event from your socket adapter to trigger this code.
Related
So, basically I'm trying to receive a call from provider to my app. For that purpose Quickblox gives us a listener to receive the upcoming calls onCallListener. So here is my code snippet that should work but doesn't.
// Who we call and what kind of session (video) we open.
const calleesIds = [4104]
const sessionType = QB.webrtc.CallType.VIDEO
const additionalOptions = {}
let callSession = QB.webrtc.createNewSession(calleesIds, sessionType, null, additionalOptions)
console.log(callSession, "SESSION")
// Local capture settings: mute and mirror our own preview, rendered into
// the element with id "myVideoStream".
const mediaParams = {
elemId: "myVideoStream",
audio: true,
video: true,
options: {
muted: true,
mirror: true,
},
}
// Fired when a remote peer calls us: grab local media, then accept the
// call and attach the local stream.
QB.webrtc.onCallListener = (session: any, extension: object) => {
callSession = session
console.log('asdasd')
session.getUserMedia(mediaParams, (error: object, stream: object) => {
if (error) {
console.error(error)
} else {
// Take the call and show our stream in the "videoStream" element.
session.accept(extension)
session.attachMediaStream("videoStream", stream)
}
})
}
P.S. I also integrated chat which works perfect!
Found the solution myself! Whenever you create a user and a dialog id, look that user up in the QuickBlox dashboard by the dialogId and check its settings: you will see that userId and providerId are the same, which is wrong. So put your userId in the userId field and save it. After that, your video-calling listeners will work fine!
P. S. also in the backend replace provider token with user token.
When sending a message using WebRTC sendDirectlyToAll, the message is never received the first time, but it is every time after that.
I've stripped the code down to a very simple state now, but it's still the same. Anyone got a clue about why this is happening?
Here is the code:
// SimpleWebRTC setup: video-only capture, started manually (no
// autoRequestMedia); the local preview autoplays, mirrored and muted.
var webrtc = new SimpleWebRTC({
localVideoEl: 'localVideo',
remoteVideosEl: 'remoteVideos',
autoRequestMedia: false,
media: {
video: true,
audio: false
},
localVideo: {
autoplay: true,
mirror: true,
muted: true
}
});
$("#chat-send-button").on("click", function (e) {
sendMessage();
});
// Read the chat input, broadcast it to all peers over the data channel,
// then clear the input field.
function sendMessage() {
console.log("sendMessage");
const chatMessage = $("#chat-message-input");
const payload = { "chatmessage": chatMessage.val() };
webrtc.sendDirectlyToAll("chat", "info", payload);
chatMessage.val("");
}
// Render incoming data-channel chat messages into the chat container.
webrtc.on("channelMessage", function (peer, channel, data) {
console.log(peer);
console.log(channel);
console.log("data", data);
const incoming = data.payload.chatmessage;
$("#chat-message-container").text(incoming);
});
You probably need for the WebRTC connection to be established before allowing the user to send a message - do you make use of the readyToCall event described in the documentation https://github.com/SimpleWebRTC/SimpleWebRTC#3-tell-it-to-join-a-room-when-ready
(a link to an editable runable code snippet might help)
I'm using Ember with Socket.IO, and I want a computed property that changes depending on whether the Socket.IO connection is connected or disconnected.
I am using ember-websockets and here is what I have tried:
// ember-websockets service handle and the server endpoint to connect to.
socketIOService: service('socket-io'),
socketRoute: 'http://localhost:8080/',
// Intended to reflect connection state, but the dependent key is the
// service itself, which never changes — so this recomputes only once.
connected: computed('socketIOService',
function()
{
console.log('changed!');
// NOTE(review): the parentheses below are misplaced — .socket.connected is
// applied to the route string, not to the socketFor() result.
//return (this.get('socketIOService').socketFor(this.get('socketRoute').socket.connected));
}),
startConnection()
{
// Touch the computed property so it is consumed at least once.
this.get('connected');
const socket = this.socketIOService.socketFor(this.get('socketRoute'));
socket.on('initialised', this.initialised, this);
},
So this doesn't work because, I'm guessing, the service doesn't change. I would like to be able to compute a value from the following...
this.socketIOService.socketFor(this.get('socketRoute'));
But I can't get the socketFor property in a computed property.
Looking at the readme, I think you can use the 'open' and 'close' events, without computed properties:
startConnection()
{
// Track connection state directly from the socket's lifecycle events
// rather than through a computed property.
const socket = this.socketIOService.socketFor(this.get('socketRoute'));
socket.on('open', () => this.set('connected', true));
socket.on('close', () => this.set('connected', false));
}
So I'm trying to capture web audio from a tab and pass it into another script that works with DOM elements on the page.
EXTENSION SCRIPT
In the background.js, I use the following script:
// background.js: capture the active tab's media, then try to push the
// stream to the content script.
// NOTE(review): chrome.tabs.sendMessage JSON-serializes its payload, so the
// MediaStream arrives in the tab as a plain Object (see the console output
// below) — which is exactly why this approach fails.
chrome.tabCapture.capture(constraints, function(stream) {
console.log("\ngot stream");
console.log(stream);
chrome.tabs.sendMessage(tabID, {
"message": "stream",
"stream": stream
});
});
The Developer Toolkit shows me that the created object is indeed a MediaStream object. (Which I want and appears to be working fine).
EXTENSION CONSOLE:
MediaStream {onremovetrack: null, onaddtrack: null, onended: null, ended: false, id: "c0jm4lYJus3XCwQgesUGT9lpyPQiWlGKHb7q"…}
CONTENT SCRIPT
I use a content script (injected), on the page itself to then pull the JSON serialized object back out:
// Content script: receive messages from the background page.
chrome.runtime.onMessage.addListener(function(request, sender, sendResponse) {
// Status notifications are handled separately from stream delivery.
if (request.message === "statusChanged") {
console.log("statusChanged");
return;
}
if (request.message !== "stream") {
return;
}
var thisStream = request.stream;
console.log(thisStream);
if (!thisStream) {
console.log("stream is null");
return;
}
loadStream(thisStream);
});
PAGE CONSOLE
Unfortunately, because of JSON serialization, the object type is lost:
Object {onremovetrack: null, onaddtrack: null, onended: null, ended: false, id: "c0jm4lYJus3XCwQgesUGT9lpyPQiWlGKHb7q"…}
I need the object recast as a MediaStream object and have tried the following things which all failed:
Attempt 1: FAILED
// Attempt 1: start with a real (empty) MediaStream, then overwrite it with
// the deserialized payload — but the payload is a plain Object after JSON
// serialization, so this just replaces the MediaStream rather than
// restoring one.
var stream = new webkitMediaStream;
function loadStream(thisStream) {
stream = thisStream;
}
Attempt 2: FAILED
// Attempt 2: feed the plain object to the constructor — presumably
// webkitMediaStream expects a MediaStream or track list, not a serialized
// copy, so this fails as well.
var stream;
function loadStream(thisStream) {
stream = new webkitMediaStream(thisStream);
}
Attempt 3: FAILED
// Attempt 3: Object.create's first argument becomes the new object's
// prototype (here the constructor *function* itself) and the second must be
// a property-descriptor map — neither produces a working MediaStream.
var stream;
function loadStream(thisStream) {
stream = Object.create(webkitMediaStream, thisStream);
}
NOTE:
The constructor for the MediaStream object IS webkitMediaStream.
I need either a better method for passing the object from the extension script (the only place the chrome.tab.capture() method works from) to the content script (the only place that has access to and can modify the DOM elements of the page),
OR
I need a way of recasting the JSON serialized object back into a fully functional MediaStream object.
Thanks in advance!
JRad the Bad
Extension messages are always JSON-serialized, so it's indeed obvious that you cannot send a MediaStream from the background page to the web page. The question is, do you really need to send the MediaStream from the background to the content script?
If you only need to, e.g. display the video, then you can use URL.createObjectURL to get a blob:-URL for the stream and assign it to video.src to see a video. The URL created by URL.createObjectURL can only be used by a page at the same origin, so you need to create the <video> tag in a chrome-extension:// page; either in a tab, or in a frame. If you want to do this in a frame, make sure that the page is listed in web_accessible_resources.
If you DO really need a MediaStream object of the tab in the tab, then RTCPeerConnection can be used to send the stream. This WebRTC API is normally used to exchange media streams between peers in a network, but it can also be used to send streams from one page to another page in another tab or browser.
Here's a full example. Visit any web page, and click on the extension button. Then the extension will insert a video in the page showing the current tab.
background.js
// Offer the captured stream to the tab over a loopback RTCPeerConnection:
// send our SDP offer through a long-lived port, apply the tab's answer.
function sendStreamToTab(tabId, stream) {
var peerConnection = new webkitRTCPeerConnection({iceServers:[]});
peerConnection.addStream(stream);
peerConnection.createOffer(function(offer) {
peerConnection.setLocalDescription(offer, function() {
// Use chrome.tabs.connect instead of sendMessage so the lifetime of
// the stream is tied to the lifetime of the consumer (tab).
var sdpPort = chrome.tabs.connect(tabId, {name: 'tabCaptureSDP'});
sdpPort.onDisconnect.addListener(function() {
stopStream(stream);
});
sdpPort.onMessage.addListener(function(sdp) {
peerConnection.setRemoteDescription(new RTCSessionDescription(sdp));
});
sdpPort.postMessage(peerConnection.localDescription);
});
});
}
// Stop every track of the given MediaStream, releasing the capture.
// BUG FIX: the original called this.getTracks() — `this` is not the stream
// here (and is undefined in strict mode), so stopping always threw.
function stopStream(stream) {
var tracks = stream.getTracks();
for (var i = 0; i < tracks.length; ++i) {
tracks[i].stop();
}
}
// Capture the current tab's audio+video, inject the content script, then
// hand the stream over via sendStreamToTab.
function captureTab(tabId) {
// Note: this method must be invoked by the user as defined
// in https://crbug.com/489258, e.g. chrome.browserAction.onClicked.
chrome.tabCapture.capture({
audio: true,
video: true,
audioConstraints: {
mandatory: {
chromeMediaSource: 'tab',
},
},
videoConstraints: {
mandatory: {
chromeMediaSource: 'tab',
},
},
}, function(stream) {
if (!stream) {
alert('Stream creation failed: ' + chrome.runtime.lastError.message);
// BUG FIX: bail out — the original fell through and called
// executeScript/sendStreamToTab with a null stream.
return;
}
chrome.tabs.executeScript(tabId, {file: 'contentscript.js'}, function() {
if (chrome.runtime.lastError) {
// Injection failed (e.g. chrome:// page): release the capture.
stopStream(stream);
alert('Script injection failed:' + chrome.runtime.lastError.message);
} else {
sendStreamToTab(tabId, stream);
}
});
});
}
// Entry point: tab capture must start from a user gesture, so hook the
// browser-action click.
chrome.browserAction.onClicked.addListener((tab) => captureTab(tab.id));
contentscript.js
// Insert a <video> at the top of the page showing the received stream.
// BUG FIX (modernization): assigning URL.createObjectURL(stream) to
// video.src no longer works for MediaStreams in current Chrome; use
// srcObject, keeping the blob-URL path only as a legacy fallback.
function onReceiveStream(stream) {
// Just to show that we can receive streams:
var video = document.createElement('video');
video.style.border = '1px solid black';
if ('srcObject' in video) {
video.srcObject = stream;
} else {
video.src = URL.createObjectURL(stream);
}
document.body.insertBefore(video, document.body.firstChild);
}
// Answer the background page's SDP offer; the remote stream arrives via
// onaddstream and is rendered by onReceiveStream.
function onReceiveOfferSDP(sdp, sendResponse) {
var peerConnection = new webkitRTCPeerConnection({iceServers:[]});
peerConnection.onaddstream = function(event) {
onReceiveStream(event.stream);
};
peerConnection.setRemoteDescription(new RTCSessionDescription(sdp), function() {
peerConnection.createAnswer(function(answer) {
peerConnection.setLocalDescription(answer);
// Send our answer SDP back over the port.
sendResponse(peerConnection.localDescription);
});
});
}
// Run once to prevent the message from being handled twice when
// executeScript is called multiple times.
if (!window.hasRun) {
window.hasRun = 1;
chrome.runtime.onConnect.addListener(function(port) {
if (port.name === 'tabCaptureSDP') {
port.onMessage.addListener(function(remoteDescription) {
onReceiveOfferSDP(remoteDescription, function(sdp) {
port.postMessage(sdp);
});
});
}
});
}
manifest.json
{
"name": "tabCapture to tab",
"version": "1",
"manifest_version": 2,
"background": {
"scripts": ["background.js"],
"persistent": false
},
"browser_action": {
"default_title": "Capture tab"
},
"permissions": [
"activeTab",
"tabCapture"
]
}
I am trying to set the getUserMedia video constraints — for example min/max frame rates and resolutions — in my peer.js WebRTC application, which is a simple peer-to-peer chat application. I have been trying to integrate this into my application, but it seems to break it. Any help would be greatly appreciated; other online tutorials look different from my app's setup. Down at function step1 is where I have been trying to set the constraints — it just doesn't show the video anymore. Is this the correct place?
Also will these constraints work on a video-file playing instead of the webcam?. I am using the Google chrome flags that plays a video file instead of a camera.
// Pick whichever vendor-prefixed callback-style getUserMedia this browser
// provides. NOTE(review): these prefixed variants are the legacy API;
// modern code would use navigator.mediaDevices.getUserMedia.
navigator.getWebcam = (navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
// PeerJS object ** FOR PRODUCTION, GET YOUR OWN KEY at http://peerjs.com/peerserver **
// PeerJS object ** FOR PRODUCTION, GET YOUR OWN KEY at http://peerjs.com/peerserver **
var peer = new Peer({
key: 'XXXXXXXXXXXXXXXX',
// Verbose logging (3 = log everything) while debugging call setup.
debug: 3,
config: {
// ICE servers: two public Google STUN servers plus a TURN relay for
// peers behind restrictive NATs.
// NOTE(review): the standard RTCConfiguration field is `urls`; `url` is
// the old non-standard spelling — confirm what your PeerJS version expects.
'iceServers': [{
url: 'stun:stun.l.google.com:19302'
}, {
url: 'stun:stun1.l.google.com:19302'
}, {
url: 'turn:numb.viagenie.ca',
username: "XXXXXXXXXXXXXXXXXXXXXXXXX",
credential: "XXXXXXXXXXXXXXXXX"
}]
}
});
// On open, set the peer id so when peer is on we display our peer id as text
peer.on('open', function(){
$('#my-id').text(peer.id);
});
peer.on('call', function(call) {
// Answer automatically for demo
call.answer(window.localStream);
step3(call);
});
// Click handlers setup
$(function() {
$('#make-call').click(function() {
//Initiate a call!
var call = peer.call($('#callto-id').val(), window.localStream);
step3(call);
});
$('end-call').click(function() {
window.existingCall.close();
step2();
});
// Retry if getUserMedia fails
$('#step1-retry').click(function() {
$('#step1-error').hide();
step();
});
// Get things started
step1();
});
// Step 1: request camera+mic via the legacy callback-style getUserMedia
// alias above, preview it locally, and keep the stream for calls.
// NOTE(review): URL.createObjectURL(stream) and callback getUserMedia are
// deprecated; modern code would use mediaDevices.getUserMedia and
// video.srcObject — confirm browser targets before relying on this.
function step1() {
// Get audio/video stream; custom constraints (resolution, frame rate)
// would replace this first argument object.
navigator.getWebcam({audio: true, video: true}, function(stream){
// Display the video stream in the video object
$('#my-video').prop('src', URL.createObjectURL(stream));
// Keep the stream so peer.call()/call.answer() can attach it later.
window.localStream = stream;
step2();
}, function(){ $('#step1-error').show(); });
}
// Adjust the UI: hide the call screens, show the "ready to call" screen.
function step2() {
// BUG FIX: $('#step1', '#step3') is jQuery's (selector, context) form and
// matches nothing; a multiple selector is one comma-separated string.
$('#step1, #step3').hide();
$('#step2').show();
}
// Step 3: switch to the in-call UI and render the remote stream.
function step3(call) {
// Hang up on an existing call if present
if (window.existingCall) {
window.existingCall.close();
}
// Wait for stream on the call, then setup peer video
call.on('stream', function(stream) {
$('#their-video').prop('src', URL.createObjectURL(stream));
});
// BUG FIX: remember the active call — the original never set
// window.existingCall, so the end-call handler would crash.
window.existingCall = call;
// BUG FIX: comma-separated multiple selector, not (selector, context).
$('#step1, #step2').hide();
$('#step3').show();
}
Your JavaScript looks invalid. You can't declare a var inside a function argument list. Did you paste wrong? Try:
// Chrome-style (non-standard) mandatory constraints: require at least
// 720p video, no audio.
var constraints = {
audio: false,
video: { mandatory: { minWidth: 1280, minHeight: 720 } }
};
// BUG FIX: the original snippet was missing the closing parenthesis of the
// getWebcam call; `/* etc. */` stands in for the success-callback body.
navigator.getWebcam(constraints, function(stream){ /* etc. */ });
Now it's valid JavaScript at least. I'm not familiar with PeerJS, but the constraints you're using look like the Chrome ones, so if you're on Chrome then hopefully they'll work, unless PeerJS does it differently for some reason.
Your subject says "WebRTC Camera constraints" so I should mention that the Chrome constraints are non-standard. See this answer for an explanation.