Can't request audio/video in IE11 - javascript

I have a problem with code I use to request audio/video. It works in Chrome and Mozilla Firefox, but in IE11 it doesn't work.
I've already read this one:
https://github.com/addyosmani/getUserMedia.js
and applied most of the changes to make it work in IE11, but I don't get the expected results. What is wrong in my code that is preventing me from getting video in that browser?
function initiateMedia(obj){
    // constraint, mediactrl and mediaType are shared globals used later by requestMedia()
    constraint = "";
    if(obj.video == "true" && obj.audio == "true"){
        constraint = {
            audio: true,
            video: {
                width: {max: 380},
                height: {max: 260}
            }
        };
        //Set Video element
        $("#mediacontainer").html('<video id="io-videocam" muted="muted"></video>');
        mediactrl = document.querySelector('video');
        mediaType = "video/webm";
        return "Audio and Video input found! You can now start recording!";
    }else if(obj.video == "null" && obj.audio == "true"){ //audio input only
        constraint = {audio: true, video: false};
        //Set Audio element
        $("#mediacontainer").html('<audio id="io-audio" controls muted="muted"></audio>');
        mediactrl = document.querySelector('audio');
        mediaType = "audio/mpeg";
        return "Audio input found! You can now start recording!";
    }else if(obj.video == "null" && obj.audio == "null"){ //no audio or video input
        console.log("No Audio or Video input found");
        return false;
    }
}
function listUserMedia(){
    if (!navigator.mediaDevices || !navigator.mediaDevices.enumerateDevices) {
        console.log("enumerateDevices() not supported.");
        return;
    }
    var media = navigator.mediaDevices.enumerateDevices();
    media.then(function(devices) {
        var useVideo = null;
        var useAudio = null;
        devices.forEach(function(device) {
            //console.log(device.kind + ": " + device.label + " id = " + device.deviceId);
            if(device.kind == "audioinput"){
                useAudio = "true";
            }
            if(device.kind == "videoinput"){
                useVideo = "true";
            }
        });
        if(typeof(Storage) !== "undefined") {
            sessionStorage.setItem('useAudio', useAudio);
            sessionStorage.setItem('useVideo', useVideo);
        } else {
            console.log("Unable to set SessionStorage");
        }
    });
    media.catch(function(err) {
        console.log(err.name + ": " + err.message);
    });
    if(typeof(Storage) !== "undefined") {
        // note: enumerateDevices() resolves asynchronously, so this read can
        // happen before the promise above has stored the values
        return {"audio": sessionStorage.getItem('useAudio'), "video": sessionStorage.getItem('useVideo')};
    } else {
        console.log("Unable to get SessionStorage");
    }
    return null;
}
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
function requestMedia(string){
    if(typeof MediaRecorder === 'undefined' || !navigator.getUserMedia){
        alert('MediaRecorder/getUserMedia not supported on your browser, kindly use Firefox 36+');
    }else{
        //initiateMedia(listUserMedia());
        //navigator.getUserMedia(constraint, startRecording, errorCallback);
        var media = navigator.mediaDevices.getUserMedia(constraint);
        media.then(function(mediaStream){
            stream = mediaStream;
            var url = window.URL || window.webkitURL;
            if(mediaType != null){
                if(mediaType == "video/webm"){
                    mediactrl.src = url ? url.createObjectURL(stream) : stream;
                    mediactrl.onloadedmetadata = function(e) {
                        mediactrl.play();
                    };
                }else{
                    mediactrl.src = url ? url.createObjectURL(stream) : stream;
                    mediactrl.onloadedmetadata = function(e) {
                        mediactrl.play();
                    };
                }
                $("#requestmedia").addClass("btndisabled faded-effect");
                $("#videocontrol").removeClass("btndisabled").prop("disabled", false);
                $("#mediaerror").html(string).css({"display": "block", "color": "#003666"});
            }
        });
        media.catch(function(err){
            errorCallback(err);
        });
    }
}
function startRecording(stream){
    /*
    if (typeof MediaRecorder.isTypeSupported == 'function'){
        if (MediaRecorder.isTypeSupported('video/webm;codecs=vp9')) {
            var options = {mimeType: 'video/webm;codecs=vp9'};
        } else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
            var options = {mimeType: 'video/webm;codecs=vp8'};
        }
        mediaRecorder = new MediaRecorder(stream, options);
    }else{
        mediaRecorder = new MediaRecorder(stream); //Default codec for browser
    }
    */
    mediaRecorder = new MediaRecorder(stream);
    mediaRecorder.start(10);
    mediaRecorder.ondataavailable = function(e){
        chunks.push(e.data);
    };
    mediaRecorder.onerror = function(e){
        console.log("Error: " + e);
    };
    mediaRecorder.onstop = function(){
        var blob = new Blob(chunks, {type: mediaType});
        chunks = [];
        var mediaURL = (window.webkitURL || window.URL).createObjectURL(blob);
        sendFileToServer(blob);
        //console.log(mediaURL);
    };
}
function errorCallback(error){
    console.log('navigator.getUserMedia error: ', error);
}
function releaseDevice(){
    if(stream && stream.stop){
        //stream.stop();
        if(mediaType == "video/webm"){
            stream.getVideoTracks()[0].stop();
            stream.getAudioTracks()[0].stop();
        }else{
            stream.getAudioTracks()[0].stop();
        }
    }
    stream = null;
}

Related

Recording voice and converting speech to text at the same time

I want to use the Web Speech API for speech recognition and record the user's voice on Android devices at the same time (I mean the user holds a button, and his/her voice is recorded and transcribed to text at the same time).
This works perfectly on Windows, but on Android it just returns the error:
no-speech
It seems like defining the MediaRecorder blocks the microphone for the Web Speech API on Android!
How can I fix this?
If I remove the line that is responsible for recording, speech recognition works again:
new MediaRecorder(stream); // adding this line ruins the speech recognition
Here is the code in action. I didn't remove that line, in order to show that the code won't work on Android devices.
Note: this code should be tested on an Android device; it works fine on desktop.
CodePen: https://codepen.io/pixy-dixy/pen/GRddgYL?editors=1010
Demo here on SO:
let audioChunks = [];
let rec;
let stopRecognize;
const output = document.getElementById('output');

async function Recognize() {
    console.log('Recognize');
    let recognitionAllowed = true;
    stopRecognize = function() {
        if(recognitionAllowed) {
            recognition.stop();
            recognitionAllowed = false;
        }
    }
    // read from window so the local var declarations don't shadow the globals
    var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    var SpeechGrammarList = window.SpeechGrammarList || window.webkitSpeechGrammarList;
    var SpeechRecognitionEvent = window.SpeechRecognitionEvent || window.webkitSpeechRecognitionEvent;
    var recognition = new SpeechRecognition();
    var speechRecognitionList = new SpeechGrammarList();
    recognition.grammars = speechRecognitionList;
    recognition.lang = 'en-GB';
    recognition.continuous = false;
    recognition.interimResults = true;
    recognition.maxAlternatives = 1;
    recognition.start();
    recognition.onresult = function(event) {
        window.interim_transcript = '';
        window.speechResult = '';
        for(var i = event.resultIndex; i < event.results.length; ++i) {
            if(event.results[i].isFinal) {
                speechResult += event.results[i][0].transcript;
                console.log(speechResult);
                output.innerHTML = speechResult;
            } else {
                interim_transcript += event.results[i][0].transcript;
                console.log(interim_transcript);
                output.innerHTML = interim_transcript;
            }
        }
    }
    recognition.onerror = function(event) {
        // restartRecognition();
        console.log('recognition error: ' + event.error);
    }
    recognition.onend = async function(event) {
        restartRecognition();
    }
    function restartRecognition() {
        try { if(recognitionAllowed) recognition.start(); } catch(err) {}
    }
}

const startRecognition = document.getElementById('start-recognition');
startRecognition.addEventListener('mousedown', handleRecognitionStart);
startRecognition.addEventListener('mouseup', handleRecognitionEnd);
startRecognition.addEventListener('touchstart', handleRecognitionStart);
startRecognition.addEventListener('touchend', handleRecognitionEnd);

function handleRecognitionStart(e) {
    console.log('handleRecognitionStart', isTouchDevice);
    const event = e.type;
    if(isTouchDevice && event == 'touchstart') {
        recognitionStart();
    } else if(!isTouchDevice && event == 'mousedown') {
        console.log('handleRecognitionStart');
        recognitionStart();
    }
}

const isTouchDevice = touchCheck();
function touchCheck() {
    const maxTouchPoints = navigator.maxTouchPoints || navigator.msMaxTouchPoints;
    return 'ontouchstart' in window || maxTouchPoints > 0 || window.matchMedia && matchMedia('(any-pointer: coarse)').matches;
}

function handleRecognitionEnd(e) {
    const event = e.type;
    console.log(':::', event == 'touchend');
    if(isTouchDevice && event == 'touchend') {
        recognitionEnd();
    } else if(!isTouchDevice && event == 'mouseup') {
        recognitionEnd();
    }
}

function recognitionEnd() {
    resetRecognition();
}

function recognitionStart() {
    console.log('recognitionStart');
    Recognize();
    audioChunks = [];
    voiceRecorder.start();
}

function resetRecognition() {
    console.log('reset');
    if(typeof stopRecognize == "function") stopRecognize();
    // if(rec.state !== 'inactive') rec.stop();
    voiceRecorder.stop();
}

const playAudio = document.getElementById('play');
playAudio.addEventListener('click', () => {
    console.log('play');
    voiceRecorder.play();
});

class VoiceRecorder {
    constructor() {
        if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
            console.log("getUserMedia supported");
        } else {
            console.log("getUserMedia is not supported on your browser!");
        }
        this.mediaRecorder;
        this.stream;
        this.playerRef = document.querySelector("#player");
        this.recorderRef = document.querySelector("#recorder");
        this.chunks = [];
        this.isRecording = false;
        this.constraints = {
            audio: true,
            video: false
        };
    }
    handleSuccess(stream) {
        this.stream = stream;
        this.stream.oninactive = () => {
            console.log("Stream ended!");
        };
        this.recorderRef.srcObject = this.stream;
        this.mediaRecorder = new MediaRecorder(this.stream);
        console.log(this.mediaRecorder);
        this.mediaRecorder.ondataavailable = this.onMediaRecorderDataAvailable.bind(this);
        this.mediaRecorder.onstop = this.onMediaRecorderStop.bind(this);
        this.recorderRef.play();
        this.mediaRecorder.start();
    }
    handleError(error) {
        console.log("navigator.getUserMedia error: ", error);
    }
    onMediaRecorderDataAvailable(e) { this.chunks.push(e.data); }
    onMediaRecorderStop(e) {
        const blob = new Blob(this.chunks, { 'type': 'audio/ogg; codecs=opus' });
        const audioURL = window.URL.createObjectURL(blob);
        this.playerRef.src = audioURL;
        this.chunks = [];
        this.stream.getAudioTracks().forEach(track => track.stop());
        this.stream = null;
    }
    play() { this.playerRef.play(); }
    start() {
        console.log('start');
        if(this.isRecording) return;
        this.isRecording = true;
        this.playerRef.src = '';
        navigator.mediaDevices
            .getUserMedia(this.constraints)
            .then(this.handleSuccess.bind(this))
            .catch(this.handleError.bind(this));
    }
    stop() {
        if(!this.isRecording) return;
        this.isRecording = false;
        this.recorderRef.pause();
        this.mediaRecorder.stop();
    }
}
const voiceRecorder = new VoiceRecorder();
<button id="start-recognition">Hold This Button and Speak In Android This should output the text and record your voice at the s</button>
<button id="play">Play Recorded Audio</button>
<h1 id="output">Voice over here</h1>
<audio id="recorder" muted hidden></audio>
<audio id="player" hidden></audio>

Javascript WebRTC Failed to set remote answer sdp: Called in wrong state: kHaveRemoteOffer and Called in wrong state: kStable

I can't get my WebRTC code to work properly. I believe I did everything right, but it's still not working. There is something strange about why ontrack gets called so early; maybe it's supposed to be like that.
The website uses JavaScript code. I didn't post the server code, but that's where the WebSockets connect; it is just an exchanger: whatever you send to the server, it sends the same information back to the other partner (stranger) you are connected to.
The server code looks like this little sample:
private void writeStranger(UserProfile you, String msg) {
    UserProfile stranger = you.stranger;
    if(stranger != null)
        sendMessage(stranger.getWebSocket(), msg);
}
public void sendMessage(WebSocket websocket, String msg) {
    try {
        websocket.send(msg);
    } catch ( WebsocketNotConnectedException e ) {
        disconnnectClient(websocket);
    }
}
//...
case "ice_candidate":
    JSONObject candidatePackage = (JSONObject) packet.get(1);
    JSONObject candidate = (JSONObject) candidatePackage.get("candidate");
    obj = new JSONObject();
    list = new JSONArray();
    list.put("iceCandidate");
    obj.put("candidate", candidate);
    list.put(obj);
    System.out.println("Sent = " + list.toString());
    writeStranger(you, list.toString()); //send ice candidate to stranger
    break;
case "send_answer":
    JSONObject sendAnswerPackage = (JSONObject) packet.get(1);
    JSONObject answer = (JSONObject) sendAnswerPackage.get("answer");
    obj = new JSONObject();
    list = new JSONArray();
    list.put("getAnswer");
    obj.put("answer", answer);
    list.put(obj);
    System.out.println("Sent = " + list.toString());
    writeStranger(you, list.toString()); //send answer to stranger
    break;
case "send_offer":
    JSONObject offerPackage = (JSONObject) packet.get(1);
    JSONObject offer = (JSONObject) offerPackage.get("offer");
    obj = new JSONObject();
    list = new JSONArray();
    list.put("getOffer");
    obj.put("offer", offer);
    list.put(obj);
    System.out.println("Sent = " + list.toString());
    writeStranger(you, list.toString()); //send offer to stranger
    break;
Here are my outputs:
Raw text: https://pastebin.com/raw/FL8g29gG
JSON colored: https://pastebin.com/FL8g29gG
My JavaScript code is below:
var ws;
var peerConnection, localStream;
var rtc_server = {
    iceServers: [
        {urls: "stun:stun.l.google.com:19302"},
        {urls: "stun:stun.services.mozilla.com"},
        {urls: "stun:stun.stunprotocol.org:3478"},
        {url: "stun:stun.l.google.com:19302"},
        {url: "stun:stun.services.mozilla.com"},
        {url: "stun:stun.stunprotocol.org:3478"},
    ]
};
//offer SDP tells other peers what you would like
var rtc_media_constraints = {
    mandatory: {
        OfferToReceiveAudio: true,
        OfferToReceiveVideo: true
    }
};
var rtc_peer_options = {
    optional: [
        {DtlsSrtpKeyAgreement: true}, //To make Chrome and Firefox interoperate.
    ]
};
var PeerConnection = RTCPeerConnection || window.PeerConnection || window.webkitPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection;
var IceCandidate = RTCIceCandidate || window.mozRTCIceCandidate || window.RTCIceCandidate;
var SessionDescription = RTCSessionDescription || window.mozRTCSessionDescription || window.RTCSessionDescription;
var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

function hasSupportForVideoChat() {
    return window.RTCPeerConnection && window.RTCIceCandidate && window.RTCSessionDescription && navigator.mediaDevices && navigator.mediaDevices.getUserMedia && (RTCPeerConnection.prototype.addStream || RTCPeerConnection.prototype.addTrack) ? true : false;
}

function loadMyCameraStream() {
    if (getUserMedia) {
        getUserMedia.call(navigator, { video: {facingMode: "user", aspectRatio: 4 / 3 /*height: 272, width: 322*/}, audio: { echoCancellation: true } },
            function(localMediaStream) {
                //Add my video
                $("div#videoBox video#you")[0].muted = true;
                $("div#videoBox video#you")[0].autoplay = true;
                $("div#videoBox video#you").attr('playsinline', '');
                $("div#videoBox video#you").attr('webkit-playsinline', '');
                $("div#videoBox video#you")[0].srcObject = localMediaStream;
                localStream = localMediaStream;
            },
            function(e) {
                addStatusMsg("Your Video has error : " + e);
            }
        );
    } else {
        addStatusMsg("Your browser does not support WebRTC (Camera/Voice chat).");
        return;
    }
}

function loadStrangerCameraStream() {
    if(!hasSupportForVideoChat())
        return;
    peerConnection = new PeerConnection(rtc_server, rtc_peer_options);
    if (peerConnection.addTrack !== undefined)
        localStream.getTracks().forEach(track => peerConnection.addTrack(track, localStream));
    else
        peerConnection.addStream(localStream);
    peerConnection.onicecandidate = function(e) {
        if (!e || !e.candidate)
            return;
        ws.send(JSON.stringify(['ice_candidate', {"candidate": e.candidate}]));
    };
    if (peerConnection.addTrack !== undefined) {
        //newer technology
        peerConnection.ontrack = function(e) {
            //e.streams.forEach(stream => doAddStream(stream));
            addStatusMsg("ontrack called");
            //Add stranger video
            $("div#videoBox video#stranger").attr('playsinline', '');
            $("div#videoBox video#stranger").attr('webkit-playsinline', '');
            $('div#videoBox video#stranger')[0].srcObject = e.streams[0];
            $("div#videoBox video#stranger")[0].autoplay = true;
        };
    } else {
        //older technology
        peerConnection.onaddstream = function(e) {
            addStatusMsg("onaddstream called");
            //Add stranger video
            $("div#videoBox video#stranger").attr('playsinline', '');
            $("div#videoBox video#stranger").attr('webkit-playsinline', '');
            $('div#videoBox video#stranger')[0].srcObject = e.stream;
            $("div#videoBox video#stranger")[0].autoplay = true;
        };
    }
    peerConnection.createOffer(
        function(offer) {
            peerConnection.setLocalDescription(offer, function () {
                //both offer and peerConnection.localDescription are the same.
                addStatusMsg('createOffer, localDescription: ' + JSON.stringify(peerConnection.localDescription));
                //addStatusMsg('createOffer, offer: ' + JSON.stringify(offer));
                ws.send(JSON.stringify(['send_offer', {"offer": peerConnection.localDescription}]));
            },
            function(e) {
                addStatusMsg('createOffer, set description error' + e);
            });
        },
        function(e) {
            addStatusMsg("createOffer error: " + e);
        },
        rtc_media_constraints
    );
}

function closeStrangerCameraStream() {
    $('div#videoBox video#stranger')[0].srcObject = null;
    if(peerConnection)
        peerConnection.close();
}

function iceCandidate(candidate) {
    //ICE = Interactive Connectivity Establishment
    if(peerConnection)
        peerConnection.addIceCandidate(new IceCandidate(candidate));
    else
        addStatusMsg("peerConnection not created error");
    addStatusMsg("Peer Ice Candidate = " + JSON.stringify(candidate));
}

function getAnswer(answer) {
    if(!hasSupportForVideoChat())
        return;
    if(peerConnection) {
        peerConnection.setRemoteDescription(new SessionDescription(answer), function() {
            console.log("get answer ok");
            addStatusMsg("peerConnection, SessionDescription answer is ok");
        },
        function(e) {
            addStatusMsg("peerConnection, SessionDescription fail error: " + e);
        });
    }
}

function getOffer(offer) {
    if(!hasSupportForVideoChat())
        return;
    addStatusMsg("peerConnection, setRemoteDescription offer: " + JSON.stringify(offer));
    if(peerConnection) {
        peerConnection.setRemoteDescription(new SessionDescription(offer), function() {
            peerConnection.createAnswer(
                function(answer) {
                    peerConnection.setLocalDescription(answer);
                    addStatusMsg("create answer sent: " + JSON.stringify(answer));
                    ws.send(JSON.stringify(['send_answer', {"answer": answer}]));
                },
                function(e) {
                    addStatusMsg("peerConnection, setRemoteDescription create answer fail: " + e);
                }
            );
        });
    }
}
My website where I use it: https://www.camspark.com/
Fixed it myself. I figured out I had 2 problems with this code.
The first problem was that createOffer() must only be sent by 1 person, not both people. You have to randomly pick which person does the createOffer().
The second problem is the ICE candidates: you have to create a queue/array on both sides, which holds all the incoming ice_candidates. Only do peerConnection.addIceCandidate(new IceCandidate(candidate)); once the response to createOffer() has been received and the setRemoteDescription from the createOffer() response has been set.
Both getAnswer() and getOffer() use exactly the same code, but one is received by one client while the other is received by the other client. Both need to flush the iceCandidates array when either of them is triggered. If anyone wants, the two functions could be combined into one, as the code is the same (see the sketch at the end of this answer).
The final working code looks like this:
var ws;
var peerConnection, localStream;
//STUN = (Session Traversal Utilities for NAT)
var rtc_server = {
    iceServers: [
        {urls: "stun:stun.l.google.com:19302"},
        {urls: "stun:stun.services.mozilla.com"},
        {urls: "stun:stun.stunprotocol.org:3478"},
        {url: "stun:stun.l.google.com:19302"},
        {url: "stun:stun.services.mozilla.com"},
        {url: "stun:stun.stunprotocol.org:3478"},
    ]
};
//offer SDP = [Session Description Protocol] tells other peers what you would like
var rtc_media_constraints = {
    mandatory: {
        OfferToReceiveAudio: true,
        OfferToReceiveVideo: true
    }
};
var rtc_peer_options = {
    optional: [
        {DtlsSrtpKeyAgreement: true}, //To make Chrome and Firefox interoperate.
    ]
};
var finishSDPVideoOfferOrAnswer = false;
var isOfferer = false;
var iceCandidates = [];
var PeerConnection = RTCPeerConnection || window.PeerConnection || window.webkitPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection;
var IceCandidate = RTCIceCandidate || window.mozRTCIceCandidate || window.RTCIceCandidate;
var SessionDescription = RTCSessionDescription || window.mozRTCSessionDescription || window.RTCSessionDescription;
var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

function hasSupportForVideoChat() {
    return window.RTCPeerConnection && window.RTCIceCandidate && window.RTCSessionDescription && navigator.mediaDevices && navigator.mediaDevices.getUserMedia && (RTCPeerConnection.prototype.addStream || RTCPeerConnection.prototype.addTrack) ? true : false;
}

function loadMyCameraStream() {
    if (getUserMedia) {
        getUserMedia.call(navigator, { video: {facingMode: "user", aspectRatio: 4 / 3 /*height: 272, width: 322*/}, audio: { echoCancellation: true } },
            function(localMediaStream) {
                //Add my video
                $("div#videoBox video#you")[0].muted = true;
                $("div#videoBox video#you")[0].autoplay = true;
                $("div#videoBox video#you").attr('playsinline', '');
                $("div#videoBox video#you").attr('webkit-playsinline', '');
                $("div#videoBox video#you")[0].srcObject = localMediaStream;
                localStream = localMediaStream;
            },
            function(e) {
                addStatusMsg("Your Video has error : " + e);
            }
        );
    } else {
        addStatusMsg("Your browser does not support WebRTC (Camera/Voice chat).");
        return;
    }
}

function loadStrangerCameraStream(isOfferer_) {
    if(!hasSupportForVideoChat())
        return;
    //Only add pending ICE Candidates when getOffer() is finished.
    finishSDPVideoOfferOrAnswer = false;
    iceCandidates = []; //clear ICE Candidates array.
    isOfferer = isOfferer_;
    peerConnection = new PeerConnection(rtc_server, rtc_peer_options);
    if (peerConnection.addTrack !== undefined)
        localStream.getTracks().forEach(track => peerConnection.addTrack(track, localStream));
    else
        peerConnection.addStream(localStream);
    peerConnection.onicecandidate = function(e) {
        if (!e || !e.candidate)
            return;
        ws.send(JSON.stringify(['ice_candidate', {"candidate": e.candidate}]));
    };
    if (peerConnection.addTrack !== undefined) {
        //newer technology
        peerConnection.ontrack = function(e) {
            //e.streams.forEach(stream => doAddStream(stream));
            addStatusMsg("ontrack called");
            //Add stranger video
            $("div#videoBox video#stranger").attr('playsinline', '');
            $("div#videoBox video#stranger").attr('webkit-playsinline', '');
            $('div#videoBox video#stranger')[0].srcObject = e.streams[0];
            $("div#videoBox video#stranger")[0].autoplay = true;
        };
    } else {
        //older technology
        peerConnection.onaddstream = function(e) {
            addStatusMsg("onaddstream called");
            //Add stranger video
            $("div#videoBox video#stranger").attr('playsinline', '');
            $("div#videoBox video#stranger").attr('webkit-playsinline', '');
            $('div#videoBox video#stranger')[0].srcObject = e.stream;
            $("div#videoBox video#stranger")[0].autoplay = true;
        };
    }
    if(isOfferer) {
        peerConnection.createOffer(
            function(offer) {
                peerConnection.setLocalDescription(offer, function () {
                    //both offer and peerConnection.localDescription are the same.
                    addStatusMsg('createOffer, localDescription: ' + JSON.stringify(peerConnection.localDescription));
                    //addStatusMsg('createOffer, offer: ' + JSON.stringify(offer));
                    ws.send(JSON.stringify(['send_offer', {"offer": peerConnection.localDescription}]));
                },
                function(e) {
                    addStatusMsg('createOffer, set description error' + e);
                });
            },
            function(e) {
                addStatusMsg("createOffer error: " + e);
            },
            rtc_media_constraints
        );
    }
}

function closeStrangerCameraStream() {
    $('div#videoBox video#stranger')[0].srcObject = null;
    if(peerConnection)
        peerConnection.close();
}

function iceCandidate(candidate) {
    //ICE = Interactive Connectivity Establishment
    if(!finishSDPVideoOfferOrAnswer) {
        iceCandidates.push(candidate);
        addStatusMsg("Queued iceCandidate");
        return;
    }
    if(!peerConnection) {
        addStatusMsg("iceCandidate peerConnection not created error.");
        return;
    }
    peerConnection.addIceCandidate(new IceCandidate(candidate));
    addStatusMsg("Added on time, Peer Ice Candidate = " + JSON.stringify(candidate));
}

function getAnswer(answer) {
    if(!hasSupportForVideoChat())
        return;
    if(!peerConnection) {
        addStatusMsg("getAnswer peerConnection not created error.");
        return;
    }
    peerConnection.setRemoteDescription(new SessionDescription(answer), function() {
        addStatusMsg("getAnswer SessionDescription answer is ok");
        finishSDPVideoOfferOrAnswer = true;
        while (iceCandidates.length) {
            var candidate = iceCandidates.shift();
            try {
                peerConnection.addIceCandidate(new IceCandidate(candidate));
                addStatusMsg("Adding queued ICE Candidates");
            } catch(e) {
                addStatusMsg("Error adding queued ICE Candidates error:" + e);
            }
        }
        iceCandidates = [];
    },
    function(e) {
        addStatusMsg("getAnswer SessionDescription fail error: " + e);
    });
}

function getOffer(offer) {
    if(!hasSupportForVideoChat())
        return;
    if(!peerConnection) {
        addStatusMsg("getOffer peerConnection not created error.");
        return;
    }
    addStatusMsg("getOffer setRemoteDescription offer: " + JSON.stringify(offer));
    peerConnection.setRemoteDescription(new SessionDescription(offer), function() {
        finishSDPVideoOfferOrAnswer = true;
        while (iceCandidates.length) {
            var candidate = iceCandidates.shift();
            try {
                peerConnection.addIceCandidate(new IceCandidate(candidate));
                addStatusMsg("Adding queued ICE Candidates");
            } catch(e) {
                addStatusMsg("Error adding queued ICE Candidates error:" + e);
            }
        }
        iceCandidates = [];
        if(!isOfferer) {
            peerConnection.createAnswer(
                function(answer) {
                    peerConnection.setLocalDescription(answer);
                    addStatusMsg("getOffer create answer sent: " + JSON.stringify(answer));
                    ws.send(JSON.stringify(['send_answer', {"answer": answer}]));
                },
                function(e) {
                    addStatusMsg("getOffer setRemoteDescription create answer fail: " + e);
                }
            );
        }
    });
}
Here is the patch I did on the server-side WebSocket (Java) server:
//JSON
//["connected", {videoChatOfferer: true}]
//["connected", {videoChatOfferer: false}]
JSONObject obj = new JSONObject();
JSONArray list = new JSONArray();
list.put("loadStrangerCameraStream");
obj.put("videoChatOfferer", true); //first guy is the offerer for WebRTC.
list.put(obj);
server.sendMessage(websocket, list.toString()); //connected to chat partner
obj.put("videoChatOfferer", false); //second guy isn't the offerer.
list.put(obj);
server.sendMessage(stranger.getWebSocket(), list.toString()); //connected to chat partner
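Since getAnswer() and getOffer() differ only in whether an answer is created, they could indeed be merged as suggested above. A minimal sketch of such a combined handler (hypothetical, not part of the original code; it reuses the globals peerConnection, iceCandidates, isOfferer, finishSDPVideoOfferOrAnswer and the helpers addStatusMsg and ws from the code above):
function onRemoteDescription(desc) { //desc is the received offer or answer
    if(!hasSupportForVideoChat() || !peerConnection)
        return;
    peerConnection.setRemoteDescription(new SessionDescription(desc), function() {
        finishSDPVideoOfferOrAnswer = true;
        //flush ICE candidates that arrived before the remote description was set
        while (iceCandidates.length) {
            try {
                peerConnection.addIceCandidate(new IceCandidate(iceCandidates.shift()));
            } catch(e) {
                addStatusMsg("Error adding queued ICE Candidate: " + e);
            }
        }
        //only the non-offerer answers an incoming offer
        if(desc.type === "offer" && !isOfferer) {
            peerConnection.createAnswer(
                function(answer) {
                    peerConnection.setLocalDescription(answer);
                    ws.send(JSON.stringify(['send_answer', {"answer": answer}]));
                },
                function(e) {
                    addStatusMsg("createAnswer fail: " + e);
                }
            );
        }
    },
    function(e) {
        addStatusMsg("setRemoteDescription fail: " + e);
    });
}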

Choose WebCams Via getusermedia

I am a beginner with getUserMedia; I just got some code from Google and I'm able to work with it. But I have to show options in my web app from which a user can select a webcam: primary (laptop) or secondary (connected via USB).
I tried this. It works for the primary (laptop) webcam, but when I add a USB webcam it auto-selects the USB webcam.
var canvas = document.getElementById("canvas"),
    context = canvas.getContext("2d"),
    video = document.getElementById("video"),
    imagegrid = document.getElementById("imagegrid"),
    videoObj = { "video": true },
    errBack = function(error) {
        console.log("Video capture error: ", error.code);
    };
var video = document.querySelector("#video");
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia || navigator.oGetUserMedia;
if (navigator.getUserMedia) {
    navigator.getUserMedia({video: true}, handleVideo, videoError);
}
function handleVideo(stream) {
    video.src = window.URL.createObjectURL(stream);
}
function videoError(e) {
    // do something
}
// Trigger photo take
document.getElementById("video").addEventListener("click", function() {
    draw(video, canvas, imagegrid);
});
Is it possible to show options for both webcams?
Thanks
The function navigator.getUserMedia() will only give you the default camera (with the exception of Firefox, which gives you an option of which camera to share with the web application).
To avoid this problem you should use navigator.mediaDevices.enumerateDevices() and then navigator.mediaDevices.getUserMedia(constraints).
Example:
navigator.mediaDevices.enumerateDevices()
    .then(gotDevices)
    .catch(errorCallback);
...
function gotDevices(deviceInfos) {
    ...
    for (var i = 0; i !== deviceInfos.length; ++i) {
        var deviceInfo = deviceInfos[i];
        var option = document.createElement('option');
        option.value = deviceInfo.deviceId;
        if (deviceInfo.kind === 'audioinput') {
            option.text = deviceInfo.label ||
                'Microphone ' + (audioInputSelect.length + 1);
            audioInputSelect.appendChild(option);
        } else if (deviceInfo.kind === 'audiooutput') {
            option.text = deviceInfo.label || 'Speaker ' +
                (audioOutputSelect.length + 1);
            audioOutputSelect.appendChild(option);
        } else if (deviceInfo.kind === 'videoinput') {
            option.text = deviceInfo.label || 'Camera ' +
                (videoSelect.length + 1);
            videoSelect.appendChild(option);
        }
        ...
    }
}
navigator.mediaDevices.getUserMedia(constraints)
    .then(function(stream) {
        var videoTracks = stream.getVideoTracks();
        console.log('Got stream with constraints:', constraints);
        console.log('Using video device: ' + videoTracks[0].label);
        stream.onended = function() {
            console.log('Stream ended');
        };
        window.stream = stream; // make variable available to console
        video.srcObject = stream;
    })
    .catch(function(error) {
        // ...
    });
The above functions use promises and require a more complex approach than yours, so you will need to do some reading to adapt this method. Have a look at the link below for some examples:
https://developers.google.com/web/updates/2015/10/media-devices
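To tie this to the two-camera case in the question, here is a minimal, self-contained sketch (my illustration, not from the original answer) that lists only video inputs and reopens the stream with an exact deviceId whenever the user picks a camera; the element IDs videoSource and video are assumptions:
var videoSelect = document.getElementById('videoSource');
var video = document.getElementById('video');

// List cameras and add one <option> per video input device.
// Note: labels may be empty until the user has granted camera permission.
navigator.mediaDevices.enumerateDevices().then(function(devices) {
    devices.filter(function(d) { return d.kind === 'videoinput'; })
        .forEach(function(d, i) {
            var option = document.createElement('option');
            option.value = d.deviceId;
            option.text = d.label || 'Camera ' + (i + 1);
            videoSelect.appendChild(option);
        });
});

function openCamera(deviceId) {
    // Stop the previous stream first so the old camera is released.
    if (window.stream) {
        window.stream.getTracks().forEach(function(track) { track.stop(); });
    }
    navigator.mediaDevices.getUserMedia({
        video: deviceId ? { deviceId: { exact: deviceId } } : true
    }).then(function(stream) {
        window.stream = stream;
        video.srcObject = stream;
    }).catch(function(error) {
        console.log('getUserMedia error: ', error);
    });
}

videoSelect.onchange = function() { openCamera(videoSelect.value); };
openCamera(); // start with the default camera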

js+html5 getUserMedia conflicts with microphone (android) (mobile thinks phone call is active)

Edited - update at the bottom of the post.
I'm building a web-based app for Android using PhoneGap, and I came across this problem:
after the initialization of getUserMedia, when I use the volume down/up buttons, the volume control is for a call and not for the speaker, even if I didn't start a new recording.
In addition, I noticed that the phone actually thinks it is inside a phone call while the app is running. For example: I start my app, then I open WhatsApp and try to record a voice message; the message gets canceled.
I know that the next segment is the problem (I commented it out and there was no problem):
/***recording audio block***/
function audioRecordingInit() {
    navigator.getUserMedia = ( navigator.getUserMedia ||
                               navigator.webkitGetUserMedia ||
                               navigator.mozGetUserMedia ||
                               navigator.msGetUserMedia);
    if (navigator.getUserMedia) {
        console.log('getUserMedia supported.');
        var constraints = {audio: true};
        var chunks = [];
        var onSuccess = function (stream) {
            $.globals.mediaRecorder = new MediaRecorder(stream);
            $.globals.mediaRecorder.onstop = function(e) {
                console.log("data available after MediaRecorder.stop() called.");
                var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
                chunks = [];
                if(!$("#recordBtn").hasClass("private"))
                    $.globals.lastRecorded.audio.src = window.URL.createObjectURL(blob);
                else
                    $.globals.lastRecordedPrivate.audio.src = window.URL.createObjectURL(blob);
                console.log("audio created");
            };
            $.globals.mediaRecorder.ondataavailable = function(e) {
                chunks.push(e.data);
            };
        };
        var onError = function (err) {
            console.log('The following error occured: ' + err);
        };
        navigator.getUserMedia(constraints, onSuccess, onError); // I think this is the problem
    }
    else {
        console.log('getUserMedia not supported on your browser!');
    }
    /***end of recording audio block***/
}
This function is called after deviceready.
I'm also adding the start recording and stop recording functions below:
function startRecording(event) {
    document.getElementById("recordingTime").innerText = "00:00";
    $.globals.mediaRecorder.start();
    console.log($.globals.mediaRecorder.state);
    console.log("recorder started");
    $.globals.timerInterval = setInterval(function () {
        $.globals.sec += 1;
        if ($.globals.sec == 60) {
            $.globals.min++;
            $.globals.sec = 0;
        }
        if ($.globals.min < 10) {
            if ($.globals.sec < 10)
                $.globals.timeText = "0" + $.globals.min + ":0" + $.globals.sec;
            else
                $.globals.timeText = "0" + $.globals.min + ":" + $.globals.sec;
        }
        else if ($.globals.sec < 10)
            $.globals.timeText = $.globals.min + ":0" + $.globals.sec;
        else
            $.globals.timeText = $.globals.min + ":" + $.globals.sec;
        document.getElementById("recordingTime").innerText = $.globals.timeText;
    }, 1000);
}
function stopRecording() {
    if($(".circleNav").hasClass("recording"))
        $(".circleNav").toggleClass("recording");
    $.globals.currentState = "Recorded";
    console.log($.globals.mediaRecorder.state);
    if($.globals.mediaRecorder.state == "inactive"){
        $.globals.mediaRecorder.start();
    }
    $.globals.mediaRecorder.stop();
    console.log("recorder stopped");
    clearInterval($.globals.timerInterval);
}
startRecording runs on touchstart on the record button;
stopRecording is called on touchend on the record button.
Thank you for your help.
Update:
The conflict was with the microphone, because the stream was always live, not only while recording.
It now works fine, but the record button still needs to be disabled during a phone call; otherwise it will conflict and possibly crash the app or disconnect the microphone from the call.
OK, so after a lot of reading about MediaRecorder, MediaStream and MediaStreamTrack,
I found the problem: the audio stream stayed active and kept using the microphone, which denied me access to phone calls and voice messages in WhatsApp.
I will add my solution below:
function audioRecordingInit() {
    navigator.mediaDevices.getUserMedia = ( navigator.getUserMedia ||
                                            navigator.webkitGetUserMedia ||
                                            navigator.mozGetUserMedia ||
                                            navigator.msGetUserMedia);
    if (navigator.getUserMedia) {
        console.log('getUserMedia supported.');
        navigator.getUserMedia({audio: true}, onSuccessMedia, onErrorMedia);
        return true;
    }
    else {
        console.log('getUserMedia not supported on your browser!');
        return false;
    }
}
Notice that I made this function return a boolean value, and I separated the success and error functions:
function onSuccessMedia(stream) {
    var chunks = [];
    $.globals.mediaStream = stream;
    console.log(stream);
    $.globals.mediaRecorder = new MediaRecorder(stream);
    $.globals.mediaRecorder.onstop = function(e) {
        console.log("data available after MediaRecorder.stop() called.");
        var blob = new Blob(chunks, { 'type' : 'audio/mp3; codecs=opus' });
        chunks = [];
        if(!$("#recordBtn").hasClass("private"))
            $.globals.lastRecorded.audio.src = window.URL.createObjectURL(blob);
        else
            $.globals.lastRecordedPrivate.audio.src = window.URL.createObjectURL(blob);
        console.log("audio created");
    };
    $.globals.mediaRecorder.ondataavailable = function(e) {
        chunks.push(e.data);
    };
    $.globals.mediaRecorder.start();
    console.log("recording start");
}
function onErrorMedia(err) {
    console.log('The following error occured: ' + err);
}
Here I moved mediaRecorder.start() into the onSuccess function instead of inside the start recording function,
and finally I changed the startRecording function to:
function startRecording(event) {
    if(audioRecordingInit()) {
        //here are all of the visual changes
    }
}
And that's it; everything is working correctly.
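The flip side of acquiring the stream on every startRecording is releasing it again when the recording stops, so the microphone is freed between recordings. The post doesn't show that part; a minimal sketch, assuming the $.globals.mediaStream saved in onSuccessMedia above, could look like this:
function stopRecording() {
    if ($.globals.mediaRecorder && $.globals.mediaRecorder.state !== "inactive") {
        $.globals.mediaRecorder.stop(); // fires onstop, which builds the blob
    }
    // Release the microphone so the OS no longer treats the app as being in a call.
    if ($.globals.mediaStream) {
        $.globals.mediaStream.getTracks().forEach(function(track) {
            track.stop();
        });
        $.globals.mediaStream = null;
    }
}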

Setting drop down last item to selected but value does not change

Below is the code I am using to set the last item of a drop-down list as selected, but for some reason the value is still that of the first one; can someone please help me? This is an application that allows the user to open the rear camera after giving the browser permission. The problem is that although the drop-down list says camera 2, it still opens camera 1. I think the problem is that the value is not changing.
<body>
    <div class='select'>
        <label for='videoSource'>Video source: </label><select id='videoSource'></select>
    </div>
    <video muted autoplay style="width:100%; height:auto"></video>
</body>
</html>
<script type="text/javascript">
    var videoElement = document.querySelector("video");
    var videoSelect = document.querySelector("select#videoSource");
    navigator.getUserMedia = navigator.getUserMedia ||
        navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    function gotSources(sourceInfos) {
        for (var i = 0; i != sourceInfos.length; ++i) {
            var sourceInfo = sourceInfos[i];
            var option = document.createElement("option");
            option.value = sourceInfo.id;
            if (sourceInfo.kind === 'video') {
                option.text = sourceInfo.label || 'camera ' + (videoSelect.length + 1);
                videoSelect.appendChild(option);
                if( i == sourceInfos.length - 1 ) {
                    option.selected = true;
                }
            } else {
                console.log('Some other kind of source: ', sourceInfo);
            }
        }
    }
    if (typeof MediaStreamTrack === 'undefined') {
        alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
    } else {
        MediaStreamTrack.getSources(gotSources);
    }
    function successCallback(stream) {
        window.stream = stream; // make stream available to console
        videoElement.src = window.URL.createObjectURL(stream);
        videoElement.play();
    }
    function errorCallback(error) {
        console.log("navigator.getUserMedia error: ", error);
    }
    function start() {
        if (!!window.stream) {
            videoElement.src = null;
            window.stream.stop();
        }
        var videoSource = videoSelect.value;
        var constraints = {
            video: {
                optional: [{ sourceId: videoSource}]
            }
        };
        navigator.getUserMedia(constraints, successCallback, errorCallback);
    }
    videoSelect.onchange = start;
    start();
</script>
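One thing worth noting about the snippet above: MediaStreamTrack.getSources() is asynchronous (and has since been removed from browsers in favor of navigator.mediaDevices.enumerateDevices()), and the final start() call runs before gotSources has populated the list, so the first getUserMedia call opens the default camera regardless of which option ends up selected. A minimal sketch of the same flow with the modern API, keeping the question's markup, for illustration only:
<script type="text/javascript">
    var videoElement = document.querySelector("video");
    var videoSelect = document.querySelector("select#videoSource");

    navigator.mediaDevices.enumerateDevices().then(function(devices) {
        devices.filter(function(d) { return d.kind === "videoinput"; })
            .forEach(function(d, i) {
                var option = document.createElement("option");
                option.value = d.deviceId;
                option.text = d.label || "camera " + (i + 1);
                videoSelect.appendChild(option);
            });
        videoSelect.selectedIndex = videoSelect.length - 1; // pre-select the last camera
        start(); // open the camera only after the list is populated
    });

    function start() {
        if (window.stream) {
            window.stream.getTracks().forEach(function(track) { track.stop(); });
        }
        navigator.mediaDevices.getUserMedia({
            video: { deviceId: { exact: videoSelect.value } }
        }).then(function(stream) {
            window.stream = stream;
            videoElement.srcObject = stream;
        }).catch(function(error) {
            console.log("getUserMedia error: ", error);
        });
    }

    videoSelect.onchange = start;
</script>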
