What's the use of `this` in this example? - javascript

I'm using a module called hark to detect when the user is speaking. Here's some of the code:
// original source code is taken from:
// https://github.com/SimpleWebRTC/hark
// copyright goes to &yet team
// edited by Muaz Khan for RTCMultiConnection.js
function hark(stream, options) {
var audioContextType = window.webkitAudioContext || window.AudioContext;
var harker = this;
harker.events = {};
harker.on = function (event, callback) {
harker.events[event] = callback;
};
harker.emit = function () {
if (harker.events[arguments[0]]) {
harker.events[arguments[0]](arguments[1], arguments[2], arguments[3], arguments[4]);
}
};
// make it not break in non-supported browsers
if (!audioContextType) return harker;
options = options || {};
// Config
var smoothing = (options.smoothing || 0.1),
interval = (options.interval || 50),
threshold = options.threshold,
play = options.play,
history = options.history || 10,
running = true;
(...)
return harker;
}
What is this line for?
var harker = this;
When I checked in the debugger, this line stores a Window object in harker, and from what I'm seeing it causes some unexpected behavior when I call hark more than once.
Why not just do var harker;?
Full code is here:
https://www.webrtc-experiment.com/hark.js
And here's a demo where it's used:
<style>
html, body {
margin: 0!important;
padding: 0!important;
}
video {
width: auto;
max-width: 100%;
}
</style>
<title>Auto Stop RecordRTC on Silence</title>
<h1>Auto Stop RecordRTC on Silence</h1>
<br>
<button id="btn-start-recording">Start Recording</button>
<button id="btn-stop-recording" disabled style="display: none;">Stop Recording</button>
<hr>
<video controls autoplay playsinline></video>
<script src="/RecordRTC.js"></script>
<script src="https://www.webrtc-experiment.com/hark.js"></script>
<script>
var video = document.querySelector('video');
var h1 = document.querySelector('h1');
var default_h1 = h1.innerHTML;
function captureCamera(callback) {
navigator.mediaDevices.getUserMedia({ audio: true, video: true }).then(function(camera) {
callback(camera);
}).catch(function(error) {
alert('Unable to capture your camera. Please check console logs.');
console.error(error);
});
}
function stopRecordingCallback() {
video.srcObject = null;
var blob = recorder.getBlob();
video.src = URL.createObjectURL(blob);
recorder.camera.stop();
video.muted = false;
}
var recorder; // globally accessible
document.getElementById('btn-start-recording').onclick = function() {
this.disabled = true;
captureCamera(function(camera) {
video.muted = true;
video.srcObject = camera;
recorder = RecordRTC(camera, {
type: 'video'
});
recorder.startRecording();
var max_seconds = 3;
var stopped_speaking_timeout;
var speechEvents = hark(camera, {});
speechEvents.on('speaking', function() {
if(recorder.getBlob()) return;
clearTimeout(stopped_speaking_timeout);
if(recorder.getState() === 'paused') {
// recorder.resumeRecording();
}
h1.innerHTML = default_h1;
});
speechEvents.on('stopped_speaking', function() {
if(recorder.getBlob()) return;
// recorder.pauseRecording();
stopped_speaking_timeout = setTimeout(function() {
document.getElementById('btn-stop-recording').click();
h1.innerHTML = 'Recording is now stopped.';
}, max_seconds * 1000);
// just for logging purposes (you can remove the code below)
var seconds = max_seconds;
(function looper() {
h1.innerHTML = 'Recording is going to be stopped in ' + seconds + ' seconds.';
seconds--;
if(seconds <= 0) {
h1.innerHTML = default_h1;
return;
}
setTimeout(looper, 1000);
})();
});
// release camera on stopRecording
recorder.camera = camera;
document.getElementById('btn-stop-recording').disabled = false;
});
};
document.getElementById('btn-stop-recording').onclick = function() {
this.disabled = true;
recorder.stopRecording(stopRecordingCallback);
};
</script>
<footer style="margin-top: 20px;"><small id="send-message"></small></footer>
<script src="https://www.webrtc-experiment.com/common.js"></script>

The pattern of assigning the value of this to a variable is something you can read more about by searching for the this/that pattern (or self = this), since those are common names for the variable that "saves" a reference to this.
The reason for doing that is that this changes depending on how a function is called. If you capture this in a variable at a specific scope, you can pass that reference along to other functions - they wouldn't be able to use this themselves, since this might mean something completely different inside them.
As for what you observed: hark() is invoked as a plain function, not with new, so in non-strict mode this is the global window object. That is why the debugger shows a Window in harker, and why calling hark() more than once misbehaves - every call decorates the same shared window object. A bare var harker; wouldn't work either, because the next lines immediately attach properties to it; it would need to be an object, e.g. var harker = {};.
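A minimal sketch of the difference (the Widget name is mine, not from the hark source):
function Widget() {
    var self = this; // capture whatever `this` is at call time
    self.count = (self.count || 0) + 1;
    setTimeout(function () {
        // inside this callback `this` is not the Widget; `self` still is
        console.log(self.count);
    }, 0);
    return self;
}
var a = Widget();     // plain call: `this` === window, so state is shared
var b = Widget();     // same window again, so a.count and b.count are both 2
var c = new Widget(); // `new` creates a fresh object, so c.count === 1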

How to send MediaStream object from video to canvas element which is being used in opencv.js?

Basically, what I'm trying to do is detect faces locally while in a video conference. For this purpose I'm using RTCMultiConnection for the video conference part (this specific part, to be exact) and OpenCV.js for the computer vision part, specifically this repository.
I know that I need to send the MediaStream object from the video element to the canvas element inside the OpenCV function. I was not getting that stream shown on the canvas, so I figured I would make the variable containing that MediaStream object global so I could pass it to the OpenCV function, but that is not working either.
I would be truly grateful for any insight you can provide, thank you.
Here's the code:
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
<meta charset="utf-8">
<title>Video Conferencing using RTCMultiConnection</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0">
<link rel="shortcut icon" href="/demos/logo.png">
<link rel="stylesheet" href="/demos/stylesheet.css">
<script src="/demos/menu.js"></script>
<script async src="js/opencv.js" ></script>
<script src="js/utils.js"></script>
</head>
<body>
<header>
<!-- <a class="logo" href="/"><img src="/demos/logo.png" alt="RTCMultiConnection"></a>
Menu<img src="/demos/menu-icon.png" alt="Menu">
<!-- <nav>
<li>
Home
</li>
<li>
Demos
</li>
<li>
Getting Started
</li>
<li>
FAQ
</li>
<li>
YouTube
</li>
<li>
Wiki
</li>
<li>
Github
</li>
</nav> -->
</header> -->
<h1>
Video Conferencing using RTCMultiConnection
<p class="no-mobile">
Multi-user (many-to-many) video chat using mesh networking model.
</p>
</h1>
<section class="make-center">
<div>
<label><input type="checkbox" id="record-entire-conference"> Record Entire Conference In The Browser?</label>
<span id="recording-status" style="display: none;"></span>
<button id="btn-stop-recording" style="display: none;">Stop Recording</button>
<br><br>
<input type="text" id="room-id" value="abcdef" autocorrect=off autocapitalize=off size=20>
<button id="open-room">Open Room</button>
<button id="join-room">Join Room</button>
<button id="open-or-join-room">Auto Open Or Join Room</button>
</div>
<div id="videos-container" style="margin: 20px 0;"></div>
<canvas id="canvas_output"></canvas>
<div id="room-urls" style="text-align: center;display: none;background: #F1EDED;margin: 15px -10px;border: 1px solid rgb(189, 189, 189);border-left: 0;border-right: 0;"></div>
</section>
<script src="/dist/RTCMultiConnection.min.js"></script>
<script src="/node_modules/webrtc-adapter/out/adapter.js"></script>
<script src="/socket.io/socket.io.js"></script>
<!-- <script async src="js/opencv.js" onload="openCvReady()"></script> -->
<!-- <script src="js/utils.js"></script> -->
<!-- custom layout for HTML5 audio/video elements -->
<link rel="stylesheet" href="/dev/getHTMLMediaElement.css">
<script src="/dev/getHTMLMediaElement.js"></script>
<script src="/node_modules/recordrtc/RecordRTC.js"></script>
<script>
// ......................................................
// .......................UI Code........................
// ......................................................
document.getElementById('open-room').onclick = function() {
disableInputButtons();
connection.open(document.getElementById('room-id').value, function(isRoomOpened, roomid, error) {
if(isRoomOpened === true) {
showRoomURL(connection.sessionid);
}
else {
disableInputButtons(true);
if(error === 'Room not available') {
alert('Someone already created this room. Please either join or create a separate room.');
return;
}
alert(error);
}
});
};
document.getElementById('join-room').onclick = function() {
disableInputButtons();
connection.join(document.getElementById('room-id').value, function(isJoinedRoom, roomid, error) {
if (error) {
disableInputButtons(true);
if(error === 'Room not available') {
alert('This room does not exist. Please either create it or wait for moderator to enter in the room.');
return;
}
alert(error);
}
});
};
document.getElementById('open-or-join-room').onclick = function() {
disableInputButtons();
connection.openOrJoin(document.getElementById('room-id').value, function(isRoomExist, roomid, error) {
if(error) {
disableInputButtons(true);
alert(error);
}
else if (connection.isInitiator === true) {
// if room doesn't exist, it means that current user will create the room
showRoomURL(roomid);
}
});
};
// ......................................................
// .......................UI Code ENDS........................
// ......................................................
// ***********************************************************
// ......................................................
// ..................RTCMultiConnection Code.............
// ......................................................
var connection = new RTCMultiConnection();
// by default, socket.io server is assumed to be deployed on your own URL
connection.socketURL = '/';
// comment-out below line if you do not have your own socket.io server
// connection.socketURL = 'https://rtcmulticonnection.herokuapp.com:443/';
connection.socketMessageEvent = 'video-conference-demo';
connection.session = {
audio: true,
video: true
};
connection.sdpConstraints.mandatory = {
OfferToReceiveAudio: true,
OfferToReceiveVideo: true
};
// STAR_FIX_VIDEO_AUTO_PAUSE_ISSUES
// via: https://github.com/muaz-khan/RTCMultiConnection/issues/778#issuecomment-524853468
var bitrates = 512;
var resolutions = 'Ultra-HD';
var videoConstraints = {};
if (resolutions == 'HD') {
videoConstraints = {
width: {
ideal: 1280
},
height: {
ideal: 720
},
frameRate: 30
};
}
if (resolutions == 'Ultra-HD') {
videoConstraints = {
width: {
ideal: 1920
},
height: {
ideal: 1080
},
frameRate: 30
};
}
connection.mediaConstraints = {
video: videoConstraints,
audio: true
};
var CodecsHandler = connection.CodecsHandler;
connection.processSdp = function(sdp) {
var codecs = 'vp8';
if (codecs.length) {
sdp = CodecsHandler.preferCodec(sdp, codecs.toLowerCase());
}
if (resolutions == 'HD') {
sdp = CodecsHandler.setApplicationSpecificBandwidth(sdp, {
audio: 128,
video: bitrates,
screen: bitrates
});
sdp = CodecsHandler.setVideoBitrates(sdp, {
min: bitrates * 8 * 1024,
max: bitrates * 8 * 1024,
});
}
if (resolutions == 'Ultra-HD') {
sdp = CodecsHandler.setApplicationSpecificBandwidth(sdp, {
audio: 128,
video: bitrates,
screen: bitrates
});
sdp = CodecsHandler.setVideoBitrates(sdp, {
min: bitrates * 8 * 1024,
max: bitrates * 8 * 1024,
});
}
return sdp;
};
// END_FIX_VIDEO_AUTO_PAUSE_ISSUES
// https://www.rtcmulticonnection.org/docs/iceServers/
// use your own TURN-server here!
connection.iceServers = [{
'urls': [
'stun:stun.l.google.com:19302',
'stun:stun1.l.google.com:19302',
'stun:stun2.l.google.com:19302',
'stun:stun.l.google.com:19302?transport=udp',
]
}];
connection.videosContainer = document.getElementById('videos-container');
connection.onstream = function(specialEvent) {
// it's just the event object, but I was debugging it in the console and wanted to rename it to specialEvent
var existing = document.getElementById(specialEvent.streamid);
if(existing && existing.parentNode) {
existing.parentNode.removeChild(existing);
}
// specialEvent.mediaElement.removeAttribute('src');
// specialEvent.mediaElement.removeAttribute('srcObject');
specialEvent.mediaElement.muted = true;
specialEvent.mediaElement.volume = 0;
var video = document.createElement('video');
video.setAttribute("id", "cam_input");
window.video = video;
try {
video.setAttributeNode(document.createAttribute('autoplay'));
video.setAttributeNode(document.createAttribute('playsinline'));
} catch (e) {
video.setAttribute('autoplay', true);
video.setAttribute('playsinline', true);
}
if(specialEvent.type === 'local') {
video.volume = 0;
try {
video.setAttributeNode(document.createAttribute('muted'));
} catch (e) {
video.setAttribute('muted', true);
}
}
video.srcObject = specialEvent.stream;
var width = parseInt(connection.videosContainer.clientWidth / 3) - 20;
var mediaElement = getHTMLMediaElement(video, {
title: specialEvent.userid,
buttons: ['full-screen'],
width: width,
showOnMouseEnter: false
});
connection.videosContainer.appendChild(mediaElement);
setTimeout(function() {
mediaElement.media.play();
}, 5000);
mediaElement.id = specialEvent.streamid;
// to keep room-id in cache
localStorage.setItem(connection.socketMessageEvent, connection.sessionid);
chkRecordConference.parentNode.style.display = 'none';
if(chkRecordConference.checked === true) {
btnStopRecording.style.display = 'inline-block';
recordingStatus.style.display = 'inline-block';
var recorder = connection.recorder;
if(!recorder) {
recorder = RecordRTC([specialEvent.stream], {
type: 'video'
});
recorder.startRecording();
connection.recorder = recorder;
}
else {
recorder.getInternalRecorder().addStreams([specialEvent.stream]);
}
if(!connection.recorder.streams) {
connection.recorder.streams = [];
}
connection.recorder.streams.push(specialEvent.stream);
recordingStatus.innerHTML = 'Recording ' + connection.recorder.streams.length + ' streams';
}
if(specialEvent.type === 'local') {
connection.socket.on('disconnect', function() {
if(!connection.getAllParticipants().length) {
location.reload();
}
});
}
window.specialEvent = specialEvent;
};
// ******************************
//
// OPENCV CODE STARTS FROM HERE
//
// ******************************
function openCvReady() {
// cv['onRuntimeInitialized']=()=>{
// let video = document.getElementById("cam_input"); // video is the id of video tag
// navigator.mediaDevices.getUserMedia({ video: true, audio: false })
// .then(function(stream) {
// console.log(stream);
// video.srcObject = stream;
// video.play();
// })
// .catch(function(err) {
// console.log("An error occurred! " + err);
// });
// let cam_input = document.getElementById("cam_input");
let video = document.getElementById("cam_input");
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video); // was cam_input, which is undefined here (its declaration is commented out above)
let faces = new cv.RectVector();
// if (face == true){
// console.log("face shown");
// }
let classifier = new cv.CascadeClassifier();
let utils = new Utils('errorMessage');
let faceCascadeFile = 'haarcascade_frontalface_default.xml'; // path to xml
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
classifier.load(faceCascadeFile); // in the callback, load the cascade from file
});
const FPS = 24;
function processVideo() {
let begin = Date.now();
cap.read(src);
src.copyTo(dst);
cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
try{
classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
console.log(faces.size());
}catch(err){
console.log(err);
}
for (let i = 0; i < faces.size(); ++i) {
let face = faces.get(i);
console.log("face:");
console.log(face);
let point1 = new cv.Point(face.x, face.y);
let point2 = new cv.Point(face.x + face.width, face.y + face.height);
cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
}
cv.imshow("canvas_output", dst);
// schedule next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
}
// schedule first one.
setTimeout(processVideo, 0);
// };
}
var recordingStatus = document.getElementById('recording-status');
var chkRecordConference = document.getElementById('record-entire-conference');
var btnStopRecording = document.getElementById('btn-stop-recording');
btnStopRecording.onclick = function() {
var recorder = connection.recorder;
if(!recorder) return alert('No recorder found.');
recorder.stopRecording(function() {
var blob = recorder.getBlob();
invokeSaveAsDialog(blob);
connection.recorder = null;
btnStopRecording.style.display = 'none';
recordingStatus.style.display = 'none';
chkRecordConference.parentNode.style.display = 'inline-block';
});
};
connection.onstreamended = function(event) {
var mediaElement = document.getElementById(event.streamid);
if (mediaElement) {
mediaElement.parentNode.removeChild(mediaElement);
}
};
connection.onMediaError = function(e) {
if (e.message === 'Concurrent mic process limit.') {
if (DetectRTC.audioInputDevices.length <= 1) {
alert('Please select external microphone. Check github issue number 483.');
return;
}
var secondaryMic = DetectRTC.audioInputDevices[1].deviceId;
connection.mediaConstraints.audio = {
deviceId: secondaryMic
};
connection.join(connection.sessionid);
}
};
// ..................................
// ALL below scripts are redundant!!!
// ..................................
function disableInputButtons(enable) {
document.getElementById('room-id').onkeyup();
document.getElementById('open-or-join-room').disabled = !enable;
document.getElementById('open-room').disabled = !enable;
document.getElementById('join-room').disabled = !enable;
document.getElementById('room-id').disabled = !enable;
}
// ......................................................
// ......................Handling Room-ID................
// ......................................................
function showRoomURL(roomid) {
var roomHashURL = '#' + roomid;
var roomQueryStringURL = '?roomid=' + roomid;
var html = '<h2>Unique URL for your room:</h2><br>';
html += 'Hash URL: ' + roomHashURL + '';
html += '<br>';
html += 'QueryString URL: ' + roomQueryStringURL + '';
var roomURLsDiv = document.getElementById('room-urls');
roomURLsDiv.innerHTML = html;
roomURLsDiv.style.display = 'block';
}
(function() {
var params = {},
r = /([^&=]+)=?([^&]*)/g;
function d(s) {
return decodeURIComponent(s.replace(/\+/g, ' '));
}
var match, search = window.location.search;
while (match = r.exec(search.substring(1)))
params[d(match[1])] = d(match[2]);
window.params = params;
})();
var roomid = '';
if (localStorage.getItem(connection.socketMessageEvent)) {
roomid = localStorage.getItem(connection.socketMessageEvent);
} else {
roomid = connection.token();
}
var txtRoomId = document.getElementById('room-id');
txtRoomId.value = roomid;
txtRoomId.onkeyup = txtRoomId.oninput = txtRoomId.onpaste = function() {
localStorage.setItem(connection.socketMessageEvent, document.getElementById('room-id').value);
};
var hashString = location.hash.replace('#', '');
if (hashString.length && hashString.indexOf('comment-') == 0) {
hashString = '';
}
var roomid = params.roomid;
if (!roomid && hashString.length) {
roomid = hashString;
}
if (roomid && roomid.length) {
document.getElementById('room-id').value = roomid;
localStorage.setItem(connection.socketMessageEvent, roomid);
// auto-join-room
(function reCheckRoomPresence() {
connection.checkPresence(roomid, function(isRoomExist) {
if (isRoomExist) {
connection.join(roomid);
return;
}
setTimeout(reCheckRoomPresence, 5000);
});
})();
disableInputButtons();
}
// detect 2G
if(navigator.connection &&
navigator.connection.type === 'cellular' &&
navigator.connection.downlinkMax <= 0.115) {
alert('2G is not supported. Please use a better internet service.');
}
</script>
<footer>
<small id="send-message"></small>
</footer>
<script src="https://www.webrtc-experiment.com/common.js"></script>
</body>
</html>
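One plausible cause (an assumption, not a confirmed fix): the video element created in connection.onstream has no intrinsic width/height when openCvReady() runs, so the Mats are allocated with 0x0 dimensions and cv.VideoCapture has nothing to read. A sketch that waits for the stream's metadata before wiring it into OpenCV, using the cam_input id from the question:
// Sketch: size the element from the stream, then start OpenCV.
function startFaceDetection(stream) {
    var video = document.getElementById('cam_input');
    video.srcObject = stream;
    video.addEventListener('loadedmetadata', function onMeta() {
        video.removeEventListener('loadedmetadata', onMeta);
        // cv.VideoCapture reads the element's width/height attributes
        video.width = video.videoWidth;
        video.height = video.videoHeight;
        video.play();
        openCvReady(); // the Mats are now allocated with non-zero sizes
    });
}
Calling startFaceDetection(specialEvent.stream) from inside connection.onstream would then replace the window.video global.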

JavaScript audio player

I'm building a simple JavaScript audio track controller and I don't know what is wrong with my code. I've been searching for hours here and I found some simple code that did the job. I had to change a lot of things (due to typos or code errors) and I finally got this:
var TRACKS = [
{
id: 0,
nom: 'Johnny B. Goode',
autor: 'Chuck Berry',
src: 'src/johnnybgoode.mp3',
any: '1955'
},
{
id: 1,
nom: 'For Your Love',
autor: 'The Yardbirds',
src: 'src/foryourlove.mp3',
any: '1965'
}
];
var songs = 2;
var Player = function () {
"use strict";
var currentPlaying,
trackListPos,
trackList,
source,
audio;
this.getName = function () {
return currentPlaying.nom;
};
this.setTrack = function (obj) {
currentPlaying = obj;
source.src = obj.src;
audio.load();
audio.play();
return this;
};
this.setTrackList = function (t) {
trackList = t;
trackListPos = 0;
audio = document.getElementById('myAudio');
source = document.getElementById('audioSource');
this.setTrack(trackList[trackListPos]);
return this;
};
this.play = function () {
audio.load();
audio.play();
return this;
};
this.stop = function () {
audio.pause();
audio.currentTime = 0;
return this;
};
this.pause = function () {
audio.pause();
};
this.next = function () {
if (currentPlaying.id === (songs - 1)) {
trackListPos = 0;
} else {
trackListPos += 1;
}
this.setTrack(trackList[trackListPos]);
};
};
//Initialize
var reprod = new Player();
reprod.setTrackList(TRACKS).play();
function play() {
"use strict";
reprod.play();
document.getElementById("Play").innerHTML = "Pause";
}
function seguent() {
"use strict";
reprod.next();
document.getElementById("titol").innerHTML = "<b>Títol:</b>";
}
<audio id="myAudio" controls="controls">
<source id="audioSource" src="">
Your browser does not support the audio format.
</audio>
<nav class="navegador" id="sidebar">
<div class="playerwrapper">
<div class="player">
<p id="titol"></p>
<p id="autor"></p>
<p id="any"></p>
<button type="button" onclick="seguent()">Següent</button>
<button id="Play" type="button" onclick="play()">Play</button>
</div>
</div>
</nav>
As you can see, there are a couple of buttons that trigger the functions play() and seguent() (which means "next"), and they don't seem to work.
The innerHTML assignment that changes the Play button text doesn't work, but when I remove the "reprod.play();" line it does work and changes the button content.
Can someone explain what exactly is happening with my code?
(Sorry if the post is tedious, it's my second post here and I don't know the formatting.)
Thanks for letting me know I could use the console; the errors it throws are:
Uncaught TypeError: Cannot read property 'load' of null
Uncaught TypeError: Cannot set property 'src' of null
Observing your code, it seems the elements that setTrackList(t) refers to don't exist in the DOM yet. I think the <script> tag importing this code in your HTML precedes the <body> with your content. You have to make sure the DOM content is loaded before calling setTrackList(t); you can bind the load event to call it, otherwise the function will not find the elements.
Try this:
var reprod = new Player();
document.addEventListener("load", function() {
reprod.setTrackList(TRACKS).play();
});
function play() {
"use strict";
reprod.play();
document.getElementById("Play").innerHTML = "Pause";
}
function seguent() {
"use strict";
reprod.next();
document.getElementById("titol").innerHTML = "<b>Títol:</b>";
}
EDIT: Maybe I understood your problem: the variables declared with var inside the constructor (function Player()) are local to that call and go away once it returns, so try declaring them as properties of the instance instead:
this.audio = yourValue;
Instead of:
var audio = yourValue;
EDIT: Maybe now it's working well; try this fiddle.
Apart from the error caused by the missing track files, it seems to work.
Here's the code of the fiddle:
var TRACKS = [
{
id: 0,
nom: 'Johnny B. Goode',
autor: 'Chuck Berry',
src: 'src/johnnybgoode.mp3',
any: '1955'
},
{
id: 1,
nom: 'For Your Love',
autor: 'The Yardbirds',
src: 'src/foryourlove.mp3',
any: '1965'
}
];
var songs = 2;
var Player = function () {
this.getName = function () {
return this.currentPlaying.nom;
};
this.setTrack = function (obj) {
this.currentPlaying = obj;
this.source.src = obj.src;
this.audio.load();
this.audio.play();
return this;
};
this.setTrackList = function (t) {
this.trackList = t;
this.trackListPos = 0;
this.audio = document.getElementById('myAudio');
this.source = document.getElementById('audioSource');
this.setTrack(this.trackList[this.trackListPos]);
return this;
};
this.play = function () {
//this.audio.load();
this.audio.play();
return this;
};
this.stop = function () {
this.audio.pause();
this.audio.currentTime = 0;
return this;
};
this.pause = function () {
this.audio.pause();
};
this.next = function () {
if (this.currentPlaying.id === (songs - 1)) {
this.trackListPos = 0;
} else {
this.trackListPos += 1;
}
this.setTrack(this.trackList[this.trackListPos]);
};
};
//Initialize
var reprod = new Player();
if (document.getElementById("myAudio"))
reprod.setTrackList(TRACKS).play();
else
window.addEventListener('load', function() {
reprod.setTrackList(TRACKS).play();
});
window.play = function() {
"use strict";
reprod.play();
document.getElementById("Play").innerHTML = "Pause";
}
window.seguent = function() {
"use strict";
reprod.next();
document.getElementById("titol").innerHTML = "<b>Títol:</b>";
}
And here's the HTML:
<audio id="myAudio" controls="controls">
<source id="audioSource" src="">
Your browser does not support the audio format.
</audio>
<nav class="navegador" id="sidebar">
<div class="playerwrapper">
<div class="player">
<p id="titol"></p>
<p id="autor"></p>
<p id="any"></p>
<button type="button" onclick="window.seguent()">Següent</button>
<button id="Play" type="button" onclick="window.play()">Play</button>
</div>
</div>
</nav>
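An alternative to waiting for the load event (a general technique, not part of the original answer) is to let the browser defer the script until the document has been parsed:
<!-- defer runs the script after parsing, so getElementById
     finds #myAudio and #audioSource without an event listener -->
<script defer src="player.js"></script>
Note that defer only applies to external scripts; player.js is a hypothetical name for the code above moved into its own file.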

How to make annyang detect no sound for 3 seconds or longer, then start next audio?

I got a person's help to build this so far, but my goal is that after the first audio file, the program keeps listening to what the user says until the user finishes talking. Then, if the program doesn't detect anything for 3 seconds (or longer), it plays the next audio. The program repeats this until all audio files have played.
However, there's one more case. If the user is a 2-year-old kid, the kid might need 2 seconds or longer between two sentences. In that case annyang might think the user has finished the sentence and play the next audio, interrupting the user's speech. How should I handle this?
The person who helped me suggested using setInterval, creating some Date objects, and subtracting them to detect a gap greater than 3 seconds before playing the next audio. But it's not working. Is my logic wrong, or is there a better way?
Any help would be appreciated. Thank you in advance.
<script>
audio = new Audio();
if (annyang)
{
annyang.addCallback('start', function() {console.log('started listening!');});
annyang.addCallback('soundstart', function(){onSoundDetected();});
function monitorSound()
{
if(monitorId && monitorId > 0) return;
var monitorId = window.setInterval(function(){trackSound() }, 1000);
}
var lastSound= new Date();
function trackSound()
{
var now = new Date();
if ((now - lastSound) > 3000)
{
playNextAudio();
return;
}
}
function stopListening()
{
var monitorId = 0;
window.clearInterval(monitorId);
annyang.removeCallback('soundstart', onSoundHeard);
annyang.addCallback('result', function() {playNextAudio(); });
}
function onSoundHeard()
{
lastSound = new Date();
console.log(lastSound);
}
function playNextAudio()
{
if(audioIndex === playList.length - 1)
{
console.log("Played all audios");
return; // we have played all audio
}
else
{
annyang.addCallback('result', function() {
audio.src = dir + playList[audioIndex++] + extention;
audio.load();
//audio.ended = audio.play();
//audio.ended = setTimeout(function(){audio.play();}, 1500);
setTimeout(function(){audio.play();}, 1000);
});
}
}
function playFirstAudio()
{
audio.src = dir + playList[audioIndex] + extention;
audio.load();
audio.ended = setTimeout(function(){audio.play();}, 1000);
console.log('First audio is playing');
}
function onSoundDetected()
{
console.log('sound was detected');
playFirstAudio();
monitorSound();
}
// Start from here
var playList = ["1_hello", "2_how_old", "3_what_did_you_make"];
var dir = "sound/";
var extention = ".wav";
var audioIndex = 0;
annyang.debug(true);
};
</script>
You have logic errors in your code. When stopping and starting timers, you need to refer to the global variables.
This code isn't going to stop your timer:
function stopListening()
{
var monitorId = 0;
window.clearInterval(monitorId);
}
Your code also only initiates your play loop. It never starts the listening timer.
Your logic will also play the first song twice.
To demonstrate the control flow, I have mocked the audio and annyang objects. The audio mock simulates playing three ten-second audio files. When the window loads, the first audio mock plays; after it ends, annyang and the listening timer start.
To mock annyang's sound detection there is a "Mock Sound" button. Clicking it extends sound detection for another 3 seconds. Once 3 seconds go by, annyang is paused and the next audio is played.
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>Annyang Mock</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script>
var AudioMock = function () {
this.src = null;
this.start = null;
this.timer = 0;
var that = this;
this.load = function () {
$('#AudioState').html("Loaded " + that.src);
}
this.play = function () {
that.start = new Date();
$('#AudioState').html("Playing: " + this.src);
window.setTimeout(that.end, 10000);
that.timer = window.setInterval(that.mockPlayTime, 500);
}
this.mockPlayTime = function () {
var d = new Date();
var elapsed = (d - that.start) / 1000;
$('#AudioState').html("Playing: " + that.src + " " + elapsed + " secs");
}
this.endMockPlayTime = function () {
window.clearInterval(that.timer);
}
this.end = function () {
that.endMockPlayTime();
$('#AudioState').html("Ended: " + that.src);
if (that.ended) {
that.ended();
}
}
this.ended = null;
};
var annyangMock = function () {
this.callbacks = {};
this.paused = false;
};
annyangMock.prototype.addCallback = function (name, callback) {
this.callbacks[name] = callback;
}
annyangMock.prototype.removeCallback = function (name, callback) {
this.callbacks[name] = null;
}
annyangMock.prototype.pause = function () {
$('#AnnyangState').html("Annyang: pause()");
this.paused = true;
}
annyangMock.prototype.start = function () {
$('#AnnyangState').html("Annyang: start()");
this.paused = false;
}
annyangMock.prototype.invoke = function (name) {
if (!this.paused) {
$('#AnnyangState').html("called(" + name + ")");
var cb = this.callbacks[name];
if (cb) cb();
}
}
annyangMock.prototype.debug = function (flag) { };
var annyang = new annyangMock();
</script>
<script>
audio = new AudioMock();
var monitorId = 0;
if (annyang) {
annyang.addCallback('start', function () { console.log('started listening!'); });
annyang.addCallback('soundstart', onSoundHeard);
function monitorSound() {
lastSound = new Date();
if (monitorId && monitorId > 0) return;
monitorId = window.setInterval(trackSound, 1000);
annyang.start();
}
var lastSound = new Date();
function onSoundHeard() {
lastSound = new Date();
//console.log(lastSound);
}
function trackSound() {
var now = new Date();
var elapsed = now - lastSound;
$('#AnnyangState').html("Listening: " + (elapsed / 1000) + " secs");
if ((now - lastSound) > 3000) {
stopListening();
playNextAudio();
return;
}
}
function stopListening() {
window.clearInterval(monitorId);
monitorId = 0;
annyang.pause();
}
function playNextAudio() {
if (audioIndex === playList.length - 1) {
console.log("Played all audios");
return; // we have played all audio
}
else {
audio.src = dir + playList[++audioIndex] + extention;
load();
setTimeout(function () { play(); }, 1000);
}
}
function load() {
$($('#playlist li')[audioIndex]).addClass("loading");
audio.load();
}
function play() {
audio.play();
$('#playlist li').removeClass("loading")
var li = $('#playlist li')[audioIndex];
$(li).addClass("playing");
}
function playFirstAudio() {
annyang.pause();
audio.src = dir + playList[audioIndex] + extention;
load();
audio.ended = function () {
$('#playlist li').removeClass("playing");
lastSound = new Date(); // set timestamp
monitorSound(); // poll sound detection
}
setTimeout(function () { play(); }, 1000);
//console.log('First audio is playing');
}
// Start from here
var playList = ["1_hello", "2_how_old", "3_what_did_you_make"];
var dir = "sound/";
var extention = ".wav";
var audioIndex = 0;
annyang.debug(true);
$(document).ready(function () {
playFirstAudio();
var l = $("<ol>");
playList.forEach(function (j) {
l.append($("<li>").html(j));
});
$('#playlist').append(l);
})
};
</script>
<style type="text/css">
#playlist li {
width: 200px;
padding: 5px;
}
div {
padding: 15px;
}
#playlist li.playing {
border: 1px solid green;
background: #dedede;
}
#playlist li.loading {
border: 1px solid red;
background: #dedede;
}
</style>
</head>
<body>
<div>
<b>Annyang State:</b> <span id="AnnyangState"></span>
</div>
<div><b>Audio State:</b> <span id="AudioState"></span></div>
<div id="playlist">
<b>Playlist:</b>
</div>
<div id="Controls">
<input id="MockSound" type="button" value="Mock Sound" onclick="annyang.invoke('soundstart');" />
</div>
</body>
</html>
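To address the toddler case from the question, the hard-coded 3-second window in trackSound() can be made a tunable constant (a sketch; the name SILENCE_MS is mine):
// Sketch: a longer window (e.g. 5000-7000 ms) avoids cutting in
// between a slow speaker's sentences.
var SILENCE_MS = 5000;
function trackSound() {
    var now = new Date();
    if ((now - lastSound) > SILENCE_MS) {
        stopListening();
        playNextAudio();
    }
}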

Change the sound in WebAudioAPI with no user interaction on iOS

I'm using this function to create a sound. It works well on desktop and Android, and works initially on iOS when I use a touch event to start it. I need to replace the sound with another sound file later; however, on iOS it doesn't start - I'm assuming because it needs another user interaction to play the sound.
This is a VR app in a headset, so this kind of user interaction isn't possible. Is there another way of replacing the sound, or another non-click user interaction I can use, like movement?
I've seen this: http://matt-harrison.com/perfect-web-audio-on-ios-devices-with-the-web-audio-api/
It seems to have another solution, but I don't want to pre-load all of the files (they're reasonably big and there are 10 of them), which seems to be a requirement there - plus I use the pause function in the code I have. Are there any easy ways around this?
var AudioContext = AudioContext || webkitAudioContext, context = new AudioContext();
function createSound(filename) {
console.log('createSound()');
var url = cdnPrefix + '/' + filename;
var buffer;
context = new AudioContext();
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
// Decode asynchronously
request.onload = function() {
context.decodeAudioData(request.response, function(b) {
buffer = b;
play();
});
}
request.send();
var sourceNode = null,
startedAt = 0,
pausedAt = 0,
playing = false,
volume = context.createGain();
var play = function() {
if(playing || !buffer)
return;
var offset = pausedAt;
sourceNode = context.createBufferSource();
sourceNode.connect(context.destination);
sourceNode.connect(volume);
volume.gain.value = 1;
sourceNode.buffer = buffer;
sourceNode.start(0, offset);
sourceNode.onended = onEnded;
sourceNode.onstatechange = onStateChange;
sourceNode.onloaded = onLoaded;
//sourceNode.loop = true;
startedAt = context.currentTime - offset;
pausedAt = 0;
playing = true;
$(document).trigger("voiceoverPlay");
if(isPaused == true)
pause();
};
function onEnded(event){
$(document).trigger("voiceoverEnded");
play();
}
function onStateChange(event){
console.log('onStateChange',event);
}
function onLoaded(event){
console.log('onLoaded',event);
}
var pause = function() {
var elapsed = context.currentTime - startedAt;
stop();
pausedAt = elapsed;
$(document).trigger("voiceoverPause");
};
var stop = function() {
if (sourceNode) {
sourceNode.disconnect();
if(playing === true)
sourceNode.stop(0);
sourceNode = null;
}
pausedAt = 0;
startedAt = 0;
playing = false;
};
var getPlaying = function() {
return playing;
};
var getCurrentTime = function() {
if(pausedAt) {
return pausedAt;
}
if(startedAt) {
return context.currentTime - startedAt;
}
return 0;
};
var setCurrentTime = function(time) {
pausedAt = time;
};
var getDuration = function() {
return buffer.duration;
};
return {
getCurrentTime: getCurrentTime,
setCurrentTime: setCurrentTime,
getDuration: getDuration,
getPlaying: getPlaying,
play: play,
pause: pause,
stop: stop
};
}
You need a touch event for each sound.
I ended up using SoundJS, which is much better.
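For completeness, the usual workaround is the standard WebAudio "unlock" pattern (not what the answer above used): create one AudioContext, resume it inside the first touch handler, and reuse it for every later buffer - the gesture requirement applies to the context, not to each start() call. Note that createSound() above builds a new AudioContext per file, which re-triggers the restriction. A sketch, assuming a browser where resume() returns a promise:
// Sketch: unlock a single shared AudioContext on the first touch;
// later sounds can then be started programmatically.
var Ctx = window.AudioContext || window.webkitAudioContext;
var sharedContext = new Ctx();
document.body.addEventListener('touchend', function unlock() {
    sharedContext.resume().then(function () {
        document.body.removeEventListener('touchend', unlock);
    });
});
function playBuffer(buffer) {
    var source = sharedContext.createBufferSource();
    source.buffer = buffer;
    source.connect(sharedContext.destination);
    source.start(0); // no new gesture needed once the context is running
}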

Youtube api does not load on firefox

I have the following code snippet that controls an embedded YouTube player. It works great on Chrome and Safari but not on Firefox.
jsfiddle : http://jsfiddle.net/fuSSn/4/
Code from my app:
the iframe:
<div class="tubeframe" id="video-frame-155" style="">
<iframe title="YouTube video player" width="350" height="262" src="http://www.youtube.com/embed/hc5xkf9JqoE?HD=1;rel=0;showinfo=0;autohide=1" frameborder="0" allowfullscreen="" id="video-frame-155-frame"></iframe>
</div>
my javascript:
var source_tag = document.createElement("script");
source_tag.src = "https://www.youtube.com/iframe_api"; // load the IFrame Player API
var first_source_tag = document.getElementsByTagName("script")[0];
first_source_tag.parentNode.insertBefore(source_tag, first_source_tag);
// This function will be called when the API is fully loaded
function onYouTubeIframeAPIReady() {
YT_ready(true)
console.log("api loaded! yikes")
}
function getFrameID(id){
var elem = document.getElementById(id);
if (elem) {
if(/^iframe$/i.test(elem.tagName)) return id; //Frame, OK
// else: Look for frame
var elems = elem.getElementsByTagName("iframe");
if (!elems.length) return null; //No iframe found, FAILURE
for (var i=0; i<elems.length; i++) {
if (/^https?:\/\/(?:www\.)?youtube(?:-nocookie)?\.com(\/|$)/i.test(elems[i].src)) break;
}
elem = elems[i]; //The only, or the best iFrame
if (elem.id) return elem.id; //Existing ID, return it
// else: Create a new ID
do { //Keep postfixing `-frame` until the ID is unique
id += "-frame";
} while (document.getElementById(id));
elem.id = id;
return id;
}
// If no element, return null.
return null;
}
// Define YT_ready function.
var YT_ready = (function(){
var onReady_funcs = [], api_isReady = false;
return function(func, b_before){
if (func === true) {
api_isReady = true;
while(onReady_funcs.length > 0){
// Removes the first func from the array, and execute func
onReady_funcs.shift()();
}
}
else if(typeof func == "function") {
if (api_isReady) func();
else onReady_funcs[b_before?"unshift":"push"](func);
}
}
})();
var video = function ( videoid, frameid) {
var player;
var that;
var seconds;
var duration;
var stateChangeCallback;
var update_play = 0;
return {
setOnStateChangeCallback: function(callback) {
stateChangeCallback = callback;
},
getCurrentTime: function() {
return player.getCurrentTime();
},
getPlayer: function () {
return player;
},
getVideoFrameId: function () {
return "video-frame-" + videoid;
},
initVideo: function (second) {
console.log("initing")
that = this;
YT_ready(function(){
var frameID = getFrameID("video-frame-" + videoid);
console.log("creating player")
console.log(frameID)
if (frameID) { //If the frame exists
console.log("frame exists")
player = new YT.Player(frameID, {
events: {
"onStateChange": that.stateChange
}
});
console.log("Player Created!");
if (second) {
console.log(second)
setTimeout(function() { console.log("seek to"); player.seekTo(second, false); player.stopVideo()}, 1000);
}
}
});
},
stateChange: function (event) {
console.log("event.data = ", event.data);
switch(event.data) {
case YT.PlayerState.PLAYING:
{
if (stateChangeCallback)
stateChangeCallback("play", player.getCurrentTime(), player.getDuration());
onsole.log("play");
}
break;
case YT.PlayerState.PAUSED:
case YT.PlayerState.CUED:
case YT.PlayerState.ENDED:
{
if (stateChangeCallback)
stateChangeCallback("pause", player.getCurrentTime(), player.getDuration());
console.log("pause");
}
break;
}
},
pauseVideo: function () {
player.stopVideo();
console.log('player.stopVid()');
},
seekTo: function(second) {
player.seekTo(second, false);
}
};
};
function onStateChange(vid, action, second, total) {
if (Videos[vid]) {
console.log( (second / total) * 100);
}
};
$(document).ready(function () {
var Videos = {};
logger.info("heyyy")
var videoId=155;
//if (videoId) {
Videos[videoId] = video(videoId, 155);
console.log(Videos[155])
Videos[155].initVideo();
Videos[155].setOnStateChangeCallback(function(action, second, total) {
onStateChange(155, action, second, total);
});
//}
Videos[155].seekTo(1000, false);
onStateChange(155, "start", 0, 0);
});
I know that the required script tags are being added; I can test that from the console. I also know that onYouTubeIframeAPIReady() is actually called. But I still receive errors like
TypeError: player.stopVideo is not a function
When I run the lines that add the script tag again from the web console on Firefox, the API seems to load and everything starts working again.
I have been struggling with this for days and I really need help figuring out what might be wrong. If it helps, my application is developed in Ruby on Rails, but I don't think this is relevant information.
Thanks
There is no problem with the above code. My video was loaded in a Bootstrap modal. The modal's hide property made it invisible to Firefox, and Firefox would not load the API at all. So I removed the modal's hide class, and instead of display:none I used item.css("visibility", "visible"); and item.css("visibility", "hidden");, which made Firefox load the API.
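A sketch of that workaround (the selector is hypothetical): toggle visibility instead of display, so Firefox still lays out the iframe and initializes the player:
// Hide/show the video container without display:none.
var item = $('#video-frame-155');
function hideVideo() { item.css('visibility', 'hidden'); }
function showVideo() { item.css('visibility', 'visible'); }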
