I am having trouble understanding an assignment that my lecturer gave me. He supplies us with a program that is supposed to record from the computer microphone, and tells us to build a volume control for it that controls the dB level for the loudspeaker. It confuses me because, as far as I can tell, there is no loudspeaker involved in this program.
<html>
<head>
<title>Audio Renderer -chrome</title>
<style>
</style>
</head>
<body>
<h1>HTML5 webmic-Renderer </h1>
<h4>Chrome</h4>
<pre id="preLog">Access to micro</pre>
<p>
<input type="button" id="buttonStart" value="Start" onclick="start()" />
<input type="button" id="buttonStop" value="Stop" onclick="stop()" />
</p>
<script>
// Shared audio-graph state for the page.
var audioContext = new webkitAudioContext(); // webkit-prefixed constructor: Chrome-only at the time this was written
var realAudioInput = null; // MediaStreamSource wrapping the microphone stream (set in gotStream)
var preLog ; // cached reference to the <pre id="preLog"> log element
var zeroGain; // GainNode intended as the volume control — not yet wired into the graph
var channel = 2; // stereo input channel count
var bufferSize =1024; // ScriptProcessor buffer size in samples per audio frame
// Append a line of status text to the on-page <pre> log;
// fall back to alert() when the element cannot be found.
function log(text){
preLog = document.getElementById('preLog');
if (preLog) {
preLog.textContent += '\n' + text;
} else {
alert(text);
}
}
// Ask the browser for microphone access (webkit-prefixed API only, as in the
// original page); on success gotStream builds the graph, on denial noStream logs.
function start() {
log('Get user media..');
if (!navigator.webkitGetUserMedia) {
log('getUserMedia() not available from your Web browser!');
return;
}
navigator.webkitGetUserMedia({audio:true}, gotStream, noStream);
}
// getUserMedia failure callback: the user denied microphone access.
function noStream() {
log('Access to Micro was denied!');
}
// getUserMedia success callback: builds the audio graph
// microphone -> ScriptProcessor -> destination.
// The GainNode (zeroGain) is created but deliberately left unconnected —
// wiring it in is the volume-control exercise from the assignment.
function gotStream(stream) {
log('Access to Micro was started');
// Create an AudioNode from the microphone stream.
realAudioInput = audioContext.createMediaStreamSource(stream);
// Create a GainNode; gain 1.0 = unity (no attenuation).
zeroGain = audioContext.createGain();
zeroGain.gain.value = 1.0;
// Create a processor node with `channel` inputs and channel-1 outputs and a
// 1024-SAMPLE (not byte) buffer per audio frame.
// Fixed: jsNode was an undeclared implicit global; it is only used here.
var jsNode = audioContext.createScriptProcessor(bufferSize, channel, channel-1);
jsNode.onaudioprocess = audioProcess;
// Signal Graph
realAudioInput.connect( jsNode );
// zeroGain.connect(??); // left open for the volume-control exercise
jsNode.connect( audioContext.destination );
}
// Stop capturing: detach the microphone source from the processing graph.
// Guarded so that pressing Stop before Start (realAudioInput still null)
// no longer throws a TypeError.
function stop() {
log('Access to Micro stopped');
if (realAudioInput) {
realAudioInput.disconnect(0);
}
}
// Called once per audio frame: copies the left input channel straight to the
// single (mono) output channel. The right channel is read but currently unused.
function audioProcess(event) {
var sampleIn_l = event.inputBuffer.getChannelData(channel-2); // Stereo: 0 = left channel, 1 = right channel
var sampleIn_r = event.inputBuffer.getChannelData(channel-1); // read but discarded
var sampleOut = event.outputBuffer.getChannelData(channel-2);
// Fixed: the loop index was an undeclared implicit global (`i`), which is a
// ReferenceError in strict mode and leaks state between calls otherwise.
for (var i = 0; i < event.inputBuffer.length; i++) {
sampleOut[i] = sampleIn_l[i];
}
}
</script>
</body>
</html>
It says in his assignment: Create for the program Audio Renderer an interactive volume control for the loudspeaker on node-level: zeroGain.gain.value = 1.0;
I just don't understand what he wants from us. I would be so glad if anyone could help :)
Thank you very much for reading!
Related
I have a large dataset which I am retrieving via a Tableau API call. I'm using async/await to call the data and storing it with a .txt extension.
The way I retrieve the data is shown in the script below. The script is working as expected, and the logic I came up with is:
Retrieve data records via api call
Append data into div element
Once data is fully loaded to div, use file streamer to save records as txt file
script used -
<!DOCTYPE html>
<html>
<head>
<title>getData() Basic Example</title>
<script type="text/javascript" src="https://public.tableau.com/javascripts/api/tableau-2.min.js"></script>
<script type="text/javascript">
var viz, sheet, table;
// Embed the public Tableau viz and enable the export button once the viz
// becomes interactive (i.e. once it can answer data queries).
function initViz() {
var host = document.getElementById("vizContainer");
var url = "http://public.tableau.com/views/RegionalSampleWorkbook/Storms";
var options = {
hideTabs: true,
hideToolbar: true,
onFirstInteractive: function () {
document.getElementById('getData').disabled = false; // Enable our button
}
};
viz = new tableau.Viz(host, url, options);
}
// Prompt the user for a destination file and write `data` to it using the
// File System Access API, then close the stream to flush to disk.
async function savefile(data){
const handle = await window.showSaveFilePicker();
const stream = await handle.createWritable();
await stream.write(data);
await stream.close();
}
// Pull the worksheet's underlying rows, mirror every cell's formatted value
// into the #storage div, then hand the accumulated text to savefile().
function getUnderlyingData(){
sheet = viz.getWorkbook().getActiveSheet().getWorksheets().get("Storm Map Sheet");
sheet.getUnderlyingDataAsync().then(function(dataTable){
// Hoisted: the original re-called getData()/getColumns()/getElementById on
// every cell, which is O(rows*cols) redundant API and DOM work.
var rows = dataTable.getData();
var colCount = dataTable.getColumns().length;
var storage = document.getElementById('storage');
for(var i = 0; i < rows.length; i++){
for(var a = 0; a < colCount; a++){
storage.innerHTML += rows[i][a].formattedValue;
}
}
savefile(storage.innerHTML);
});
}
</script>
</head>
<body onload="initViz();">
<div class="page-header">
<button id="getData" onclick="getUnderlyingData()" class="btn" disabled>Get Data</button>
<div id="storage"></div>
</div>
<div id="vizContainer" style="width:600px; height:600px;"></div>
<div id="dataTarget"></div>
</body>
</html>
This is working as expected, but what worries me is when I have a super large volume of data. The alternative logic I have in mind, which I tried to implement, is as follows:
create streamer inside getunderlyingdata function
append data directly in for loop
The new logic I tried — let's say saveFile() does not exist and the writableStream is implemented directly in getUnderlyingData — is this script:
// Stream the worksheet's underlying rows directly into a user-chosen file.
// Fixed bug: the original awaited writableStream.close() immediately after
// *starting* getUnderlyingDataAsync(), so the stream was closed before any of
// the writes inside the .then callback ran. The promise chain is now awaited
// before closing.
async function getUnderlyingData(){
// save file to location
const newHandle = await window.showSaveFilePicker();
const writableStream = await newHandle.createWritable();
sheet = viz.getWorkbook().getActiveSheet().getWorksheets().get("Storm Map Sheet");
await sheet.getUnderlyingDataAsync().then(async function(dataTable){
// Hoist the table accessors out of the loops.
const rows = dataTable.getData();
const colCount = dataTable.getColumns().length;
for(let i = 0; i < rows.length; i++){
for(let a = 0; a < colCount; a++){
// Write data to stream
await writableStream.write(rows[i][a].formattedValue);
}
}
});
// Close stream only after all writes have completed.
await writableStream.close();
}
It was not able to capture the data because the page gets reloaded as soon as I try to save to a location. Is it possible to disable the page reload when a location is selected to save the file?
I am trying to create a dashboard for our office, and I want to show the occupancy of the meeting rooms (we, unfortunately, do not have a proper meeting room system). I have some sensors which can spit out a CSV of whether the motion is detected or not and I have a plan of the office with the green or red overlays for each room.
Where I am stuck is in how to toggle the visibility of green or red in the dashboard based on the CSV values. I.e. if the last value is "MotionDetected", show red; if the value is not "MotionDetected", then show green.
I'm very new to any coding, so I only have basic knowledge. Any help would be much appreciated!
Thanks in advance!
The expected output is if the last value in the CSV is "MotionDetected", show red, if the value is not "MotionDetected" then show green.
Get last value of csv: Get last part of CSV string
JavaScript:
var myCSV = "red,yellow,green,blue";
var csv_Data = myCSV.split(',');
var last = csv_Data[csv_Data.length - 1];
// or
//var last = myCSV.substr(myCSV.lastIndexOf(',') + 1);
// get the thing that changes color.
var ele = document.getElementById("theThingThatChangesColor");
if(last === "MotionDetected") {
ele.style.backgroundColor = "red";
} else {
ele.style.backgroundColor = "green";
}
If you have the classes you need inside a css file, you can change the class instead of the style: JS Change class of element
Based on wazz's generous help, and with some additional input from Alan Stines on Youtube for extracting the CSV string, I used the following code to make the above work!
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<link href="CSV Parsing css.css" type="text/css" rel="stylesheet">
<title>CSV Parsing Test</title>
</head>
<body>
<div id="output"></div>
<img id="Background" src="Brochure QR Code.png" alt="Broken">
<img id="TestImage" src="search.png" alt="Broken">
<script>
// Fetch the motion log CSV and recolour the indicator image once it arrives:
// red when the third-from-last field reads "DetectedMovement", green otherwise.
function init(){
var request = new XMLHttpRequest();
request.onreadystatechange = function(){
if(this.readyState == 4 && this.status == 200) {
console.log(this.responseText);
var values = this.responseText.split(',');
console.log(values);
// The third-from-last field carries the motion flag in this log format.
var last = values[values.length - 3];
console.log(last);
var indicator = document.getElementById("TestImage");
indicator.style.backgroundColor = (last === "DetectedMovement") ? "red" : "green";
}
};
request.open("GET", "MotionLog_PIRTempHum001Testing R00.01.csv", true);
request.send();
}
window.onload = init;
</script>
</body>
</html>
I hope sharing this helps!
I'm trying to implement a simple speech to text and then text to speech as a response to the initial speech to text input!
I'm using code from another open source site for both the speech synthesis and text 2 speech, so I don't fully understand the code.
Basically, what is happening is that when I am finished speaking the input, I press the pause-record-btn which is supposed to trigger myFunction(), yet I have to press the button twice!
(I am VERY new to js but do understand a bit of front end development (css, html) so any help will be very appreciated )
Code Pen
/*-----------------------------
Voice Recognition Script
------------------------------*/
// Feature-detect the Web Speech API (standard or webkit-prefixed name).
// If neither constructor exists, show the "unsupported browser" notice and
// hide the app UI instead of crashing.
try {
var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
var recognition = new SpeechRecognition();
}
catch(e) {
console.error(e);
$('.no-browser-support').show();
$('.app').hide();
}
// Cached jQuery handles for the UI elements the app updates.
var noteTextarea = $('#note-textarea');
var instructions = $('#recording-instructions');
var notesList = $('ul#notes');
// Running transcript of the note currently being dictated.
var noteContent = '';
// Get all notes from previous sessions and display them.
var notes = getAllNotes();
renderNotes(notes);
/*-----------------------------
Voice Recognition
------------------------------*/
// If false, the recording will stop after a few seconds of silence.
// When true, the silence period is longer (about 15 seconds),
// allowing us to keep recording even when the user pauses.
recognition.continuous = true;
// This block is called every time the Speech API captures a line.
recognition.onresult = function(event) {
// event is a SpeechRecognitionEvent object.
// It holds all the lines we have captured so far.
// We only need the current one.
var current = event.resultIndex;
// Get a transcript of what was said.
var transcript = event.results[current][0].transcript;
// Add the current transcript to the contents of our Note.
// There is a weird bug on mobile, where everything is repeated twice.
// There is no official solution so far so we have to handle an edge case.
var mobileRepeatBug = (current == 1 && transcript == event.results[0][0].transcript);
if(!mobileRepeatBug) {
noteContent += transcript;
noteTextarea.val(noteContent);
}
};
// Status messages shown while recognition starts / times out / errors.
recognition.onstart = function() {
instructions.text('Voice recognition activated. Try speaking into the microphone.');
}
recognition.onspeechend = function() {
instructions.text('You were quiet for a while so voice recognition turned itself off.');
}
recognition.onerror = function(event) {
// Only the no-speech error gets a friendly message; other errors are ignored here.
if(event.error == 'no-speech') {
instructions.text('No speech was detected. Try again.');
};
}
/*-----------------------------
App buttons and input
------------------------------*/
// Start dictation; a space separates this session's text from any existing note.
$('#start-record-btn').on('click', function(e) {
if (noteContent.length) {
noteContent += ' ';
}
recognition.start();
});
// Pause dictation without saving.
$('#pause-record-btn').on('click', function(e) {
recognition.stop();
instructions.text('Voice recognition paused.');
});
// Sync the text inside the text area with the noteContent variable.
noteTextarea.on('input', function() {
noteContent = $(this).val();
})
// Persist the current note to localStorage (refusing empty notes) and reset the UI.
$('#save-note-btn').on('click', function(e) {
recognition.stop();
if(!noteContent.length) {
instructions.text('Could not save empty note. Please add a message to your note.');
}
else {
// Save note to localStorage.
// The key is the dateTime with seconds, the value is the content of the note.
saveNote(new Date().toLocaleString(), noteContent);
// Reset variables and update UI.
noteContent = '';
renderNotes(getAllNotes());
noteTextarea.val('');
instructions.text('Note saved successfully.');
}
})
// Delegated click handler for the notes list: read a note aloud or delete it,
// depending on which control inside the <li> was clicked.
notesList.on('click', function(e) {
e.preventDefault();
var target = $(e.target);
// Listen to the selected note.
if(target.hasClass('listen-note')) {
var content = target.closest('.note').find('.content').text();
readOutLoud(content);
}
// Delete note.
if(target.hasClass('delete-note')) {
var dateTime = target.siblings('.date').text();
deleteNote(dateTime);
target.closest('.note').remove();
}
});
/*-----------------------------
Speech Synthesis
------------------------------*/
// Speak the given text through the browser's TTS engine with default
// volume, rate and pitch.
function readOutLoud(message) {
var utterance = new SpeechSynthesisUtterance();
utterance.text = message;
utterance.volume = 1; // full volume
utterance.rate = 1; // normal speed
utterance.pitch = 1; // normal pitch
window.speechSynthesis.speak(utterance);
}
/*-----------------------------
Helper Functions
------------------------------*/
// Re-render the saved-notes list, or a placeholder item when there are none.
function renderNotes(notes) {
if (!notes.length) {
notesList.html('<li><p class="content">You don\'t have any notes yet.</p></li>');
return;
}
var markup = notes.map(function (note) {
return `<li class="note">
<p class="header">
<span class="date">${note.date}</span>
Listen to Note
Delete
</p>
<p class="content">${note.content}</p>
</li>`;
}).join('');
notesList.html(markup);
}
// Persist one note to localStorage; the key embeds the note's timestamp.
function saveNote(dateTime, content) {
localStorage.setItem(`note-${dateTime}`, content);
}
// Collect every localStorage entry whose key carries the 'note-' prefix and
// return them as {date, content} records.
function getAllNotes() {
var notes = [];
for (var i = 0; i < localStorage.length; i++) {
var key = localStorage.key(i);
if (key.substring(0, 5) === 'note-') {
notes.push({
date: key.replace('note-', ''),
// Reuse the key we already fetched instead of calling localStorage.key(i) again.
content: localStorage.getItem(key)
});
}
}
return notes;
}
// Remove the stored note matching this timestamp key.
function deleteNote(dateTime) {
localStorage.removeItem(`note-${dateTime}`);
}
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Voice Controlled Notes App</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<script src='https://code.responsivevoice.org/responsivevoice.js'></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/shoelace-css/1.0.0-beta16/shoelace.css">
<link rel="stylesheet" href="styles.css">
</head>
<body>
<div class="container">
<h1>Voice Controlled Notes App</h1>
<p class="page-description">A tiny app that allows you to take notes by recording your voice</p>
<h3 class="no-browser-support">Sorry, Your Browser Doesn't Support the Web Speech API. Try Opening This Demo In Google Chrome.</h3>
<div class="app">
<h3>Add New Note</h3>
<div class="input-single">
<textarea id="note-textarea" placeholder="Input." rows="6"></textarea>
</div>
<button id="start-record-btn" title="Start Recording">Start Recognition</button>
<button id="pause-record-btn" onClick="myFunction()" title="Pause Recording">Stop listening</button>
<p id="recording-instructions">Press the <strong>Start Recognition</strong> button and allow access.</p>
</div>
</div>
<p id="demo"></p>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
<script src="script.js"></script>
<script>
// Read the transcribed note and respond (on screen and aloud) when the
// trigger phrase appears anywhere in it.
function myFunction() {
var transcript = document.getElementById("note-textarea").value;
if (!transcript.includes("hello how are you")) {
return;
}
document.getElementById("demo").innerHTML = "hi anthony";
responsiveVoice.speak("I am good thanks, and you?");
}
</script>
</body>
</html>
I'd like to record some sounds and process their raw data with iOS and Javascript. So I'm planning to use Web Audio techniques, but ScriptProcessorNode or getChannelData won't work well.
I put my sample code at the end of this post. It is expected to display in the textarea the raw audio data of the movie file you choose. Although it works correctly with Chrome or Firefox in Windows, it shows only zeros when I use iOS (both Safari and Chrome).
If I use OscillatorNode instead of MediaElementSourceNode, it works well. Therefore, I'm suspecting using MediaElementSourceNode and ScriptProcessorNode at the same time but I'm at a loss how to deal this problem.
Sorry for my poor English.
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>sample</title>
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js" />
<script type="text/javascript">
//<![CDATA[
// Normalise the AudioContext constructor across vendor prefixes.
window.AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext || window.msAudioContext;
var rawdata; // accumulates every processed frame's samples (reset on each capture run)
var bufsize = 4096; // ScriptProcessor buffer size in samples
// ScriptProcessor callback: pass audio through unchanged while appending a
// snapshot of each frame's samples to the global rawdata array.
function onaudproc(e) {
var input = e.inputBuffer.getChannelData(0);
var output = e.outputBuffer.getChannelData(0);
var frame = new Float32Array(bufsize);
for (var n = 0; n < bufsize; n++) {
frame[n] = input[n];
output[n] = input[n];
}
Array.prototype.push.apply(rawdata, frame);
}
// File-input change handler: builds an object URL for the chosen file and
// wires up playback plus raw-sample capture once the video starts loading.
function filechanges()
{
var url = window.URL.createObjectURL($('#camfile')[0].files[0]);
// Fixed: audctx was an undeclared implicit global; it is only used here.
var audctx;
var fn = function(){
// When playback ends (or pauses), dump the captured samples to the textarea.
$('#video').on('ended pause',function(){
$('#console').append(rawdata.toString());
$('#video').off();
});
// The output button builds the audio graph video -> ScriptProcessor ->
// destination, resets the capture buffer, and starts playback.
$('#writebtn').click(function(){
audctx = new AudioContext();
var spnode = audctx.createScriptProcessor(bufsize, 1, 1);
var src = audctx.createMediaElementSource($('#video')[0]);
src.connect(spnode);
spnode.onaudioprocess = onaudproc;
spnode.connect(audctx.destination);
rawdata = [];
$('#video')[0].play();
});
};
// Register the handler before setting src so 'loadstart' is not missed.
$('#video').on('loadstart',fn);
$('#video').attr('src',url);
}
//]]>
</script></head>
<body>
<div class="buttonset">
<input type="file" id="camfile" onchange="filechanges()" />
<input type="button" id="writebtn" value="output" />
</div>
<div><video src="#none" id="video" controls="controls" /></div>
<div><textarea id="console" rows="20" cols="80" /></div>
</body></html>
This is probably a CORS access issue. You need permissions (with recent versions of Chrome) for createMediaElementSource to access the data.
This bug happens only when easyRTC is run inside an angularJS application.
Say we have 3 participants, A, B and C. When each one of them connects at random times, they see each other's video stream (3 concurrent streams). Say A refreshes the page. This is the sequence of events, simplified
easyrtc.setRoomOccupantListener is fired
The occupants list is retrieved
some other easyrtc events are fired
for each occupant's stream a stream URL is generated calling URL.createObjectURL
the stream URL is added to a video element and the video stream is shown (via an ng-repeat method)
BUT
Although everything works as planned, A only sees the video from B. C has a valid stream URL but the video is black.
This happens only when A refreshes the page and retrieves all the other streams in bulk.
the video element lives inside a directive. I have tried to create the video element with jQuery, inside the directive, but no luck
The directive has an isolate scope
I use $sce.trustAsResourceURL for the stream URL
The same exact code works fine in an angular-free js sample file
I have tried to save the streams to a window.myStreamArray and then in the console re-generate the stream URLs and add them to the video elements with some jQuery. This works fine for the 1st stream. The 2nd stream, although I get a valid url, nothing is shown
The video element has autoplay
The code that runs standalone and works. the same code runs as an angular service. If we open 3 browser tabs and B taps 1.connect, C taps 2.connect and A taps 1.connect, A will receive B and C and display the video streams.
<!DOCTYPE html>
<html>
<head>
<title>Demo</title>
<meta http-equiv='cache-control' content='no-cache'>
<meta http-equiv='expires' content='0'>
<meta http-equiv='pragma' content='no-cache'>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"/>
</head>
<body>
<h1>easy rtc demo</h1>
<button id="btnOneInit">1. connect</button>
<button id="btnOne">1. start streaming</button>
<button id="btnTwoInit">2. connect</button>
<button id="btnTwo">2. start streaming</button>
<div id="one">
<video autoplay src=""></video>
</div>
<div id="two">
<video autoplay src=""></video>
</div>
<div id="three">
<video autoplay src=""></video>
</div>
<div id="four">
<video autoplay src=""></video>
</div>
<script src="/lib/jquery-2.1.0.min.js"></script>
<script src="/lib/sugar.min.js"></script>
<script src="/lib/socket.io.js"></script>
<script src="/lib/sails.io.js"></script>
<script src="/lib/easyrtc.js"></script>
<script>
// Factory for a small easyRTC client wrapper. All state lives in the closure;
// the returned object exposes init/connect/startStreaming/disconnect/closeStream.
function easy() {
// Closure-wide state. _acceptedStreamsMap maps easyrtcid -> stream metadata
// for streams we have accepted (local and remote).
var _localRoom, _remoteRoom, _localStream, _remoteStreams, _available , _webRTCinitialized, _user;
var _acceptedStreamsMap = {};
var roomName = "SectorOne";
var mySocket;
// Reset per-session state and remember which DOM container (options.id,
// e.g. "#one") this client renders into.
function init(options) {
_webRTCinitialized = false;
_available = false;
_remoteStreams = [];
_remoteRoom = {
id: undefined,
token: undefined
};
mySocket = io.connect();
_user = {
id: options.id,
role: 'presenter'
};
return this;
}
// Configure easyrtc (receive-only by default) and connect to the signalling
// server, registering all lifecycle callbacks.
function connect() {
easyrtc.dontAddCloseButtons();
easyrtc.enableDebug(true);
easyrtc.enableAudio(false);
easyrtc.enableVideo(false);
easyrtc.enableAudioReceive(true);
easyrtc.enableVideoReceive(true);
easyrtc.setRoomOccupantListener(occupantsChanged);
easyrtc.connect('auvious.audioVideo', function (easyrtcid, roomOwner) {
// Our own easyrtcid, kept for tagging the local stream's metadata.
_localRoom = easyrtcid;
console.log('connected with id : ' + easyrtcid);
}, function (error) {
alert(error);
});
easyrtc.setDisconnectListener(roomDisconnected);
easyrtc.setStreamAcceptor(callAccepted);
easyrtc.setAcceptChecker(acceptCall);
easyrtc.setOnStreamClosed(streamRemoved);
}
// Switch on the requested local media, publish its metadata to the room's
// API field, and show the local stream in our own container.
function startStreaming(audio, video, screen) {
easyrtc.enableAudio(audio);
easyrtc.enableVideo(video);
easyrtc.enableAudioReceive(true);
easyrtc.enableVideoReceive(true);
easyrtc.initMediaSource(
// success callback
function () {
var stream = easyrtc.getLocalStream();
var compositeID = _user.id;
var eventData = {userId: _user.id, role: _user.role, easyrtcid: _localRoom, id: stream.id, video: video, audio: !video, screen: screen};
_acceptedStreamsMap[_localRoom] = eventData;
var streamUrl = easyrtc.getLocalStreamAsUrl();
var options = {video: video, audio: !video, screen: screen, local: true, streamUrl: streamUrl};
console.log('about to show LOCAL stream...');
showStream(compositeID, stream, options);
// Advertise this stream's metadata so other occupants can find it.
easyrtc.setRoomApiField(roomName, _localRoom, JSON.stringify(eventData));
},
function (err) {
alert(err);
}
);
}
// Room-occupant listener: rebuild the list of remote streams we have not yet
// accepted, then call each publisher. The listener is detached first so this
// bulk pass runs only once per registration.
function occupantsChanged(roomName, occupants, selfInfo) {
_remoteStreams = [];
easyrtc.setRoomOccupantListener(null);
for (var easyrtcid in occupants) {
var occ = occupants[easyrtcid];
if (occ && occ.apiField) {
// NOTE(review): apiField appears to be keyed by the occupant's own
// easyrtcid and to wrap the value in {fieldValue} — confirm against the
// easyrtc server's apiField format.
var streamingId = occ.apiField[easyrtcid];
if (streamingId && !_acceptedStreamsMap.hasOwnProperty(streamingId)) {
var data = JSON.parse(occ.apiField[easyrtcid].fieldValue);
_remoteStreams.push(data);
}
}
}
if (_remoteStreams.length > 0)
callOthers();
}
// Stream acceptor: when a remote stream arrives for a known, not-yet-shown
// caller, build an object URL for it and render it in that user's container.
function callAccepted(easyrtcid, stream) {
if (_acceptedStreamsMap[easyrtcid] && !_acceptedStreamsMap[easyrtcid].streaming) {
var remoteStream = _acceptedStreamsMap[easyrtcid];
var compositeID = remoteStream.userId;
// Object.clone comes from the sugar.js library loaded by this page.
var options = Object.clone(remoteStream);
options.streamUrl = URL.createObjectURL(stream);
options.local = false;
console.log('about to show remote stream...');
console.log(_acceptedStreamsMap);
_acceptedStreamsMap[easyrtcid].streaming = true;
showStream(compositeID, stream, options);
}
}
// Accept every incoming call unconditionally.
function acceptCall(easyrtcid, acceptedCB) {
acceptedCB(true);
}
// Call each pending remote publisher, walking _remoteStreams from the end.
// The next call is only placed from inside the previous call's was-accepted
// callback, so the calls are serialised rather than issued in parallel.
function callOthers() {
function establishConnection(position) {
function callSuccess() {
}
function callFailure(errorCode, errorText) {
}
if (position >= 0) {
console.log('calling ....' + _remoteStreams[position].easyrtcid);
easyrtc.call(_remoteStreams[position].easyrtcid, callSuccess, callFailure, function (accepted, easyrtcid) {
if (position > 0) {
establishConnection(position - 1);
}
connectionCallAccepted(accepted, easyrtcid)
})
}
}
if (_remoteStreams.length > 0) {
establishConnection(_remoteStreams.length - 1);
}
}
// Record an accepted call in the map (looked up from _remoteStreams by id)
// so callAccepted can later match the media stream to its metadata.
function connectionCallAccepted(accepted, easyrtcid) {
if (accepted) {
if (_acceptedStreamsMap[easyrtcid] == undefined) {
_acceptedStreamsMap[easyrtcid] = _remoteStreams.find(function (stream) {
return stream.easyrtcid == easyrtcid;
});
}
console.log('accepted stream--->' + easyrtcid);
console.log(_acceptedStreamsMap)
}
}
// Point the <video> inside the given container at the stream's object URL.
// compositeID is a jQuery selector such as "#one".
function showStream(compositeID, stream, options) {
$(compositeID).find('video').attr('src', options.streamUrl);
console.log('streaming...');
}
// Stop publishing the local stream and withdraw its room metadata.
function closeStream(stream) {
easyrtc.enableAudio(false);
easyrtc.enableVideo(false);
if (stream.local) {
delete _acceptedStreamsMap[_localRoom];
easyrtc.setRoomApiField(roomName, _localRoom, undefined);
easyrtc.closeLocalStream(stream.streamName);
}
}
// Remote stream closed: drop its bookkeeping entry.
function streamRemoved(easyrtcId) {
easyrtc.enableAudio(false);
easyrtc.enableVideo(false);
var stream = _acceptedStreamsMap[easyrtcId];
if (stream) {
console.log('stream removed ---->' + easyrtcId);
console.log(_acceptedStreamsMap);
var attrs = {guid: stream.id, userId: stream.userId};
delete _acceptedStreamsMap[easyrtcId];
console.log(_acceptedStreamsMap);
}
}
// Disconnect listener placeholder (intentionally empty).
function roomDisconnected() {
}
function disconnect() {
easyrtc.disconnect();
}
// Public API of the wrapper.
return {
init: init,
startStreaming: startStreaming,
connect: connect,
disconnect: disconnect,
closeStream: closeStream
}
}
// Demo wiring: one shared client instance. Each "connect" button re-inits the
// client bound to its own video container, then connects to signalling; the
// "start streaming" buttons publish local video (no audio, no screen).
var rtc = new easy();
$('#btnOneInit').click(function () {
rtc.init({id: "#one"});
rtc.connect();
})
$('#btnTwoInit').click(function () {
rtc.init({id: "#two"});
rtc.connect();
})
$('#btnOne, #btnTwo').click(function () {
rtc.startStreaming(false, true, false);
})
</script>
</body>
</html>