Cannot read properties of undefined (reading 'getUserMedia') in AWS EC2 - javascript

I have deployed a Django app to AWS EC2 in order to record audio from a speaker.
Here is my code for recording the audio stream:
let log = console.log.bind(console),
    id = val => document.getElementById(val),
    ul = id('ul'),
    gUMbtn = id('gUMbtn'),
    start = id('start'),
    stop = id('stop'),
    stream,
    recorder,
    counter = 1,
    chunks,
    media;

gUMbtn.onclick = e => {
    let mv = id('mediaVideo'),
        mediaOptions = {
            audio: {
                tag: 'audio',
                type: 'audio/wav',
                ext: '.wav',
                gUM: { audio: true }
            }
        };
    media = mediaOptions.audio;
    navigator.mediaDevices.getUserMedia(media.gUM).then(_stream => {
        stream = _stream;
        id('gUMArea').style.display = 'none';
        id('btns').style.display = 'inherit';
        start.removeAttribute('disabled');
        recorder = new MediaRecorder(stream);
        recorder.ondataavailable = e => {
            chunks.push(e.data);
            if (recorder.state == 'inactive') {
                makeLink();
            }
        };
        log('got media successfully');
    }).catch(log);
}

start.onclick = e => {
    start.disabled = true;
    stop.removeAttribute('disabled');
    chunks = [];
    recorder.start();
}

stop.onclick = e => {
    stop.disabled = true;
    recorder.stop();
    start.removeAttribute('disabled');
}

function makeLink() {
    let blob = new Blob(chunks, { type: media.type }),
        url = URL.createObjectURL(blob),
        li = document.createElement('li'),
        mt = document.createElement(media.tag),
        hf = document.createElement('a');
    mt.controls = true;
    mt.src = url;
    hf.href = url;
    hf.download = `${counter}${media.ext}`;
    hf.innerHTML = `download ${hf.download}`;
    li.appendChild(mt);
    li.appendChild(hf);
    ul.innerHTML = "";
    ul.appendChild(li);
}
When I run that code on the server, it throws "Uncaught TypeError: Cannot read properties of undefined (reading 'getUserMedia')".
If someone has experienced this before, please give me a hand!

When I hosted the app over HTTPS, it ran fine.
navigator.mediaDevices is only available in secure contexts, so it is undefined when the page is served over plain HTTP (localhost being the usual exception).
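Guarding for this makes the failure explicit instead of a confusing TypeError. A minimal sketch, assuming the page might still be served over plain HTTP:

// navigator.mediaDevices only exists in secure contexts (HTTPS or localhost),
// so check for it before calling getUserMedia.
if (!window.isSecureContext || !navigator.mediaDevices) {
    alert('Recording requires HTTPS; please load this page over a secure connection.');
} else {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => { /* wire up the MediaRecorder as above */ })
        .catch(console.log);
}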

Related

Adding real-time voice filter with Web Audio API

I am trying to implement a real-time voice changer; I have tried some methods but could not get any results.
I tried to implement my code according to the following tutorial link.
Following that link, instead of using a bufferSource, I created the source from the stream with createMediaStreamSource and passed it to the demonBeastTransform method.
Here is my final code:
const video = document.querySelector("video");
const range = document.querySelector("#gain");

navigator.mediaDevices
    .getUserMedia({
        audio: true,
        video: true
    })
    .then((stream) => {
        video.srcObject = stream;
        video.onloadedmetadata = (e) => {
            video.play();
            video.muted = true;
        };
        // Create a MediaStreamAudioSourceNode
        // Feed the HTMLMediaElement into it
        const audioCtx = new AudioContext();
        const source = audioCtx.createMediaStreamSource(stream);
        demonBeastTransform(source, audioCtx);
    })
    .catch((err) => {
        console.error(`The following error occurred: ${err}`);
    });

async function demonBeastTransform(source, ctx, distortionAmount = 100) {
    /* let ctx = new OfflineAudioContext(audioBuffer.numberOfChannels, audioBuffer.length, audioBuffer.sampleRate); */
    // Source
    /* let source = ctx.createBufferSource();
    source.buffer = audioBuffer; */
    // Reverb
    let convolver = ctx.createConvolver();
    convolver.buffer = await ctx.decodeAudioData(await (await fetch("https://voicechanger.io/audio/impulse-responses/voxengo/Large Wide Echo Hall.wav")).arrayBuffer());
    // Fire
    let fire = ctx.createBufferSource();
    fire.buffer = await ctx.decodeAudioData(await (await fetch("https://voicechanger.io/audio/backgrounds/brush_fire-Stephan_Schutze-55390065.mp3")).arrayBuffer());
    fire.loop = true;
    // Compressor
    let compressor = ctx.createDynamicsCompressor();
    compressor.threshold.value = -50;
    compressor.ratio.value = 16;
    // Wobble
    let oscillator = ctx.createOscillator();
    oscillator.frequency.value = 50;
    oscillator.type = 'sawtooth';
    // ---
    let oscillatorGain = ctx.createGain();
    oscillatorGain.gain.value = 0.004;
    // ---
    let delay = ctx.createDelay();
    delay.delayTime.value = 0.01;
    // ---
    let fireGain = ctx.createGain();
    fireGain.gain.value = 0.2;
    // ---
    let convolverGain = ctx.createGain();
    convolverGain.gain.value = 2;
    // Filter
    let filter = ctx.createBiquadFilter();
    filter.type = "highshelf";
    filter.frequency.value = 1000;
    filter.gain.value = 10;
    // Create graph
    oscillator.connect(oscillatorGain);
    oscillatorGain.connect(delay.delayTime);
    // ---
    source.connect(delay);
    delay.connect(convolver);
    //waveShaper.connect(convolver);
    fire.connect(fireGain);
    convolver.connect(convolverGain);
    convolverGain.connect(filter);
    filter.connect(compressor);
    fireGain.connect(ctx.destination);
    compressor.connect(ctx.destination);
    let filter2 = ctx.createBiquadFilter();
    filter2.type = "lowpass";
    filter2.frequency.value = 2000;
    let noConvGain = ctx.createGain();
    noConvGain.gain.value = 0.9;
    delay.connect(filter2);
    filter2.connect(filter);
    filter.connect(noConvGain);
    noConvGain.connect(compressor);
    // Render
    oscillator.start(0);
    source.start(0);
    fire.start(0);
    /* let outputAudioBuffer = await ctx.startRendering();
    return outputAudioBuffer; */
}
<h1>Web Audio API examples: MediaStreamAudioSourceNode</h1>
<video controls></video>
<br />
I took the liberty of tinkering a bit with your script and made the following modifications:
Stream
The biggest change is how the stream is handled: the stream object is now passed into demonBeastTransform as its only argument.
The reason for this is that we need both the audio and the video tracks from the stream. The idea is to split the audio and video tracks, modify the audio track, and then combine them back into a single stream that is assigned as the srcObject of the video, as the sketch below shows.
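In isolation, that split-and-recombine step is only a few lines (a sketch, not the full effect graph):

const audioCtx = new AudioContext();
const source = audioCtx.createMediaStreamSource(stream);        // audio in
const dest = audioCtx.createMediaStreamDestination();           // processed audio out
// ...connect source through the effect nodes to dest here...
stream.getVideoTracks().forEach(t => dest.stream.addTrack(t));  // re-attach video
video.srcObject = dest.stream;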
CORS
The first issue I ran into was that the fetch requests weren't working because of a Same Origin Policy error. However, I was able to download the files and fetch them from my local server, so I'd recommend that you do the same.
I also added a try / catch block around each awaited request to handle errors better.
const video = document.querySelector("video");
const range = document.querySelector("#gain");

navigator.mediaDevices
    .getUserMedia({
        audio: true,
        video: true
    })
    .then(demonBeastTransform)
    .then((stream) => {
        video.onloadedmetadata = () => video.play();
        video.srcObject = stream;
    })
    .catch((err) => {
        console.error(`The following error occurred: ${err}`);
    });

async function demonBeastTransform(stream) {
    const audioCtx = new AudioContext();
    const source = audioCtx.createMediaStreamSource(stream);
    const streamDestination = audioCtx.createMediaStreamDestination();

    // Get the video tracks and add them to the stream destination.
    const videoTracks = stream.getVideoTracks();
    for (const videoTrack of videoTracks) {
        streamDestination.stream.addTrack(videoTrack);
    }

    // Reverb
    let convolver = audioCtx.createConvolver();
    try {
        const convolverResponse = await fetch("Large Wide Echo Hall.wav");
        const convolverBuffer = await convolverResponse.arrayBuffer();
        convolver.buffer = await audioCtx.decodeAudioData(convolverBuffer);
    } catch (error) {
        return Promise.reject(error);
    }

    // Fire
    let fire = audioCtx.createBufferSource();
    try {
        const fireResponse = await fetch("brush_fire-Stephan_Schutze-55390065.mp3");
        const fireBuffer = await fireResponse.arrayBuffer();
        fire.buffer = await audioCtx.decodeAudioData(fireBuffer);
        fire.loop = true;
    } catch (error) {
        return Promise.reject(error);
    }

    // Compressor
    let compressor = audioCtx.createDynamicsCompressor();
    compressor.threshold.value = -50;
    compressor.ratio.value = 16;
    // Wobble
    let oscillator = audioCtx.createOscillator();
    oscillator.frequency.value = 50;
    oscillator.type = 'sawtooth';
    // ---
    let oscillatorGain = audioCtx.createGain();
    oscillatorGain.gain.value = 0.004;
    // ---
    let delay = audioCtx.createDelay();
    delay.delayTime.value = 0.01;
    // ---
    let fireGain = audioCtx.createGain();
    fireGain.gain.value = 0.2;
    // ---
    let convolverGain = audioCtx.createGain();
    convolverGain.gain.value = 2;
    // Filter
    let filter = audioCtx.createBiquadFilter();
    filter.type = "highshelf";
    filter.frequency.value = 1000;
    filter.gain.value = 10;

    // Create graph
    oscillator.connect(oscillatorGain);
    oscillatorGain.connect(delay.delayTime);
    // ---
    source.connect(delay);
    delay.connect(convolver);
    //waveShaper.connect(convolver);
    fire.connect(fireGain);
    convolver.connect(convolverGain);
    convolverGain.connect(filter);
    filter.connect(compressor);
    // Instead of audioCtx.destination we pass the audio into the new stream.
    fireGain.connect(streamDestination);
    compressor.connect(streamDestination);

    let filter2 = audioCtx.createBiquadFilter();
    filter2.type = "lowpass";
    filter2.frequency.value = 2000;
    let noConvGain = audioCtx.createGain();
    noConvGain.gain.value = 0.9;
    delay.connect(filter2);
    filter2.connect(filter);
    filter.connect(noConvGain);
    noConvGain.connect(compressor);

    // Start the generator nodes.
    oscillator.start(0);
    fire.start(0);

    return streamDestination.stream;
}
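One caveat: depending on the browser's autoplay policy, the AudioContext may be created in the suspended state, and nothing will play until it is resumed from a user gesture. A small guard along these lines can help (a sketch, assuming audioCtx is exposed where the handler can see it):

document.addEventListener('click', () => {
    if (audioCtx.state === 'suspended') audioCtx.resume();
}, { once: true });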

Detect silence in audio recording

There are similar questions for Java and iOS, but I'm wondering about detecting silence in JavaScript for audio recordings made via getUserMedia(). So given:
navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorder.start();

        const audioChunks = [];
        mediaRecorder.addEventListener("dataavailable", event => {
            audioChunks.push(event.data);
        });

        mediaRecorder.addEventListener("stop", () => {
            const audioBlob = new Blob(audioChunks);
            const audioUrl = URL.createObjectURL(audioBlob);
            const audio = new Audio(audioUrl);
            audio.play();
        });
    });
I'm wondering if there is anything that can be checked on the Blob, URL, or Audio objects in the stop event for an absence of audio, e.g. a bad microphone or a virtual device being selected, anything along those lines. I was previously checking the blob's size, but silent audio still has a file size. I can do this on the backend via ffmpeg, but I'm hoping there is a pure-JS way to simplify things.
With this solution, inspired by Visualizations with Web Audio API, you can set a minimum required decibel level and detect whether anything was recorded.
const MIN_DECIBELS = -45;

navigator.mediaDevices.getUserMedia({ audio: true })
    .then(stream => {
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorder.start();

        const audioChunks = [];
        mediaRecorder.addEventListener("dataavailable", event => {
            audioChunks.push(event.data);
        });

        const audioContext = new AudioContext();
        const audioStreamSource = audioContext.createMediaStreamSource(stream);
        const analyser = audioContext.createAnalyser();
        analyser.minDecibels = MIN_DECIBELS;
        audioStreamSource.connect(analyser);

        const bufferLength = analyser.frequencyBinCount;
        const domainData = new Uint8Array(bufferLength);

        let soundDetected = false;
        const detectSound = () => {
            if (soundDetected) {
                return;
            }
            analyser.getByteFrequencyData(domainData);
            for (let i = 0; i < bufferLength; i++) {
                if (domainData[i] > 0) {
                    soundDetected = true;
                }
            }
            window.requestAnimationFrame(detectSound);
        };
        window.requestAnimationFrame(detectSound);

        mediaRecorder.addEventListener("stop", () => {
            const audioBlob = new Blob(audioChunks);
            const audioUrl = URL.createObjectURL(audioBlob);
            const audio = new Audio(audioUrl);
            audio.play();
            console.log({ soundDetected });
        });
    });
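Why domainData[i] > 0 works: getByteFrequencyData maps analyser.minDecibels to 0 and analyser.maxDecibels to 255, so any non-zero byte means a frequency bin above MIN_DECIBELS. If you prefer a tunable threshold instead of a hard cut-off, an average-volume variant is a small change (sketch; SOME_THRESHOLD is a placeholder):

analyser.getByteFrequencyData(domainData);
const average = domainData.reduce((sum, v) => sum + v, 0) / bufferLength;
if (average > SOME_THRESHOLD) soundDetected = true; // pick SOME_THRESHOLD empirically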
This code runs a function for every dialog it detects; it keeps looping until the user stops it:
const VOICE_MIN_DECIBELS = -35;
const DELAY_BETWEEN_DIALOGS = 400;
const DIALOG_MAX_LENGTH = 60 * 1000;
let MEDIA_RECORDER = null;
let IS_RECORDING = false;

//startRecording:
function startRecording(){
    IS_RECORDING = true;
    record();
}

//stopRecording:
function stopRecording(){
    IS_RECORDING = false;
    if (MEDIA_RECORDER !== null)
        MEDIA_RECORDER.stop();
}

//record:
function record(){
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
            //start recording:
            MEDIA_RECORDER = new MediaRecorder(stream);
            MEDIA_RECORDER.start();

            //save audio chunks:
            const audioChunks = [];
            MEDIA_RECORDER.addEventListener("dataavailable", event => {
                audioChunks.push(event.data);
            });

            //analysis:
            const audioContext = new AudioContext();
            const audioStreamSource = audioContext.createMediaStreamSource(stream);
            const analyser = audioContext.createAnalyser();
            analyser.minDecibels = VOICE_MIN_DECIBELS;
            audioStreamSource.connect(analyser);
            const bufferLength = analyser.frequencyBinCount;
            const domainData = new Uint8Array(bufferLength);

            //loop:
            const startTime = Date.now();
            let lastDetectedTime = startTime;
            let anySoundDetected = false;
            const detectSound = () => {
                //recording stopped by user:
                if (!IS_RECORDING)
                    return;

                const currentTime = Date.now();

                //time out:
                if (currentTime > startTime + DIALOG_MAX_LENGTH){
                    MEDIA_RECORDER.stop();
                    return;
                }

                //a dialog detected:
                if (anySoundDetected === true &&
                    currentTime > lastDetectedTime + DELAY_BETWEEN_DIALOGS
                ){
                    MEDIA_RECORDER.stop();
                    return;
                }

                //check for detection:
                analyser.getByteFrequencyData(domainData);
                for (let i = 0; i < bufferLength; i++)
                    if (domainData[i] > 0){
                        anySoundDetected = true;
                        lastDetectedTime = Date.now();
                    }

                //continue the loop:
                window.requestAnimationFrame(detectSound);
            };
            window.requestAnimationFrame(detectSound);

            //stop event:
            MEDIA_RECORDER.addEventListener('stop', () => {
                //stop all the tracks:
                stream.getTracks().forEach(track => track.stop());
                if (!anySoundDetected) return;

                //send to server:
                const audioBlob = new Blob(audioChunks, {'type': 'audio/mp3'});
                doWhateverWithAudio(audioBlob);

                //start recording again, unless the user stopped:
                if (IS_RECORDING)
                    record();
            });
        });
}

//doWhateverWithAudio:
function doWhateverWithAudio(audioBlob){
    //.... send to server, download, etc.
}
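For completeness, wiring these functions to page controls could look like this (a sketch; the button ids are assumptions):

document.getElementById('startBtn').onclick = startRecording;
document.getElementById('stopBtn').onclick = stopRecording;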

How to define document in javascript blob?

I want to create an audio element inside the blob code, but I couldn't get it to work.
const blobCode = () => {
    return `
    var sound = document.createElement('audio');
    sound.id = 'audio-player';
    sound.controls = 'controls';
    sound.src = 'media/Blue Browne.mp3';
    sound.type = 'audio/mpeg';
    document.body.appendChild(sound);

    class WhiteNoiseProcessor extends AudioWorkletProcessor {
        process (inputs, outputs, parameters) {
            const output = outputs[0]
            output.forEach(channel => {
                for (let i = 0; i < channel.length; i++) {
                    channel[i] = Math.random() * 2 - 1
                }
            })
            return true
        }
    }
    registerProcessor('white-noise-processor', WhiteNoiseProcessor);`
}

const audioContext = new OfflineAudioContext(1, 128, 300000);
var blob_url = new Blob([blobCode()], {
    type: "text/javascript"
});
var blob_url_create = URL.createObjectURL(blob_url);
await audioContext.audioWorklet.addModule(blob_url_create).then(async () => {
    var wa = new AudioWorkletNode(audioContext, "white-noise-processor");
});
If I set the Blob type to text/javascript, I get the error Uncaught ReferenceError: document is not defined:
var blob_url = new Blob([blobCode()], {type: "text/javascript"});
If I set the Blob type to text/html, I get the error Uncaught DOMException: The user aborted a request:
var blob_url = new Blob([blobCode()], {type: "text/html"});
How do I resolve this error?
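The root cause is that the module passed to audioWorklet.addModule() runs in an AudioWorkletGlobalScope, which has no document (hence the ReferenceError); DOM work has to stay on the page, and only the processor registration belongs in the blob. A minimal sketch of that split:

// Main page: DOM work happens here, outside the worklet module.
const sound = document.createElement('audio');
sound.id = 'audio-player';
sound.controls = 'controls';
sound.src = 'media/Blue Browne.mp3';
document.body.appendChild(sound);

// Worklet module: processor code only, no DOM access.
const workletCode = `
class WhiteNoiseProcessor extends AudioWorkletProcessor {
    process(inputs, outputs, parameters) {
        outputs[0].forEach(channel => {
            for (let i = 0; i < channel.length; i++) {
                channel[i] = Math.random() * 2 - 1;
            }
        });
        return true;
    }
}
registerProcessor('white-noise-processor', WhiteNoiseProcessor);`;

const moduleUrl = URL.createObjectURL(new Blob([workletCode], { type: "text/javascript" }));
await audioContext.audioWorklet.addModule(moduleUrl);
const node = new AudioWorkletNode(audioContext, 'white-noise-processor');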

Save canvas data as mp4 - Javascript

I have an animated canvas that I want to convert into mp4. I am using MediaRecorder to capture the screen and then converting that Blob. I learned that MediaRecorder does not allow recording in mp4, so I am forced to capture the canvas as webm. Here is what I have tried:
<canvas id="canvas"></canvas>
var recordedChunks = [];
var time = 0;
var canvas = document.getElementById("canvas");
return new Promise(function (res, rej) {
var stream = canvas.captureStream(60);
mediaRecorder = new MediaRecorder(stream, {
mimeType: "video/webm; codecs=vp9"
});
mediaRecorder.start(time);
mediaRecorder.ondataavailable = function (e) {
recordedChunks.push(event.data);
if (mediaRecorder.state === 'recording') {
mediaRecorder.stop();
}
}
mediaRecorder.onstop = function (event) {
var blob = new Blob(recordedChunks, {
"type": "video/webm"
});
var url = URL.createObjectURL(blob);
res(url);
var xhr = new XMLHttpRequest;
xhr.responseType = 'blob';
xhr.onload = function() {
var recoveredBlob = xhr.response;
var reader = new FileReader;
reader.onload = function() {
var blobAsDataUrl = reader.result;
document.getElementById("my-video").setAttribute("src", blobAsDataUrl);
};
reader.readAsDataURL(recoveredBlob);
};
xhr.open('GET', url);
xhr.send();
}
});
Any solution is highly appreciated.
Quick demo of transcoding using ffmpeg.wasm:
const { createFFmpeg } = FFmpeg;
const ffmpeg = createFFmpeg({
    log: true
});

const transcode = async (webcamData) => {
    const message = document.getElementById('message');
    const name = 'record.webm';
    await ffmpeg.load();
    message.innerHTML = 'Start transcoding';
    await ffmpeg.write(name, webcamData);
    await ffmpeg.transcode(name, 'output.mp4');
    message.innerHTML = 'Complete transcoding';
    const data = ffmpeg.read('output.mp4');
    const video = document.getElementById('output-video');
    video.src = URL.createObjectURL(new Blob([data.buffer], { type: 'video/mp4' }));
    dl.href = video.src;
    dl.innerHTML = "download mp4";
}

fn().then(async ({url, blob}) => {
    transcode(new Uint8Array(await blob.arrayBuffer()));
})

function fn() {
    var recordedChunks = [];
    var time = 0;
    var canvas = document.getElementById("canvas");

    return new Promise(function (res, rej) {
        var stream = canvas.captureStream(60);
        mediaRecorder = new MediaRecorder(stream, {
            mimeType: "video/webm; codecs=vp9"
        });
        mediaRecorder.start(time);
        mediaRecorder.ondataavailable = function (e) {
            recordedChunks.push(e.data);
            // for demo, removed stop() call to capture more than one frame
        }
        mediaRecorder.onstop = function (event) {
            var blob = new Blob(recordedChunks, {
                "type": "video/webm"
            });
            var url = URL.createObjectURL(blob);
            res({url, blob}); // resolve both blob and url in an object
            myVideo.src = url;
            // removed data url conversion for brevity
        }
        // for demo, draw random lines and then stop recording
        var i = 0,
            tid = setInterval(() => {
                if (i++ > 20) { // draw 20 lines
                    clearInterval(tid);
                    mediaRecorder.stop();
                }
                let canvas = document.querySelector("canvas");
                let cx = canvas.getContext("2d");
                cx.beginPath();
                cx.strokeStyle = 'green';
                cx.moveTo(Math.random() * 100, Math.random() * 100);
                cx.lineTo(Math.random() * 100, Math.random() * 100);
                cx.stroke();
            }, 200);
    });
}
<script src="https://unpkg.com/#ffmpeg/ffmpeg#0.8.1/dist/ffmpeg.min.js"></script>
<canvas id="canvas" style="height:100px;width:100px"></canvas>
<video id="myVideo" controls="controls"></video>
<video id="output-video" controls="controls"></video>
<a id="dl" href="" download="download.mp4"></a>
<div id="message"></div>

Browser's audio record or microphone permission on button click rather than on page load

I have used the MediaStream Recording API to record audio from the microphone. The recording works well, but if the user denies the browser permission to record and then clicks the record button, no error is shown.
What I want is this: if the user denied access and still clicks the record button, the browser should ask for audio recording permission again.
I have tried several options with the following code but unfortunately none of them work. Can anyone help me out with this issue?
The same or a similar question was asked at reprompt for permissions with getUserMedia() after initial denial, but there is still no resolution there; I'm hoping for an answer that makes the permission prompt reappear after the page has been initialized.
<body>
    <div class="wrapper">
        <section class="main-controls">
            <canvas class="visualizer" height="60px" style="display:none"></canvas>
            <div id="buttons" style="margin-bottom:20px;">
                <button class="record">Record</button>
                <button class="stop">Stop</button>
            </div>
        </section>
        <section class="sound-clips"></section>
    </div>
    <script>
        const record = document.querySelector('.record');
        const stop = document.querySelector('.stop');
        const soundClips = document.querySelector('.sound-clips');
        const mainSection = document.querySelector('.main-controls');
        stop.disabled = true;
        let audioCtx;

        if (navigator.mediaDevices.getUserMedia) {
            const constraints = {
                audio: true
            };
            let chunks = [];

            let onSuccess = function(stream) {
                const mediaRecorder = new MediaRecorder(stream);
                visualize(stream);

                record.onclick = function() {
                    //ask for browser's audio record or microphone permission here
                    if (navigator.mediaDevices.getUserMedia) {
                        mediaRecorder.start();
                        console.log("recorder started");
                        record.style.background = "red";
                        stop.disabled = false;
                        record.disabled = true;
                    }
                }

                stop.onclick = function() {
                    mediaRecorder.stop();
                    console.log(mediaRecorder.state);
                    console.log("recorder stopped");
                    record.style.background = "";
                    record.style.color = "";
                    stop.disabled = true;
                    record.disabled = false;
                }

                mediaRecorder.onstop = function(e) {
                    const clipContainer = document.createElement('article');
                    const clipLabel = document.createElement('p');
                    const audio = document.createElement('audio');
                    clipContainer.classList.add('clip');
                    audio.setAttribute('controls', '');
                    clipContainer.appendChild(audio);
                    clipContainer.appendChild(clipLabel);
                    soundClips.appendChild(clipContainer);
                    audio.controls = true;
                    const blob = new Blob(chunks, {
                        'type': 'audio/ogg; codecs=opus'
                    });
                    chunks = [];
                    const audioURL = window.URL.createObjectURL(blob);
                    audio.src = audioURL;
                    console.log("recorder stopped");

                    clipLabel.onclick = function() {
                        const existingName = clipLabel.textContent;
                        const newClipName = prompt('Enter a new name for your sound clip?');
                        if (newClipName === null) {
                            clipLabel.textContent = existingName;
                        } else {
                            clipLabel.textContent = newClipName;
                        }
                    }
                }

                mediaRecorder.ondataavailable = function(e) {
                    chunks.push(e.data);
                }
            }

            let onError = function(err) {
                console.log('The following error occurred: ' + err);
            }

            navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
        } else {
            console.log('getUserMedia not supported on your browser!');
        }

        function visualize(stream) {
            if (!audioCtx) {
                audioCtx = new AudioContext();
            }
            const source = audioCtx.createMediaStreamSource(stream);
            const analyser = audioCtx.createAnalyser();
            analyser.fftSize = 2048;
            const bufferLength = analyser.frequencyBinCount;
            const dataArray = new Uint8Array(bufferLength);
            source.connect(analyser);
        }
    </script>
</body>
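One approach that matches what is being asked: request the stream inside the click handler instead of on page load, so the prompt (where the browser's permission model allows one) is tied to the user's action. A sketch, assuming the same record button:

record.onclick = async function() {
    try {
        // Requesting here, not on page load, ties the permission prompt
        // to the click. Note that after a hard denial most browsers will
        // not re-prompt; the user has to reset the permission in site settings.
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const mediaRecorder = new MediaRecorder(stream);
        mediaRecorder.start();
        console.log('recorder started');
    } catch (err) {
        console.log('Microphone unavailable or permission denied: ' + err);
    }
};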
