HTML 5: AudioContext AudioBuffer - javascript

I need to understand how AudioBuffer works, and to do that I want to build the following chain: Microphone -> Auto -> Processor -> Manual -> Buffer -> Auto -> Speakers. "Auto" means automatic data transfer, and the "Manual" part I do myself via the code in processor.onaudioprocess. So I have the following code:
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

var audioContext;
var myAudioBuffer;
var microphone;
var speakers;

if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true},
        function(stream) {
            audioContext = new AudioContext();

            // STEP 1 - we create buffer and its node
            speakers = audioContext.destination;
            myAudioBuffer = audioContext.createBuffer(1, 22050, 44100);
            var bufferNode = audioContext.createBufferSource();
            bufferNode.buffer = myAudioBuffer;
            bufferNode.connect(speakers);
            bufferNode.start();

            // STEP 2 - we create microphone and processor
            microphone = audioContext.createMediaStreamSource(stream);
            var processor = (microphone.context.createScriptProcessor ||
                microphone.context.createJavaScriptNode).call(microphone.context, 4096, 1, 1);
            processor.onaudioprocess = function(audioProcessingEvent) {
                var inputBuffer = audioProcessingEvent.inputBuffer;
                var inputData = inputBuffer.getChannelData(0); // we have only one channel
                var nowBuffering = myAudioBuffer.getChannelData(0);
                for (var sample = 0; sample < inputBuffer.length; sample++) {
                    nowBuffering[sample] = inputData[sample];
                }
            };
            microphone.connect(processor);
        },
        function() {
            console.log("Error 003.");
        });
}
However, this code doesn't work. No errors, only silence. Where is my mistake?

EDIT
Since the OP definitely wants to use a buffer, I wrote some more code which you can try out on JSFiddle. The tricky part was that you somehow have to pass the input from the microphone through to some "destination" to get it to process.
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if he doesn't
// support microphone input.
if (navigator.getUserMedia) {
    captureMicrophone();
}

// First step - capture the microphone and process the input
function captureMicrophone() {
    // process input from the microphone
    const processAudio = ev =>
        processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

    // set up the media stream from the microphone
    const microphoneStream = stream => {
        const microphone = audioContext.createMediaStreamSource(stream);
        microphone.connect(processor);
        // #1 If we don't pass through to the speakers, 'audioprocess' won't be triggered
        processor.connect(mute);
    };

    // TODO: Handle error properly (see todo above - but probably more specific)
    const userMediaError = err => console.error(err);

    // Second step - process the buffer and output to the speakers
    const processBuffer = buffer => {
        audioBuffer.getChannelData(CHANNEL).set(buffer);
        // We could move this out but that would affect audio quality
        const source = audioContext.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(speakers);
        source.start();
    };

    const audioContext = new AudioContext();
    const speakers = audioContext.destination;
    // We currently only operate on this channel; we might need to add a couple
    // of lines of code if this fact changes
    const CHANNEL = 0;
    const CHANNELS = 1;
    const BUFFER_SIZE = 4096;
    const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);
    const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

    // #2 Not needed - we could connect directly to the speakers since there's no
    // data anyway, but just to be sure that we don't output anything
    const mute = audioContext.createGain();
    mute.gain.value = 0;
    mute.connect(speakers);

    processor.addEventListener('audioprocess', processAudio);
    navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}
The code I wrote above looks quite dirty to me, but since you have a large project you can definitely structure it much more cleanly.
I have no clue what you're trying to achieve, but I definitely also recommend having a look at Recorder.js.
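If Recorder.js fits your use case, a typical setup looks roughly like this (a sketch based on the Recorder.js README; the constructor options and the exportWAV callback are that library's API, so double-check them against the version you actually use):

// Sketch: recording microphone input to a WAV blob with Recorder.js
navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
    const audioContext = new AudioContext();
    const source = audioContext.createMediaStreamSource(stream);
    const rec = new Recorder(source, { numChannels: 1 });

    rec.record(); // start capturing

    // ... some time later ...
    setTimeout(() => {
        rec.stop(); // stop capturing
        rec.exportWAV(blob => {
            // do something with the WAV blob, e.g. play it or upload it
            const url = URL.createObjectURL(blob);
            console.log('recording available at', url);
        });
    }, 3000);
});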
Previous answer
The main point you're missing is that you get an output buffer passed into the createScriptProcessor callback, so all the createBuffer stuff you do is unnecessary. Apart from that you're on the right track.
This would be a working solution. Try it out on JSFiddle!
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
    captureMicrophone();
}

function captureMicrophone() {
    const audioContext = new AudioContext();
    const speaker = audioContext.destination;
    const processor = audioContext.createScriptProcessor(4096, 1, 1);

    const processAudio = ev => {
        const CHANNEL = 0;
        const inputBuffer = ev.inputBuffer;
        const outputBuffer = ev.outputBuffer;
        const inputData = inputBuffer.getChannelData(CHANNEL);
        const outputData = outputBuffer.getChannelData(CHANNEL);

        // TODO: manually do something with the audio
        for (let i = 0; i < inputBuffer.length; ++i) {
            outputData[i] = inputData[i];
        }
    };

    const microphoneStream = stream => {
        const microphone = audioContext.createMediaStreamSource(stream);
        microphone.connect(processor);
        processor.connect(speaker);
    };

    // TODO: handle error properly
    const userMediaError = err => console.error(err);

    processor.addEventListener('audioprocess', processAudio);
    navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}

Are you getting silence (i.e. your onaudioprocess handler is getting called, but the buffers are empty) or nothing (i.e. your onaudioprocess handler is never getting called)?
If the latter, try connecting the ScriptProcessorNode to context.destination. Even if you don't use the output, some implementations currently need that connection to pull data through.
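For example, a minimal sketch (assuming audioContext, microphone and processor are the nodes from the question's code):

// Keep the processor pulled by the audio graph even if its output isn't used.
microphone.connect(processor);
processor.connect(audioContext.destination);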

Related

How do I record AND download / upload the webcam stream on server (javascript) WHILE using the webcam input for facial recognition (opencv4nodejs)?

I had to write a program for facial recognition in JavaScript, for which I used the opencv4nodejs API, since there are NOT many working examples. Now I somehow want to record and save the stream (for saving on the client side or uploading to the server) along with the audio. This is where I am stuck. Any help is appreciated.
In simple words, I need to use the webcam input for multiple purposes: one, for facial recognition, and two, to somehow save it; the latter is what I'm unable to do. Also, in the worst case, if it's not possible to record and save the webcam video, I could also save the complete screen recording. Please answer if there's a workaround for this.
Below is what I tried to do, but it doesn't work for obvious reasons.
$(document).ready(function () {
    run1()
})

let chunks = []

// run1() for uploading model and for facecam
async function run1() {
    const MODELS = "/models";
    await faceapi.loadSsdMobilenetv1Model(MODELS)
    await faceapi.loadFaceLandmarkModel(MODELS)
    await faceapi.loadFaceRecognitionModel(MODELS)
    var _stream

    // Accessing the user webcam
    const videoEl = document.getElementById('inputVideo')
    navigator.mediaDevices.getUserMedia({
        video: true,
        audio: true
    }).then(
        (stream) => {
            _stream = stream
            recorder = new MediaRecorder(_stream);
            recorder.ondataavailable = (e) => {
                chunks.push(e.data);
                console.log(chunks, i);
                if (i == 20) makeLink(); // Trying to make a link from the blob for some i == 20
            };
            videoEl.srcObject = stream
        },
        (err) => {
            console.error(err)
        }
    )
}

// run2() main recognition code and training
async function run2() {
    // wait for the results of mtcnn
    const input = document.getElementById('inputVideo')
    const mtcnnResults = await faceapi.ssdMobilenetv1(input)
    // Detect all the faces in the webcam
    const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()
    // Training the algorithm with given data of the current student
    const labeledFaceDescriptors = await Promise.all(
        CurrentStudent.map(
            async function (label) {
                // Training the algorithm with the current students
                for (let i = 1; i <= 10; i++) {
                    // console.log(label);
                    const imgUrl = `http://localhost:5500/StudentData/${label}/${i}.jpg`
                    const img = await faceapi.fetchImage(imgUrl)
                    // detect the face with the highest score in the image and compute its landmarks and face descriptor
                    const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
                    if (!fullFaceDescription) {
                        throw new Error(`no faces detected for ${label}`)
                    }
                    const faceDescriptors = [fullFaceDescription.descriptor]
                    return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
                }
            }
        )
    )
    const maxDescriptorDistance = 0.65
    const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
    const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))
    i++;
}

// I somehow want this to work
function makeLink() {
    alert("ML")
    console.log("IN MAKE LINK");
    let blob = new Blob(chunks, {
            type: media.type
        }),
        url = URL.createObjectURL(blob),
        li = document.createElement('li'),
        mt = document.createElement(media.tag),
        hf = document.createElement('a');
    mt.controls = true;
    mt.src = url;
    hf.href = url;
    hf.download = `${counter++}${media.ext}`;
    hf.innerHTML = `download ${hf.download}`;
    li.appendChild(mt);
    li.appendChild(hf);
    ul.appendChild(li);
}

// onPlay(video) function
async function onPlay(videoEl) {
    run2()
    setTimeout(() => onPlay(videoEl), 50)
}
I'm not familiar with JavaScript, but in general only one program may communicate with the camera at a time. You will probably need to write a server that reads the data from the camera and then sends it on to your facial recognition, recording, and so on.

Struggling to playback a Float 32 Array (Web Audio API)

I'm building a simple looper to help me come to an understanding of the Web Audio API; however, I'm struggling to get a buffer source to play back the recorded audio.
The code has been simplified as much as possible, but with annotation it's still 70+ lines, omitting the CSS and HTML, so apologies for that. A version including the CSS and HTML can be found on JSFiddle:
https://jsfiddle.net/b5w9j4yk/10/
Any help would be much appreciated. Thank you :)
// Aim of the code is to record the input from the mic to a Float32Array, then pass that to a buffer
// which is linked to a buffer source, so the audio can be played back.

// Grab DOM elements
const playButton = document.getElementById('play');
const recordButton = document.getElementById('record');

// If allowed access to the microphone, run this code
const promise = navigator.mediaDevices.getUserMedia({audio: true, video: false})
    .then((stream) => {
        recordButton.addEventListener('click', () => {
            // when the record button is pressed, clear and instantiate the record buffer
            if (!recordArmed) {
                recordArmed = true;
                recordButton.classList.add('on');
                console.log('recording armed')
                recordBuffer = new Float32Array(audioCtx.sampleRate * 10);
            }
            else {
                recordArmed = false;
                recordButton.classList.remove('on');
                // After the recording has stopped, pass the recordBuffer to the source's buffer
                myArrayBuffer.copyToChannel(recordBuffer, 0);
                // Looks like the buffer has been passed
                console.log(myArrayBuffer.getChannelData(0));
            }
        });

        // this should start the playback of the source, intended to be used after the audio
        // has been recorded; I can't get it to work in this given context
        playButton.addEventListener('click', () => {
            playButton.classList.add('on');
            source.start();
        });

        // Transport variables
        let recordArmed = false;
        let playing = false;

        // this buffer will later be assigned a Float32Array / I'd like to keep this intermediate
        // buffer so the audio can be sliced and manipulated with ease later
        let recordBuffer;

        // Declare the context, input source and a block processor to pass the input source to the recordBuffer
        const audioCtx = new AudioContext();
        const audioIn = audioCtx.createMediaStreamSource(stream);
        const processor = audioCtx.createScriptProcessor(512, 1, 1);

        // Create a source and a corresponding buffer for playback, then link them
        const myArrayBuffer = audioCtx.createBuffer(1, audioCtx.sampleRate * 10, audioCtx.sampleRate);
        const source = audioCtx.createBufferSource();
        source.buffer = myArrayBuffer;

        // Audio routing
        audioIn.connect(processor);
        source.connect(audioCtx.destination);

        // When recording is armed, pass the samples of the block one at a time to the record buffer
        processor.onaudioprocess = ((audioProcessingEvent) => {
            let inputBuffer = audioProcessingEvent.inputBuffer;
            let i = 0;
            if (recordArmed) {
                for (let channel = 0; channel < inputBuffer.numberOfChannels; channel++) {
                    let inputData = inputBuffer.getChannelData(channel);
                    let avg = 0;
                    inputData.forEach(sample => {
                        recordBuffer.set([sample], i);
                        i++;
                    });
                }
            }
            else {
                i = 0;
            }
        });
    })

How can I play an arrayBuffer as an audio file?

I am receiving an arrayBuffer via a socket.io event and want to be able to process and play the stream as an audio file.
I am receiving the buffer like so:
retrieveAudioStream = () => {
    this.socket.on('stream', (arrayBuffer) => {
        console.log('arrayBuffer', arrayBuffer)
    })
}
Is it possible to set the src attribute of an <audio/> element to a buffer? If not, how can I play the incoming buffer stream?
edit:
To show how I am getting my audio input and streaming it:
window.navigator.getUserMedia(constraints, this.initializeRecorder, this.handleError);

initializeRecorder = (stream) => {
    const audioContext = window.AudioContext;
    const context = new audioContext();
    const audioInput = context.createMediaStreamSource(stream);
    const bufferSize = 2048;
    // create a javascript node
    const recorder = context.createScriptProcessor(bufferSize, 1, 1);
    // specify the processing function
    recorder.onaudioprocess = this.recorderProcess;
    // connect stream to our recorder
    audioInput.connect(recorder);
    // connect our recorder to the previous destination
    recorder.connect(context.destination);
}
This is where I receive the audio processing event (with its inputBuffer) and stream the data via a socket.io event:
recorderProcess = (e) => {
    const left = e.inputBuffer.getChannelData(0);
    this.socket.emit('stream', this.convertFloat32ToInt16(left))
}
EDIT 2:
Adding Raymond's suggestion:
retrieveAudioStream = () => {
    const audioContext = new window.AudioContext();
    this.socket.on('stream', (buffer) => {
        const b = audioContext.createBuffer(1, buffer.length, audioContext.sampleRate);
        b.copyToChannel(buffer, 0, 0)
        const s = audioContext.createBufferSource();
        s.buffer = b
    })
}
Getting error: NotSupportedError: Failed to execute 'createBuffer' on 'BaseAudioContext': The number of frames provided (0) is less than or equal to the minimum bound (0).
Based on a quick read of what initializeRecorder and recorderProcess do, it looks like you're converting the float32 samples to int16 in some way, and that gets sent to retrieveAudioStream in some way.
If this is correct, then the arrayBuffer is an array of int16 values. Convert them to float32 (most likely by dividing each value by 32768) and save them in a Float32Array. Then create an AudioBuffer of the same length and use copyToChannel(float32Array, 0, 0) to write the values to the AudioBuffer. Use an AudioBufferSourceNode with this buffer to play out the audio.
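For illustration, here is a rough sketch of retrieveAudioStream following that suggestion. It assumes the socket delivers an ArrayBuffer of raw int16 samples and that both ends use the same sample rate (neither of which the question confirms):

retrieveAudioStream = () => {
    const audioContext = new (window.AudioContext || window.webkitAudioContext)();

    this.socket.on('stream', (arrayBuffer) => {
        // Interpret the incoming bytes as int16 samples.
        const int16 = new Int16Array(arrayBuffer);

        // Convert int16 (-32768..32767) back to float32 (-1..1).
        const float32 = new Float32Array(int16.length);
        for (let i = 0; i < int16.length; i++) {
            float32[i] = int16[i] / 32768;
        }

        // Copy the samples into an AudioBuffer and play it.
        const audioBuffer = audioContext.createBuffer(1, float32.length, audioContext.sampleRate);
        audioBuffer.copyToChannel(float32, 0, 0);

        const source = audioContext.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(audioContext.destination);
        source.start();
    });
};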

Stream live audio to Node.js server

I'm working on a project and I need to send an audio stream to a Node.js server. I'm able to capture microphone sound with this function:
function micCapture() {
    'use strict';

    navigator.getUserMedia = navigator.getUserMedia ||
        navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

    var constraints = {
        audio: true,
        video: false
    };

    var video = document.querySelector('video');

    function successCallback(stream) {
        window.stream = stream; // stream available to console
        if (window.URL) {
            video.src = window.webkitURL.createObjectURL(stream);
        } else {
            video.src = stream;
        }
        // Send audio stream
        // server.send(stream);
    }

    function errorCallback(error) {
        console.log('navigator.getUserMedia error: ', error);
    }

    navigator.getUserMedia(constraints, successCallback, errorCallback);
}
As you can see, I'm able to capture audio and play it on the website.
Now I want to send that audio stream to a Node.js server and send it back to other clients, like a voice chat, but I don't want to use WebRTC because I need the stream on the server. How can I achieve this? Can I use socket.io-stream to do this? In the examples I saw, they recorded the audio and sent a file, but I need "live" audio.
I have recently done live audio upload using socket.io from browser to server. I am going to answer here in case someone else needs it.
var stream;
var socket = io();
var bufferSize = 1024 * 16;
var audioContext = new AudioContext();

// createScriptProcessor is deprecated. Let me know if anyone finds an alternative
var processor = audioContext.createScriptProcessor(bufferSize, 1, 1);
processor.connect(audioContext.destination);

navigator.mediaDevices.getUserMedia({ video: false, audio: true }).then(handleMicStream).catch(err => {
    console.log('error from getUserMedia', err);
});
handleMicStream will run when the user accepts the permission to use the microphone.
function handleMicStream(streamObj) {
    // keep the context in a global variable
    stream = streamObj;
    input = audioContext.createMediaStreamSource(stream);
    input.connect(processor);
    processor.onaudioprocess = e => {
        microphoneProcess(e); // receives data from microphone
    };
}

function microphoneProcess(e) {
    const left = e.inputBuffer.getChannelData(0); // get only one audio channel
    const left16 = convertFloat32ToInt16(left); // skip if you don't need this
    socket.emit('micBinaryStream', left16); // send to server via web socket
}

// Converts data to BINARY16, keeping only every third sample (crude downsampling)
function convertFloat32ToInt16(buffer) {
    let l = buffer.length;
    const buf = new Int16Array(l / 3);
    while (l--) {
        if (l % 3 === 0) {
            buf[l / 3] = buffer[l] * 0x7FFF; // scale float [-1, 1] to the int16 range
        }
    }
    return buf.buffer;
}
Have your socket.io server listen for micBinaryStream and you should get the data. I needed the data in BINARY16 format for the Google API; if you do not need this, you can skip the call to convertFloat32ToInt16().
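For reference, here is a minimal sketch of the server side (this assumes a plain Node.js HTTP server with a socket.io v2-style setup; what you do with the chunks, relaying them, buffering them, or piping them to a speech API, is up to you):

// server.js - minimal receiver sketch, not part of the original setup
const http = require('http');
const server = http.createServer();
const io = require('socket.io')(server);

io.on('connection', socket => {
    socket.on('micBinaryStream', chunk => {
        // chunk arrives as a Node Buffer containing the int16 samples
        // e.g. relay it to every other connected client:
        socket.broadcast.emit('micBinaryStream', chunk);
    });
});

server.listen(3000);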
Important
When you need to stop listening, you MUST disconnect the processor and end the stream. Run the closeAll() function below.
function closeAll() {
    const tracks = stream ? stream.getTracks() : null;
    const track = tracks ? tracks[0] : null;

    if (track) {
        track.stop();
    }

    if (processor) {
        if (input) {
            try {
                input.disconnect(processor);
            } catch (error) {
                console.warn('Attempt to disconnect input failed.');
            }
        }
        processor.disconnect(audioContext.destination);
    }

    if (audioContext) {
        audioContext.close().then(() => {
            input = null;
            processor = null;
            audioContext = null;
        });
    }
}
It's an old question, I see. I'm doing the same thing (except my server doesn't run Node.js and is written in C#) and stumbled upon this.
I don't know if anyone is still interested, but I've elaborated a bit. The current alternative to the deprecated createScriptProcessor is the AudioWorklet interface.
From: https://webaudio.github.io/web-audio-api/#audioworklet
1.32.1. Concepts
The AudioWorklet object allows developers to supply scripts (such as JavaScript or WebAssembly code) to process audio on the rendering thread, supporting custom AudioNodes. This processing mechanism ensures synchronous execution of the script code with other built-in AudioNodes in the audio graph.
You cannot implement interfaces in JavaScript as far as I know, but you can extend a class derived from one.
And the one we need is: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor
So I wrote a processor that just mirrors the input values to the output and displays them.
// custom-audio-processor.js
class CustomAudioProcessor extends AudioWorkletProcessor {
    process(inputs, outputs, parameters) {
        const input = inputs[0];
        const output = outputs[0];
        for (let channel = 0; channel < input.length; ++channel) {
            for (let i = 0; i < input[channel].length; ++i) {
                // Just copying all the data from input to output
                output[channel][i] = input[channel][i];
                // The next one will make the app crash but yeah, the values are there
                // console.log(output[channel][i]);
            }
        }
        // returning true keeps the processor alive
        return true;
    }
}

// the name must match the one passed to new AudioWorkletNode() below
registerProcessor('custom-audio-processor', CustomAudioProcessor);
The processor must then be placed into the audio pipeline, after the microphone and before the speakers.
function record() {
    constraints = { audio: true };
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function(stream) {
            audioCtx = new AudioContext();
            var source = audioCtx.createMediaStreamSource(stream);
            audioCtx.audioWorklet.addModule("custom-audio-processor.js").then(() => {
                customAudioProcessor = new AudioWorkletNode(audioCtx, "custom-audio-processor");
                source.connect(customAudioProcessor);
                customAudioProcessor.connect(audioCtx.destination);
            });
        });
}
Works! Good luck! :)

Access microphone from a browser - Javascript

Is it possible to access the microphone (built-in or auxiliary) from a browser using client-side JavaScript?
Ideally, it would store the recorded audio in the browser. Thanks!
Here we capture microphone audio as a Web Audio API event loop buffer using getUserMedia() ... time domain and frequency domain snippets of each audio event loop buffer are printed (viewable in the browser console, just hit F12 or ctrl+shift+i).
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone audio into buffer</title>
<script type="text/javascript">
    var webaudio_tooling_obj = function () {
        var audioContext = new AudioContext();
        console.log("audio is starting up ...");
        var BUFF_SIZE = 16384;
        var audioInput = null,
            microphone_stream = null,
            gain_node = null,
            script_processor_node = null,
            script_processor_fft_node = null,
            analyserNode = null;

        if (!navigator.getUserMedia)
            navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                navigator.mozGetUserMedia || navigator.msGetUserMedia;

        if (navigator.getUserMedia){
            navigator.getUserMedia({audio:true},
                function(stream) {
                    start_microphone(stream);
                },
                function(e) {
                    alert('Error capturing audio.');
                }
            );
        } else { alert('getUserMedia not supported in this browser.'); }

        // ---

        function show_some_data(given_typed_array, num_row_to_display, label) {
            var size_buffer = given_typed_array.length;
            var index = 0;
            var max_index = num_row_to_display;
            console.log("__________ " + label);
            for (; index < max_index && index < size_buffer; index += 1) {
                console.log(given_typed_array[index]);
            }
        }

        function process_microphone_buffer(event) { // invoked by event loop
            var i, N, inp, microphone_output_buffer;
            microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
            // microphone_output_buffer <-- this buffer contains current gulp of data size BUFF_SIZE
            show_some_data(microphone_output_buffer, 5, "from getChannelData");
        }

        function start_microphone(stream){
            gain_node = audioContext.createGain();
            gain_node.connect( audioContext.destination );

            microphone_stream = audioContext.createMediaStreamSource(stream);
            microphone_stream.connect(gain_node);

            script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE, 1, 1);
            script_processor_node.onaudioprocess = process_microphone_buffer;
            microphone_stream.connect(script_processor_node);

            // --- enable volume control for output speakers
            document.getElementById('volume').addEventListener('change', function() {
                var curr_volume = this.value;
                gain_node.gain.value = curr_volume;
                console.log("curr_volume ", curr_volume);
            });

            // --- setup FFT
            script_processor_fft_node = audioContext.createScriptProcessor(2048, 1, 1);
            script_processor_fft_node.connect(gain_node);

            analyserNode = audioContext.createAnalyser();
            analyserNode.smoothingTimeConstant = 0;
            analyserNode.fftSize = 2048;

            microphone_stream.connect(analyserNode);
            analyserNode.connect(script_processor_fft_node);

            script_processor_fft_node.onaudioprocess = function() {
                // get the average for the first channel
                var array = new Uint8Array(analyserNode.frequencyBinCount);
                analyserNode.getByteFrequencyData(array);

                // draw the spectrogram
                if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
                    show_some_data(array, 5, "from fft");
                }
            };
        }
    }(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
Since this code exposes microphone data as a buffer, you could add the ability to stream it using WebSockets, or simply aggregate each event loop buffer into one monster buffer and then download that monster to a file.
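For example, a rough sketch of the aggregate-and-download idea (collect_microphone_buffer and download_recording are made-up helper names, and the result is raw float32 samples rather than a playable WAV, which would need an extra encoding step):

var recorded_chunks = [];

// call this from process_microphone_buffer() above to keep a copy of each gulp
function collect_microphone_buffer(microphone_output_buffer) {
    // copy, because the underlying channel data is reused between events
    recorded_chunks.push(new Float32Array(microphone_output_buffer));
}

// later, bundle everything into one Blob and trigger a download
function download_recording() {
    var blob = new Blob(recorded_chunks, { type: 'application/octet-stream' });
    var a = document.createElement('a');
    a.href = URL.createObjectURL(blob);
    a.download = 'microphone_capture.raw';
    a.click();
}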
Notice the call to
var audioContext = new AudioContext();
which indicates it's using the Web Audio API, which is baked into all modern browsers (including mobile browsers) to provide an extremely powerful audio platform, of which tapping into the mic is but a tiny fragment ... NOTE: the CPU usage jumps up because this demo writes each event loop buffer into the browser console log, which is for testing only, so actual use is far less resource intensive even when you modify this to stream audio elsewhere.
Links to some Web Audio API documentation
Basic concepts behind Web Audio API
SO wiki on Web Audio API
nice Web Audio API demos ... some with github links
Yes you can.
Using the getUserMedia() API, you can capture raw audio input from your microphone.
Note that you need a secure context to query the devices:
getUserMedia() is a powerful feature which can only be used in secure
contexts; in insecure contexts, navigator.mediaDevices is undefined,
preventing access to getUserMedia(). A secure context is, in short, a
page loaded using HTTPS or the file:/// URL scheme, or a page loaded
from localhost.
async function getMedia(constraints) {
    let stream = null;
    try {
        stream = await navigator.mediaDevices.getUserMedia(constraints);
        console.log(stream)
    } catch(err) {
        document.write(err)
    }
}

getMedia({ audio: true, video: true })
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
This is a simple way:
// event:
const micButtonClicked = () => {
    // check the access:
    isMicrophoneAllowed(isAllowed => {
        if (isAllowed)
            record();
        else
            navigator.mediaDevices.getUserMedia({audio: true})
                .then(stream => record())
                .catch(err => alert('need permission to use microphone'));
    });
}

// isMicrophoneAllowed:
const isMicrophoneAllowed = callback => {
    navigator.permissions.query({name: 'microphone'})
        .then(permissionStatus => Strings.runCB(callback, permissionStatus.state === 'granted'));
}

// record:
const record = () => {
    // start recording...
}
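Since the question also asks about storing the recorded audio in the browser, here is one way the record() stub above could be filled in using the MediaRecorder API (the 5-second stop timer and the in-memory Blob handling are just illustrative assumptions):

// record: a sketch that captures microphone audio into an in-memory Blob
const record = () => {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
            const recorder = new MediaRecorder(stream);
            const chunks = [];

            recorder.ondataavailable = e => chunks.push(e.data);
            recorder.onstop = () => {
                // one Blob containing the whole recording, kept in the browser
                const blob = new Blob(chunks, { type: recorder.mimeType });
                const url = URL.createObjectURL(blob);
                // e.g. play it back via an <audio> element
                new Audio(url).play();
            };

            recorder.start();
            // stop after 5 seconds, just for the sketch
            setTimeout(() => {
                recorder.stop();
                stream.getTracks().forEach(track => track.stop());
            }, 5000);
        })
        .catch(err => alert('need permission to use microphone'));
};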
