Web MediaRecorder API cannot record audio and video simultaneously - javascript

I've been trying to record video and audio with the MediaRecorder API but it will only let me record my screen without audio. Do I need to have two separate streams and merge them into one? But why would it be possible to set { audio: true, video: true } in the navigator.mediaDevices.getDisplayMedia() method in this case?
This is my code:
async function startRecording() {
  // Prefer VP9-in-webm, but fall back to plain webm if the browser can't record it.
  let mimeType = "video/webm;codecs=vp9";
  if (typeof MediaRecorder !== "undefined" && !MediaRecorder.isTypeSupported(mimeType)) {
    mimeType = "video/webm";
  }
  try {
    // Cast is only needed for older TS lib versions lacking getDisplayMedia typings.
    const mediaDevices = navigator.mediaDevices as any;
    // NOTE(review): most browser/OS combinations do not deliver an audio track
    // from getDisplayMedia even with { audio: true } — see the MDN
    // compatibility table referenced below.
    const stream = await mediaDevices.getDisplayMedia({
      audio: true,
      video: true,
    });
    const options = {
      mimeType,
      bitsPerSecond: 500000,
    };
    const recorder = new MediaRecorder(stream, options);
    const chunks: Blob[] = [];
    recorder.ondataavailable = (e) => {
      if (e.data.size > 0) {
        chunks.push(e.data);
      } else {
        console.log("no data to push");
      }
    };
    recorder.onstop = () => {
      // Always release the capture tracks, even when nothing was recorded.
      stream.getTracks().forEach((track) => {
        track.stop();
        console.log(track);
      });
      // Bug fix: chunks[0] is undefined when no data arrived, which used to
      // throw a TypeError while building the Blob.
      if (chunks.length === 0) {
        console.log("no data to push");
        return;
      }
      const completeBlob = new Blob(chunks, {
        type: chunks[0].type,
      });
      setVideoData({
        recorded: true,
        localVideoURL: URL.createObjectURL(completeBlob),
        blob: completeBlob,
      });
    };
    recorder.start();
  } catch (error) {
    console.log(error);
  }
}
Any pointers greatly appreciated.

Most browsers don't support capturing audio with display media. Even in Chrome and Chromium variants, capture support depends on the OS.
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getDisplayMedia#Browser_compatibility

Related

recorded Audio file is not working in ios

I am developing a web admin application and backend for IOS & android mobile apps.
I am recording audio from the web using navigation.mediaDevices api.
When I record audio from chrome and firefox and upload to the server then this recorded audio file is playing in android but not playing in ios app.
and when I record audio from safari then audio file played in ios & android
Note - I am statically serving audio file from server
below I have written code to record audio from web using browser navigation.mediaDevices api
import * as toast from "../Toast";
export default function recordAudio() {
  return new Promise((resolve) => {
    navigator.mediaDevices
      .getUserMedia({ audio: true })
      .then((stream) => {
        // Bug fix for the iOS playback problem: Chrome/Firefox record
        // webm/opus, but the file was labelled "audio/mpeg" / ".mp3".
        // iOS refuses to play webm bytes behind an mp3 label. Pick a
        // container the current browser actually records and label the
        // file truthfully (Safari records AAC-in-MP4, which iOS plays).
        const mimeType = MediaRecorder.isTypeSupported("audio/webm;codecs=opus")
          ? "audio/webm;codecs=opus"
          : MediaRecorder.isTypeSupported("audio/mp4")
            ? "audio/mp4"
            : "";
        const fileType = mimeType === "audio/mp4" ? "audio/mp4" : "audio/webm";
        const extension = mimeType === "audio/mp4" ? "m4a" : "webm";
        const mediaRecorder = new MediaRecorder(
          stream,
          mimeType
            ? { audioBitsPerSecond: 32000, mimeType }
            : { audioBitsPerSecond: 32000 }
        );
        const audioChunks = [];
        mediaRecorder.addEventListener("dataavailable", (event) => {
          audioChunks.push(event.data);
        });
        // Controls handed back to the caller via the resolved object.
        function start() {
          mediaRecorder.start();
        }
        function pause() {
          mediaRecorder.pause();
        }
        function resume() {
          mediaRecorder.resume();
        }
        function stop() {
          return new Promise((resolve) => {
            mediaRecorder.addEventListener("stop", () => {
              const audioName = `audio.${extension}`;
              const blobData = new Blob(audioChunks, {
                type: fileType,
              });
              const audioFile = new File([blobData], audioName, {
                type: fileType,
              });
              const audioUrl = URL.createObjectURL(audioFile);
              const audio = new Audio(audioUrl);
              function play() {
                audio.play();
              }
              // Bug fix: stop every capture track, not just the first one.
              stream.getTracks().forEach((track) => track.stop());
              resolve({ audioFile, audioUrl, play, audioName });
            });
            mediaRecorder.stop();
          });
        }
        resolve({ start, stop, pause, resume });
      })
      .catch((error) => {
        toast.info(
          "To record Voice Messages, application needs access to your computer’s microphone. Click in the URL bar and set “Use the Microphone” to “Allow.” "
        );
      });
  });
}
Please feel free to send in your suggestions and solutions.
Thank you

MediaRecorder only record one frame

I'm developing a react native app and there I'm recording a canvas and make 5 seconds video files to upload to the server. Everything works great except all my webm files have only one frame. Here is my code. Please help me to understand what's wrong here. Thanks!
initMediaRecorder = () => {
  // Builds a MediaRecorder for the selfie canvas stream; resolves with the
  // recorder, or with null when construction fails.
  const promise = new Promise((resolve) => {
    const stream = this.selfieCanvas.captureStream(10);
    let mediaRecorder = null;
    let options;
    if (MediaRecorder.isTypeSupported('video/webm; codecs=vp9')) {
      options = { mimeType: 'video/webm; codecs=vp9', videoBitsPerSecond: 2500000 };
    } else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
      options = { mimeType: 'video/webm; codecs=vp8', videoBitsPerSecond: 2500000 };
    } else {
      options = 'video/vp8'; // Chrome 47
    }
    try {
      mediaRecorder = new MediaRecorder(stream, options);
    } catch (e0) {
      // Bug fix: resolve() does not stop the executor, so execution used to
      // fall through and dereference the null recorder below (TypeError).
      resolve(null);
      return;
    }
    mediaRecorder.ondataavailable = (event) => {
      console.log(`LOG - Data available ${event.data.size}`);
      this.sendToSaveVideo(event.data);
    };
    resolve(mediaRecorder);
  });
  return promise;
}
captureVideo = async (oldMediaRecorder) => {
  this.initMediaRecorder().then((mediaRecorder) => {
    // Only stop the previous recorder once its replacement is ready.
    if (oldMediaRecorder != null) {
      oldMediaRecorder.stop();
    }
    if (mediaRecorder !== null) {
      mediaRecorder.start();
    }
    // Roll over to a fresh recorder for the next 5-second segment.
    this.captureVideoTimer = setTimeout(() => {
      this.captureVideo(mediaRecorder);
    }, 5000);
  });
}
sendToSaveVideo = async (eventData) => {
  // Wrap the recorded chunk in a webm Blob before uploading.
  // Bug fix: a stray markdown code fence (```) after the closing brace broke
  // the snippet's syntax; it has been removed.
  const video = new Blob([eventData], { type: 'video/webm' });
  saveBlobToCloud(video); // save the file to cloud
}
You are not passing a timeslice to start(), which (probably) means ondataavailable fires only once, with a single chunk.
Also try avoiding using ondataavailable like that, onstop exists exactly for that purpose.
If this doesn't work try checking if the canvas is actually changing the frames.
initMediaRecorder = () => {
  // Builds a MediaRecorder that buffers chunks and hands them to
  // sendToSaveVideo when the recorder stops.
  const promise = new Promise((resolve) => {
    const stream = this.selfieCanvas.captureStream(10);
    let chunks = [];
    let mediaRecorder = null;
    let options;
    if (MediaRecorder.isTypeSupported('video/webm; codecs=vp9')) {
      options = { mimeType: 'video/webm; codecs=vp9', videoBitsPerSecond: 2500000 };
    } else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
      options = { mimeType: 'video/webm; codecs=vp8', videoBitsPerSecond: 2500000 };
    } else {
      options = 'video/vp8'; // Chrome 47
    }
    try {
      mediaRecorder = new MediaRecorder(stream, options);
    } catch (e0) {
      // Bug fix: resolve() does not stop the executor, so execution used to
      // fall through and dereference the null recorder below (TypeError).
      resolve(null);
      return;
    }
    mediaRecorder.ondataavailable = (event) => {
      chunks.push(event.data);
    };
    mediaRecorder.onstop = (event) => {
      this.sendToSaveVideo(chunks);
    };
    resolve(mediaRecorder);
  });
  return promise;
}
captureVideo = async (oldMediaRecorder) => {
  this.initMediaRecorder().then((mediaRecorder) => {
    // Only stop the previous recorder once its replacement is ready.
    if (oldMediaRecorder != null) {
      oldMediaRecorder.stop();
    }
    if (mediaRecorder !== null) {
      // Deliver a chunk every second; ondataavailable must not be treated
      // as a stop signal.
      mediaRecorder.start(1000);
    }
    // Roll over to a fresh recorder every 5 seconds.
    this.captureVideoTimer = setTimeout(() => {
      this.captureVideo(mediaRecorder);
    }, 5000);
  });
}
sendToSaveVideo = async (chunks) => {
  // Bug fix: the parameter was spelled `chuncks` while the body read
  // `chunks`, which raised a ReferenceError at runtime.
  const video = new Blob(chunks, { type: 'video/webm' });
  saveBlobToCloud(video); // save the file to cloud
}
Edit
Also you do not need to declare the mediarecorder every single time...
Something like this would be better:
// Record the canvas continuously with one long-lived recorder, emitting a
// finished video every 5 seconds.
const stream = selfieCanvas.captureStream(10);
let chunks = []; // Bug fix: `chunks` was never declared in this snippet.
let mediaRecorder = null;
let options;
if (MediaRecorder.isTypeSupported('video/webm; codecs=vp9')) {
  options = { mimeType: 'video/webm; codecs=vp9', videoBitsPerSecond: 2500000 };
} else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
  options = { mimeType: 'video/webm; codecs=vp8', videoBitsPerSecond: 2500000 };
} else {
  options = 'video/vp8'; // Chrome 47
}
try {
  mediaRecorder = new MediaRecorder(stream, options);
} catch (e0) {
  // Bug fix: this fragment is not inside a Promise executor, so `resolve`
  // did not exist here; surface the construction failure instead.
  throw e0;
}
mediaRecorder.ondataavailable = (event) => {
  chunks.push(event.data);
};
mediaRecorder.onstop = (event) => {
  const video = new Blob(chunks, { type: 'video/webm' });
  saveBlobToCloud(video);
  chunks = [];
};
// makes ondataavailable deliver a chunk every second
mediaRecorder.start(1000);
// a video is made for every 5 seconds
setInterval(function () {
  mediaRecorder.stop();
  // ondataavailable should be running more often than stop
  mediaRecorder.start(1000);
}, 5000);
Here are some other useful links:
https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/dataavailable_event
MediaRecorder ondataavailable work successfully once

Original audio of tab gets muted while using chrome.tabCapture.capture() and MediaRecorder()

When I use chrome.tabCapture.capture() with the MediaRecorder API to record a stream, the original audio of the tab I am capturing gets muted, but the audio comes out fine in the recorded stream. I want the audio in the tab to keep playing normally.
class Recorder {
  /**
   * @param {Function} onChunksAvailable - invoked with a copy of the
   *   recorded chunks once the recorder stops.
   */
  constructor(onChunksAvailable) {
    this.chunks = [];
    this.active = false;
    this.callback = onChunksAvailable;
  }

  /** Begin recording the given MediaStream's audio as audio/webm. */
  start(stream) {
    if (this.active) {
      throw new Error("recorder is already running");
    }
    this.recorder = new MediaRecorder(stream, { mimeType: "audio/webm" });
    this.recorder.ondataavailable = (event) => this.chunks.push(event.data);
    this.recorder.onstop = () => {
      stream.getAudioTracks()[0].stop();
      this.callback([...this.chunks]);
      // Clear the buffer on the next tick, after the callback got its copy.
      setTimeout(() => {
        this.chunks = [];
      });
      this.active = false;
    };
    this.active = true;
    this.recorder.start();
  }

  /** Stop recording; onstop delivers the chunks to the callback. */
  stop() {
    if (!this.active) {
      throw new Error("recorder is already stop");
    }
    this.recorder.stop();
  }
}
let rec = new Recorder(async (chunks) => {
  //using chunks then to get the stream
});
chrome.tabCapture.capture(
  {
    audio: true,
    video: false,
  },
  function (stream) {
    rec.start(stream);
  }
); // Bug fix: the capture(...) call was missing its closing parenthesis.
Forgive me for lack of documentation as I last played with these APIs years ago, but MDN has some stuff.
In my case, adding these 3 lines to the start function fixed it.
this.context = new AudioContext();
this.stream = this.context.createMediaStreamSource(stream);
this.stream.connect(this.context.destination);
class Recorder {
  /**
   * @param {Function} onChunksAvailable - invoked with a copy of the
   *   recorded chunks once the recorder stops.
   */
  constructor(onChunksAvailable) {
    this.chunks = [];
    this.active = false;
    this.callback = onChunksAvailable;
    this.context = new AudioContext();
  }

  /** Begin recording; also routes the captured audio back to the speakers. */
  start(stream) {
    if (this.active) {
      throw new Error("recorder is already running");
    }
    // Reconnect the stream to actual output: tab capture detaches the audio
    // from the default output, so feed it back via the AudioContext.
    this.stream = this.context.createMediaStreamSource(stream);
    this.stream.connect(this.context.destination);
    this.recorder = new MediaRecorder(stream, { mimeType: "audio/webm" });
    this.recorder.ondataavailable = (event) => this.chunks.push(event.data);
    this.recorder.onstop = () => {
      stream.getAudioTracks()[0].stop();
      this.callback([...this.chunks]);
      // Clear the buffer on the next tick, after the callback got its copy.
      setTimeout(() => {
        this.chunks = [];
      });
      this.active = false;
    };
    this.active = true;
    this.recorder.start();
  }

  /** Stop recording; onstop delivers the chunks to the callback. */
  stop() {
    if (!this.active) {
      throw new Error("recorder is already stop");
    }
    this.recorder.stop();
  }
}
// Wire the recorder to a captured tab's audio stream.
let rec = new Recorder(async (chunks) => {
  // using chunks then to get the stream
});
chrome.tabCapture.capture(
  { audio: true, video: false },
  function (stream) {
    rec.start(stream);
  })
Sorry for lack of details, but I believe when you start an audio capture it disconnects the stream from the default output (speakers). By creating a secondary MediaStreamSource and connecting it to the default output (AudioContext.destination) you can allow the stream to continue outputting to speakers while being input to your recorder.
Sources
MDN: AudioContext
MDN: MediaStreamSource
Chrome extension I made 2 years ago

Creating AudioBuffers from MediaRecorder Chunks

I'm trying to analyse Audio in realtime as it flows in, using a function that takes an AudioBuffer. I convert the Blob that the recorder gives me with the following function, but it throws a DOMException: Unable to decode audio data.
// Decode a Blob's bytes into an AudioBuffer using the given context.
async function audioBufferFromBlob(blob: Blob, audioCtx: AudioContext): Promise<AudioBuffer> {
  const bytes = await new Response(blob).arrayBuffer();
  return await audioCtx.decodeAudioData(bytes);
}
I believe this is due to the fact, that the recorders ondataavailable blobs aren't full audio streams only chunks. Is there a way convert the blobs to full audio streams?
Full code:
import PitchFinder = require('pitchfinder');
const amdf = PitchFinder.AMDF();
// Render the [frequency, midiNote] pair as the element's text.
function logPitch(pitch: [number, number], elem: HTMLElement) {
  elem.innerText = `${pitch}`;
}
// Estimate the fundamental frequency of channel 0 via AMDF and pair it
// with the corresponding MIDI note number.
function detectPitch(audioBuffer: AudioBuffer): [number, number] {
  const frequency = amdf(audioBuffer.getChannelData(0));
  return [frequency, noteFromFrequency(frequency)];
}
// Decode a Blob's bytes into an AudioBuffer using the given context.
async function audioBufferFromBlob(blob: Blob, audioCtx: AudioContext): Promise<AudioBuffer> {
  const bytes = await new Response(blob).arrayBuffer();
  return await audioCtx.decodeAudioData(bytes);
}
// MIDI note number: 69 (A4 = 440 Hz) plus the rounded semitone distance
// from 440 Hz.
function noteFromFrequency(frequency: number): number {
  const semitonesFromA4 = 12 * (Math.log(frequency / 440) / Math.log(2));
  return Math.round(semitonesFromA4) + 69;
}
export function displayPitch(displayElem: HTMLElement) {
  let audioCtx = new AudioContext();
  // Bug fix: the callback-style navigator.getUserMedia is deprecated and
  // removed from modern browsers; use the promise-based
  // navigator.mediaDevices.getUserMedia instead.
  navigator.mediaDevices
    .getUserMedia({ audio: true, video: false })
    .then(successCallback)
    .catch(errorCallback);
  function errorCallback() {
    alert("Something went wrong.");
  }
  function successCallback(stream: MediaStream) {
    let recorder = new MediaRecorder(stream, { mimeType: 'audio/webm'});
    recorder.ondataavailable = async (event: BlobEvent) => {
      // NOTE(review): each timeslice blob after the first lacks container
      // headers, so decodeAudioData rejects it — likely the root cause of
      // the "Unable to decode audio data" error described above. Consider
      // an AnalyserNode for realtime analysis instead — TODO confirm.
      let buffer = await audioBufferFromBlob(event.data, audioCtx);
      logPitch(detectPitch(buffer), displayElem);
    };
    recorder.start(500);
  }
}

How to capture generated audio from window.speechSynthesis.speak() call?

Previous questions have presented this same or similar inquiry
Can Web Speech API used in conjunction with Web Audio API?
How to access audio result from Speech Synthesis API?
Record HTML5 SpeechSynthesisUtterance generated speech to file
generate audio file with W3C Web Speech API
yet no workarounds appear to be have been created using window.speechSynthesis(). Though there are workarounds using epeak , meSpeak How to create or convert text to audio at chromium browser? or making requests to external servers.
How to capture and record audio output of window.speechSynthesis.speak() call and return result as a Blob, ArrayBuffer, AudioBuffer or other object type?
The Web Speech API Specification does not presently provide a means or hint on how to achieve returning or capturing and recording audio output of window.speechSynthesis.speak() call.
See also
MediaStream, ArrayBuffer, Blob audio result from speak() for recording?
Re: MediaStream, ArrayBuffer, Blob audio result from speak() for recording?
Re: MediaStream, ArrayBuffer, Blob audio result from speak() for recording?. In pertinent part, use cases include, but are not limited to
Persons who have issues speaking; e.g., persons who have suffered a
stroke or other communication-inhibiting afflictions. They could convert
text to an audio file and send the file to another individual or group.
This feature would go towards helping them communicate with other persons,
similar to the technologies which assist Stephen Hawking communicate;
Presently, the only person who can hear the audio output is the person
in front of the browser; in essence, not utilizing the full potential of
the text to speech functionality. The audio result can be used as an
attachment within an email; media stream; chat system; or other
communication application. That is, control over the generated audio output;
Another application would be to provide a free, libre, open source audio
dictionary and translation service - client to client and client to server,
server to client.
It is possible to capture the output of audio output of window.speechSynthesis.speak() call utilizing navigator.mediaDevices.getUserMedia() and MediaRecorder(). The expected result is returned at Chromium browser. Implementation at Firefox has issues. Select Monitor of Built-in Audio Analog Stereo at navigator.mediaDevices.getUserMedia() prompt.
The workaround is cumbersome. We should be able to get generated audio, at least as a Blob, without navigator.mediaDevices.getUserMedia() and MediaRecorder().
More interest is evidently necessary by users of browsers, JavaScript and C++ developers, browser implementers and specification authors for further input; to create a proper specification for the feature, and consistent implementation at browsers' source code; see How to implement option to return Blob, ArrayBuffer, or AudioBuffer from window.speechSynthesis.speak() call.
At Chromium a speech dispatcher program should be installed and the instance launched with --enable-speech-dispatcher flag set, as window.speechSynthesis.getVoices() returns an empty array, see How to use Web Speech API at chromium?.
Proof of concept
// SpeechSynthesisRecorder.js guest271314 6-17-2017
// Motivation: Get audio output from `window.speechSynthesis.speak()` call
// as `ArrayBuffer`, `AudioBuffer`, `Blob`, `MediaSource`, `MediaStream`, `ReadableStream`, or other object or data types
// See https://lists.w3.org/Archives/Public/public-speech-api/2017Jun/0000.html
// https://github.com/guest271314/SpeechSynthesisRecorder
// Configuration: Analog Stereo Duplex
// Input Devices: Monitor of Built-in Audio Analog Stereo, Built-in Audio Analog Stereo
class SpeechSynthesisRecorder {
  /**
   * Captures the audio output of `window.speechSynthesis.speak()` by
   * recording the "Monitor" input of the output device through
   * `navigator.mediaDevices.getUserMedia()` and `MediaRecorder`.
   *
   * @param {Object} config
   * @param {string} config.text - Text to synthesize (required).
   * @param {Object} [config.utteranceOptions] - Optional voice, lang, rate, pitch.
   * @param {Object} [config.recorderOptions] - Reserved; currently unused.
   * @param {string} [config.dataType] - "mediaStream" makes start() resolve
   *   early with the live MediaStream instead of waiting for recording to end.
   */
  constructor({text = "", utteranceOptions = {}, recorderOptions = {}, dataType = ""}) {
    if (text === "") throw new Error("no words to synthesize");
    this.dataType = dataType;
    this.text = text;
    // Prefer webm/opus; fall back to ogg/opus where webm recording is unsupported.
    this.mimeType = MediaRecorder.isTypeSupported("audio/webm; codecs=opus")
      ? "audio/webm; codecs=opus" : "audio/ogg; codecs=opus";
    this.utterance = new SpeechSynthesisUtterance(this.text);
    this.speechSynthesis = window.speechSynthesis;
    this.mediaStream_ = new MediaStream();
    this.mediaSource_ = new MediaSource();
    this.mediaRecorder = new MediaRecorder(this.mediaStream_, {
      mimeType: this.mimeType,
      bitsPerSecond: 256 * 8 * 1024
    });
    this.audioContext = new AudioContext();
    this.audioNode = new Audio();
    this.chunks = Array();
    if (utteranceOptions) {
      if (utteranceOptions.voice) {
        // Voices load asynchronously; select the requested voice once the
        // list is available.
        this.speechSynthesis.onvoiceschanged = e => {
          const voice = this.speechSynthesis.getVoices().find(({
            name: _name
          }) => _name === utteranceOptions.voice);
          this.utterance.voice = voice;
          console.log(voice, this.utterance);
        }
        this.speechSynthesis.getVoices();
      }
      let {
        lang, rate, pitch
      } = utteranceOptions;
      Object.assign(this.utterance, {
        lang, rate, pitch
      });
    }
    this.audioNode.controls = "controls";
    document.body.appendChild(this.audioNode);
  }
  /**
   * Speak `this.text` while recording the synthesized audio.
   * @param {string} [text] - Optional replacement text.
   * @returns {Promise} resolves with this instance when recording ends, or
   *   with {tts, data: MediaStream} immediately when dataType === "mediaStream".
   */
  start(text = "") {
    if (text) this.text = text;
    if (this.text === "") throw new Error("no words to synthesize");
    return navigator.mediaDevices.getUserMedia({
      audio: true
    })
    .then(stream => new Promise(resolve => {
      const track = stream.getAudioTracks()[0];
      this.mediaStream_.addTrack(track);
      // return the current `MediaStream`
      if (this.dataType && this.dataType === "mediaStream") {
        resolve({tts:this, data:this.mediaStream_});
      };
      this.mediaRecorder.ondataavailable = event => {
        if (event.data.size > 0) {
          this.chunks.push(event.data);
        };
      };
      this.mediaRecorder.onstop = () => {
        track.stop();
        this.mediaStream_.getAudioTracks()[0].stop();
        this.mediaStream_.removeTrack(track);
        console.log(`Completed recording ${this.utterance.text}`, this.chunks);
        resolve(this);
      }
      this.mediaRecorder.start();
      this.utterance.onstart = () => {
        console.log(`Starting recording SpeechSynthesisUtterance ${this.utterance.text}`);
      }
      // Recording spans exactly the utterance's playback.
      this.utterance.onend = () => {
        this.mediaRecorder.stop();
        console.log(`Ending recording SpeechSynthesisUtterance ${this.utterance.text}`);
      }
      this.speechSynthesis.speak(this.utterance);
    }));
  }
  /** @returns {Promise<{tts, data: Blob}>} the recorded audio as a Blob. */
  blob() {
    if (!this.chunks.length) throw new Error("no data to return");
    return Promise.resolve({
      tts: this,
      data: this.chunks.length === 1 ? this.chunks[0] : new Blob(this.chunks, {
        type: this.mimeType
      })
    });
  }
  /**
   * @param {Blob} [blob] - Optional blob to read instead of the recorded chunks.
   * @returns {Promise<{tts, data: ArrayBuffer}>} the recorded audio bytes.
   */
  arrayBuffer(blob) {
    if (!this.chunks.length) throw new Error("no data to return");
    return new Promise(resolve => {
      const reader = new FileReader;
      reader.onload = e => resolve(({
        tts: this,
        data: reader.result
      }));
      reader.readAsArrayBuffer(blob ? new Blob(blob, {
        type: blob.type
      }) : this.chunks.length === 1 ? this.chunks[0] : new Blob(this.chunks, {
        type: this.mimeType
      }));
    });
  }
  /** @returns {Promise<{tts, data: AudioBuffer}>} the decoded audio. */
  audioBuffer() {
    if (!this.chunks.length) throw new Error("no data to return");
    // Bug fix: arrayBuffer() resolves {tts, data}, not a raw ArrayBuffer;
    // the previous code passed the wrapper object to decodeAudioData.
    return this.arrayBuffer()
      .then(({data: ab}) => this.audioContext.decodeAudioData(ab))
      .then(buffer => ({
        tts: this,
        data: buffer
      }))
  }
  /** @returns {Promise<{tts, data: MediaSource}>} the audio wrapped in a MediaSource. */
  mediaSource() {
    if (!this.chunks.length) throw new Error("no data to return");
    return this.arrayBuffer()
      .then(({
        data: ab
      }) => new Promise((resolve, reject) => {
        this.mediaSource_.onsourceended = () => resolve({
          tts: this,
          data: this.mediaSource_
        });
        this.mediaSource_.onsourceopen = () => {
          if (MediaSource.isTypeSupported(this.mimeType)) {
            const sourceBuffer = this.mediaSource_.addSourceBuffer(this.mimeType);
            sourceBuffer.mode = "sequence"
            sourceBuffer.onupdateend = () =>
              this.mediaSource_.endOfStream();
            sourceBuffer.appendBuffer(ab);
          } else {
            reject(`${this.mimeType} is not supported`)
          }
        }
        // Attaching the object URL triggers onsourceopen.
        this.audioNode.src = URL.createObjectURL(this.mediaSource_);
      }));
  }
  /**
   * @param {Object} [options]
   * @param {number} [options.size=1024] - Chunks enqueued per pull.
   * @param {Object} [options.controllerOptions] - Custom underlying source; when
   *   omitted or empty, a default start/pull source over the recorded chunks is used.
   * @param {Object} [options.rsOptions] - Queuing strategy for the stream.
   * @returns {Promise<{tts, data: ReadableStream}>}
   */
  readableStream({size = 1024, controllerOptions = {}, rsOptions = {}} = {}) {
    // Bug fixes: (1) a `= {}` parameter default lets callers invoke
    // readableStream() with no argument, as the usage examples do;
    // (2) `controllerOptions || {...}` always chose the truthy `{}` default,
    // so the start/pull source below was dead code; (3) `src.length = 0`
    // assigned instead of comparing; (4) pull() no longer enqueues after close.
    if (!this.chunks.length) throw new Error("no data to return");
    const src = this.chunks.slice(0);
    const chunk = size;
    const underlyingSource = Object.keys(controllerOptions).length
      ? controllerOptions
      : {
          start(controller) {
            console.log(src.length);
            controller.enqueue(src.splice(0, chunk))
          },
          pull(controller) {
            if (src.length === 0) {
              controller.close();
              return;
            }
            controller.enqueue(src.splice(0, chunk));
          }
        };
    return Promise.resolve({
      tts: this,
      data: new ReadableStream(underlyingSource, rsOptions)
    });
  }
}
Usage
let ttsRecorder = new SpeechSynthesisRecorder({
  text: "The revolution will not be televised",
  // Bug fix: the key was misspelled "utternanceOptions", so the voice/lang/
  // pitch/rate settings were silently ignored by the constructor.
  utteranceOptions: {
    voice: "english-us espeak",
    lang: "en-US",
    pitch: .75,
    rate: 1
  }
});
// ArrayBuffer
ttsRecorder.start()
// `tts` : `SpeechSynthesisRecorder` instance, `data` : audio as `dataType` or method call result
.then(tts => tts.arrayBuffer())
.then(({tts, data}) => {
  // do stuff with `ArrayBuffer`, `AudioBuffer`, `Blob`,
  // `MediaSource`, `MediaStream`, `ReadableStream`
  // `data` : `ArrayBuffer`
  tts.audioNode.src = URL.createObjectURL(new Blob([data], {type:tts.mimeType}));
  tts.audioNode.title = tts.utterance.text;
  tts.audioNode.onloadedmetadata = () => {
    console.log(tts.audioNode.duration);
    tts.audioNode.play();
  }
})
// AudioBuffer
ttsRecorder.start()
.then(tts => tts.audioBuffer())
.then(({tts, data}) => {
  // `data` : `AudioBuffer`
  let source = tts.audioContext.createBufferSource();
  source.buffer = data;
  source.connect(tts.audioContext.destination);
  source.start()
})
// Blob
ttsRecorder.start()
.then(tts => tts.blob())
.then(({tts, data}) => {
  // `data` : `Blob`
  // Bug fix: this handler's variable is `data`; `blob` was undefined here.
  tts.audioNode.src = URL.createObjectURL(data);
  tts.audioNode.title = tts.utterance.text;
  tts.audioNode.onloadedmetadata = () => {
    console.log(tts.audioNode.duration);
    tts.audioNode.play();
  }
})
// ReadableStream
ttsRecorder.start()
.then(tts => tts.readableStream())
.then(({tts, data}) => {
  // `data` : `ReadableStream`
  console.log(tts, data);
  data.getReader().read().then(({value, done}) => {
    tts.audioNode.src = URL.createObjectURL(value[0]);
    tts.audioNode.title = tts.utterance.text;
    tts.audioNode.onloadedmetadata = () => {
      console.log(tts.audioNode.duration);
      tts.audioNode.play();
    }
  })
})
// MediaSource
ttsRecorder.start()
.then(tts => tts.mediaSource())
.then(({tts, data}) => {
  console.log(tts, data);
  // `data` : `MediaSource`
  // Bug fix: the media-element property is `srcObject`, not `srcObj`.
  tts.audioNode.srcObject = data;
  tts.audioNode.title = tts.utterance.text;
  tts.audioNode.onloadedmetadata = () => {
    console.log(tts.audioNode.duration);
    tts.audioNode.play();
  }
})
// MediaStream
let ttsRecorder = new SpeechSynthesisRecorder({
  text: "The revolution will not be televised",
  // Bug fix: the key was misspelled "utternanceOptions", so the options
  // were silently ignored by the constructor.
  utteranceOptions: {
    voice: "english-us espeak",
    lang: "en-US",
    pitch: .75,
    rate: 1
  },
  dataType:"mediaStream"
});
ttsRecorder.start()
.then(({tts, data}) => {
  // `data` : `MediaStream`
  // do stuff with active `MediaStream`
})
.catch(err => console.log(err))
plnkr
This is an updated code from previous answer which works in Chrome 96:
make sure to select "Share system audio" checkbox in "Choose what to share" window
won't run via SO code snippet (save to demo.html)
<script>
// Records speechSynthesis output via getDisplayMedia's "Share system audio".
(async () => {
  const text = "The revolution will not be televised";
  console.log("picking system audio");
  const stream = await navigator.mediaDevices.getDisplayMedia({video:true, audio:true});
  const track = stream.getAudioTracks()[0];
  if (!track) {
    // Bug fix: previously a string was thrown inside an async Promise
    // executor, so the rejection was unobservable and the outer Promise
    // never settled. Release the capture and throw a real Error instead.
    stream.getTracks().forEach(t => t.stop());
    throw new Error("System audio not available");
  }
  // Only the audio track is needed; drop the mandatory video track.
  stream.getVideoTracks().forEach(track => track.stop());
  const mediaStream = new MediaStream();
  mediaStream.addTrack(track);
  const chunks = [];
  const mediaRecorder = new MediaRecorder(mediaStream, {bitsPerSecond:128000});
  mediaRecorder.ondataavailable = event => {
    if (event.data.size > 0)
      chunks.push(event.data);
  };
  // Bug fix: the Promise now wraps only the callback-style recorder events,
  // avoiding the async-executor anti-pattern.
  const blob = await new Promise(resolve => {
    mediaRecorder.onstop = () => {
      stream.getTracks().forEach(track => track.stop());
      mediaStream.removeTrack(track);
      resolve(new Blob(chunks));
    };
    mediaRecorder.start();
    const utterance = new SpeechSynthesisUtterance(text);
    // Recording spans exactly the utterance's playback.
    utterance.onend = () => mediaRecorder.stop();
    window.speechSynthesis.speak(utterance);
    console.log("speaking...");
  });
  console.log("audio available", blob);
  const player = new Audio();
  player.src = URL.createObjectURL(blob);
  player.autoplay = true;
  player.controls = true;
})()
</script>

Categories