I am working on a project where the user needs to be able to record the screen, the system audio, and the microphone. At the moment I could only make it capture the screen and the system audio.
First I capture the screen and its audio and save the stream to a variable, and then I pass that variable to a video component.
invokeGetDisplayMedia(success, error) {
let displaymediastreamconstraints = {
video: {
displaySurface: 'monitor', // monitor, window, application, browser
logicalSurface: true,
cursor: 'always' // never, always, motion
}
};
// the constraints above are not supported yet,
// which is why they are overridden below
displaymediastreamconstraints = {
video: true,
audio:true
};
if (navigator.mediaDevices.getDisplayMedia) {
navigator.mediaDevices.getDisplayMedia(displaymediastreamconstraints).then(success).catch(error);
}
else {
  // legacy fallback: older implementations exposed getDisplayMedia directly on navigator
  navigator.getDisplayMedia(displaymediastreamconstraints).then(success).catch(error);
}
},
captureScreen(callback) {
this.invokeGetDisplayMedia((screen) => {
this.addStreamStopListener(screen, () => {
//
});
callback(screen);
}, function (error) {
console.error(error);
alert('Unable to capture your screen. Please check console logs.\n' + error);
});
},
startRecording() {
this.captureScreen(screen => {
this.video=this.$refs.videoScreen
this.video.srcObject = screen;
this.recorder = RecordRTC(screen, {
type: 'video'
});
this.recorder.startRecording();
// release screen on stopRecording
this.recorder.screen = screen;
this.videoStart = true;
});
},
I fixed it by adding a function that captures the audio from the microphone:
captureAudio(success, error) {
let displayuserstreamconstraints = {
audio:true
};
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia(displayuserstreamconstraints).then(success).catch(error);
}
else {
navigator.getUserMedia(displayuserstreamconstraints).then(success).catch(error);
}
},
And calling it from the startRecording method:
startRecording() {
this.captureAudio((audio) => {
this.captureScreen(screen=>{
this.video=this.$refs.videoScreen
this.audioStream=audio
this.video.srcObject = screen;
this.recorder = RecordRTC(screen, {
type: 'video'
});
this.recorder.startRecording();
// release screen on stopRecording
this.recorder.screen = screen;
this.videoStart = true;
});
})
},
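One thing worth noting: in this fixed version the mic stream is captured and stored, but RecordRTC still only receives the screen stream, so the microphone may not actually end up in the recording. A minimal sketch of one way to include it (assuming the recorder picks up every track of the stream it is given) is to add the mic track to the screen stream before creating the recorder:

this.captureAudio((audio) => {
  this.captureScreen(screen => {
    // hypothetical variant: merge the mic track into the screen stream
    audio.getAudioTracks().forEach(track => screen.addTrack(track));
    // caveat: if the screen stream already carries a system-audio track,
    // some recorders only use the first audio track; mixing both through
    // the Web Audio API is the more robust route
    this.recorder = RecordRTC(screen, { type: 'video' });
    this.recorder.startRecording();
  });
});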
And updating the stopRecording callback:
stopRecordingCallback() {
this.video.src = this.video.srcObject = null;
this.video=this.$refs.videoScreen
this.video.src = URL.createObjectURL(this.recorder.getBlob());
// MediaStream.stop() is deprecated; stop the individual tracks instead
this.recorder.screen.getTracks().forEach(track => track.stop());
this.audioStream.getTracks().forEach(track => track.stop());
this.recorder.destroy();
this.recorder = null;
},
I am trying to add a voice-message feature to my chat app, and I have a problem playing back the audio that I record.
recordAudio.js
import { onUnmounted } from 'vue'
const constraint = { audio: true }
let chunks = []
function record() {
let mediaRecorder
let stream
function close() {
  if (mediaRecorder && mediaRecorder.state === 'recording') {
    mediaRecorder.stop()
    stream && stream.getTracks().forEach(track => track.stop())
  }
}
onUnmounted(() => {
close()
})
async function start() {
if (navigator.mediaDevices.getUserMedia) {
const strm = await navigator.mediaDevices.getUserMedia(constraint)
if (!strm) return false
stream = strm
mediaRecorder = new MediaRecorder(strm)
mediaRecorder.start(100)
mediaRecorder.ondataavailable = (e) => {
chunks.push(e.data)
}
}
return true
}
function stop() {
close()
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
console.log(chunks)
chunks = []
return blob
}
return {
start,
stop
}
}
export {
record
}
This is a code fragment in a .vue file:
import {record} from '#util/recordAudio.js'
const { start, stop } = record()
async function recordAudio() {
if (!recording.value) {
let res = await start()
res && (recording.value = true)
} else {
stop()
recording.value = false
}
}
function sendAudio() {
let audioBlob = stop()
recording.value = false
console.log(audioBlob)
messages.addMessage({
id: props.chat.id,
message: {
id: 1,
to: 2,
from: 1,
message: audioBlob,
seen: true,
type: 'audio',
createdAt: new Date()
}
})
}
let url = {}
function getObjectUrl(id, message) {
  if (!url[id]) {
    url[id] = URL.createObjectURL(message)
  }
  return url[id]
}
This is the template for the audio:
<div v-if='message.type == "audio"'>
<audio controls :src='getObjectUrl(message.id, message.message)'></audio>
</div>
It seems to work the first time, but it doesn't work after that.
In Firefox I get the warning/error:
Media resource blob:http://localhost:5374/861d13c5-533f-4cd7-8608-68eecc7deb4e could not be decoded
Media resource blob:http://localhost:5374/861d13c5-533f-4cd7-8608-68eecc7deb4e could not be decoded, error: Error Code: NS_ERROR_DOM_MEDIA_METADATA_ERR (0x806e0006)
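A likely cause, though the exact click flow isn't shown, is that stop() runs twice: once when recordAudio() toggles recording off and again inside sendAudio(). The second call builds a Blob from the already-emptied chunks array, and an empty blob is exactly the kind of media resource Firefox fails to decode. Building the blob once, inside onstop, avoids this; a minimal sketch, replacing the stop() inside record():

function stop() {
  return new Promise((resolve) => {
    mediaRecorder.onstop = () => {
      // use the recorder's actual mimeType rather than hard-coding
      // 'audio/ogg' (Chrome, for instance, records audio/webm)
      const blob = new Blob(chunks, { type: mediaRecorder.mimeType })
      chunks = []
      resolve(blob)
    }
    close() // calls mediaRecorder.stop(), which fires onstop
  })
}

sendAudio() would then await the single blob (let audioBlob = await stop()) instead of calling stop() a second time. The url cache in getObjectUrl has a similar hazard: every message is created with id: 1, so all audio messages end up sharing the first cached object URL.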
I have a face recognition app. You press the button, the camera turns on, face recognition runs, done. It works well in the browser on all devices.
There is also an app for iOS and Android. It can also take advantage of this recognition, but only through a webview.
And for some reason, in the webview, the camera does not work as it should. What happens is: you press the button, a modal window appears asking you to grant permission or something like that, and then the camera opens full screen without the prompts and hints that should be there. If I close this window with the live stream, the correct window opens with the hints, but with the camera frozen on the last frame.
const startVideo = async () => {
options = new TinyFaceDetectorOptions();
if (
navigator.mediaDevices &&
navigator.mediaDevices.getUserMedia &&
await navigator.mediaDevices.enumerateDevices()
) {
// first we call getUserMedia to trigger permissions
// we need this before deviceCount, otherwise Safari doesn't return all the cameras
// we need to have the number in order to display the switch front/back button
navigator.mediaDevices
.getUserMedia({
audio: false,
video: true
})
.then((stream: MediaStream) => {
stream.getTracks().forEach((track: MediaStreamTrack) => {
track.stop();
});
if (videoElem.current && (videoElem.current.srcObject as MediaStream)) {
videoElem.current.srcObject = null;
}
// init the UI and the camera stream
initCameraStream();
})
.catch(error => {
// https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
if (error === 'PermissionDeniedError') {
setModalStatus([modalsNames.videoAccessError, true]);
}
if (error.name === 'NotAllowedError') {
setModalStatus([modalsNames.videoAccessError, true]);
}
});
} else {
setModalStatus([modalsNames.cameraNotSupportedError, true]);
}};
And the camera init:
const initCameraStream = () => {
// stop any active streams in the window
if (videoElem.current && (videoElem.current.srcObject as MediaStream)) {
(videoElem.current.srcObject as MediaStream)
.getTracks()
.forEach((track: MediaStreamTrack) => {
track.stop();
});
}
if (videoElem.current && (videoElem.current.srcObject as MediaStream)) {
videoElem.current.srcObject = null;
}
// we ask for a fixed resolution; it will be cropped at the top and bottom (portrait)
// or at the sides (landscape)
const sizeH = 1280;
const sizeW = 1920;
const constraints = {
audio: false,
video: {
// width: { ideal: sizeW },
// height: { ideal: sizeH },
facingMode: currentFacingMode,
// aspectRatio: { exact: 1.777777778 }
}
};
const handleSuccess = (stream: MediaStream) => {
if (videoElem.current) {
videoElem.current.srcObject = stream;
videoElem.current.onloadedmetadata = () => {
if (videoElem.current) {
onPlay();
}
};
}
};
const handleError = () => {
setModalStatus([modalsNames.cameraNotSupportedError, true]);
};
navigator.mediaDevices
.getUserMedia(constraints)
.then(handleSuccess)
.catch(handleError);
};
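One hedged guess: some WebViews only handle the first getUserMedia() call per user gesture properly, which would match the frozen-last-frame symptom, since this code stops the permission-priming stream and immediately requests a second one. A sketch that keeps the first stream alive instead of stopping and re-requesting (it drops the camera-count probing and reuses the names from the code above):

const startVideo = async () => {
  options = new TinyFaceDetectorOptions();
  try {
    // request the camera once and keep the stream,
    // instead of stopping it and calling getUserMedia again
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: false,
      video: { facingMode: currentFacingMode }
    });
    if (videoElem.current) {
      videoElem.current.srcObject = stream;
      videoElem.current.onloadedmetadata = () => onPlay();
    }
  } catch (error) {
    setModalStatus([modalsNames.videoAccessError, true]);
  }
};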
I'm developing a React Native app in which I record a canvas and make 5-second video files to upload to the server. Everything works great, except all my webm files have only one frame. Here is my code. Please help me understand what's wrong here. Thanks!
initMediaRecorder = () => {
const promise = new Promise((resolve) => {
const stream = this.selfieCanvas.captureStream(10);
let mediaRecorder = null;
let options;
if (MediaRecorder.isTypeSupported('video/webm; codecs=vp9')) {
options = { mimeType: 'video/webm; codecs=vp9', videoBitsPerSecond: 2500000 };
} else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
options = { mimeType: 'video/webm; codecs=vp8', videoBitsPerSecond: 2500000 };
} else {
options = 'video/vp8'; // Chrome 47
}
try {
mediaRecorder = new MediaRecorder(stream, options);
} catch (e0) {
resolve(null);
}
mediaRecorder.ondataavailable = (event) => {
console.log(`LOG - Data available ${event.data.size}`);
this.sendToSaveVideo(event.data);
};
resolve(mediaRecorder);
});
return promise;
}
captureVideo = async (oldMediaRecorder) => {
this.initMediaRecorder().then((mediaRecorder) => {
if (oldMediaRecorder !== null && typeof oldMediaRecorder !== 'undefined') {
// I don't want to stop previous recorder until I init the next recorder
oldMediaRecorder.stop();
}
if (mediaRecorder !== null) {
mediaRecorder.start();
}
this.captureVideoTimer = setTimeout(() => {
this.captureVideo(mediaRecorder);
}, 5000);
});
}
sendToSaveVideo = async (eventData) => {
const blobChunk = [];
blobChunk.push(eventData);
const video = new Blob(blobChunk, { type: 'video/webm' });
saveBlobToCloud(video); // save the file to cloud
}
You are not passing a timeslice to start(); this (probably) makes ondataavailable run for every frame.
Also try to avoid using ondataavailable like that; onstop exists exactly for that purpose.
If this doesn't work, try checking whether the canvas is actually changing frames.
initMediaRecorder = () => {
const promise = new Promise((resolve) => {
const stream = this.selfieCanvas.captureStream(10);
let chunks = [];
let mediaRecorder = null;
let options;
if (MediaRecorder.isTypeSupported('video/webm; codecs=vp9')) {
options = { mimeType: 'video/webm; codecs=vp9', videoBitsPerSecond: 2500000 };
} else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
options = { mimeType: 'video/webm; codecs=vp8', videoBitsPerSecond: 2500000 };
} else {
options = 'video/vp8'; // Chrome 47
}
try {
mediaRecorder = new MediaRecorder(stream, options);
} catch (e0) {
  resolve(null);
  return; // bail out so we don't touch the null recorder below
}
mediaRecorder.ondataavailable = (event) => {
chunks.push(event.data);
};
mediaRecorder.onstop = (event) => {
this.sendToSaveVideo(chunks);
};
resolve(mediaRecorder);
});
return promise;
}
captureVideo = async (oldMediaRecorder) => {
this.initMediaRecorder().then((mediaRecorder) => {
if (oldMediaRecorder !== null && typeof oldMediaRecorder !== 'undefined') {
// I don't want to stop previous recorder until I init the next recorder
oldMediaRecorder.stop();
}
if (mediaRecorder !== null) {
// make ondataavailable run every second.
// ondataavailable should not be used as a stop!
mediaRecorder.start(1000);
}
this.captureVideoTimer = setTimeout(() => {
this.captureVideo(mediaRecorder);
}, 5000);
});
}
sendToSaveVideo = async (chunks) => {
const video = new Blob(chunks, { type: 'video/webm' });
saveBlobToCloud(video); // save the file to cloud
}
Edit
Also, you do not need to re-create the MediaRecorder every single time. Something like this would be better:
const stream = selfieCanvas.captureStream(10);
let mediaRecorder = null;
let chunks = [];
let options;
if (MediaRecorder.isTypeSupported('video/webm; codecs=vp9')) {
options = { mimeType: 'video/webm; codecs=vp9', videoBitsPerSecond: 2500000 };
} else if (MediaRecorder.isTypeSupported('video/webm;codecs=vp8')) {
options = { mimeType: 'video/webm; codecs=vp8', videoBitsPerSecond: 2500000 };
} else {
options = 'video/vp8'; // Chrome 47
}
try {
mediaRecorder = new MediaRecorder(stream, options);
} catch (e0) {
  // no Promise in this snippet, so surface the failure directly instead of resolve(null)
  throw new Error('MediaRecorder init failed: ' + e0);
}
mediaRecorder.ondataavailable = (event) => {
chunks.push(event.data);
};
mediaRecorder.onstop = (event) => {
const video = new Blob(chunks, { type: 'video/webm' });
saveBlobToCloud(video);
chunks = [];
};
// makes ondataavailable run every second
mediaRecorder.start(1000);
// a standalone video is produced every 5 seconds
setInterval(function(){
mediaRecorder.stop();
// ondataavailable should be running more often than stop
mediaRecorder.start(1000);
}, 5000);
Here are some other useful links:
https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/dataavailable_event
MediaRecorder ondataavailable work successfully once
When I use chrome.tabCapture.capture() with the MediaRecorder API to record a stream, the original audio of the tab I am capturing gets muted, but the audio comes out OK in the recorded stream. I want the audio in the tab to keep playing normally.
class Recorder {
constructor(onChunksAvailable) {
this.chunks = [];
this.active = false;
this.callback = onChunksAvailable;
}
start(stream) {
if (this.active) {
throw new Error("recorder is already running");
}
this.recorder = new MediaRecorder(stream, {
mimeType: "audio/webm",
});
this.recorder.onstop = () => {
stream.getAudioTracks()[0].stop();
this.callback([...this.chunks]);
setTimeout(() => {
this.chunks = [];
});
this.active = false;
};
this.recorder.ondataavailable = (event) => this.chunks.push(event.data);
this.active = true;
this.recorder.start();
}
stop() {
if (!this.active) {
throw new Error("recorder is already stop");
} else {
this.recorder.stop();
}
}
}
let rec = new Recorder(async (chunks) => {
//using chunks then to get the stream
});
chrome.tabCapture.capture(
{
audio: true,
video: false,
},
function (stream) {
rec.start(stream);
})
Forgive me for the lack of documentation, as I last played with these APIs years ago, but MDN has some material on them.
In my case, adding these three lines to the start function fixed it:
this.context = new AudioContext();
this.stream = this.context.createMediaStreamSource(stream);
this.stream.connect(this.context.destination);
class Recorder {
constructor(onChunksAvailable) {
this.chunks = [];
this.active = false;
this.callback = onChunksAvailable;
this.context = new AudioContext();
}
start(stream) {
if (this.active) {
throw new Error("recorder is already running");
}
// Reconnect the stream to actual output
this.stream = this.context.createMediaStreamSource(stream);
this.stream.connect(this.context.destination);
this.recorder = new MediaRecorder(stream, {
mimeType: "audio/webm",
});
this.recorder.onstop = () => {
stream.getAudioTracks()[0].stop();
this.callback([...this.chunks]);
setTimeout(() => {
this.chunks = [];
});
this.active = false;
};
this.recorder.ondataavailable = (event) => this.chunks.push(event.data);
this.active = true;
this.recorder.start();
}
stop() {
if (!this.active) {
throw new Error("recorder is already stop");
} else {
this.recorder.stop();
}
}
}
let rec = new Recorder(async (chunks) => {
//using chunks then to get the stream
});
chrome.tabCapture.capture(
{
audio: true,
video: false,
},
function (stream) {
rec.start(stream);
})
Sorry for the lack of details, but I believe that when you start an audio capture it disconnects the stream from the default output (the speakers). By creating a secondary MediaStreamSource and connecting it to the default output (AudioContext.destination), you allow the stream to keep playing through the speakers while also serving as input to your recorder.
Sources
MDN: AudioContext
MDN: MediaStreamSource
Chrome extension I made 2 years ago
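For reference, the chunks handed to the callback can be assembled into a playable blob in the usual way (a small usage sketch; "audio/webm" matches the recorder's mimeType above):

let rec = new Recorder((chunks) => {
  const blob = new Blob(chunks, { type: "audio/webm" });
  const url = URL.createObjectURL(blob);
  new Audio(url).play(); // or upload the blob instead
});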
I have added audio to a Chrome extension that screencasts the desktop, but the audio is not good quality. When I record the screen of a tab, I get good quality video and audio. How can I get good quality audio in a desktop recording? Here is what I am using:
chrome.desktopCapture.chooseDesktopMedia(['screen', 'window', 'audio'], function (streamId) {
if (streamId) {
var obj = {
audio: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: streamId
}
},
video: {
optional: [],
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: streamId,
maxWidth: 2560,
maxHeight: 1440
}
}
};
countdownRun(function () {
window.navigator.mediaDevices.getUserMedia(obj).then(function(stream) {
$streamVideo = stream;
$timeStart = Date.now();
var audio = stream.getAudioTracks()[0];
var video = stream.getVideoTracks()[0];
alert(JSON.stringify(audio))
alert(JSON.stringify(video))
if (micSound) {
window.navigator.webkitGetUserMedia({audio: true}, function (s) {
$streamAudio = s;
audio = s.getAudioTracks()[0];
captureUseNacl(audio, video);
}, function (e) {
chrome.tabs.create({url: 'mic.html'});
})
} else {
captureUseNacl(audio, video);
(function () {
var v = document.createElement('video');
document.body.appendChild(v);
v.setAttribute('autoplay', '');
v.addEventListener('canplay', function () {
console.log('play video');
}, false);
v.src = window.URL.createObjectURL(stream);
$streamElement = v;
})()
}
}).catch(function(err) {
alert(err)
alert(JSON.stringify(err));
});
})
}
});
You can do this by removing the document.createElement and createObjectURL part; it does nothing but add noise to the recording. If you check the readyState of the audio track, it is "ended" when it should be "live". Also, add maxFrameRate to the video section to keep the audio and video in sync. Here is your code:
chrome.desktopCapture.chooseDesktopMedia(['screen', 'window', 'audio'], function (streamId) {
if (streamId) {
var obj = {
audio: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: streamId
}
},
video: {
optional: [],
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: streamId,
maxWidth: 2560,
maxHeight: 1440,
maxFrameRate:30
}
}
};
countdownRun(function () {
navigator.mediaDevices.getUserMedia(obj).then(function(stream) {
$streamVideo = stream;
$timeStart = Date.now();
var audio = stream.getAudioTracks()[0];
var video = stream.getVideoTracks()[0];
console.log(stream);
if (micSound) {
window.navigator.webkitGetUserMedia({audio: true}, function (s) {
$streamAudio = s;
audio = s.getAudioTracks()[0];
captureUseNacl(audio, video);
}, function (e) {
chrome.tabs.create({url: 'mic.html'});
})
} else {
captureUseNacl(audio, video);
}
}).catch(function(err) {
console.log(err)
});
})
}
});
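If the audio is still lacking after these changes, and the tracks end up in a MediaRecorder (an assumption, since captureUseNacl isn't shown), explicitly raising the audio bitrate is also worth a try:

// hypothetical follow-up: ask the recorder for a higher audio bitrate
var recorder = new MediaRecorder(stream, {
  mimeType: 'video/webm;codecs=vp8,opus',
  audioBitsPerSecond: 128000,   // browsers may default to much less
  videoBitsPerSecond: 2500000
});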