How do you initialize a WebRTC call using the transceiver API, but only enable audio & video later, after signaling has completed? - javascript

I am trying to first connect two WebRTC peers. Once the connection is established I want to give the users on both sides the option to enable/disable video and audio. This should happen without triggering the signaling process again.
I do run into an issue though: if I call replaceTrack(audioTrack), the remote peer will not play back audio until I also call replaceTrack(videoTrack).
I am unsure why this happens and cannot find any clue in the documentation. The audio does play fine after 10 seconds, once I also attach the video track. Without the video track there is no audio playback. Why?
function createVideoElement() {
const vid = document.createElement("video")
vid.width = 320;
vid.controls = true;
vid.autoplay = true;
const root = document.body;
document.body.appendChild(vid);
return vid;
}
async function RunTestInit() {
console.log("get media access");
const p1_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
const p2_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
console.log("stream setup");
const p1_stream_in = new MediaStream();
const p2_stream_in = new MediaStream();
const p1_video_in = createVideoElement();
const p2_video_in = createVideoElement();
console.log("peer setup");
const p1 = new RTCPeerConnection();
const p2 = new RTCPeerConnection();
const p1_tca = p1.addTransceiver("audio", {
direction: "sendrecv"
});
const p1_tcv = p1.addTransceiver("video", {
direction: "sendrecv"
});
p1.onicecandidate = (ev) => {
p2.addIceCandidate(ev.candidate);
}
p2.onicecandidate = (ev) => {
p1.addIceCandidate(ev.candidate);
}
p1.onconnectionstatechange = (ev) => {
console.log("p1 state: ", p1.connectionState);
}
p2.onconnectionstatechange = async (ev) => {
console.log("p2 state: ", p2.connectionState);
}
p1.onnegotiationneeded = () => {
//triggers once
console.warn("p1.onnegotiationneeded");
}
p2.onnegotiationneeded = () => {
//should never trigger
console.warn("p2.onnegotiationneeded");
}
p1.ontrack = (ev) => {
console.log("p1.ontrack", ev);
p1_stream_in.addTrack(ev.track);
p1_video_in.srcObject = p1_stream_in;
}
p2.ontrack = (ev) => {
console.log("p2.ontrack", ev);
p2_stream_in.addTrack(ev.track);
p2_video_in.srcObject = p2_stream_in;
}
console.log("signaling");
const offer = await p1.createOffer();
await p1.setLocalDescription(offer);
await p2.setRemoteDescription(offer);
const p2_tca = p2.getTransceivers()[0];
const p2_tcv = p2.getTransceivers()[1];
p2_tca.direction = "sendrecv"
p2_tcv.direction = "sendrecv"
const answer = await p2.createAnswer();
await p2.setLocalDescription(answer);
await p1.setRemoteDescription(answer);
console.log("signaling done");
//send audio from p2 to p1 (direction doesn't matter)
//after this runs nothing will happen and no audio plays
setTimeout(async () => {
await p2_tca.sender.replaceTrack(p2_stream_out.getAudioTracks()[0]);
console.warn("audio playback should start now but nothing happens");
}, 1000);
//audio starts playing once this runs
setTimeout(async () => {
//uncomment this and it works just fine
await p2_tcv.sender.replaceTrack(p2_stream_out.getVideoTracks()[0]);
console.warn("now audio playback starts");
}, 10000);
}
function start() {
setTimeout(async () => {
console.log("Init test case");
await RunTestInit();
}, 1);
}
The same example is in this JSFiddle (needs camera and microphone access):
https://jsfiddle.net/vnztcx5p/5/
Once audio works, this will cause an echo.

That is a known issue. https://bugs.chromium.org/p/chromium/issues/detail?id=813243 and https://bugs.chromium.org/p/chromium/issues/detail?id=403710 have some background information.
In a nutshell, the video element expects you to send audio and video data, and these need to be synchronized. But you don't send any video data, and the element still needs to fire a loadedmetadata and a resize event, because that is what the specification says. Hence it will block audio indefinitely.
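One possible workaround, as a rough sketch: play the incoming audio through its own audio element so playback is not gated on the video element's metadata. This assumes pc is the receiving RTCPeerConnection; element names are illustrative only.
const audioOut = document.createElement("audio");
audioOut.autoplay = true;
audioOut.controls = true;
document.body.appendChild(audioOut);
const videoOut = document.createElement("video");
videoOut.autoplay = true;
videoOut.width = 320;
document.body.appendChild(videoOut);
pc.ontrack = (ev) => {
  // Route audio and video tracks to separate elements so the <video> element
  // cannot hold back audio playback while it waits for video frames.
  const el = ev.track.kind === "audio" ? audioOut : videoOut;
  if (!el.srcObject) el.srcObject = new MediaStream();
  el.srcObject.addTrack(ev.track);
};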

You can enable/disable audio and video tracks, so you don't have to renegotiate. Note that these tracks have to be added before negotiation starts. You can achieve it with:
mediaStream.getAudioTracks()[0].enabled = false; // or true to enable it.
Or if you want to disable video:
mediaStream.getVideoTracks()[0].enabled = false; // or true to enable it.
Here is the documentation
getAudioTracks()
getVideoTracks()
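For example, a minimal toggle helper (a sketch; localStream stands for whatever stream you added to the connection before negotiation):
function setAudioEnabled(localStream, enabled) {
  // Toggling track.enabled mutes/unmutes the sender without renegotiation.
  localStream.getAudioTracks().forEach(track => { track.enabled = enabled; });
}
function setVideoEnabled(localStream, enabled) {
  localStream.getVideoTracks().forEach(track => { track.enabled = enabled; });
}
// Usage: setAudioEnabled(myStream, false) mutes, setAudioEnabled(myStream, true) unmutes.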

I got this working. It looks more like a problem with how HTMLVideoElement works than with WebRTC itself.
If I set
p1_video_in.srcObject = p1_stream_in;
p2_video_in.srcObject = p2_stream_in;
before I add the tracks to the stream, it works.
The complete example looks like this:
function createVideoElement() {
const vid = document.createElement("video")
vid.width = 320;
vid.controls = true;
vid.autoplay = true;
const root = document.body;
document.body.appendChild(vid);
return vid;
}
async function RunTestInit() {
console.log("get media access");
const p1_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
const p2_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
console.log("stream setup");
const p1_stream_in = new MediaStream();
const p2_stream_in = new MediaStream();
const p1_video_in = createVideoElement();
const p2_video_in = createVideoElement();
p1_video_in.srcObject = p1_stream_in;
p2_video_in.srcObject = p2_stream_in;
console.log("peer setup");
const p1 = new RTCPeerConnection();
const p2 = new RTCPeerConnection();
const p1_tca = p1.addTransceiver("audio", {
direction: "sendrecv"
});
const p1_tcv = p1.addTransceiver("video", {
direction: "sendrecv"
});
p1.onicecandidate = (ev) => {
p2.addIceCandidate(ev.candidate);
}
p2.onicecandidate = (ev) => {
p1.addIceCandidate(ev.candidate);
}
p1.onconnectionstatechange = (ev) => {
console.log("p1 state: ", p1.connectionState);
}
p2.onconnectionstatechange = async (ev) => {
console.log("p2 state: ", p2.connectionState);
}
p1.onnegotiationneeded = () => {
//triggers once
console.warn("p1.onnegotiationneeded");
}
p2.onnegotiationneeded = () => {
//should never trigger
console.warn("p2.onnegotiationneeded");
}
p1.ontrack = (ev) => {
console.log("p1.ontrack", ev);
p1_stream_in.addTrack(ev.track);
}
p2.ontrack = (ev) => {
console.log("p2.ontrack", ev);
p2_stream_in.addTrack(ev.track);
}
console.log("signaling");
const offer = await p1.createOffer();
await p1.setLocalDescription(offer);
await p2.setRemoteDescription(offer);
const p2_tca = p2.getTransceivers()[0];
const p2_tcv = p2.getTransceivers()[1];
p2_tca.direction = "sendrecv"
p2_tcv.direction = "sendrecv"
const answer = await p2.createAnswer();
await p2.setLocalDescription(answer);
await p1.setRemoteDescription(answer);
console.log("signaling done");
//send audio from p2 to p1 (direction doesn't matter)
//with srcObject assigned up front, audio playback starts as soon as the track is attached
setTimeout(async () => {
await p2_tca.sender.replaceTrack(p2_stream_out.getAudioTracks()[0]);
console.warn("audio playback starts now");
}, 1000);
//the video track is attached later and no longer blocks the audio
setTimeout(async () => {
await p2_tcv.sender.replaceTrack(p2_stream_out.getVideoTracks()[0]);
console.warn("video playback starts now");
}, 10000);
}
function start() {
setTimeout(async () => {
console.log("Init test case");
await RunTestInit();
}, 1);
}

Related

The code is working one render late, i.e. the first recording only appears after 2 successful recordings?

const [recording, setRecording] = useState(false);
const [audioURL, setAudioURL] = useState('');
const [chunks, setChunks] = useState([]);
const [mediaRecorder, setMediaRecorder] = useState(null)
const prepareRecording = () => {
navigator.mediaDevices.getUserMedia({ audio: true })
.then((stream) => {
let mediaRecorder = new MediaRecorder(stream, { type: 'audio/webm' });
setMediaRecorder(mediaRecorder)
console.log(mediaRecorder);
})
};
const startRecording=()=>{
mediaRecorder.start()
setRecording(true)
setRecording(true)
// mediaRecorder.ondataavailable = e => { setChunks(prev => [...prev, e.data]) }
mediaRecorder.addEventListener('dataavailable', e =>{
console.log(chunks)
setChunks(prev=>[...prev,e.data])
})
}
const stopRecording = () => {
console.log(chunks)
if (mediaRecorder) {
mediaRecorder.addEventListener('stop', () => {
if (chunks.length > 0) {
const blob = new Blob(chunks, { type: 'audio/webm' });
setAudioURL(URL.createObjectURL(blob));
}
setRecording(false);
});
mediaRecorder.stop();
}
};
I have three separate buttons: begin (which prepares the mediaRecorder), start recording (which starts the recording), and stop recording (which stops the recording).
The code works, but the first recording only appears in the DOM after 2 recordings, the 2nd after 3 recordings, and so on.
How can I improve the code so that each click on stop recording shows me the recording from that take?
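A likely cause is that the chunks state is captured in a stale closure when the stop handler runs. A minimal sketch of one way around it, assuming useRef is imported from React and that mediaRecorder, setRecording and setAudioURL come from the existing component:
const chunksRef = useRef([]);

const startRecording = () => {
  chunksRef.current = [];
  // Collect data into the ref so the stop handler below never reads stale state.
  mediaRecorder.addEventListener('dataavailable', e => chunksRef.current.push(e.data));
  mediaRecorder.start();
  setRecording(true);
};

const stopRecording = () => {
  if (!mediaRecorder) return;
  mediaRecorder.addEventListener('stop', () => {
    const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
    setAudioURL(URL.createObjectURL(blob));
    setRecording(false);
  }, { once: true });
  mediaRecorder.stop();
};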

Screen share to the connected peer with WebRTC

I am exploring WebRTC and trying to build a Zoom-like web application. I can successfully share my screen and show it on my own device, but I cannot show the shared screen to the connected peers. I want to share my screen with everyone who is connected to the same room, but I don't know how to do that. Can anyone help me with the issue?
Below is the JavaScript code:
Script.js
const socket = io('/')
const videoGrid = document.getElementById('video-grid')
const myPeer = new Peer(undefined, {
host: '/',
port: '8000'
})
const myVideo = document.createElement('video')
myVideo.muted = true
const peers = {}
let videoStream
navigator.mediaDevices.getUserMedia({
video: true,
audio: true
}).then(stream => {
videoStream = stream
addVideoStream(myVideo, stream)
myPeer.on('call', call => {
call.answer(stream)
const video = document.createElement('video')
call.on('stream', userVideoStream => {
addVideoStream(video, userVideoStream)
})
})
socket.on('user-connected', userId => {
connectToNewUser(userId, stream)
})
socket.on('user-disconnected', userId => {
if (peers[userId]) peers[userId].close()
})
})
myPeer.on('open', id => {
socket.emit('join-room', ROOM_ID, id)
})
function connectToNewUser(userId, stream) {
const call = myPeer.call(userId, stream)
const video = document.createElement('video')
call.on('stream', userVideoStream => {
addVideoStream(video, userVideoStream)
})
call.on('close', () => {
video.remove()
})
peers[userId] = call
}
function addVideoStream(video, stream) {
video.srcObject = stream
video.addEventListener('loadedmetadata', () => {
video.play()
})
videoGrid.append(video)
}
//====================================== Front-end styling logics =================================
const audio = document.getElementById('audio')
const audio_mute = document.getElementById('audio-mute')
const video = document.getElementById('video')
const video_mute = document.getElementById('video-mute')
const screen_share = document.getElementById('screen-share')
const record = document.getElementById('record')
const record_stop = document.getElementById('record-stop')
const leave_btn = document.getElementById('leave-btn')
const message_view_box = document.getElementById('message-view-box')
audio.addEventListener('click', function () {
const track = videoStream.getAudioTracks()[0].enabled
console.log(videoStream.getAudioTracks());
if (track) {
videoStream.getAudioTracks()[0].enabled = false
}
else {
videoStream.getAudioTracks()[0].enabled = true
}
audio.style.display = 'none';
audio_mute.style.display = 'inline-block';
})
audio_mute.addEventListener('click', function () {
const track = videoStream.getAudioTracks()[0].enabled
console.log(videoStream.getAudioTracks());
if (track) {
videoStream.getAudioTracks()[0].enabled = false
}
else {
videoStream.getAudioTracks()[0].enabled = true
}
audio_mute.style.display = 'none';
audio.style.display = 'inline-block';
})
video.addEventListener('click', function () {
const track = videoStream.getVideoTracks()[0].enabled
console.log(videoStream.getVideoTracks()[0].enabled);
if (track) {
videoStream.getVideoTracks()[0].enabled = false
}
else {
videoStream.getVideoTracks()[0].enabled = true
}
video.style.display = 'none';
video_mute.style.display = 'inline-block';
})
video_mute.addEventListener('click', function () {
const track = videoStream.getVideoTracks()[0].enabled
console.log(videoStream.getVideoTracks()[0].enabled);
if (track) {
videoStream.getVideoTracks()[0].enabled = false
}
else {
videoStream.getVideoTracks()[0].enabled = true
}
video_mute.style.display = 'none';
video.style.display = 'inline-block';
})
// ============================= Chat box logics ===============================
let chat_box = document.getElementById('chat_box');
let chat_box_input = document.getElementById('chat_box_input');
let send_icon = document.getElementById('send-icon');
chat_box.addEventListener('submit', function (e) {
e.preventDefault()
// console.log(e.target.chat_box_input.value);
if (chat_box_input.value) {
socket.emit('chat message', chat_box_input.value);
chat_box_input.value = '';
}
// e.target.chat_box_input.value = ''
})
socket.on('chat message', function (msg) {
const item = document.createElement('li');
item.textContent = msg;
message_view_box.appendChild(item);
message_view_box.scrollTop = message_view_box.scrollHeight - message_view_box.clientHeight;
});
// =============================================================================
//================================== Screen share logics =======================
const videoElem = document.getElementById('screen')
screen_share.addEventListener('click',async function () {
startCapture();
})
// Options for getDisplayMedia()
const displayMediaOptions = {
video: {
cursor: "always"
},
audio: true
};
async function startCapture() {
try {
videoElem.srcObject = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// const videoStreamTrack = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// call.peerConnection.getSenders()[1].replaceTrack(videoStreamTrack)
dumpOptionsInfo();
} catch (err) {
console.error(`Error: ${err}`);
}
}
function dumpOptionsInfo() {
const videoTrack = videoElem.srcObject.getVideoTracks()[0];
}
//==============================================================================
//==============================================================================
record.addEventListener('click', async function () {
record.style.display = 'none';
record_stop.style.display = 'inline-block';
let stream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true
})
//needed for better browser support
const mime = MediaRecorder.isTypeSupported("video/webm; codecs=vp9")
? "video/webm; codecs=vp9"
: "video/webm"
let mediaRecorder = new MediaRecorder(stream, {
mimeType: mime
})
let chunks = []
mediaRecorder.addEventListener('dataavailable', function(e) {
chunks.push(e.data)
})
mediaRecorder.addEventListener('stop', function(){
let blob = new Blob(chunks, {
type: chunks[0].type
})
let url = URL.createObjectURL(blob)
// let video = document.querySelector("video")
// video.src = url
let a = document.createElement('a')
a.href = url
a.download = 'video.webm'
a.click()
})
//we have to start the recorder manually
mediaRecorder.start()
})
record_stop.addEventListener('click', function () {
record_stop.style.display = 'none';
record.style.display = 'inline-block';
mediaRecorder.stop();
})
//==============================================================================
The screen sharing functionality is below:
//================================== Screen share logics =======================
const videoElem = document.getElementById('screen')
screen_share.addEventListener('click',async function () {
startCapture();
})
// Options for getDisplayMedia()
const displayMediaOptions = {
video: {
cursor: "always"
},
audio: true
};
async function startCapture() {
try {
videoElem.srcObject = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// const videoStreamTrack = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// call.peerConnection.getSenders()[1].replaceTrack(videoStreamTrack)
dumpOptionsInfo();
} catch (err) {
console.error(`Error: ${err}`);
}
}
function dumpOptionsInfo() {
const videoTrack = videoElem.srcObject.getVideoTracks()[0];
}
//==============================================================================
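The commented-out replaceTrack lines point in the right direction. A rough sketch of that idea (assuming, as in the code above, that peers maps user ids to PeerJS calls and videoStream is the local camera stream): replace the outgoing video track on every open call with the screen-capture track.
async function shareScreenToPeers() {
  const screenStream = await navigator.mediaDevices.getDisplayMedia({ video: true });
  const screenTrack = screenStream.getVideoTracks()[0];
  // Swap the outgoing camera track for the screen track on every open call.
  Object.values(peers).forEach(call => {
    const sender = call.peerConnection
      .getSenders()
      .find(s => s.track && s.track.kind === 'video');
    if (sender) sender.replaceTrack(screenTrack);
  });
  // When the user stops sharing, switch everyone back to the camera.
  screenTrack.onended = () => {
    const camTrack = videoStream.getVideoTracks()[0];
    Object.values(peers).forEach(call => {
      const sender = call.peerConnection
        .getSenders()
        .find(s => s.track && s.track.kind === 'video');
      if (sender) sender.replaceTrack(camTrack);
    });
  };
}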

Audio file doesn't work on iOS (iPhone), neither in Safari nor in Chrome

I'm creating a chat with the ability to send voice notes.
Voice notes work perfectly on desktop and Android, but on iOS things start to crash.
Once the audio files load, the Chrome console on iOS shows an error:
MediaError {code: 4, message: "Unsupported source type", MEDIA_ERR_ABORTED: 1, MEDIA_ERR_NETWORK: 2, MEDIA_ERR_DECODE: 3}
and if I click on the play button it gives the error DOMException.
This is the function that records audio
const recordAudio = async (_) => {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
const mediaRecorder = new MediaRecorder(stream, {
mimeType:'audio/mp4',
audioBitrate: '128000',
})
mediaRecorder.start()
const audioChunks = []
mediaRecorder.addEventListener('dataavailable', (event) => {
audioChunks.push(event.data)
})
mediaRecorder.addEventListener('stop', () => {
const audioBlob = new Blob(audioChunks, { type: 'audio/mp3' })
composeMessage('audio', audioBlob)
setIsRecording(false)
})
setTimeout(() => {
mediaRecorder.stop()
}, 30000)
}
The function that creates the audio file:
const createAudioFile = () => {
const audio = new Audio()
audio.setAttribute('preload', 'metadata')
const source = document.createElement('source')
source.setAttribute('src', URL)
source.setAttribute('type', 'audio/mp3')
audio.appendChild(source)
setAudioFile(audio)
}
And this is the function that triggers playback of the audio file:
const playAudioHandler = () => {
const playPromise = audioFile.play()
if (playPromise !== undefined) {
playPromise
.then((_) => {
audioFile.play()
setIsPlaying(true)
})
.catch((error) => {
pauseAudioHandler()
})
}
}
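Safari's MediaRecorder generally cannot produce 'audio/mp3', so labelling the Blob and the <source> as audio/mp3 is a likely mismatch. A minimal sketch (an assumption, reusing the composeMessage helper from above) that feature-detects a supported mimeType and keeps it consistent:
const pickMimeType = () => {
  // Safari typically supports audio/mp4 (AAC); Chrome and Firefox support audio/webm (Opus).
  const candidates = ['audio/webm;codecs=opus', 'audio/mp4'];
  return candidates.find(t => MediaRecorder.isTypeSupported(t)) || '';
};

const recordAudio = async () => {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const mimeType = pickMimeType();
  const mediaRecorder = mimeType
    ? new MediaRecorder(stream, { mimeType })
    : new MediaRecorder(stream);
  const audioChunks = [];
  mediaRecorder.addEventListener('dataavailable', e => audioChunks.push(e.data));
  mediaRecorder.addEventListener('stop', () => {
    // Reuse the recorder's real mimeType for the Blob (and for the <source> type attribute).
    const audioBlob = new Blob(audioChunks, { type: mediaRecorder.mimeType });
    composeMessage('audio', audioBlob);
  });
  mediaRecorder.start();
  setTimeout(() => mediaRecorder.stop(), 30000);
};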

MediaRecorder class Not Available in Electron APP

I was following Fireship's Electron tutorial to build a desktop capturer.
One thing I know is that, as of now, there is a big difference between the version I used and his.
The only problem I am having is with the instantiation of the MediaRecorder class.
The class is not recognized at all.
Is there a way I can fix it?
Render.js - Source Code
// Buttons
const videoElement = document.querySelector('video');
const startBtn = document.getElementById('startBtn');
startBtn.onclick = e => {
mediaRecorder.start();
startBtn.classList.add('is-danger');
startBtn.innerText = 'Recording';
};
const stopBtn = document.getElementById('stopBtn');
stopBtn.onclick = e => {
mediaRecorder.stop();
startBtn.classList.remove('is-danger');
startBtn.innerText = 'Start';
};
const videoSelectBtn = document.getElementById('videoSelectBtn');
videoSelectBtn.onclick = getVideoSources;
const { desktopCapturer, remote } = require('electron');
const { dialog, Menu } = remote;
// Get the available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ['window', 'screen']
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map(source => {
return {
label: source.name,
click: () => selectSource(source)
};
})
);
videoOptionsMenu.popup();
}
let mediaRecorder; //MediaRecorder instance to capture footage
const recordedChunks = [];
// Change the videoSources window to record
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id
}
}
};
// Create a Stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
//Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
// Create the Media Recorder
const options = { mimeType: 'video/webm; codecs=vp9' };
mediaRecorder = new MediaRecorder(stream, options);
// Register Event Handlers
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onstop = handleStop;
}
// Captures all recorded chunks
function handleDataAvailable(e) {
console.log('video data available')
recordedChunks.push(e.data);
}
const { writeFile } = require('fs');
//Saves the video file on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks,{
type: 'video/webm; codecs=vp9'
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: 'Save Video',
defaultPath: `vid -${Date.now()}.webm`
});
console.log(filePath);
writeFile(filePath, buffer, () => console.log('Video Saved Successfully!'));
}
Web Preferences - Index.js
const mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
nodeIntegration: true,
contextIsolation: false,
enableRemoteModule: true,
}
})
Try this in the render.js file, using electron "10.2.0":
const { desktopCapturer, remote, dialog } = require('electron');
const { writeFile } = require('fs');
const { Menu } = remote;
//Buttons
const videoElement = document.querySelector('video');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const videoSelectBtn = document.getElementById('videoSelectBtn');
videoSelectBtn.onclick = getVideoSources;
//Get all available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ['window', 'screen'],
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map((source) => {
return {
label: source.name,
click: () => selectSource(source),
};
})
);
videoOptionsMenu.popup();
}
let mediaRecorder; //MediaRecorder instance to capture footage
const recordedChunks = [];
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id,
},
},
};
//Create a stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
//Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
//Create the Media Recorder
const options = { mimeType: 'video/webm; codecs=vp9' };
mediaRecorder = new MediaRecorder(stream, options);
//Register Event Handlers
mediaRecorder.ondataavailable = handleAvailableData;
mediaRecorder.onstop = handleStop;
}
async function handleAvailableData(e) {
console.log('Video data available');
recordedChunks.push(e.data);
}
//Save video on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks, {
type: 'video/webm; codecs=vp9',
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: 'Save Video',
defaultPath: `vid-${Date.now()}.webm`
})
console.log(filePath);
writeFile(filePath, buffer, () => console.log('Saved Successfully'))
}

Uncaught (in promise) TypeError: Cannot read property 'buildFromTemplate' of undefined at HTMLButtonElement.getVideoSources

I am following along with the Electron tutorial from Fireship, and I get this message saying there was an error, even though the tutorial doesn't:
Uncaught (in promise) TypeError: Cannot read property 'buildFromTemplate' of undefined
at HTMLButtonElement.getVideoSources
const { writeFile } = require("fs");
const dialog = remote;
const Menu = remote;
// Global state
let mediaRecorder; // MediaRecorder instance to capture footage
const recordedChunks = [];
// Buttons
const videoElement = document.querySelector("video");
const startBtn = document.getElementById("startBtn");
startBtn.onclick = (e) => {
mediaRecorder.start();
startBtn.classList.add("is-danger");
startBtn.innerText = "Recording";
};
const stopBtn = document.getElementById("stopBtn");
stopBtn.onclick = (e) => {
mediaRecorder.stop();
startBtn.classList.remove("is-danger");
startBtn.innerText = "Start";
};
const videoSelectBtn = document.getElementById("videoSelectBtn");
videoSelectBtn.onclick = getVideoSources;
// Get the available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ["window", "screen"],
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map((source) => {
return {
label: source.name,
click: () => selectSource(source),
};
})
);
videoOptionsMenu.popup();
}
// Change the videoSource window to record
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: "desktop",
chromeMediaSourceId: source.id,
},
},
};
// Create a Stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
// Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
// Create the Media Recorder
const options = { mimeType: "video/webm; codecs=vp9" };
mediaRecorder = new MediaRecorder(stream, options);
// Register Event Handlers
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onstop = handleStop;
// Updates the UI
}
// Captures all recorded chunks
function handleDataAvailable(e) {
console.log("video data available");
recordedChunks.push(e.data);
}
// Saves the video file on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks, {
type: "video/webm; codecs=vp9",
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: "Save video",
defaultPath: `vid-${Date.now()}.webm`,
});
if (filePath) {
writeFile(filePath, buffer, () => console.log("video saved successfully!"));
}
}
This code is from the source file in the tutorial's GitHub repository.
Can you help me fix it? Thank you.
You need
const {remote} = require("electron");
const {dialog, Menu} = remote;
not
const dialog = remote;
const Menu = remote;
And I believe you are using a newer version of Electron, so you need to create the BrowserWindow like this:
const mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
nodeIntegration: true,
enableRemoteModule: true
}
})
You are not allowed to use remote in the renderer otherwise, as enableRemoteModule is false by default from v9.
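For much newer Electron versions (14 and later) the built-in remote module has been removed entirely; a rough sketch of the commonly used replacement, the @electron/remote package (an assumption about the Electron version, not something needed for the setup above):
// main process (index.js)
const remoteMain = require('@electron/remote/main');
remoteMain.initialize();
// after creating the window:
remoteMain.enable(mainWindow.webContents);

// renderer (render.js)
const { dialog, Menu } = require('@electron/remote');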
