Uncaught (in promise) TypeError: Cannot read property 'buildFromTemplate' of undefined at HTMLButtonElement.getVideoSources

I am following along with Fireship's Electron tutorial, and I get this message saying there was an error, even though the tutorial doesn't hit one:
Uncaught (in promise) TypeError: Cannot read property 'buildFromTemplate' of undefined
at HTMLButtonElement.getVideoSources
const { writeFile } = require("fs");
const dialog = remote;
const Menu = remote;
// Global state
let mediaRecorder; // MediaRecorder instance to capture footage
const recordedChunks = [];
// Buttons
const videoElement = document.querySelector("video");
const startBtn = document.getElementById("startBtn");
startBtn.onclick = (e) => {
mediaRecorder.start();
startBtn.classList.add("is-danger");
startBtn.innerText = "Recording";
};
const stopBtn = document.getElementById("stopBtn");
stopBtn.onclick = (e) => {
mediaRecorder.stop();
startBtn.classList.remove("is-danger");
startBtn.innerText = "Start";
};
const videoSelectBtn = document.getElementById("videoSelectBtn");
videoSelectBtn.onclick = getVideoSources;
// Get the available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ["window", "screen"],
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map((source) => {
return {
label: source.name,
click: () => selectSource(source),
};
})
);
videoOptionsMenu.popup();
}
// Change the videoSource window to record
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: "desktop",
chromeMediaSourceId: source.id,
},
},
};
// Create a Stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
// Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
// Create the Media Recorder
const options = { mimeType: "video/webm; codecs=vp9" };
mediaRecorder = new MediaRecorder(stream, options);
// Register Event Handlers
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onstop = handleStop;
// Updates the UI
}
// Captures all recorded chunks
function handleDataAvailable(e) {
console.log("video data available");
recordedChunks.push(e.data);
}
// Saves the video file on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks, {
type: "video/webm; codecs=vp9",
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: "Save video",
defaultPath: `vid-${Date.now()}.webm`,
});
if (filePath) {
writeFile(filePath, buffer, () => console.log("video saved successfully!"));
}
}
This code is from the source file in the tutorial's GitHub repo.
Can you help me fix it? Thank you.

const {remote} = require("electron");
const {dialog, Menu} = remote;
not
const dialog = remote;
const Menu = remote;
And I believe you are using the newer version of Electron and you need to create BrowserWindow like this
const mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
nodeIntegration: true,
enableRemoteModule: true
}
})
You are otherwise not allowed to use remote in the renderer, because enableRemoteModule is false by default since Electron v9.
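For reference, a minimal sketch of how the top of render.js could look with those imports (an assumption on my part, for an Electron version where desktopCapturer is still exposed to the renderer and the webPreferences above are set):
// Sketch of the corrected renderer imports (not the tutorial's exact file)
const { desktopCapturer, remote } = require("electron");
const { dialog, Menu } = remote; // dialog and Menu are main-process modules, accessed via remote
const { writeFile } = require("fs");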

Related

Screen share to the connected peer with webRTC

I am exploring WebRTC and trying to build a Zoom-like web application. I can successfully share my screen and show it on my own device, but I cannot show the shared screen to the connected peers. I want to share my screen with everyone who is connected to the same room. I don't know how to do that. Can anyone help me with this issue?
Below is the JavaScript code:
Script.js
const socket = io('/')
const videoGrid = document.getElementById('video-grid')
const myPeer = new Peer(undefined, {
host: '/',
port: '8000'
})
const myVideo = document.createElement('video')
myVideo.muted = true
const peers = {}
let videoStream
navigator.mediaDevices.getUserMedia({
video: true,
audio: true
}).then(stream => {
videoStream = stream
addVideoStream(myVideo, stream)
myPeer.on('call', call => {
call.answer(stream)
const video = document.createElement('video')
call.on('stream', userVideoStream => {
addVideoStream(video, userVideoStream)
})
})
socket.on('user-connected', userId => {
connectToNewUser(userId, stream)
})
socket.on('user-disconnected', userId => {
if (peers[userId]) peers[userId].close()
})
})
myPeer.on('open', id => {
socket.emit('join-room', ROOM_ID, id)
})
function connectToNewUser(userId, stream) {
const call = myPeer.call(userId, stream)
const video = document.createElement('video')
call.on('stream', userVideoStream => {
addVideoStream(video, userVideoStream)
})
call.on('close', () => {
video.remove()
})
peers[userId] = call
}
function addVideoStream(video, stream) {
video.srcObject = stream
video.addEventListener('loadedmetadata', () => {
video.play()
})
videoGrid.append(video)
}
//====================================== Front-end styling logics =================================
const audio = document.getElementById('audio')
const audio_mute = document.getElementById('audio-mute')
const video = document.getElementById('video')
const video_mute = document.getElementById('video-mute')
const screen_share = document.getElementById('screen-share')
const record = document.getElementById('record')
const record_stop = document.getElementById('record-stop')
const leave_btn = document.getElementById('leave-btn')
const message_view_box = document.getElementById('message-view-box')
audio.addEventListener('click', function () {
const track = videoStream.getAudioTracks()[0].enabled
console.log(videoStream.getAudioTracks());
if (track) {
videoStream.getAudioTracks()[0].enabled = false
}
else {
videoStream.getAudioTracks()[0].enabled = true
}
audio.style.display = 'none';
audio_mute.style.display = 'inline-block';
})
audio_mute.addEventListener('click', function () {
const track = videoStream.getAudioTracks()[0].enabled
console.log(videoStream.getAudioTracks());
if (track) {
videoStream.getAudioTracks()[0].enabled = false
}
else {
videoStream.getAudioTracks()[0].enabled = true
}
audio_mute.style.display = 'none';
audio.style.display = 'inline-block';
})
video.addEventListener('click', function () {
const track = videoStream.getVideoTracks()[0].enabled
console.log(videoStream.getVideoTracks()[0].enabled);
if (track) {
videoStream.getVideoTracks()[0].enabled = false
}
else {
videoStream.getVideoTracks()[0].enabled = true
}
video.style.display = 'none';
video_mute.style.display = 'inline-block';
})
video_mute.addEventListener('click', function () {
const track = videoStream.getVideoTracks()[0].enabled
console.log(videoStream.getVideoTracks()[0].enabled);
if (track) {
videoStream.getVideoTracks()[0].enabled = false
}
else {
videoStream.getVideoTracks()[0].enabled = true
}
video_mute.style.display = 'none';
video.style.display = 'inline-block';
})
// ============================= Chat box logics ===============================
let chat_box = document.getElementById('chat_box');
let chat_box_input = document.getElementById('chat_box_input');
let send_icon = document.getElementById('send-icon');
chat_box.addEventListener('submit', function (e) {
e.preventDefault()
// console.log(e.target.chat_box_input.value);
if (chat_box_input.value) {
socket.emit('chat message', chat_box_input.value);
chat_box_input.value = '';
}
// e.target.chat_box_input.value = ''
})
socket.on('chat message', function (msg) {
const item = document.createElement('li');
item.textContent = msg;
message_view_box.appendChild(item);
message_view_box.scrollTop = message_view_box.scrollHeight - message_view_box.clientHeight;
});
// =============================================================================
//================================== Screen share logics =======================
const videoElem = document.getElementById('screen')
screen_share.addEventListener('click',async function () {
startCapture();
})
// Options for getDisplayMedia()
const displayMediaOptions = {
video: {
cursor: "always"
},
audio: true
};
async function startCapture() {
try {
videoElem.srcObject = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// const videoStreamTrack = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// call.peerConnection.getSenders()[1].replaceTrack(videoStreamTrack)
dumpOptionsInfo();
} catch (err) {
console.error(`Error: ${err}`);
}
}
function dumpOptionsInfo() {
const videoTrack = videoElem.srcObject.getVideoTracks()[0];
}
//==============================================================================
//==============================================================================
record.addEventListener('click', async function () {
record.style.display = 'none';
record_stop.style.display = 'inline-block';
let stream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true
})
//needed for better browser support
const mime = MediaRecorder.isTypeSupported("video/webm; codecs=vp9")
? "video/webm; codecs=vp9"
: "video/webm"
let mediaRecorder = new MediaRecorder(stream, {
mimeType: mime
})
let chunks = []
mediaRecorder.addEventListener('dataavailable', function(e) {
chunks.push(e.data)
})
mediaRecorder.addEventListener('stop', function(){
let blob = new Blob(chunks, {
type: chunks[0].type
})
let url = URL.createObjectURL(blob)
// let video = document.querySelector("video")
// video.src = url
let a = document.createElement('a')
a.href = url
a.download = 'video.webm'
a.click()
})
//we have to start the recorder manually
mediaRecorder.start()
})
record_stop.addEventListener('click', function () {
record_stop.style.display = 'none';
record.style.display = 'inline-block';
mediaRecorder.stop();
})
//==============================================================================
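The commented-out lines in startCapture() hint at one possible approach: replace the video track each outgoing call is sending with the display-capture track via RTCRtpSender.replaceTrack(). Below is a rough, untested sketch of that idea, assuming PeerJS exposes the underlying RTCPeerConnection on each call as call.peerConnection and that peers maps userId to the tracked MediaConnections:
// Rough sketch: swap the outgoing camera track for the screen-capture track
// on every call tracked in `peers`. Not from the original code.
async function shareScreenWithPeers() {
  const displayStream = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
  const screenTrack = displayStream.getVideoTracks()[0];
  Object.values(peers).forEach((call) => {
    const sender = call.peerConnection
      .getSenders()
      .find((s) => s.track && s.track.kind === 'video');
    if (sender) sender.replaceTrack(screenTrack);
  });
  videoElem.srcObject = displayStream; // local preview of the shared screen
}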

I am having a problem playing an audio blob I get from the microphone. It works the first time but doesn't seem to work after that

I am trying to add a voice-message feature to my chat app, and I have a problem playing the audio that I record.
recordAudio.js
import { onUnmounted } from 'vue'
const constraint = { audio: true }
let chunks = []
function record() {
let mediaRecorder
let stream
function close() {
// console.log(mediaRecorder?.state, 'state man')
if (mediaRecorder && mediaRecorder?.state == 'recording'){
mediaRecorder?.stop()
stream && (
stream.getTracks()
.forEach(track => track.stop())
)
}
}
onUnmounted(() => {
close()
})
async function start() {
if (navigator.mediaDevices.getUserMedia) {
const strm = await navigator.mediaDevices.getUserMedia(constraint)
// console.log(strm)
if (!strm) return false
// console.log('media', mediaRecorder)
stream = strm
mediaRecorder = new MediaRecorder(strm)
mediaRecorder.start(100)
// console.log('listingin for audio')
mediaRecorder.ondataavailable = (e) => {
// console.log(e.data)
chunks.push(e.data)
}
}
return true
}
function stop() {
close()
const blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
console.log(chunks)
chunks = []
// const audioURL = URL.createObjectURL(blob)
return blob
}
return {
start,
stop
}
}
export {
record
}
This is a code fragment in a .vue file:
import {record} from '#util/recordAudio.js'
const { start, stop } = record()
async function recordAudio() {
if (!recording.value) {
let res = await start()
res && (recording.value = true)
} else {
stop()
recording.value = false
}
}
function sendAudio() {
let audioBlob = stop()
recording.value = false
console.log(audioBlob)
messages.addMessage({
id: props.chat.id,
message: {
id: 1,
to: 2,
from: 1,
message: audioBlob,
seen: true,
type: 'audio',
createdAt: new Date()
}
})
}
let url = {}
function getObjectUrl(id, message) {
if (!url[id]) {
url[id] = URL.createObjectURL(message)
return url[id]
}
return url[id]
}
//this is the template for the audio
<div v-if='message.type == "audio"'>
<audio controls :src='getObjectUrl(message.id, message.message)'></audio>
</div>
It seems to work the first time, but it doesn't work after that.
In Firefox I get this warning/error:
Media resource blob:http://localhost:5374/861d13c5-533f-4cd7-8608-68eecc7deb4e could not be decoded
Media resource blob:http://localhost:5374/861d13c5-533f-4cd7-8608-68eecc7deb4e could not be decoded, error: Error Code: NS_ERROR_DOM_MEDIA_METADATA_ERR (0x806e0006)
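One thing worth checking (a sketch, not a confirmed diagnosis): MediaRecorder.stop() delivers its final dataavailable chunk asynchronously, so building the Blob right after calling stop() can yield an incomplete or empty recording that the browser then fails to decode. Waiting for the recorder's stop event before assembling the chunks avoids that race; for example, stop() inside record() could be rewritten roughly like this, with callers awaiting the returned promise:
// Sketch: build the blob only after MediaRecorder fires its stop event,
// so the final dataavailable chunk has been collected first.
function stop() {
  return new Promise((resolve) => {
    mediaRecorder.onstop = () => {
      const blob = new Blob(chunks, { 'type': 'audio/ogg; codecs=opus' });
      chunks = [];
      resolve(blob);
    };
    close(); // calls mediaRecorder.stop() and releases the tracks
  });
}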

How do you initialize a WebRTC call using the transceiver API, but only enable audio & video later, after signaling has completed?

I am trying to first connect two WebRTC peers. Once the connection is established, I want to give the users on both sides the option to enable/disable video and audio. This should happen without triggering the signaling process again.
I do run into an issue though: if I call replaceTrack(audioTrack), the remote peer will not play back audio until I also call replaceTrack(videoTrack).
I am unsure why this happens and cannot find any clue in the documentation. Playback does start after 10 seconds once I also attach the video track, but without the video track there is no audio playback. Why?
function createVideoElement() {
const vid = document.createElement("video")
vid.width = 320;
vid.controls = true;
vid.autoplay = true;
const root = document.body;
document.body.appendChild(vid);
return vid;
}
async function RunTestInit() {
console.log("get media access");
const p1_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
const p2_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
console.log("stream setup");
const p1_stream_in = new MediaStream();
const p2_stream_in = new MediaStream();
const p1_video_in = createVideoElement();
const p2_video_in = createVideoElement();
console.log("peer setup");
const p1 = new RTCPeerConnection();
const p2 = new RTCPeerConnection();
const p1_tca = p1.addTransceiver("audio", {
direction: "sendrecv"
});
const p1_tcv = p1.addTransceiver("video", {
direction: "sendrecv"
});
p1.onicecandidate = (ev) => {
p2.addIceCandidate(ev.candidate);
}
p2.onicecandidate = (ev) => {
p1.addIceCandidate(ev.candidate);
}
p1.onconnectionstatechange = (ev) => {
console.log("p1 state: ", p1.connectionState);
}
p2.onconnectionstatechange = async (ev) => {
console.log("p2 state: ", p2.connectionState);
}
p1.onnegotiationneeded = () => {
//triggers once
console.warn("p1.onnegotiationneeded");
}
p2.onnegotiationneeded = () => {
//should never trigger
console.warn("p2.onnegotiationneeded");
}
p1.ontrack = (ev) => {
console.log("p1.ontrack", ev);
p1_stream_in.addTrack(ev.track);
p1_video_in.srcObject = p1_stream_in;
}
p2.ontrack = (ev) => {
console.log("p2.ontrack", ev);
p2_stream_in.addTrack(ev.track);
p2_video_in.srcObject = p2_stream_in;
}
console.log("signaling");
const offer = await p1.createOffer();
await p1.setLocalDescription(offer);
await p2.setRemoteDescription(offer);
const p2_tca = p2.getTransceivers()[0];
const p2_tcv = p2.getTransceivers()[1];
p2_tca.direction = "sendrecv"
p2_tcv.direction = "sendrecv"
const answer = await p2.createAnswer();
await p2.setLocalDescription(answer);
await p1.setRemoteDescription(answer);
console.log("signaling done");
//send audio from p2 to p1 (direction doesn't matter)
//after this runs nothing will happen and no audio plays
setTimeout(async () => {
await p2_tca.sender.replaceTrack(p2_stream_out.getAudioTracks()[0]);
console.warn("audio playback should start now but nothing happens");
}, 1000);
//audio starts playing once this runs
setTimeout(async () => {
//uncomment this and it works just fine
await p2_tcv.sender.replaceTrack(p2_stream_out.getVideoTracks()[0]);
console.warn("now audio playback starts");
}, 10000);
}
function start() {
setTimeout(async () => {
console.log("Init test case");
await RunTestInit();
}, 1);
}
Same example in the js fiddle (needs camera and microphone access):
https://jsfiddle.net/vnztcx5p/5/
Once audio works this will cause an echo.
That is a known issue. https://bugs.chromium.org/p/chromium/issues/detail?id=813243 and https://bugs.chromium.org/p/chromium/issues/detail?id=403710 have some background information.
In a nutshell, the video element expects you to send audio and video data, and these need to be synchronized. But you don't send any video data, and the element needs to fire loadedmetadata and resize events because that is what the specification says. Hence it will block audio indefinitely.
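If you only need audio before any video track is attached, one workaround (my own sketch, not part of the original answer, and subject to autoplay policy) is to route incoming audio to a separate Audio element, which does not wait on video metadata:
// Hypothetical workaround: play incoming audio through an <audio> element
// so playback does not block on missing video data.
const p1_audio_in = new Audio();
p1.ontrack = (ev) => {
  if (ev.track.kind === "audio") {
    p1_audio_in.srcObject = new MediaStream([ev.track]);
    p1_audio_in.play();
  } else {
    p1_stream_in.addTrack(ev.track);
    p1_video_in.srcObject = p1_stream_in;
  }
};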
You can enable/disable audio and video tracks, so you don't have to renegotiate. Note that these tracks have to be added before negotiation starts. You can achieve it with:
mediaStream.getAudioTracks()[0].enabled = false; // or true to enable it.
Or if you want to disable video:
mediaStream.getVideoTracks()[0].enabled = false; // or true to enable it.
Here is the documentation
getAudioTracks()
getVideoTracks()
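For example, a small mute-toggle helper along these lines (just a sketch; localStream is assumed to be the stream returned by getUserMedia):
// Sketch: flip the `enabled` flag on the stream's first audio or video track
// without renegotiating the connection.
function toggleTrack(mediaStream, kind) {
  const track = kind === 'audio'
    ? mediaStream.getAudioTracks()[0]
    : mediaStream.getVideoTracks()[0];
  if (track) track.enabled = !track.enabled;
  return track ? track.enabled : false;
}
// Usage: toggleTrack(localStream, 'audio'); toggleTrack(localStream, 'video');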
I got this working. It looks more like a problem with how HTMLVideoElement works than with WebRTC.
If I set
p1_video_in.srcObject = p1_stream_in;
p2_video_in.srcObject = p2_stream_in;
before I add the tracks to the stream, it works.
The complete example looks like this:
function createVideoElement() {
const vid = document.createElement("video")
vid.width = 320;
vid.controls = true;
vid.autoplay = true;
const root = document.body;
document.body.appendChild(vid);
return vid;
}
async function RunTestInit() {
console.log("get media access");
const p1_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
const p2_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
console.log("stream setup");
const p1_stream_in = new MediaStream();
const p2_stream_in = new MediaStream();
const p1_video_in = createVideoElement();
const p2_video_in = createVideoElement();
p1_video_in.srcObject = p1_stream_in;
p2_video_in.srcObject = p2_stream_in;
console.log("peer setup");
const p1 = new RTCPeerConnection();
const p2 = new RTCPeerConnection();
const p1_tca = p1.addTransceiver("audio", {
direction: "sendrecv"
});
const p1_tcv = p1.addTransceiver("video", {
direction: "sendrecv"
});
p1.onicecandidate = (ev) => {
p2.addIceCandidate(ev.candidate);
}
p2.onicecandidate = (ev) => {
p1.addIceCandidate(ev.candidate);
}
p1.onconnectionstatechange = (ev) => {
console.log("p1 state: ", p1.connectionState);
}
p2.onconnectionstatechange = async (ev) => {
console.log("p2 state: ", p2.connectionState);
}
p1.onnegotiationneeded = () => {
//triggers once
console.warn("p1.onnegotiationneeded");
}
p2.onnegotiationneeded = () => {
//should never trigger
console.warn("p2.onnegotiationneeded");
}
p1.ontrack = (ev) => {
console.log("p1.ontrack", ev);
p1_stream_in.addTrack(ev.track);
}
p2.ontrack = (ev) => {
console.log("p2.ontrack", ev);
p2_stream_in.addTrack(ev.track);
}
console.log("signaling");
const offer = await p1.createOffer();
await p1.setLocalDescription(offer);
await p2.setRemoteDescription(offer);
const p2_tca = p2.getTransceivers()[0];
const p2_tcv = p2.getTransceivers()[1];
p2_tca.direction = "sendrecv"
p2_tcv.direction = "sendrecv"
const answer = await p2.createAnswer();
await p2.setLocalDescription(answer);
await p1.setRemoteDescription(answer);
console.log("signaling done");
//send audio from p2 to p1 (direction doesn't matter)
//after this runs nothing will happen and no audio plays
setTimeout(async () => {
await p2_tca.sender.replaceTrack(p2_stream_out.getAudioTracks()[0]);
console.warn("audio playback should start now but nothing happens");
}, 1000);
//audio starts playing once this runs
setTimeout(async () => {
//uncomment this and it works just fine
await p2_tcv.sender.replaceTrack(p2_stream_out.getVideoTracks()[0]);
console.warn("now audio playback starts");
}, 10000);
}
function start() {
setTimeout(async () => {
console.log("Init test case");
await RunTestInit();
}, 1);
}

MediaRecorder class Not Available in Electron APP

I was following Fireship's Electron tutorial to build a desktop capturer.
One thing I know is that, as of now, there is a big difference between the Electron version I used and the one he used.
The only problem I am having is during the instantiation of the MediaRecorder class.
The class is not identified at all.
Is there a way I can fix it?
Render.js - Source Code
// Buttons
const videoElement = document.querySelector('video');
const startBtn = document.getElementById('startBtn');
startBtn.onclick = e => {
mediaRecorder.start();
startBtn.classList.add('is-danger');
startBtn.innerText = 'Recording';
};
const stopBtn = document.getElementById('stopBtn');
stopBtn.onclick = e => {
mediaRecorder.stop();
startBtn.classList.remove('is-danger');
startBtn.innerText = 'Start';
};
const videoSelectBtn = document.getElementById('videoSelectBtn');
videoSelectBtn.onclick = getVideoSources;
const { desktopCapturer, remote } = require('electron');
const { dialog, Menu } = remote;
// Get the available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ['window', 'screen']
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map(source => {
return {
label: source.name,
click: () => selectSource(source)
};
})
);
videoOptionsMenu.popup();
}
let mediaRecorder; //MediaRecorder instance to capture footage
const recordedChunks = [];
// Change the videoSources window to record
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id
}
}
};
// Create a Stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
//Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
// Create the Media Recorder
const options = { mimeType: 'video/webm; codecs=vp9' };
mediaRecorder = new MediaRecorder(stream, options);
// Register Event Handlers
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onStop = handleStop;
}
// Captures all recorded chunks
function handleDataAvailable(e) {
console.log('video data available')
recordedChunks.push(e.data);
}
const { writeFile } = require('fs');
//Saves the video file on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks,{
type: 'video/webm; codecs=vp9'
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: 'Save Video',
defaultPath: `vid -${Date.now()}.webm`
});
console.log(filePath);
writeFile(filePath, buffer, () => console.log('Video Saved Successfully!'));
}
Web Preferences - Index.js
const mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
nodeIntegration: true,
contextIsolation: false,
enableRemoteModule: true,
}
})
Try this in the render.js file, using electron "10.2.0":
const { desktopCapturer, remote } = require('electron');
const { writeFile } = require('fs');
const { dialog, Menu } = remote; // dialog and Menu are main-process modules, accessed through remote
//Buttons
const videoElement = document.querySelector('video');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const videoSelectBtn = document.getElementById('videoSelectBtn');
videoSelectBtn.onclick = getVideoSources; // assign the handler, don't call it here
//Get all available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ['window', 'screen'],
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map((source) => {
return {
label: source.name,
click: () => selectSource(source),
};
})
);
videoOptionsMenu.popup();
}
let mediaRecorder; //Mediarecorder instance to capture footage
const recordedChunks = [];
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id,
},
},
};
//Create a stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
//Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
//Create the Media Recorder
const options = { mimeType: 'video/webm; codecs=vp9' };
mediaRecorder = new MediaRecorder(stream, options);
//Register Event Handlers
mediaRecorder.ondataavailable = handleAvailableData;
mediaRecorder.onstop = handleStop;
}
async function handleAvailableData(e) {
console.log('Video data available');
recordedChunks.push(e.data);
}
//Save video on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks, {
type: 'video/webm; codecs=vp9',
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: 'Save Video',
defaultPath: `vid-${Date.now()}.webm`
})
console.log(filePath);
writeFile(filePath, buffer, () => console.log('Saved Successfully'))
}

poseNet on react native causing FileReader error

I am implementing TensorFlow with PoseNet in my Expo project so that once a user hits a certain pose, the camera takes a picture, and the user can then press a button to upload it to Firebase. As soon as I open the camera I get an error saying [Unhandled promise rejection: Error: FileReader.readAsArrayBuffer is not implemented]. The odd thing is that nowhere in my code am I calling FileReader. I do use a Blob, but only to upload to Firebase, and that isn't triggered until the user presses a button.
I am not even able to get the canvas to draw on the user, or to get PoseNet to console.log info such as the user's nose, ears, and so on. What am I doing incorrectly?
Code that I have so far:
import * as tf from '@tensorflow/tfjs';
import * as posenet from "@tensorflow-models/posenet";
import { drawKeypoints, drawSkeleton } from "../utils/TfDrawingUtli";
import Canvas from 'react-native-canvas';
const cameraRef = useRef();
const canvasRef = useRef();
const [TfReady, setTfReady] = useState(true);
// tensor flow pose net
const runPosenet = async () => {
const net = await posenet.load({
architecture: 'MobileNetV1',
outputStride: 16,
inputResolution: { width: 640, height: 480 },
multiplier: 0.75
});
setInterval(() => {
detect(net);
}, 100);
};
const detect = async (net) => {
if (cameraRef.current) {
// Get Video Properties
const video = cameraRef.current;
const videoWidth = cameraRef.current.videoWidth;
const videoHeight = cameraRef.current.videoHeight;
// Set video width
cameraRef.current.width = videoWidth;
cameraRef.current.height = videoHeight;
// Make Detections
const pose = await net.estimateSinglePose(video);
console.log(pose);
drawCanvas(pose, video, videoWidth, videoHeight, canvasRef);
}
};
// drawing a canvas
const drawCanvas = (pose, video, videoWidth, videoHeight, canvas) => {
const ctx = canvas.current.getContext("2d");
canvas.current.width = videoWidth;
canvas.current.height = videoHeight;
drawKeypoints(pose["keypoints"], 0.5, ctx);
drawSkeleton(pose["keypoints"], 0.5, ctx);
};
runPosenet();
useEffect(() => {
(async () => {
// tensorflow
await tf.ready();
setTfReady(true);
console.log(` ======= TF is ready ==== ${TfReady} =======`)
}
)();
}, []);
upload to firebase code
const uploadImageAsync = async (uploadToFirebase) => {
var user = firebase.auth().currentUser.uid;
const blob = await new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.onload = function () {
resolve(xhr.response);
};
xhr.onerror = function (e) {
console.log(e);
reject(new TypeError('Network request failed'));
};
xhr.responseType = 'blob';
xhr.open('GET', uploadToFirebase, true);
xhr.send(null);
});
const ref = firebase
.storage()
.ref(user)
.child('test image')
const snapshot = await ref.put(blob);
blob.close();
return await snapshot.ref.getDownloadURL();
};
code inside return()
<Camera ref={cameraRef} />
<Canvas ref={canvasRef} />
