I am implementing TensorFlow with PoseNet in my Expo project, so that once a user hits a certain pose the camera takes a picture, and the user can then press a button to upload it to Firebase. As soon as I open the camera I get an error saying [Unhandled promise rejection: Error: FileReader.readAsArrayBuffer is not implemented]. The odd thing is that nowhere in my code am I calling FileReader. I am using a blob, but only to upload to Firebase, and that is not triggered until the user pushes a button.
I am not even able to get the canvas to draw on the user, or for PoseNet to console.log info such as the user's nose, ears, and so on. What am I doing incorrectly?
Code that I have so far:
import * as tf from '@tensorflow/tfjs';
import * as posenet from "@tensorflow-models/posenet";
import { drawKeypoints, drawSkeleton } from "../utils/TfDrawingUtli";
import Canvas from 'react-native-canvas';
const cameraRef = useRef();
const canvasRef = useRef();
const [TfReady, setTfReady] = useState(true);
// tensor flow pose net
const runPosenet = async () => {
const net = await posenet.load({
architecture: 'MobileNetV1',
outputStride: 16,
inputResolution: { width: 640, height: 480 },
multiplier: 0.75
});
setInterval(() => {
detect(net);
}, 100);
};
const detect = async (net) => {
if (cameraRef.current) {
// Get Video Properties
const video = cameraRef.current;
const videoWidth = cameraRef.current.videoWidth;
const videoHeight = cameraRef.current.videoHeight;
// Set video width
cameraRef.current.width = videoWidth;
cameraRef.current.height = videoHeight;
// Make Detections
const pose = await net.estimateSinglePose(video);
console.log(pose);
drawCanvas(pose, video, videoWidth, videoHeight, canvasRef);
}
};
// drawing a canvas
const drawCanvas = (pose, video, videoWidth, videoHeight, canvas) => {
const ctx = canvas.current.getContext("2d");
canvas.current.width = videoWidth;
canvas.current.height = videoHeight;
drawKeypoints(pose["keypoints"], 0.5, ctx);
drawSkeleton(pose["keypoints"], 0.5, ctx);
};
runPosenet();
useEffect(() => {
(async () => {
// tensorflow
await tf.ready();
setTfReady(true);
console.log(` ======= TF is ready ==== ${TfReady} =======`)
}
)();
}, []);
Upload to Firebase code:
const uploadImageAsync = async (uploadToFirebase) => {
var user = firebase.auth().currentUser.uid;
const blob = await new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.onload = function () {
resolve(xhr.response);
};
xhr.onerror = function (e) {
console.log(e);
reject(new TypeError('Network request failed'));
};
xhr.responseType = 'blob';
xhr.open('GET', uploadToFirebase, true);
xhr.send(null);
});
const ref = firebase
.storage()
.ref(user)
.child('test image')
const snapshot = await ref.put(blob);
blob.close();
return await snapshot.ref.getDownloadURL();
};
Code inside return():
<Camera ref={cameraRef} />
<Canvas ref={canvasRef} />
When I run tesseract.js on a URL it works fine. But when I run it on a local file I get these errors. How can I solve this?
I am running tesseract.js v2.1.0 and here is my code:
const { createWorker } = require('tesseract.js');
const worker = createWorker({
logger: m => console.log(m), // Add logger here
});
(async () => {
await worker.load();
await worker.loadLanguage('eng');
await worker.initialize('eng');
const { data: { text } } = await worker.recognize('download.png');
console.log(text);
await worker.terminate();
})();
If you want to load local images in tesseract.js you have to load them via an input tag. Here is a working example.
HTML
<input type="file" id="input_image" accept="image/*">
JavaScript
const input_image = document.getElementById("input_image");
const offscreen_canvas = new OffscreenCanvas(0, 0);
const offscreen_canvas_context = offscreen_canvas.getContext("2d");
input_image.addEventListener("change", () => {
var file = input_image.files[0];
if (file == undefined) return;
var reader = new FileReader();
reader.onload = function (event) {
const reader_image = event.target.result;
const image = new Image();
image.onload = function () {
offscreen_canvas.width = image.width;
offscreen_canvas.height = image.height;
offscreen_canvas_context.drawImage(image, 0, 0);
offscreen_canvas.convertToBlob().then((blob) => {
Tesseract.recognize(blob, "eng", {
logger: (m) => console.log(m)
}).then(({ data: { text } }) => {
console.log(text);
});
});
};
image.src = reader_image;
};
reader.readAsDataURL(file);
});
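If the script is actually running under Node (as the require in the question suggests), another commonly suggested option is to read the local file into a Buffer and pass that to recognize, since tesseract.js accepts Buffers in Node; resolving an absolute path also avoids working-directory surprises. A rough sketch, assuming download.png sits next to the script:
const fs = require('fs');
const path = require('path');
const { createWorker } = require('tesseract.js');

(async () => {
  const worker = createWorker({
    logger: m => console.log(m),
  });
  await worker.load();
  await worker.loadLanguage('eng');
  await worker.initialize('eng');
  // Read the local image into a Buffer instead of passing a relative path string
  const image = fs.readFileSync(path.resolve(__dirname, 'download.png'));
  const { data: { text } } = await worker.recognize(image);
  console.log(text);
  await worker.terminate();
})();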
I am trying to first connect two WebRTC peers. Once the connection is established I want to give the users on both sides the option to enable/disable video and audio. This should happen without triggering the signaling process again.
I do run into an issue though: if I call replaceTrack(audioTrack), the remote peer will not play back audio until I also call replaceTrack(videoTrack).
I am unsure why this happens and cannot find any clue in the documentation. It does play fine after 10 seconds, once I also attach the video track. Without the video track there is no audio playback. Why?
function createVideoElement() {
const vid = document.createElement("video")
vid.width = 320;
vid.controls = true;
vid.autoplay = true;
const root = document.body;
document.body.appendChild(vid);
return vid;
}
async function RunTestInit() {
console.log("get media access");
const p1_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
const p2_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
console.log("stream setup");
const p1_stream_in = new MediaStream();
const p2_stream_in = new MediaStream();
const p1_video_in = createVideoElement();
const p2_video_in = createVideoElement();
console.log("peer setup");
const p1 = new RTCPeerConnection();
const p2 = new RTCPeerConnection();
const p1_tca = p1.addTransceiver("audio", {
direction: "sendrecv"
});
const p1_tcv = p1.addTransceiver("video", {
direction: "sendrecv"
});
p1.onicecandidate = (ev) => {
p2.addIceCandidate(ev.candidate);
}
p2.onicecandidate = (ev) => {
p1.addIceCandidate(ev.candidate);
}
p1.onconnectionstatechange = (ev) => {
console.log("p1 state: ", p1.connectionState);
}
p2.onconnectionstatechange = async (ev) => {
console.log("p2 state: ", p2.connectionState);
}
p1.onnegotiationneeded = () => {
//triggers once
console.warn("p1.onnegotiationneeded");
}
p2.onnegotiationneeded = () => {
//should never trigger
console.warn("p2.onnegotiationneeded");
}
p1.ontrack = (ev) => {
console.log("p1.ontrack", ev);
p1_stream_in.addTrack(ev.track);
p1_video_in.srcObject = p1_stream_in;
}
p2.ontrack = (ev) => {
console.log("p2.ontrack", ev);
p2_stream_in.addTrack(ev.track);
p2_video_in.srcObject = p2_stream_in;
}
console.log("signaling");
const offer = await p1.createOffer();
await p1.setLocalDescription(offer);
await p2.setRemoteDescription(offer);
const p2_tca = p2.getTransceivers()[0];
const p2_tcv = p2.getTransceivers()[1];
p2_tca.direction = "sendrecv"
p2_tcv.direction = "sendrecv"
const answer = await p2.createAnswer();
await p2.setLocalDescription(answer);
await p1.setRemoteDescription(answer);
console.log("signaling done");
//send audio from p2 to p1 (direction doesn't matter)
//after this runs nothing will happen and no audio plays
setTimeout(async () => {
await p2_tca.sender.replaceTrack(p2_stream_out.getAudioTracks()[0]);
console.warn("audio playback should start now but nothing happens");
}, 1000);
//audio starts playing once this runs
setTimeout(async () => {
//uncomment this and it works just fine
await p2_tcv.sender.replaceTrack(p2_stream_out.getVideoTracks()[0]);
console.warn("now audio playback starts");
}, 10000);
}
function start() {
setTimeout(async () => {
console.log("Init test case");
await RunTestInit();
}, 1);
}
Same example in the js fiddle (needs camera and microphone access):
https://jsfiddle.net/vnztcx5p/5/
Once audio works this will cause an echo.
That is a known issue. https://bugs.chromium.org/p/chromium/issues/detail?id=813243 and https://bugs.chromium.org/p/chromium/issues/detail?id=403710 have some background information.
In a nutshell, the video element expects you to send audio and video data, and these need to be synchronized. But you don't send any video data, and the element needs to fire a loadedmetadata and a resize event because that is what the specification says. Hence it will block audio indefinitely.
You can enable/disable audio and video tracks, so you don't have to renegotiate. Note that these tracks have to be added before negotiation starts. You can achieve it with:
mediaStream.getAudioTracks()[0].enabled = false; // or true to enable it.
Or if you want to disable video:
mediaStream.getVideoTracks()[0].enabled = false; // or true to enable it.
Here is the documentation: getAudioTracks() and getVideoTracks().
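For instance, a minimal sketch of wiring that to mute / camera-off buttons (assuming localStream is the MediaStream obtained from getUserMedia and its tracks were added before negotiation):
// Toggle outgoing audio/video without renegotiating
function setAudioEnabled(localStream, enabled) {
  localStream.getAudioTracks().forEach((track) => { track.enabled = enabled; });
}
function setVideoEnabled(localStream, enabled) {
  localStream.getVideoTracks().forEach((track) => { track.enabled = enabled; });
}
// e.g. muteBtn.onclick = () => setAudioEnabled(localStream, false);
//      cameraOffBtn.onclick = () => setVideoEnabled(localStream, false);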
I got this working. It looks like this is more a problem with how HTMLVideoElement works than with WebRTC.
If I set
p1_video_in.srcObject = p1_stream_in;
p2_video_in.srcObject = p2_stream_in;
before I add the tracks to the stream it works.
Complete example looks like this:
function createVideoElement() {
const vid = document.createElement("video")
vid.width = 320;
vid.controls = true;
vid.autoplay = true;
const root = document.body;
document.body.appendChild(vid);
return vid;
}
async function RunTestInit() {
console.log("get media access");
const p1_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
const p2_stream_out = await navigator.mediaDevices.getUserMedia({
video: true,
audio: true
});
console.log("stream setup");
const p1_stream_in = new MediaStream();
const p2_stream_in = new MediaStream();
const p1_video_in = createVideoElement();
const p2_video_in = createVideoElement();
p1_video_in.srcObject = p1_stream_in;
p2_video_in.srcObject = p2_stream_in;
console.log("peer setup");
const p1 = new RTCPeerConnection();
const p2 = new RTCPeerConnection();
const p1_tca = p1.addTransceiver("audio", {
direction: "sendrecv"
});
const p1_tcv = p1.addTransceiver("video", {
direction: "sendrecv"
});
p1.onicecandidate = (ev) => {
p2.addIceCandidate(ev.candidate);
}
p2.onicecandidate = (ev) => {
p1.addIceCandidate(ev.candidate);
}
p1.onconnectionstatechange = (ev) => {
console.log("p1 state: ", p1.connectionState);
}
p2.onconnectionstatechange = async (ev) => {
console.log("p2 state: ", p2.connectionState);
}
p1.onnegotiationneeded = () => {
//triggers once
console.warn("p1.onnegotiationneeded");
}
p2.onnegotiationneeded = () => {
//should never trigger
console.warn("p2.onnegotiationneeded");
}
p1.ontrack = (ev) => {
console.log("p1.ontrack", ev);
p1_stream_in.addTrack(ev.track);
}
p2.ontrack = (ev) => {
console.log("p2.ontrack", ev);
p2_stream_in.addTrack(ev.track);
}
console.log("signaling");
const offer = await p1.createOffer();
await p1.setLocalDescription(offer);
await p2.setRemoteDescription(offer);
const p2_tca = p2.getTransceivers()[0];
const p2_tcv = p2.getTransceivers()[1];
p2_tca.direction = "sendrecv"
p2_tcv.direction = "sendrecv"
const answer = await p2.createAnswer();
await p2.setLocalDescription(answer);
await p1.setRemoteDescription(answer);
console.log("signaling done");
//send audio from p2 to p1 (direction doesn't matter)
//after this runs nothing will happen and no audio plays
setTimeout(async () => {
await p2_tca.sender.replaceTrack(p2_stream_out.getAudioTracks()[0]);
console.warn("audio playback should start now but nothing happens");
}, 1000);
//audio starts playing once this runs
setTimeout(async () => {
//uncomment this and it works just fine
await p2_tcv.sender.replaceTrack(p2_stream_out.getVideoTracks()[0]);
console.warn("now audio playback starts");
}, 10000);
}
function start() {
setTimeout(async () => {
console.log("Init test case");
await RunTestInit();
}, 1);
}
I am following along with the Fireship Electron tutorial and I get this message saying there was an error, even though the tutorial doesn't hit it:
Uncaught (in promise) TypeError: Cannot read property 'buildFromTemplate' of undefined
at HTMLButtonElement.getVideoSources
const { writeFile } = require("fs");
const dialog = remote;
const Menu = remote;
// Global state
let mediaRecorder; // MediaRecorder instance to capture footage
const recordedChunks = [];
// Buttons
const videoElement = document.querySelector("video");
const startBtn = document.getElementById("startBtn");
startBtn.onclick = (e) => {
mediaRecorder.start();
startBtn.classList.add("is-danger");
startBtn.innerText = "Recording";
};
const stopBtn = document.getElementById("stopBtn");
stopBtn.onclick = (e) => {
mediaRecorder.stop();
startBtn.classList.remove("is-danger");
startBtn.innerText = "Start";
};
const videoSelectBtn = document.getElementById("videoSelectBtn");
videoSelectBtn.onclick = getVideoSources;
// Get the available video sources
async function getVideoSources() {
const inputSources = await desktopCapturer.getSources({
types: ["window", "screen"],
});
const videoOptionsMenu = Menu.buildFromTemplate(
inputSources.map((source) => {
return {
label: source.name,
click: () => selectSource(source),
};
})
);
videoOptionsMenu.popup();
}
// Change the videoSource window to record
async function selectSource(source) {
videoSelectBtn.innerText = source.name;
const constraints = {
audio: false,
video: {
mandatory: {
chromeMediaSource: "desktop",
chromeMediaSourceId: source.id,
},
},
};
// Create a Stream
const stream = await navigator.mediaDevices.getUserMedia(constraints);
// Preview the source in a video element
videoElement.srcObject = stream;
videoElement.play();
// Create the Media Recorder
const options = { mimeType: "video/webm; codecs=vp9" };
mediaRecorder = new MediaRecorder(stream, options);
// Register Event Handlers
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.onstop = handleStop;
// Updates the UI
}
// Captures all recorded chunks
function handleDataAvailable(e) {
console.log("video data available");
recordedChunks.push(e.data);
}
// Saves the video file on stop
async function handleStop(e) {
const blob = new Blob(recordedChunks, {
type: "video/webm; codecs=vp9",
});
const buffer = Buffer.from(await blob.arrayBuffer());
const { filePath } = await dialog.showSaveDialog({
buttonLabel: "Save video",
defaultPath: `vid-${Date.now()}.webm`,
});
if (filePath) {
writeFile(filePath, buffer, () => console.log("video saved successfully!"));
}
}
This code is from the source file in the tutorial's GitHub repo.
Can you help me fix it? Thank you.
It should be:
const { remote } = require("electron");
const { dialog, Menu } = remote;
not:
const dialog = remote;
const Menu = remote;
And I believe you are using a newer version of Electron, so you need to create the BrowserWindow like this:
const mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
nodeIntegration: true,
enableRemoteModule: true
}
})
This is because you are not allowed to use remote in the renderer, since enableRemoteModule is false by default from Electron 10 onwards.
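Note that from Electron 14 the remote module has been removed from core entirely; on those versions the separate @electron/remote package is the usual replacement. A sketch, assuming @electron/remote v2+ is installed and enabled for this window in the main process:
// main process: enable the remote module for this window
// const remoteMain = require('@electron/remote/main');
// remoteMain.initialize();
// remoteMain.enable(mainWindow.webContents);

// renderer process:
const remote = require('@electron/remote');
const { dialog, Menu } = remote;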
I'm experimenting with Google Cloud Vision API and trying to extract colors from an image.
The problem occurs when I try to send a cropped image to the Vision API and I get a "Bad image data" error message back.
The Base64 I send to the Vision API that triggers the error works fine when I send it to the client and create an image or a canvas from it.
The plain URL that is sent to the server through a request header (req.headers.imageurl) works when sent to the Vision API.
The images created from the server's base64 are fully functional when downloaded and uploaded to the "Test the api" demo at https://cloud.google.com/vision.
Could it have something to do with a cross-origin problem? But then I don't get why I can draw it to a canvas later on when it is sent to the client.
Thanks in advance for the help!
The function that builds and sends the Vision API request from the cropped image:
// Imports the Google Cloud client library
const vision = require("@google-cloud/vision");
const client = new vision.ImageAnnotatorClient();
const fetch = require("../functions/Fetch");
const imageEdit = require("../functions/ImageEdit");
exports.colorDetection = async (req, res) => {
// Then you 'get' your image like so:
await fetch.getImage(req.headers.imageurl, async function (err, data) {
// Handle the error if there was an error getting the image.
if (err) {
throw new Error(err);
}
// Crop Image
const cropSettings = JSON.parse(req.headers.cropsettings);
const croppedImage = await imageEdit.CropImage(data, cropSettings);
const stringify = JSON.stringify(croppedImage);
const request = {
image: {
content: Buffer.from(croppedImage).toString("base64"),
},
};
// // Performs label detection on the image file
const [result] = await client.imageProperties(request);
console.log(result)
const colors = result.imagePropertiesAnnotation;
// const stringify = JSON.stringify(colors);
await res.json(stringify);
});
};
The crop image function:
const { createCanvas, Image } = require("canvas");
exports.CropImage = (data, cropSettings) => {
"user strict";
// Settings
let cropLeft = cropSettings.startX,
cropTop = cropSettings.startY,
cropWidth = cropSettings.width,
cropHeight = cropSettings.height;
// Canvas
let resize_canvas = createCanvas(cropWidth / 2, cropHeight / 2);
//Image
let crop_img = new Image();
crop_img.src = `data:image/png;base64,${data.toString("base64")}`;
function Crop() {
const ctx = resize_canvas.getContext("2d");
ctx.drawImage(
crop_img,
cropLeft,
cropTop,
cropWidth * 2,
cropHeight * 2,
0,
0,
cropWidth,
cropHeight
);
}
if (crop_img && crop_img.complete) {
Crop();
try {
const base64Img = resize_canvas.toDataURL("image/png", 1.0);
return base64Img
} catch (e) {
console.log(e);
}
} else {
crop_img.onload = function () {
Crop();
try {
const base64Img = resize_canvas.toDataURL("image/png", 1.0);
return base64Img
} catch (e) {
console.log(e);
}
};
}
};
You can use my code given below. Basically you have to convert the image into base64, remove the 'data:image/png;base64,' prefix, and pass the value into the content key.
const getBase64FromUrl = async (url) => {
const data = await fetch(url);
const blob = await data.blob();
return new Promise((resolve) => {
const reader = new FileReader();
reader.readAsDataURL(blob);
reader.onloadend = () => {
const base64data = reader.result;
resolve(base64data);
};
});
};
// getBase64FromUrl is async, so await it before splitting off the data URL prefix
const base64Image = (await getBase64FromUrl('url')).split(',')[1];
image: {
content: base64Image,
},
This should work perfectly!
I struggled with this for a while. It turns out that removing the data:image/png;base64, prefix sorts things out.
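Applied to the server code from the question, the same idea looks roughly like this (a sketch; it assumes CropImage resolves to a data URL string, as in the crop function shown above):
// CropImage returns a data URL like "data:image/png;base64,iVBORw0..."
const croppedImage = await imageEdit.CropImage(data, cropSettings);

// Strip the "data:image/png;base64," prefix so Vision receives raw base64
const base64Content = croppedImage.split(',')[1];

const request = {
  image: {
    content: base64Content,
  },
};
const [result] = await client.imageProperties(request);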
I have a database listener in my code and I am trying to get all of the user's new posts and then (when I have all of them in an array) update the posts state.
My code looks like this, but it is not working well, because setPosts is async and it might sometimes be called again before the previous state update has finished. I think I need to wrap the listener in a Promise, but I have no idea how to do that while still detaching the listener when the component unmounts.
useEffect(() => {
const { firebase } = props;
// Realtime database listener
const unsuscribe = firebase
.getDatabase()
.collection("posts")
.doc(firebase.getCurrentUser().uid)
.collection("userPosts")
.onSnapshot((snapshot) => {
let changes = snapshot.docChanges();
changes.forEach(async (change) => {
if (change.type === "added") {
// Get the new post
const newPost = change.doc.data();
// TODO - Move to flatlist On end reached
const uri = await firebase
.getStorage()
.ref(`photos/${newPost.id}`)
.getDownloadURL();
// TODO - Add the new post *(sorted by time)* to the posts list
setPosts([{ ...newPost, uri }, ...posts]);
}
});
});
/* PS: the first time it runs, this listener will get all the user's posts */
return () => {
// Detach the listening agent
unsuscribe();
};
}, []);
Any ideas?
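One direction I have considered is the functional form of setPosts, so each snapshot change merges into the latest state instead of the posts value captured by the effect's closure (an untested sketch of just the listener body):
.onSnapshot((snapshot) => {
  snapshot.docChanges().forEach(async (change) => {
    if (change.type === "added") {
      const newPost = change.doc.data();
      const uri = await firebase
        .getStorage()
        .ref(`photos/${newPost.id}`)
        .getDownloadURL();
      // Functional update: based on the latest state, not the value captured by the closure
      setPosts((prevPosts) => [{ ...newPost, uri }, ...prevPosts]);
    }
  });
});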
Also, I have thought of doing this:
useEffect(() => {
const { firebase } = props;
let postsArray = [];
// Realtime database listener
const unsuscribe = firebase
.getDatabase()
.collection("posts")
.doc(firebase.getCurrentUser().uid)
.collection("userPosts")
.orderBy("time") // Sorted by date
.onSnapshot((snapshot) => {
let changes = snapshot.docChanges();
changes.forEach(async (change) => {
if (change.type === "added") {
// Get the new post
const newPost = change.doc.data();
// Add the new post to the posts list
postsArray.push(newPost);
}
});
setPosts(postsArray.reverse());
});
But in this case, the post uri is also saved in the Firestore document (something I can do because I write to Firestore with a Cloud Function that gets the post from Storage), and I don't know if that is good practice.
Thanks.
Update
Cloud Function code:
exports.validateImageDimensions = functions
.region("us-central1")
.runWith({ memory: "2GB", timeoutSeconds: 120 })
.https.onCall(async (data, context) => {
// Libraries
const admin = require("firebase-admin");
const sizeOf = require("image-size");
const url = require("url");
const https = require("https");
const sharp = require("sharp");
const path = require("path");
const os = require("os");
const fs = require("fs");
// Lazy initialization of the Admin SDK
if (!is_validateImageDimensions_initialized) {
const serviceAccount = require("./serviceAccountKey.json");
admin.initializeApp({
// ...
});
is_validateImageDimensions_initialized = true;
}
// Create Storage
const storage = admin.storage();
// Create Firestore
const firestore = admin.firestore();
// Get the image's owner
const owner = context.auth.token.uid;
// Get the image's info
const { id, description, location, tags } = data;
// Photos's bucket
const bucket = storage.bucket("bucket-name");
// File Path
const filePath = `photos/${id}`;
// Get the file
const file = getFile(filePath);
// Check if the file is a jpeg image
const metadata = await file.getMetadata();
const isJpgImage = metadata[0].contentType === "image/jpeg";
// Get the file's url
const fileUrl = await getUrl(file);
// Get the photo dimensions using the `image-size` library
getImageFromUrl(fileUrl)
.then(async (image) => {
// Check if the image has valid dimensions
let dimensions = sizeOf(image);
// Create the associated Firestore's document to the valid images
if (isJpgImage && hasValidDimensions(dimensions)) {
// Create a thumbnail for the uploaded image
const thumbnailPath = await generateThumbnail(filePath);
// Get the thumbnail
const thumbnail = getFile(thumbnailPath);
// Get the thumbnail's url
const thumbnailUrl = await getUrl(thumbnail);
try {
await firestore
.collection("posts")
.doc(owner)
.collection("userPosts")
.add({
id,
uri: fileUrl,
thumbnailUri: thumbnailUrl, // Useful for progress images
description,
location,
tags,
date: admin.firestore.FieldValue.serverTimestamp(),
likes: [], // At the first time, when a post is created, zero users has liked it
comments: [], // Also, there aren't any comments
width: dimensions.width,
height: dimensions.height,
});
// TODO: Analytics posts counter
} catch (err) {
console.error(
`Error creating the document in 'posts/{owner}/userPosts/' where 'id === ${id}': ${err}`
);
}
} else {
// Remove the files that are not jpeg images, or whose dimensions are not valid
try {
await file.delete();
console.log(
`The image '${id}' has been deleted because it has invalid dimensions.
This may be an attempt to break the security of the app made by the user '${owner}'`
);
} catch (err) {
console.error(`Error deleting invalid file '${id}': ${err}`);
}
}
})
.catch((e) => {
console.log(e);
});
/* ---------------- AUXILIARY FUNCTIONS ---------------- */
function getFile(filePath) {
/* Get a file from the storage bucket */
return bucket.file(filePath);
}
async function getUrl(file) {
/* Get the public url of a file */
const signedUrls = await file.getSignedUrl({
action: "read",
expires: "01-01-2100",
});
// signedUrls[0] contains the file's public URL
return signedUrls[0];
}
function getImageFromUrl(uri) {
return new Promise((resolve, reject) => {
const options = url.parse(uri); // Automatically converted to an ordinary options object.
const request = https.request(options, (response) => {
if (response.statusCode < 200 || response.statusCode >= 300) {
return reject(new Error("statusCode=" + response.statusCode));
}
let chunks = [];
response.on("data", (chunk) => {
chunks.push(chunk);
});
response.on("end", () => {
try {
chunks = Buffer.concat(chunks);
} catch (e) {
reject(e);
}
resolve(chunks);
});
});
request.on("error", (e) => {
reject(e.message);
});
// Send the request
request.end();
});
}
function hasValidDimensions(dimensions) {
// Posts' valid dimensions
const validDimensions = [
{
width: 1080,
height: 1080,
},
{
width: 1080,
height: 1350,
},
{
width: 1080,
height: 750,
},
];
return (
validDimensions.find(
({ width, height }) =>
width === dimensions.width && height === dimensions.height
) !== undefined
);
}
async function generateThumbnail(filePath) {
/* Generate thumbnail for the progressive images */
// Download file from bucket
const fileName = filePath.split("/").pop();
const tempFilePath = path.join(os.tmpdir(), fileName);
const thumbnailPath = await bucket
.file(filePath)
.download({
destination: tempFilePath,
})
.then(() => {
// Generate a thumbnail using Sharp
const size = 50;
const newFileName = `${fileName}_${size}_thumb.jpg`;
const newFilePath = `thumbnails/${newFileName}`;
const newFileTemp = path.join(os.tmpdir(), newFileName);
sharp(tempFilePath)
.resize(size, null)
.toFile(newFileTemp, async (_err, info) => {
// Uploading the thumbnail.
await bucket.upload(newFileTemp, {
destination: newFilePath,
});
// Once the thumbnail has been uploaded delete the temporal file to free up disk space.
fs.unlinkSync(tempFilePath);
});
// Return the thumbnail's path
return newFilePath;
});
return thumbnailPath;
}
});