TypeError: Expected the input argument to be of type Uint8Array or Buffer or ArrayBuffer, got object - javascript

So I have made a Meme Gallery app.
Here is the link: https://meme-gallery-web-app.netlify.app/
You can upload an image via a link. First of all, I wanted to check whether the submitted URL has a jpg/png extension, so I could decide whether to upload it or not.
That's why I tried to use the image-type package, but it gives me the error mentioned above.
Here is the code where I implemented it:
const https = require("https");
const imageType = require("image-type");
const imageModel = require("../models/models.js");

var date = new Date();
var currentDate = date.toLocaleDateString();

const submitLink = async (req, res) => {
  const { url } = req.body;
  https.get(url, (response) => {
    response.on("readable", () => {
      const chunk = response.read(imageType.minimumBytes);
      response.destroy();
      console.log(imageType(chunk));
    });
  });
};
After submitting the link I got the following error:
TypeError: Expected the input argument to be of type Uint8Array or Buffer or ArrayBuffer, got object
So I checked and found that the variable chunk is an object rather than a Uint8Array or Buffer. Why is that, and how do I work around it?

I think you should read the chunks, concatenate them, and pass the resulting buffer to the image-type library:
https.get(url, (response) => {
  const chunks = [];
  response.on("data", (chunk) => {
    chunks.push(chunk);
  });
  response.on("end", () => {
    const resultBuffer = Buffer.concat(chunks);
    console.log("image type is", imageType(resultBuffer));
  });
});
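If it helps, here is a rough sketch of how that could look inside the original submitLink handler. It assumes the CommonJS build of image-type (v4.x and earlier), where imageType(buffer) synchronously returns { ext, mime } or null; the response shape is just an illustration:

const https = require("https");
const imageType = require("image-type"); // assumption: CommonJS v4.x API

const submitLink = async (req, res) => {
  const { url } = req.body;
  https.get(url, (response) => {
    const chunks = [];
    response.on("data", (chunk) => chunks.push(chunk));
    response.on("end", () => {
      // Detect the type from the downloaded bytes, not from the URL extension
      const type = imageType(Buffer.concat(chunks));
      if (type && (type.ext === "jpg" || type.ext === "png")) {
        res.json({ ok: true, mime: type.mime }); // safe to treat as an image
      } else {
        res.status(400).json({ ok: false, error: "URL is not a JPG/PNG image" });
      }
    });
    response.on("error", (err) => res.status(500).json({ error: err.message }));
  });
};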

Related

Problem reading a PDF file from a URL in Node.js

I'm using pdf.js-extract to read data from a PDF file at a URL. This is my code, and it runs well:
const fs = require('fs');
const PDFExtract = require('pdf.js-extract').PDFExtract;
const pdfExtract = new PDFExtract();
const https = require('https');

const url = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf';

const readData = async (url) => {
  https.get(url, async function (response) {
    const file = fs.createWriteStream('./dummy.pdf');
    response.pipe(file);
    file.on("finish", () => {
      pdfExtract.extract('./dummy.pdf', {})
        .then(async function (data) {
          console.log(data);
        });
      file.close();
      fs.unlinkSync('./dummy.pdf');
    });
  });
};

readData(url);
But I have two problems with this code.
The first is how to handle the case where the URL uses http instead of https; I tried node-fetch, but it doesn't seem to work as I expected.
The second is that the code takes too long when the PDF file is large, about 5 seconds for 2 MB. I wonder if there is a faster way, such as reading the file from a buffer, without having to save it as a temporary file.
Thanks for your attention.
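No answer was posted here, but one possible approach for both points is sketched below. It picks the http or https module based on the URL's protocol and parses the download directly from memory. It assumes the installed version of pdf.js-extract exposes extractBuffer(buffer, options), so check its README before relying on this:

const http = require('http');
const https = require('https');
const { PDFExtract } = require('pdf.js-extract');
const pdfExtract = new PDFExtract();

const readData = (url) =>
  new Promise((resolve, reject) => {
    const client = url.startsWith('https') ? https : http; // pick the module by protocol
    client.get(url, (response) => {
      const chunks = [];
      response.on('data', (chunk) => chunks.push(chunk));
      response.on('end', () => {
        // Parse directly from memory instead of writing a temporary file
        // (assumption: extractBuffer exists in the installed pdf.js-extract)
        pdfExtract.extractBuffer(Buffer.concat(chunks), {})
          .then(resolve)
          .catch(reject);
      });
      response.on('error', reject);
    });
  });

readData('https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf')
  .then((data) => console.log(data))
  .catch((err) => console.error(err));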

Microsoft Speech-to-Text SDK JS won't accept a file with a long array of bytes

I'm using Microsoft's Azure Speech-to-Text SDK to get text from a .wav file, using JavaScript. The problem is that the recognizer won't accept the File object and returns the error "Uncaught RangeError: Source array is too long". Calling .slice(0, 2248) on the blob that is used to make the File object works correctly, returning the correct first word of the .wav file. But trying to slice the blob into chunks like (2249, 4497) returns the error "Uncaught RangeError: Offset is outside the bounds of the DataView". I'm at a loss for how to either a) get the recognizer to accept a blob with a long source array or b) break the blob into chunks that aren't out of bounds. The .wav URL has been changed to dashes for anonymity and should be ignored. Any solutions are appreciated!
JS:
<script>
  // get wav file from url, create File object with it
  function fromFile() {
    fetch("http://www.-----------.com/prod/wp-content/uploads/2020/12/cutafew.wav")
      .then(response => response.blob())
      .then(blob => {
        var file = new File([blob], "http://www.---------.com/prod/wp-content/uploads/2020/12/cutafew.wav", {
          type: "audio/x-wav", lastModified: new Date().getTime()
        });
        // if file got successfully, do the following:
        var reader = new FileReader();
        var speechConfig = SpeechSDK.SpeechConfig.fromSubscription("f6abc3bfabc64f0d820d537c0d738788", "centralus");
        var audioConfig = SpeechSDK.AudioConfig.fromWavFileInput(file);
        var recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);

        // use recognizer to convert wav file to text
        recognizer.recognizing = (s, e) => {
          console.log(e.result);
          console.log(`RECOGNIZING: Text=${e.result.text}`);
        };

        recognizer.recognized = (s, e) => {
          if (e.result.reason == ResultReason.RecognizedSpeech) {
            console.log(`RECOGNIZED: Text=${e.result.text}`);
          } else if (e.result.reason == ResultReason.NoMatch) {
            console.log("NOMATCH: Speech could not be recognized.");
          }
        };

        recognizer.canceled = (s, e) => {
          console.log(`CANCELED: Reason=${e.reason}`);
          if (e.reason == CancellationReason.Error) {
            console.log(`CANCELED: ErrorCode=${e.errorCode}`);
            console.log(`CANCELED: ErrorDetails=${e.errorDetails}`);
            console.log("CANCELED: Did you update the subscription info?");
          }
          recognizer.stopContinuousRecognitionAsync();
        };

        recognizer.sessionStopped = (s, e) => {
          console.log("\n Session stopped event.");
          recognizer.stopContinuousRecognitionAsync();
        };

        recognizer.startContinuousRecognitionAsync();
      })
      // throw error if file wasn't created
      .catch(err => console.error(err));
  }
  fromFile();
</script>
You can use the "Recognize from in-memory stream" example:
const fs = require('fs');
const sdk = require("microsoft-cognitiveservices-speech-sdk");
const speechConfig = sdk.SpeechConfig.fromSubscription("<paste-your-speech-key-here>", "<paste-your-speech-location/region-here>");

function fromStream() {
  let pushStream = sdk.AudioInputStream.createPushStream();

  fs.createReadStream("YourAudioFile.wav").on('data', function(arrayBuffer) {
    pushStream.write(arrayBuffer.slice());
  }).on('end', function() {
    pushStream.close();
  });

  let audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
  let recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
  recognizer.recognizeOnceAsync(result => {
    console.log(`RECOGNIZED: Text=${result.text}`);
    recognizer.close();
  });
}
fromStream();
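For the browser case in the question, a hedged adaptation of the same idea is sketched below: fetch the WAV as an ArrayBuffer and feed it into a push stream instead of wrapping it in a File. It assumes the SpeechSDK browser bundle is loaded and uses placeholder credentials; the dashed URL is the question's placeholder:

function fromUrl(wavUrl) {
  const speechConfig = SpeechSDK.SpeechConfig.fromSubscription("<your-key>", "<your-region>");
  const pushStream = SpeechSDK.AudioInputStream.createPushStream();
  const audioConfig = SpeechSDK.AudioConfig.fromStreamInput(pushStream);
  const recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);

  fetch(wavUrl)
    .then((response) => response.arrayBuffer())
    .then((arrayBuffer) => {
      pushStream.write(arrayBuffer); // push the whole WAV payload into the stream
      pushStream.close();
      recognizer.recognizeOnceAsync((result) => {
        console.log(`RECOGNIZED: Text=${result.text}`);
        recognizer.close();
      });
    })
    .catch((err) => console.error(err));
}

fromUrl("http://www.-----------.com/prod/wp-content/uploads/2020/12/cutafew.wav");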

Downloading an Azure Storage Blob using pure JavaScript and Azure-Storage-Js

I'm trying to do this with just pure JavaScript and the SDK. I am not using Node.js. I'm converting my application from v2 to v10 of the SDK (azure-storage-js-v10).
The azure-storage.blob.js bundled file is compatible with the UMD standard; if no module system is found, the following global variable will be exported: azblob
My code is here:
const serviceURL = new azblob.ServiceURL(`https://${account}.blob.core.windows.net${accountSas}`, pipeline);
const containerName = "container";
const containerURL = azblob.ContainerURL.fromServiceURL(serviceURL, containerName);
const blobURL = azblob.BlobURL.fromContainerURL(containerURL, blobName);
const downloadBlobResponse = await blobURL.download(azblob.Aborter.none, 0);
The downloadBlobResponse looks like this (screenshot of the response object omitted).
Using v10, how can I convert the downloadBlobResponse into a new blob so it can be used in the FileSaver saveAs() function?
In azure-storage-js-v2 this code worked on smaller files:
let readStream = blobService.createReadStream(containerName, blobName, (err, res) => {
  if (err) {
    // Handle read blob error
  }
});

// Use event listener to receive data
readStream.on('data', data => {
  // Uint8Array retrieved
  // Convert the array back into a blob
  var newBlob = new Blob([new Uint8Array(data)]);
  // Saves file to the user's downloads directory
  saveAs(newBlob, blobName); // FileSaver.js
});
I've tried everything to get v10 working; any help would be greatly appreciated. Thanks!
You need to get the body by awaiting blobBody:
downloadBlobResponse = await blobURL.download(azblob.Aborter.none, 0);
// data is a browser Blob type
const data = await downloadBlobResponse.blobBody;
Thanks Mike Coop and Xiaoning Liu!
I was busy making a Vue.js plugin to download blobs from a storage account. Thanks to you, I was able to make this work.
var FileSaver = require('file-saver');
const { BlobServiceClient } = require("@azure/storage-blob");

const downloadButton = document.getElementById("download-button");

const downloadFiles = async () => {
  try {
    if (fileList.selectedOptions.length > 0) {
      reportStatus("Downloading files...");
      for await (const option of fileList.selectedOptions) {
        var blobName = option.text;
        const account = '<account name>';
        const sas = '<blob sas token>';
        const containerName = '<container name>';
        const blobServiceClient = new BlobServiceClient(`https://${account}.blob.core.windows.net${sas}`);
        const containerClient = blobServiceClient.getContainerClient(containerName);
        const blobClient = containerClient.getBlobClient(blobName);
        const downloadBlockBlobResponse = await blobClient.download(0); // download from offset 0
        const data = await downloadBlockBlobResponse.blobBody;
        // Saves file to the user's downloads directory
        FileSaver.saveAs(data, blobName); // FileSaver.js
      }
      reportStatus("Done.");
      listFiles();
    } else {
      reportStatus("No files selected.");
    }
  } catch (error) {
    reportStatus(error.message);
  }
};

downloadButton.addEventListener("click", downloadFiles);
Thanks Xiaoning Liu!
I'm still learning about async JavaScript functions and promises. I guess I was just missing another "await". I saw that downloadBlobResponse.blobBody was a promise and also a Blob type, but I couldn't figure out why it wouldn't convert to a new blob. I kept getting the "Iterator getter is not callable" error.
Here's my final working solution:
// Create a BlobURL
const blobURL = azblob.BlobURL.fromContainerURL(containerURL, blobName);
// Download blob
downloadBlobResponse = await blobURL.download(azblob.Aborter.none, 0);
// In browsers, get downloaded data by accessing downloadBlockBlobResponse.blobBody
const data = await downloadBlobResponse.blobBody;
// Saves file to the user's downloads directory
saveAs(data, blobName); // FileSaver.js

Node.js - Getting empty files when unzipping and uploading to GCS

I am trying to create a service that gets a zip file, unpacks it, and uploads its contents to a Google Cloud Storage bucket.
The unzipping part seems to work well, but in my GCS bucket all the files seem to be empty.
I'm using the following code:
app.post('/fileupload', function(req, res) {
  var form = new formidable.IncomingForm();
  form.parse(req, function (err, fields, files) {
    const uuid = uuidv4();
    console.log(files.filetoupload.path); // temporary path to zip
    fs.createReadStream(files.filetoupload.path)
      .pipe(unzip.Parse())
      .on('entry', function (entry) {
        var fileName = entry.path;
        var type = entry.type; // 'Directory' or 'File'
        var size = entry.size;
        const gcsname = uuid + '/' + fileName;
        const blob = bucket.file(gcsname);
        const blobStream = blob.createWriteStream(entry.path);
        blobStream.on('error', (err) => {
          console.log(err);
        });
        blobStream.on('finish', () => {
          const publicUrl = format(`https://storage.googleapis.com/${bucket.name}/${blob.name}`);
          console.log(publicUrl); // file on GCS
        });
        blobStream.end(entry.buffer);
      });
  });
});
I'm quite new to Node.js, so I'm probably overlooking something. I've spent some time on the documentation, but I don't quite know what to do.
Could someone advise on what might be the problem?
fs.createWriteStream() takes a file path as its argument, but GCS's createWriteStream() takes an options object.
As per the example in the documentation, the recommended way would be:
const stream = file.createWriteStream({
  metadata: {
    contentType: req.file.mimetype
  },
  resumable: false
});
instead of:
const blobStream = blob.createWriteStream(entry.path)
Check whether your buffer is undefined or not. The buffer may remain undefined because no disk/memory storage was specified.
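Putting both hints together, a rough sketch of the 'entry' handler could look like the following. It streams each entry into GCS instead of calling blobStream.end(entry.buffer), since entry.buffer is typically undefined when streaming. Here, bucket, uuid, files, and the unzip parser are assumed to come from the question's setup, and entry.autodrain() is assumed to exist as it does in unzipper:

fs.createReadStream(files.filetoupload.path)
  .pipe(unzip.Parse())
  .on('entry', function (entry) {
    if (entry.type !== 'File') {
      entry.autodrain(); // skip directories, but still consume their bytes
      return;
    }
    const blob = bucket.file(uuid + '/' + entry.path);
    const blobStream = blob.createWriteStream({ resumable: false });
    entry.pipe(blobStream)                 // stream the entry's contents into GCS
      .on('error', (err) => console.log(err))
      .on('finish', () => {
        console.log(`https://storage.googleapis.com/${bucket.name}/${blob.name}`);
      });
  });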

Node csv-parse halting after 16 rows

I'm experiencing very weird behavior running csv-parse in the following setup:
csv - ^1.1.0
stream-transform - ^0.1.1
node - v4.6.0
And running the following code to transform CSVs into an array of objects:
var parse = require('csv').parse
var fs = require('fs')
var streamtransform = require('stream-transform')

function mapCsvRow(headers, record) {
  return record.reduce((p, c, i) => {
    p[headers[i]] = c //eslint-disable-line
    return p
  }, {})
}

function parseFile(path) {
  var headers
  var output = []
  var parser = parse({ delimiter: ',' })
  var input = fs.createReadStream(path)
  var transformer = streamtransform((record) => {
    if (!headers) {
      headers = record
      return record
    }
    output.push(mapCsvRow(headers, record))
    return record
  })

  // Return a new promise to wrap the parsing stream
  return new Promise((resolve, reject) => {
    input
      .pipe(parser)
      .pipe(transformer)
      .on('error', e => reject(e))
      .on('finish', () => resolve(output))
  })
}

module.exports = parseFile
What happens is that the parser halts when processing files larger than 16 records. No error, no finish, nothing.
I have no idea how to debug this; I couldn't get anything out of the parser when that happens.
It looks like you have a reader stream and a transformer stream, but you don't have any writer stream, so the transformer stream fills up and pauses the read stream.
Try rewriting your code so it doesn't collect everything into the output array; it's pointless to use streams if you hold all the results in memory.
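The answer above suggests dropping the in-memory array altogether. If you do need the array interface, a minimal way to unblock the pipeline, sketched below under the question's package versions, is to consume the transformer's readable side (here with a no-op 'data' listener) so backpressure never pauses the parser:

var parse = require('csv').parse
var fs = require('fs')
var streamtransform = require('stream-transform')

function parseFile(path) {
  var headers
  var output = []
  var parser = parse({ delimiter: ',' })
  var input = fs.createReadStream(path)
  var transformer = streamtransform((record) => {
    if (!headers) {
      headers = record
      return record
    }
    output.push(record.reduce((p, c, i) => { p[headers[i]] = c; return p }, {}))
    return record
  })

  return new Promise((resolve, reject) => {
    input
      .pipe(parser)
      .pipe(transformer)
      .on('data', () => {})          // drain the transformer so its buffer never fills
      .on('error', e => reject(e))
      .on('end', () => resolve(output))
  })
}

module.exports = parseFile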
