How to upload an image to S3 using Node - javascript

I am writing an Express app that takes in a base64-encoded string representing an image. I'm not sure how to take that string and upload the image to AWS S3 directly, so right now I read in the encoded image string, decode it, write a file using fs, and then try to upload that file. I have this working for an endpoint that takes in a raw file, and all of its content is correctly uploaded to AWS S3.
When I try the approach described above, I'm able to upload to S3, but the file is 0 KB and empty, and I'm not sure why. If I just take the string data and write it to a test file locally, it works. However, when I upload to S3, the file shows up but is empty. Here is my code:
router.post('/images/tags/nutritionalInformation/image/base64encoded', function (req, res) {
    console.log(req.body.imageString);
    var base64Stream = req.body.imageString;
    var imgDecodedBuffer = decodeBase64Image(base64Stream);
    console.log(imgDecodedBuffer);

    // write to image file
    var prefix = guid().toString() + ".jpg";
    var filePath = './uploads/' + prefix;
    console.log(filePath);
    fs.writeFile(filePath, imgDecodedBuffer.data, function(err) {
        console.log(err);
    });

    var stream = fs.createReadStream(filePath);
    console.log(stream);

    return s3fsImpl.writeFile(prefix, stream).then(function () {
        fs.unlink(filePath, function (err) {
            if (err) {
                console.error(err);
            }
        });
    });
})
Here are the relevant import statements:
var fs = require('fs');
var s3fs = require('s3fs');
var multiparty = require('connect-multiparty'),
    multipartyMidleware = multiparty();
var s3fsImpl = new s3fs('blahblah', {
    accessKeyId: 'ACCESS_KEY_ID',
    secretAccessKey: 'SECRET'
});
Any help would be greatly appreciated!

If you simply pass in the buffer itself, which I presume is your imgDecodedBuffer.data value, it should work. As written, the upload is empty because fs.writeFile is asynchronous: fs.createReadStream opens the file before the write has finished, so S3 receives 0 bytes.
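A minimal sketch of the route with the temp file removed entirely — assuming the decodeBase64Image and guid helpers from the question, and that s3fs's promise-returning writeFile accepts a Buffer the way fs.writeFile does:
router.post('/images/tags/nutritionalInformation/image/base64encoded', function (req, res) {
    var imgDecodedBuffer = decodeBase64Image(req.body.imageString);
    var key = guid().toString() + '.jpg';

    // hand the decoded Buffer straight to s3fs; with no temp file
    // there is no race between writing the file and reading it back
    s3fsImpl.writeFile(key, imgDecodedBuffer.data).then(function () {
        res.sendStatus(200);
    }, function (err) {
        console.error(err);
        res.sendStatus(500);
    });
});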

Related

uploading multiple files to s3 with sailsjs

I am not using the multer package because I am not using Express, so I am not sure how multer could work with Sails.js.
Anyway, I am trying to upload multiple files to S3. At first I used a for loop, which did not work because the loop is synchronous while the file upload is asynchronous.
Then I read that a recursive approach would work, so I tried it, but somehow it still didn't.
The files are uploaded, but the sizes aren't right for any of them.
The size might come out bigger or smaller, and when I download a file — say a doc file — either I get an error saying it's not an MS Word file, or its contents are all scrambled. If it's a PDF, it says the PDF file failed to open.
If I try with only one file, it works sometimes, but not always.
Did I do something wrong with the code below?
s3_upload_multi: async function (req) {
    try {
        let fieldName = req._fileparser.upstreams[0].fieldName;
        let files = req.file(fieldName)._files;
        let return_obj = [];

        const upload_rec = files => {
            if (files.length <= 0) return return_obj;
            const f = files.pop();
            const fileUpload = f.stream;
            const s3 = new AWS.S3();
            // https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
            s3.putObject({ // uses s3 sdk
                Bucket: sails.config.aws.bucket,
                Key: 'blahblahblahblahblah',
                Body: fileUpload._readableState.buffer.head.data, // buffer from file
                ACL: 'public-read',
            }, function ( err, data ) {
                if (err) reject(err);
                return_obj.push(data);
                console.log(return_obj, 'return_obj');
            });
            return upload_rec(files);
        };
        upload_rec(files);
    } catch (e) {
        console.log(e, 'inside UploadService');
        return false;
    }
}
Thanks in advance for any advice and suggestions.
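(For reference, one way to stop the loop from racing the callbacks — a sketch only, not a verified fix — is to promisify each upload and await it. This assumes the same AWS SDK v2 client and Sails config as the question, and swaps putObject for s3.upload(), which accepts a stream directly instead of reaching into _readableState:)
s3_upload_multi: async function (req) {
    try {
        const fieldName = req._fileparser.upstreams[0].fieldName;
        const files = req.file(fieldName)._files;
        const s3 = new AWS.S3();
        const return_obj = [];

        for (const f of files) {
            // upload(params).promise() lets the loop await each transfer,
            // so the next iteration starts only after the previous one finishes
            const data = await s3.upload({
                Bucket: sails.config.aws.bucket,
                Key: 'blahblahblahblahblah',
                Body: f.stream, // pass the stream itself, not its internal buffer state
                ACL: 'public-read',
            }).promise();
            return_obj.push(data);
        }
        return return_obj;
    } catch (e) {
        console.log(e, 'inside UploadService');
        return false;
    }
}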

React: Read Zip file using JSZip from the project directory

I am trying to read a zip file containing images from the project directory in React.
When I open the zip file from <input type="file" />, it works by taking event.target.files[0].
But when I put that same file in the React project and then try to open it by giving a path, it doesn't work.
Example: "./test.zip"
My current code:
let jsZip = new JSZip();
jsZip.loadAsync("./test.zip").then(function (zip) {
    let imagess = [];
    Object.keys(zip.files).forEach(function (filename) {
        zip.files[filename].async("base64").then(function (fileData) {
            const image = document.createElement("img");
            image.src = "data:image/*;base64," + fileData;
            document.querySelector(".unziped-container").appendChild(image);
        });
    });
});
I have been stuck on this for hours and the documentation is not helping either.
Any help is appreciated.
Anyone coming across this can use JSZip-utils and write the following code:
JSZipUtils.getBinaryContent("../path/file.zip", function (err, data) {
    if (err) {
        throw err;
    }
    const jsZip = new JSZip();
    jsZip.loadAsync(data).then(function (zip) {
        Object.keys(zip.files).forEach(function (filename) {
            zip.files[filename].async("base64").then(function (fileData) {
                const image = document.createElement("img");
                image.src = "data:image/*;base64," + fileData;
                const unziped = document.querySelector(".unziped-container");
                unziped.appendChild(image);
            });
        });
    });
});
The documentation seems pretty clear: the first argument to loadAsync is the zip file's data, but you're passing it a string.
(And what you're trying to do won't work with a bare path anyway: your React code runs in the browser, which has no direct access to filesystem paths — the file has to be fetched over HTTP, which is what JSZip-utils does above.)
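For what it's worth, the same thing should also be possible without JSZip-utils by fetching the zip over HTTP with the browser's fetch API — a sketch, assuming the zip is actually served by the dev server (e.g. placed in the public folder):
fetch("./test.zip")
    .then(function (res) {
        if (!res.ok) throw new Error("failed to fetch zip: " + res.status);
        return res.arrayBuffer(); // JSZip's loadAsync accepts an ArrayBuffer
    })
    .then(function (data) {
        return new JSZip().loadAsync(data);
    })
    .then(function (zip) {
        Object.keys(zip.files).forEach(function (filename) {
            zip.files[filename].async("base64").then(function (fileData) {
                const image = document.createElement("img");
                image.src = "data:image/*;base64," + fileData;
                document.querySelector(".unziped-container").appendChild(image);
            });
        });
    });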

Node.js - Getting empty files when unzipping and uploading to GCS

I am trying to create a service that gets a zip file, unpacks it, and uploads its contents to a Google Cloud Storage bucket.
The unzipping part seems to work well, but in my GCS bucket all the files seem to be empty.
I'm using the following code:
app.post('/fileupload', function(req, res) {
    var form = new formidable.IncomingForm();
    form.parse(req, function (err, fields, files) {
        const uuid = uuidv4();
        console.log(files.filetoupload.path); // temporary path to zip
        fs.createReadStream(files.filetoupload.path)
            .pipe(unzip.Parse())
            .on('entry', function (entry) {
                var fileName = entry.path;
                var type = entry.type; // 'Directory' or 'File'
                var size = entry.size;
                const gcsname = uuid + '/' + fileName;
                const blob = bucket.file(gcsname);
                const blobStream = blob.createWriteStream(entry.path);
                blobStream.on('error', (err) => {
                    console.log(err);
                });
                blobStream.on('finish', () => {
                    const publicUrl = format(`https://storage.googleapis.com/${bucket.name}/${blob.name}`);
                    console.log(publicUrl); // file on GCS
                });
                blobStream.end(entry.buffer);
            });
    });
});
I'm quite new to Node.js, so I'm probably overlooking something - I've spent some time on the documentation but I don't quite know what to do.
Could someone advise on what might be the problem?
fs.createWriteStream() takes a file path as its argument, but the GCS createWriteStream() takes an options object.
As per the example in this documentation, the recommended way would be:
const stream = file.createWriteStream({
    metadata: {
        contentType: req.file.mimetype
    },
    resumable: false
});
instead of:
const blobStream = blob.createWriteStream(entry.path).
Also check whether your buffer is undefined or not. It may be that, with no disk/memory storage specified, the buffer remains undefined.
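Putting the two answers together, a sketch of the entry handler that pipes the entry (itself a readable stream) into the GCS write stream, instead of ending the stream with the undefined entry.buffer — same bucket and uuid as in the question:
.on('entry', function (entry) {
    const gcsname = uuid + '/' + entry.path;
    const blob = bucket.file(gcsname);
    entry
        .pipe(blob.createWriteStream({ resumable: false })) // options object, not a path
        .on('error', (err) => console.log(err))
        .on('finish', () => {
            console.log(`https://storage.googleapis.com/${bucket.name}/${blob.name}`);
        });
});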

ipfs-mini cat API's output buffer seems corrupted for a hash pointing to an image file

I am a newbie to both JavaScript and IPFS, and as an experiment I am trying to fetch an image buffer for the IPFS hash "QmdD8FL7N3kFnWDcPSVeD9zcq6zCJSUD9rRSdFp9tyxg1n" using the ipfs-mini node module.
Below is my code
const IPFS = require('ipfs-mini');
const FileReader = require('filereader');
var multer = require('multer');

const ipfs = initialize();

app.post('/upload', function(req, res){
    upload(req, res, function(err){
        console.log(req.file.originalname);
        ipfs.cat('QmdD8FL7N3kFnWDcPSVeD9zcq6zCJSUD9rRSdFp9tyxg1n', function(err, data){
            if(err) console.log("could not get the image from the ipfs for hash " + ghash);
            else {
                var wrt = data.toString('base64');
                console.log('size ; ' + wrt.length);
                fs.writeFile('tryipfsimage.gif', wrt, (err) => {
                    if(err) console.log('can not write file');
                    else {
                        //console.log(data);
                        ipfs.stat('QmdD8FL7N3kFnWDcPSVeD9zcq6zCJSUD9rRSdFp9tyxg1n', (err, data) => {
                            // console.log(hexdump(wrt));
                        });
                        console.log("files written successfully");
                    }
                });
            }
        });
    });
});

function initialize() {
    console.log('Initializing the ipfs object');
    return new IPFS({
        host: 'ipfs.infura.io',
        protocol: 'https'
    });
}
I can view the image properly in the browser via "https://ipfs.io/ipfs/QmdD8FL7N3kFnWDcPSVeD9zcq6zCJSUD9rRSdFp9tyxg1n", but if I open the file 'tryipfsimage.gif', into which I dump the buffer returned by the cat API in the program above, the image content appears corrupted. I am not sure what mistake I am making in the code; it would be great if someone could point it out.
From the ipfs docs: https://github.com/ipfs/interface-ipfs-core/blob/master/SPEC/FILES.md#javascript---ipfsfilescatipfspath-callback
The value in the callback (data in your code) is already a Buffer, so by toString('base64')'ing it you are writing actual base64 text into the .gif file - no need to do this. You can pass the Buffer directly to the fs.writeFile API:
fs.writeFile('tryipfsimage.gif', data, ...
For larger files I would recommend using catReadableStream, where you can do something more like:
const stream = ipfs.catReadableStream('QmdD8FL7N3kFnWDcPSVeD9zcq6zCJSUD9rRSdFp9tyxg1n')
// don't forget to add error handlers to stream and whatnot
const fileStream = fs.createWriteStream('tryipfsimage.gif')
stream.pipe(fileStream);
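For completeness, a version of that stream sketch with the error handlers wired up (note: as far as I know, catReadableStream comes from the full js-ipfs / ipfs-http-client API, not from ipfs-mini, which only exposes the callback-style cat used above):
const stream = ipfs.catReadableStream('QmdD8FL7N3kFnWDcPSVeD9zcq6zCJSUD9rRSdFp9tyxg1n');
const fileStream = fs.createWriteStream('tryipfsimage.gif');

stream.on('error', (err) => console.error('ipfs read failed:', err));
fileStream.on('error', (err) => console.error('file write failed:', err));
fileStream.on('finish', () => console.log('image written successfully'));

stream.pipe(fileStream);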

node.js upload and download pdf file

Framework: Node.js / Express.js / busboy / gridfs-stream (MongoDB)
I am using busboy to upload files and then use gridfs-stream to store files in mongodb gridfs.
req.pipe(req.busboy);
req.busboy.on('file', function (bus_fieldname, bus_file, bus_filename) {
    var writestream = gfs.createWriteStream({
        filename: bus_filename,
    });
    bus_file.pipe(writestream);
    writestream.on('close', function (file) {
        res.redirect('/xxxxx/');
    });
});
Download is simple: using gridfs-stream's createReadStream, I read the contents from MongoDB and then use the following code to send them to the browser.
gfs.findOne({_id: attachmentid}, function (err, file) {
    if (err || !file) {
        res.send(404);
    } else {
        var fileName = file.filename;
        var readstream = gfs.createReadStream({_id: attachmentid});
        var buffer = "";
        readstream.on("data", function (chunk) {
            buffer += chunk;
        });
        // dump contents to buffer
        readstream.on("end", function () {
            res.set("Content-Disposition", "attachment; filename=" + fileName);
            res.send(buffer);
        });
    }
});
Problem: When I upload a 90 KB PDF file, it uploads fine, and I can see the correct size in MongoDB. But when I download it, the downloaded file is about 165 KB - a mismatch. This does not happen with text files, so it is surely something to do with the data type.
Can anyone please help?
Pipe the gfs read stream to the response directly. This is what works for me:
res.set("Content-Disposition","attachment; filename=" + fileName);
var readstream = gfs.createReadStream({_id: attachmentid});
readstream.pipe(res);
As Sasikanth said, you can fix the problem with piping, and that's the best approach.
But if you are wondering what went wrong with your current approach, read the Buffer documentation. You named your variable buffer but assigned it a string.
Then in your data callback you append a Buffer to a string. When you do that, each chunk Buffer is converted to a string via its toString() method, so your binary data is decoded as a UTF-8 string - and this is where it goes wrong: byte sequences that are not valid UTF-8 get replaced with the replacement character (U+FFFD), which re-encodes to three bytes, so the output grows.
So the right way to do it is to keep the chunks as Buffers:
var buffers = [];
readstream.on("data", function (chunk) {
    buffers.push(chunk);
});
readstream.on("end", function () {
    res.set("Content-Disposition", "attachment; filename=" + fileName);
    res.send(Buffer.concat(buffers));
});
