Reduced file size after uploading to S3 bucket from nodejs - javascript

I am trying to upload files from a particular folder to a sample S3 bucket. I am using the standard Node.js aws-sdk for this. The files are Deep Zoom image (.dzi) files.
The files get uploaded to my S3 bucket, but their contents are not uploaded properly. For example, I upload an image of 800 B, but after uploading the object is only 7 B. I tried downloading it to check its content: the file doesn't contain the image, just the file name. This is the code I am running for uploading files:
function read(file, numFiles) {
  fs.readFile(file, function (err, data) {
    if (err) console.log(err);
    const fileContent = Buffer.from(file, "binary");
    s3.putObject(
      {
        Bucket: "sample-bucket",
        Key: file,
        Body: fileContent,
      },
      function (resp) {
        console.log(arguments);
        console.log("Successfully uploaded, ", file);
        uploadCount++;
        console.log("uploadcount is:", uploadCount);
        if (uploadCount == numFiles) {
          res.send("All files uploaded");
        }
      }
    ).on("httpUploadProgress", (evt) => {
      console.log(`Uploaded ${evt.loaded} out of ${evt.total}`);
    });
  });
}
I am passing files to this read function from another function. I am not sure why this is happening. Any help would be appreciated.
Image properties before uploading: (screenshot)
Properties of the image uploaded to the S3 bucket: (screenshot)

Buffer.from(file) does not return the contents of the file; it returns a buffer built from the argument itself, which here is the file name string. That is why the object uploaded to S3 contains only the file name.
Try changing this line
const fileContent = Buffer.from(file, "binary");
to this:
const fileContent = fs.readFileSync(file);
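Alternatively, since the code already reads the file asynchronously, the data buffer that fs.readFile hands to its callback can be used directly. A minimal sketch, assuming s3 and the surrounding read function are set up as in the question:

// fs.readFile already provides the file contents as a Buffer in `data`,
// so it can be passed straight to putObject as the Body.
fs.readFile(file, function (err, data) {
  if (err) return console.log(err);
  s3.putObject(
    {
      Bucket: "sample-bucket",
      Key: file,
      Body: data, // the actual file contents, not the file name
    },
    function (err, resp) {
      if (err) return console.log(err);
      console.log("Successfully uploaded", file);
    }
  );
});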

Related

How to Upload file in a directory to minIO bucket

Hello everyone. I have a bucket named 'geoxing' on a MinIO server, and the bucket has a directory img/site. I want to upload a picture into the site directory using Node.js. Below is my code, and I am getting the error Invalid bucket name: geoxing/img/site. How can I solve this error? Thanks.
savefile() {
  const filePath = 'D://repositories//uploads//geoxing//site//b57e46b4bcf879839b7074782sitePic.jpg';
  const bucketname = 'geoxing/img/site';
  var metaData = {
    'Content-Type': 'image/jpg',
    'Content-Language': 123,
    'X-Amz-Meta-Testing': 1234,
    example: 5678,
  };
  this.minioClient.fPutObject(
    bucketname,
    'b57e46b4bcf879839b7074782sitePic.jpg',
    filePath,
    metaData,
    function (err, objInfo) {
      if (err) {
        return console.log(err);
      }
      return console.log('Success', objInfo.etag);
    },
  );
}
In Amazon S3 and MinIO:
Bucket should be just the name of the bucket (e.g. geoxing)
Key should include the full path as well as the filename (e.g. img/site/b57e46b4bcf879839b7074782sitePic.jpg)
Amazon S3 and MinIO do not have 'folders' or 'directories', but they emulate directories by including the path in the Key. Folders do not need to be created before uploading to a path; they simply appear when objects are stored under that 'path'. A corrected call is sketched below.
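A minimal sketch of the corrected call, keeping the question's file path and MinIO client (the metadata value is illustrative):

// Bucket is only 'geoxing'; the directory goes into the object name (Key).
const filePath = 'D://repositories//uploads//geoxing//site//b57e46b4bcf879839b7074782sitePic.jpg';
this.minioClient.fPutObject(
  'geoxing',                                        // bucket name only
  'img/site/b57e46b4bcf879839b7074782sitePic.jpg',  // path + filename as the object key
  filePath,
  { 'Content-Type': 'image/jpeg' },
  function (err, objInfo) {
    if (err) {
      return console.log(err);
    }
    console.log('Success', objInfo.etag);
  },
);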

Node Js How to download file into memory without writing the file to the system or without creating the file in a directory

I have a feature in my program where I download all the files from a server into a directory, then take each downloaded file's path, name and data and store it in a Mongo database using Mongo GridFS. But isn't it redundant to download the file to disk and then read its data back? Is there a way in Node.js to download a file without writing it to disk or to a directory while still getting the file data, so that I can store those files in Mongo without them ever being created in my directory, for example by keeping them in memory and discarding them once everything is finished?
The reason I want to implement this is that I would no longer need to delete the downloaded files from my directory after storing them in Mongo.
Sample code for downloading a file:
var request = require('request');
var fs = require('fs');

var download = function (url, dest, callback) {
  request.get(url)
    .on('error', function (err) { console.log(err) })
    .pipe(fs.createWriteStream(dest))
    .on('close', callback);
};

final_list.forEach(function (str) {
  var filename = str.split('/').pop();
  console.log('Downloading ' + filename);
  download(str, filename, function () { console.log('Finished Downloading ' + filename) });
});
Code for storing in Mongo:
var tempfile = dest;
var origname = filename;
var writestream = gfs.createWriteStream({ filename: origname });
console.log('Finished Downloading ' + filename);
fs.createReadStream(tempfile)
  .on('end', function () {
  })
  .on('error', function () {
  })
  .pipe(writestream);
You could just directly pipe the request stream to the GridFS write stream:
const fileStorage = gfs.createWriteStream({ filename: dest });
request.get(url)
  .on('error', function (err) { console.log(err) })
  .pipe(fileStorage)
  .on('close', callback);
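A minimal sketch of how this could replace the original download helper, assuming request, gfs and final_list are set up as in the question:

// Streams the HTTP response straight into GridFS; nothing is written to disk.
var downloadToGridFS = function (url, filename, callback) {
  var writestream = gfs.createWriteStream({ filename: filename });
  request.get(url)
    .on('error', function (err) { console.log(err) })
    .pipe(writestream)
    .on('close', callback);
};

final_list.forEach(function (str) {
  var filename = str.split('/').pop();
  downloadToGridFS(str, filename, function () {
    console.log('Stored ' + filename + ' in GridFS');
  });
});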

Convert blob to PDF and save to S3 using AWS SDK

I am trying to convert a document file to PDF and upload it to S3 from my browser.
The API I'm using to convert the file returns a blob. How can I convert the blob to a PDF file and save it to S3?
Currently my code looks like this:
function addFile(file) {
  console.log("File!", file);
  var fileName = Date.now().toString();
  var albumPhotosKey = encodeURIComponent("files") + '/';
  var photoKey = albumPhotosKey + fileName;
  s3.upload({
    Key: photoKey,
    Body: file + ".pdf",
    ACL: 'public-read'
  }, function(err, data) {
    if (err) {
      console.log(err);
      return alert('There was an error uploading your photo: ', err.message);
    }
    alert('Successfully uploaded photo.');
  });
}
I tried converting the blob to a file using this:
var file = new File([blobData], "filename.pdf", {type: "application/pdf", lastModified: Date.now()});
and then passed the file to the addFile() function, but it creates a file whose content is [object File].pdf.
How can I create a PDF file with the blob contents?
To convert a base64 string to a PDF on the fly and write it to S3 as a file, I used this approach:
const AWS = require('aws-sdk');
const S3 = new AWS.S3();

const base64String = 'abcxyz';
let buffer = Buffer.from(base64String, 'base64');
let params = {
  Bucket: 'your-bucket-name',
  Key: 'user.pdf',
  Body: buffer
};

S3.upload(params, (err, data) => {
  if (err) {
    console.log(err);
  } else {
    console.log(data.Location);
  }
});
The other approach is to first create the PDF from that string on your local machine using the 'fs' module, then read that PDF back and upload it to S3. But this is a wasteful round trip and I personally don't recommend it; a sketch of it follows anyway.
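A minimal sketch of that file-based alternative, reusing the assumed bucket and key names from the snippet above (base64String is assumed to be in scope):

const fs = require('fs');
const AWS = require('aws-sdk');
const S3 = new AWS.S3();

// Write the decoded base64 string to a local PDF first...
const localPath = '/tmp/user.pdf';
fs.writeFileSync(localPath, Buffer.from(base64String, 'base64'));

// ...then read the file back and upload its contents to S3.
S3.upload({
  Bucket: 'your-bucket-name',
  Key: 'user.pdf',
  Body: fs.createReadStream(localPath)
}, (err, data) => {
  if (err) {
    console.log(err);
  } else {
    console.log(data.Location);
  }
});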
Hope it helps.

Upload HTML file to AWS S3 and then serving it instead of downloading

I am downloading a web page and writing it to a file named thisArticle.html, using the code below.
var file = fs.createWriteStream("thisArticle.html");
var request = http.get(req.body.url, response => response.pipe(file) );
After that I am trying to read the file and upload it to S3; here is the code I wrote:
fs.readFile('thisArticle.html', 'utf8', function (err, html) {
  if (err) {
    console.log(err + "");
    throw err;
  }
  var pathToSave = 'articles/ ' + req.body.title + '.html';
  var s3bucket = new AWS.S3({ params: { Bucket: 'all-articles' } });
  s3bucket.createBucket(function () {
    var params = {
      Key: pathToSave,
      Body: html,
      ACL: 'public-read'
    };
    s3bucket.upload(params, function (err, data) {
      fs.unlink("thisArticle.html", function (err) {
        console.error(err);
      });
      if (err) {
        console.log('ERROR MSG: ', err);
        res.status(500).send(err);
      } else {
        console.log(data.Location);
      }
      // ..., more code below
    });
  });
});
Now, I am facing two issues:
The file is uploaded, but with 0 bytes (empty).
When I upload the file manually via the S3 dashboard it uploads successfully, but when I load its URL in the browser it downloads the HTML file instead of serving it.
Am I missing something?
Set the ContentType to "text/html".
# boto3 (Python) example; the same ContentType parameter applies in the Node.js SDK
s3 = boto3.client("s3")
s3.put_object(
    Bucket=s3_bucket,
    Key=s3_key,
    Body=html_string,
    CacheControl="max-age=0,no-cache,no-store,must-revalidate",
    ContentType="text/html",
    ACL="public-read"
)
It looks like your upload function is deleting the file with fs.unlink before it gets uploaded; that's why it's going up as 0 bytes. A Node.js sketch of the fix is below.
Also, to make the bucket serve the HTML, you need to turn on static website hosting as described in the AWS S3 docs: http://docs.aws.amazon.com/AmazonS3/latest/UG/ConfiguringBucketWebsite.html
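A minimal sketch of the upload call with the ContentType set and fs.unlink moved so it runs only after the upload callback reports success, assuming the same s3bucket, pathToSave, html and res variables as in the question:

var params = {
  Key: pathToSave,
  Body: html,
  ACL: 'public-read',
  ContentType: 'text/html'  // so browsers render the page instead of downloading it
};

s3bucket.upload(params, function (err, data) {
  if (err) {
    console.log('ERROR MSG: ', err);
    return res.status(500).send(err);
  }
  console.log(data.Location);
  // Delete the local copy only after the upload has finished.
  fs.unlink('thisArticle.html', function (unlinkErr) {
    if (unlinkErr) console.error(unlinkErr);
  });
});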

S3 file upload file object using node js

I'm using Sailsjs 0.12.1, node.js 4.2.6
I want to upload a file from the front end (Angular.js) through an API, and from the back end upload that file to an AWS S3 bucket.
From the front end I'm sending the file to the API. In the back end I receive the file with its name, but while uploading it to S3 I get the error
Cannot determine length of [object Object]
I googled the error and found many links, but no luck.
Back-end code:
uploadPersonAvtar: function (req, res) {
  var zlib = require('zlib');
  var file = req.file('image');
  var mime = require('mime');

  data = {
    Bucket: 'bucket',
    Key: 'my_key',
    Key: file.name,
    Body: file,
    ContentType: mime.lookup(file.name)
  };

  // Upload the stream
  var s3obj = new AWS.S3(S3options);
  s3obj.upload(data, function (err, data) {
    if (err) console.log("An error occurred", err);
    console.log("Uploaded the file at", data);
  })
}
Is my approach correct?
If yes, what am I doing wrong?
I want to know how to use the file object to upload the file.
I can create a read stream, but I don't have the file path; when I'm creating the file object I'm getting the error: path must be a string.
You could look at the following example about streaming data: Amazon S3: Uploading an arbitrarily sized stream (upload).
var fs = require('fs');
var zlib = require('zlib');

var body = fs.createReadStream('bigfile').pipe(zlib.createGzip());
var s3obj = new AWS.S3({ params: { Bucket: 'myBucket', Key: 'myKey' } });
s3obj.upload({ Body: body })
  .on('httpUploadProgress', function (evt) { console.log(evt); })
  .send(function (err, data) { console.log(err, data); });
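The error in the question suggests the SDK cannot determine the length of the Sails upstream object passed as Body. One way to adapt the streaming example to the question's controller is sketched below, assuming skipper's default disk adapter and the AWS/S3options setup from the question; the response helpers are illustrative:

uploadPersonAvtar: function (req, res) {
  var fs = require('fs');

  // Let skipper write the upload to its temp directory first,
  // then stream that file to S3 so the SDK knows its length.
  req.file('image').upload(function (err, uploadedFiles) {
    if (err) return res.serverError(err);
    if (uploadedFiles.length === 0) return res.badRequest('No file was uploaded');

    var uploaded = uploadedFiles[0];              // skipper exposes the temp path as .fd
    var body = fs.createReadStream(uploaded.fd);  // readable stream of the stored file

    var s3obj = new AWS.S3(S3options);
    s3obj.upload({
      Bucket: 'bucket',
      Key: uploaded.filename,
      Body: body
    }, function (err, data) {
      if (err) return res.serverError(err);
      return res.ok(data);
    });
  });
}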
