using multer to post files into Digital Ocean Space - javascript

Trying to post local files to a Digital Ocean Space.
My post request body is:
[ 'http://localhost:8090/d/3534352009.png',
  'http://localhost:8090/d/3534352009-600x600.png' ]
These files are located locally, on the same machine where this API is running.
In the JS file I have this config:
// Change bucket property to your Space name
const upload = multer({
  storage: multerS3({
    s3: s3,
    bucket: 'xxxx',
    acl: 'public-read',
    key: function (request, file, cb) {
      console.log(file);
      cb(null, file.originalname);
    }
  })
}).array('upload', 2);
And a route to post:
app.post('/upload', function (request, response, next) {
  upload(request, response, function (error) {
    if (error) {
      console.log(error);
      return response.redirect("/error");
    }
    console.log('File uploaded successfully.');
    response.redirect("/success");
  });
});
I can't figure out how to post these two images from an object into the Digital Ocean Space.
There are plenty of tutorials on how a browser user can post multiple images from a form, but I have the files locally and want to post them via script.

It can be done using the AWS SDK's putObject API. This works for me:
const aws = require('aws-sdk');
const fs = require('fs');

const spacesEndpoint = new aws.Endpoint('sgp1.digitaloceanspaces.com');
const spaces = new aws.S3({
  endpoint: spacesEndpoint,
  accessKeyId: 'spaces_key',
  secretAccessKey: 'spaces_secret'
});

fs.readFile('file_path', function (err, data) {
  if (err) { throw err; }
  var params = { Bucket: 'bucket_name', Key: 'file_name', Body: data };
  spaces.putObject(params, function (err, data) {
    if (err) {
      console.log(err);
    } else {
      console.log("Successfully uploaded data to DO Spaces");
    }
  });
});
Include this code in an API if you want to send the file path as the request body.
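If the request body is the array of local paths from the question, a minimal sketch of such an API (the /upload-local route, the express.json() body parsing, and the path.basename keys are illustrative assumptions; the fs and spaces objects are reused from the snippet above):
const path = require('path');
const express = require('express');
const app = express();
app.use(express.json()); // assumes the client sends a JSON array of paths

app.post('/upload-local', function (req, res) {
  // req.body: [ '/local/path/one.png', '/local/path/two.png' ]
  const uploads = req.body.map(function (filePath) {
    return new Promise(function (resolve, reject) {
      fs.readFile(filePath, function (err, data) {
        if (err) return reject(err);
        const params = { Bucket: 'bucket_name', Key: path.basename(filePath), Body: data };
        spaces.putObject(params, function (err, result) {
          if (err) return reject(err);
          resolve(result);
        });
      });
    });
  });
  Promise.all(uploads)
    .then(function () { res.send('Files uploaded successfully.'); })
    .catch(function (err) { res.status(500).send(err.message); });
});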

Related

How to upload file to Digital Ocean Spaces using Javascript

I am looking to use Digital Ocean Spaces (which seems to have an identical API to S3), and would like to try it by uploading a sample file. I am having lots of difficulty. Here's what I've done so far.
{'hi' : 'world'}
is the contents of a file hiworld.json that I would like to upload. I understand that I need to create an AWS v4 signature before I can make this request.
var aws4 = require('aws4')
var request = require('request')

var opts = {
  json: true,
  body: "{'hi':'world'}",
  host: '${myspace}.nyc3.digitaloceanspaces.com',
  path: '/hiworld.json'
}

aws4.sign(opts, { accessKeyId: '${SECRET}', secretAccessKey: '${SECRET}' })
Then I send the request:
request.put(opts, function (error, response) {
  if (error) {
    console.log(error);
  }
  console.log(response.body);
});
However, when I check my Digital Ocean Space, I see that my file was not created. I have noticed that if I change my PUT to a GET and try to access an existing file, I have no issues.
Here's what my headers look like:
headers: {
  Host: '${myspace}.nyc3.digitaloceanspaces.com',
  'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
  'Content-Length': 14,
  'X-Amz-Date': '20171008T175325Z',
  Authorization: 'AWS4-HMAC-SHA256 Credential=${mykey}/20171008/us-east-1//aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=475e691d4ddb81cca28eb0dcdc7c926359797d5e383e7bef70989656822accc0'
},
method: 'POST' }
As an alternative, using aws-sdk:
// 1. Importing the SDK
import AWS from 'aws-sdk';

// 2. Configuring the S3 instance for Digital Ocean Spaces
const spacesEndpoint = new AWS.Endpoint(
  `${REGION}.digitaloceanspaces.com`
);
const url = `https://${BUCKET}.${REGION}.digitaloceanspaces.com/${file.path}`;
const S3 = new AWS.S3({
  endpoint: spacesEndpoint,
  accessKeyId: ACCESS_KEY_ID,
  secretAccessKey: SECRET_ACCESS_KEY
});

// 3. Using .putObject() to make the PUT request; S3 signs the request
const params = { Body: file.stream, Bucket: BUCKET, Key: file.path };
S3.putObject(params)
  .on('build', request => {
    request.httpRequest.headers.Host = `https://${BUCKET}.${REGION}.digitaloceanspaces.com`;
    // Note: I am assigning the size to the file stream manually
    request.httpRequest.headers['Content-Length'] = file.size;
    request.httpRequest.headers['Content-Type'] = file.mimetype;
    request.httpRequest.headers['x-amz-acl'] = 'public-read';
  })
  .send((err, data) => {
    if (err) logger(err, err.stack);
    else logger(JSON.stringify(data, '', 2));
  });
var str = {
  'hi': 'world'
};
var c = JSON.stringify(str);

request(aws4.sign({
  uri: 'https://${space}.nyc3.digitaloceanspaces.com/newworlds.json',
  method: 'PUT',
  path: '/newworlds.json',
  headers: {
    "Cache-Control": "no-cache",
    "Content-Type": "application/x-www-form-urlencoded",
    "accept": "*/*",
    "host": "${space}.nyc3.digitaloceanspaces.com",
    "accept-encoding": "gzip, deflate",
    "content-length": c.length
  },
  body: c
}, { accessKeyId: '${secret}', secretAccessKey: '${secret}' }), function (err, res) {
  if (err) {
    console.log(err);
  } else {
    console.log(res);
  }
});
This gave me a successful PUT. (Note the explicit 'method': 'PUT': the failing request in the question was being signed as a POST, as its logged headers show.)
It can be done using multer and the AWS SDK. It worked for me:
const aws = require('aws-sdk');
const multer = require('multer');
const express = require('express');
const multerS3 = require('multer-s3');

const app = express();

const spacesEndpoint = new aws.Endpoint('sgp1.digitaloceanspaces.com');
const spaces = new aws.S3({
  endpoint: spacesEndpoint,
  accessKeyId: 'your_access_key_from_API',
  secretAccessKey: 'your_secret_key'
});

const upload = multer({
  storage: multerS3({
    s3: spaces,
    bucket: 'bucket-name',
    acl: 'public-read',
    key: function (request, file, cb) {
      console.log(file);
      cb(null, file.originalname);
    }
  })
}).array('upload', 1);
Now you can also call this from an API route like this:
app.post('/upload', function (request, response, next) {
  upload(request, response, function (error) {
    if (error) {
      console.log(error);
    }
    console.log('File uploaded successfully.');
  });
});
The HTML would look like this:
<form method="post" enctype="multipart/form-data" action="/upload">
  <label for="file">Upload a file</label>
  <input type="file" name="upload">
  <input type="submit" class="button">
</form>
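To hit the same endpoint from a script rather than a browser form (which is what the original question asked for), a minimal sketch using the form-data package; the server URL and file name here are illustrative assumptions:
const fs = require('fs');
const FormData = require('form-data'); // npm install form-data

const form = new FormData();
// The field name must match the one passed to .array('upload', ...)
form.append('upload', fs.createReadStream('./3534352009.png'));

form.submit('http://localhost:8090/upload', function (err, res) {
  if (err) return console.log(err);
  console.log('Server responded with status', res.statusCode);
});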

Upload HTML file to AWS S3 and then serving it instead of downloading

I am downloading a web page and then writing it to a file named thisArticle.html, using the code below.
var file = fs.createWriteStream("thisArticle.html");
var request = http.get(req.body.url, response => response.pipe(file));
After that I am trying to read the file and upload it to S3; here is the code that I wrote:
fs.readFile('thisArticle.html', 'utf8', function (err, html) {
  if (err) {
    console.log(err + "");
    throw err;
  }
  var pathToSave = 'articles/ ' + req.body.title + '.html';
  var s3bucket = new AWS.S3({ params: { Bucket: 'all-articles' } });
  s3bucket.createBucket(function () {
    var params = {
      Key: pathToSave,
      Body: html,
      ACL: 'public-read'
    };
    s3bucket.upload(params, function (err, data) {
      fs.unlink("thisArticle.html", function (err) {
        console.error(err);
      });
      if (err) {
        console.log('ERROR MSG: ', err);
        res.status(500).send(err);
      } else {
        console.log(data.Location);
      }
      // ..., more code below
    });
  });
});
Now, I am facing two issues:
1. The file is uploading, but with 0 bytes (empty).
2. When I upload manually via the S3 dashboard it uploads successfully, but when I load the URL in the browser it downloads the HTML file instead of serving it.
Any guidance if I am missing something?
Set the ContentType to "text/html":
s3 = boto3.client("s3")
s3.put_object(
    Bucket=s3_bucket,
    Key=s3_key,
    Body=html_string,
    CacheControl="max-age=0,no-cache,no-store,must-revalidate",
    ContentType="text/html",
    ACL="public-read"
)
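The same fix applies with the Node.js aws-sdk used in the question; a minimal sketch reusing the question's params and upload call:
var params = {
  Key: pathToSave,
  Body: html,
  ACL: 'public-read',
  ContentType: 'text/html', // tells browsers to render instead of download
  CacheControl: 'max-age=0,no-cache,no-store,must-revalidate'
};
s3bucket.upload(params, function (err, data) {
  if (err) return console.log('ERROR MSG: ', err);
  console.log(data.Location);
});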
It looks like your upload function is deleting the file with fs.unlink before it gets uploaded; that's why it's going up as 0 bytes.
Also, to make the bucket serve the HTML, you need to turn on static website hosting as described in the AWS S3 docs: http://docs.aws.amazon.com/AmazonS3/latest/UG/ConfiguringBucketWebsite.html
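A minimal sketch of that reordering, deleting the local copy only after the upload callback reports success:
s3bucket.upload(params, function (err, data) {
  if (err) {
    console.log('ERROR MSG: ', err);
    return res.status(500).send(err);
  }
  console.log(data.Location);
  // Only remove the local file once S3 has confirmed the upload
  fs.unlink("thisArticle.html", function (unlinkErr) {
    if (unlinkErr) console.error(unlinkErr);
  });
});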

How can we upload multiple blobs to Azure using Node.js

I am trying to upload 6 images to Azure Blob Storage from a single endpoint, which I get from a registration form. The code shows how to upload a single blob, but I need to upload multiple blobs at the same time. How can I do it?
Here is my code:
app.post('/upload', function (req, res) {
  //var dirname = require('path').dirname(__dirname);
  //var dirname1 = require('path').dirname(dirname);
  var filename = req.files[0].filename;
  var path = req.files[0].path;
  var type = req.files[0].mimetype;
  var options = {
    contentType: type,
    metadata: { fileName: filename }
  };
  blobSvc.createBlockBlobFromLocalFile(containerName, filename, path, options, function (error, result, response) {
    if (error != null) {
      console.log('Azure Full Error: ', error);
    } else {
      console.log(result);
      console.log(response);
      var user = new User();
      user.name = req.body.name;
      user.picture = 'https://yourblob.blob.core.windows.net/profile/' + result.name;
      user.save(function (err) {
        if (err) {
          return res.json(err.message);
        } else {
          return res.json({ User: user });
        }
      });
    }
  });
});
The Azure Storage SDK for Node is based on the RESTful Azure Storage APIs, and upload functionality is implemented via Put Blob.
There is no RESTful API or SDK function that directly uploads multiple independent blobs to Azure at once.
You can implement this functionality yourself by uploading the files in a loop, as sketched below.
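A minimal sketch of that loop (not from the original answer), reusing blobSvc, containerName, and req.files from the question, with the per-user bookkeeping omitted:
var uploads = req.files.map(function (file) {
  return new Promise(function (resolve, reject) {
    var options = {
      contentType: file.mimetype,
      metadata: { fileName: file.filename }
    };
    blobSvc.createBlockBlobFromLocalFile(containerName, file.filename, file.path, options,
      function (error, result) {
        if (error) return reject(error);
        resolve(result);
      });
  });
});

Promise.all(uploads)
  .then(function (results) { console.log('Uploaded', results.length, 'blobs'); })
  .catch(function (error) { console.log('Azure Full Error: ', error); });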

S3 file upload file object using node js

I'm using Sails.js 0.12.1 and Node.js 4.2.6.
I want to upload a file from the front end (Angular.js) through an API, and from the back end I want to upload the file to an AWS S3 bucket.
From the front end I'm sending the file to the API. In the back end I'm getting the file with the name, but while uploading the file to S3 I'm getting the error:
Cannot determine length of [object Object]
I googled the error and found many links, but had no luck.
Back-end
uploadPersonAvtar: function (req, res) {
  var zlib = require('zlib');
  var file = req.file('image');
  var mime = require('mime');
  data = {
    Bucket: 'bucket',
    Key: 'my_key',
    Key: file.name,
    Body: file,
    ContentType: mime.lookup(file.name)
  };
  // Upload the stream
  var s3obj = new AWS.S3(S3options);
  s3obj.upload(data, function (err, data) {
    if (err) console.log("An error occurred", err);
    console.log("Uploaded the file at", data);
  });
}
Is my approach correct? If yes, what am I doing wrong?
I want to know how to use the file object to upload the file.
I can create a read stream, but I don't have the file path; when I'm creating the file object I'm getting the error: path must be a string.
You could look at the following example about streaming data: Amazon S3: Uploading an arbitrarily sized stream (upload).
var fs = require('fs');
var zlib = require('zlib');

var body = fs.createReadStream('bigfile').pipe(zlib.createGzip());
var s3obj = new AWS.S3({ params: { Bucket: 'myBucket', Key: 'myKey' } });
s3obj.upload({ Body: body })
  .on('httpUploadProgress', function (evt) { console.log(evt); })
  .send(function (err, data) { console.log(err, data); });
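In Sails, req.file('image') returns a skipper upstream rather than a plain buffer or readable stream, which fits the "Cannot determine length" error. A minimal sketch (assuming skipper's default disk adapter) that writes the file to a temp location first, then streams the resulting path (.fd) to S3:
uploadPersonAvtar: function (req, res) {
  var fs = require('fs');
  var mime = require('mime');

  req.file('image').upload(function (err, uploadedFiles) {
    if (err) return res.serverError(err);
    if (uploadedFiles.length === 0) return res.badRequest('No file was uploaded');

    var file = uploadedFiles[0];
    var s3obj = new AWS.S3(S3options);
    s3obj.upload({
      Bucket: 'bucket',
      Key: file.filename,
      Body: fs.createReadStream(file.fd), // .fd is the temp file's path on disk
      ContentType: mime.lookup(file.filename)
    }, function (err, data) {
      if (err) return res.serverError(err);
      res.json({ location: data.Location });
    });
  });
}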

How do I delete an object on AWS S3 using JavaScript?

I want to delete a file from Amazon S3 using JavaScript. I have already uploaded the file using JavaScript. How can I delete it?
You can use the deleteObject method from the S3 JS SDK:
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials-ehl.json');

var s3 = new AWS.S3();
var params = { Bucket: 'your bucket', Key: 'your object' };

s3.deleteObject(params, function (err, data) {
  if (err) console.log(err, err.stack); // error
  else console.log(); // deleted
});
Be aware that S3 never tells you whether the object actually existed: deleteObject succeeds even if the key was already gone.
You have to check before or after with getObject, headObject, waitFor, etc., as in the sketch below.
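A minimal sketch of such a check using the SDK's objectNotExists waiter, reusing the params from above:
s3.deleteObject(params, function (err) {
  if (err) return console.log(err, err.stack);
  // Poll S3 until it reports the key is gone
  s3.waitFor('objectNotExists', params, function (err) {
    if (err) console.log(err, err.stack);
    else console.log('object confirmed deleted');
  });
});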
You can use a construction like this:
var params = {
  Bucket: 'yourBucketName',
  Key: 'fileName'
  /*
    where the value for 'Key' equals 'pathName1/pathName2/.../pathNameN/fileName.ext'
    - the full path name to your file without '/' at the beginning
  */
};
s3.deleteObject(params, function (err, data) {
  if (err) console.log(err, err.stack); // an error occurred
  else console.log(data); // successful response
});
And don't forget to wrap it in a Promise.
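A minimal sketch of the promise form, using the SDK's built-in .promise():
s3.deleteObject(params).promise()
  .then(function (data) { console.log(data); })             // successful response
  .catch(function (err) { console.log(err, err.stack); });  // an error occurred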
Before deleting the file, you have to check: 1) whether the file is in the bucket, because if the file is not in the bucket, deleteObject does not throw any error; and 2) the CORS configuration of the bucket. Using the headObject API gives the file's status in the bucket.
AWS.config.update({
  accessKeyId: "*****",
  secretAccessKey: "****",
  region: region,
  version: "****"
});
const s3 = new AWS.S3();
const params = {
  Bucket: s3BucketName,
  Key: "filename" // if in any sub folder -> path/of/the/folder.ext
};

// await is only valid inside an async function
async function deleteIfExists() {
  try {
    await s3.headObject(params).promise();
    console.log("File Found in S3");
    try {
      await s3.deleteObject(params).promise();
      console.log("file deleted Successfully");
    } catch (err) {
      console.log("ERROR in file Deleting : " + JSON.stringify(err));
    }
  } catch (err) {
    console.log("File not Found ERROR : " + err.code);
  }
}
As params are constant, it's best to declare them with const. If the file is not found in S3, headObject throws the error NotFound : null.
If you want to apply any operations on the bucket, you have to change the CORS configuration of the respective bucket in AWS. To change permissions: Bucket -> Permissions -> CORS Configuration, and add this code:
<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>*</AllowedOrigin>
    <AllowedMethod>PUT</AllowedMethod>
    <AllowedMethod>POST</AllowedMethod>
    <AllowedMethod>DELETE</AllowedMethod>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>HEAD</AllowedMethod>
    <AllowedHeader>*</AllowedHeader>
  </CORSRule>
</CORSConfiguration>
For more information about CORS configuration: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
You can use the deleteObjects API to delete multiple objects at once, instead of calling the API for each key. This saves time and network bandwidth.
You can do the following:
var deleteParam = {
  Bucket: 'bucket-name',
  Delete: {
    Objects: [
      { Key: 'a.txt' },
      { Key: 'b.txt' },
      { Key: 'c.txt' }
    ]
  }
};
s3.deleteObjects(deleteParam, function (err, data) {
  if (err) console.log(err, err.stack);
  else console.log('delete', data);
});
For reference, see https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#deleteObjects-property
You can follow this GitHub gist: https://gist.github.com/jeonghwan-kim/9597478.
delete-aws-s3.js:
var aws = require('aws-sdk');
var BUCKET = 'node-sdk-sample-7271';

aws.config.loadFromPath(require('path').join(__dirname, './aws-config.json'));
var s3 = new aws.S3();

var params = {
  Bucket: BUCKET,
  Delete: { // required
    Objects: [ // required
      { Key: 'foo.jpg' }, // required
      { Key: 'sample-image--10.jpg' }
    ]
  }
};
s3.deleteObjects(params, function (err, data) {
  if (err) console.log(err, err.stack); // an error occurred
  else console.log(data); // successful response
});
Very straightforward.
First, create an instance of S3 and configure it with credentials:
const S3 = require('aws-sdk').S3;
const s3 = new S3({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  region: process.env.AWS_REGION
});
Afterward, follow the docs:
var params = {
  Bucket: "ExampleBucket",
  Key: "HappyFace.jpg"
};
s3.deleteObject(params, function (err, data) {
  if (err) console.log(err, err.stack); // an error occurred
  else console.log(data); // successful response
  /*
    data = {
    }
  */
});
