S3 node module, attempting to upload file gives ENETUNREACH error - javascript

I'm trying to upload a file to my Amazon S3 bucket, but I'm getting an ENETUNREACH error. I do have permissions to upload/delete files for my buckets, and I have also edited the CORS configuration to allow POST/GET requests from all origins. I'm thinking it might be a faulty key (or keys) that I received from someone. What is a good way to test whether the keys I have are valid, if that happens to be the issue?
Code below:
var s3 = require('s3');

/* Create a client for uploading or deleting files */
var client = s3.createClient({
  maxAsyncS3: 20, // this is the default
  s3RetryCount: 3, // this is the default
  s3RetryDelay: 1000, // this is the default
  multipartUploadThreshold: 20971520, // this is the default (20 MB)
  multipartUploadSize: 15728640, // this is the default (15 MB)
  s3Options: {
    accessKeyId: 'xxxxxxxx',
    secretAccessKey: 'xxxxxxxx', // note: the option name is "secretAccessKey" (capital K), not "secretAccesskey"
    region: 'xxxxxxxx'
  },
});
exports.uploadFile = function(fileName, bucket) {
  console.log('Uploading File: ' + fileName + '\nBucket: ' + bucket);
  var params = {
    localFile: fileName,
    s3Params: {
      Bucket: bucket,
      Key: 'testfile',
    },
  };
  var uploader = client.uploadFile(params);
  uploader.on('error', function(err) {
    console.error("unable to upload:", err.stack);
  });
  uploader.on('progress', function() {
    console.log("progress", uploader.progressMd5Amount, uploader.progressAmount, uploader.progressTotal);
  });
  uploader.on('end', function() {
    console.log("done uploading");
  });
};
Console log when trying to upload a small .txt file:
[Console Log screenshot]

Disabling IIS services fixed my error.
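As for checking whether the keys themselves are valid: a quick sanity check is to make any cheap authenticated call with them and see whether it succeeds. A minimal sketch, assuming the plain aws-sdk (which the s3 module wraps internally) and a reasonably recent v2 SDK; sts.getCallerIdentity needs no special IAM permissions, so it only fails if the credentials (or the network) are bad. The key values are placeholders:
var AWS = require('aws-sdk');

// Hypothetical credential check: succeeds for any valid key pair.
var sts = new AWS.STS({
  accessKeyId: 'xxxxxxxx',
  secretAccessKey: 'xxxxxxxx',
  region: 'xxxxxxxx'
});

sts.getCallerIdentity({}, function(err, data) {
  if (err) console.error('Keys look invalid (or the endpoint is unreachable):', err.code);
  else console.log('Keys are valid for account:', data.Account);
});
If this also fails with ENETUNREACH, the problem is the network path (proxy, firewall, or, as in this case, IIS), not the keys.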

Related

Linode Storage With NodeJs

I am new to Linode. I see Linode provides cloud storage, just like AWS S3, and I want to use it with my Node.js app. I cannot find any SDK for it the way there is for S3. Can anybody tell me how to upload a file from Node.js to Linode object storage in JavaScript?
I'm new to Linode too. I got my free $100 two-month trial and figured I'd try the bucket feature.
I have used AWS S3 in the past, and this is pretty much identical as far as the SDK goes. The only hurdle was configuring the endpoint: with AWS S3 you specify the region, with Linode you specify the endpoint instead. The list of endpoints is here:
https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
Since you didn't mention whether you wanted an example for the server (Node.js) or the browser, I'll go with the one I have: Node.js (server side).
Steps
I used node stable (currently 18.7). I set up package.json to start the index.js script (e.g. "scripts": {"start": "node index.js"}).
Install aws-sdk
npm i aws-sdk
Code for index.js
const S3 = require('aws-sdk/clients/s3')
const fs = require('fs')

const config = {
  endpoint: 'https://us-southeast-1.linodeobjects.com/',
  accessKeyId: 'BLEEPBLEEPBLEEP',
  secretAccessKey: 'BLOOPBLOOPBLOOP',
}

var s3 = new S3(config)

function listObjects() {
  console.debug("List objects")
  const bucketParams = {
    Bucket: 'vol1'
  }
  s3.listObjects(bucketParams, (err, data) => {
    if (err) {
      console.error("Error ", err)
    } else {
      console.info("Objects vol1 ", data)
    }
  })
}

function uploadFile() {
  const fileStream = fs.createReadStream('./testfile.txt')
  var params = { Bucket: 'vol1', Key: 'testfile', Body: fileStream }
  s3.upload(params, function(err, data) {
    if (err) {
      console.error("Error uploading test file", err)
    } else {
      console.info("Test file uploaded ", data)
      listObjects()
    }
  })
}

// Start
uploadFile()
Run "npm start".
Output I get:
Test file uploaded {
  ETag: '"0ea76c859582d95d2c2c0caf28e6d747"',
  Location: 'https://vol1.us-southeast-1.linodeobjects.com/testfile',
  key: 'testfile',
  Key: 'testfile',
  Bucket: 'vol1'
}
List objects
Objects vol1 {
  IsTruncated: false,
  Marker: '',
  Contents: [
    {
      Key: 'Inflation isnt transitory.mp4',
      LastModified: 2023-01-10T15:38:42.045Z,
      ETag: '"4a77d408defc08c15fe42ad4e63fefbd"',
      ChecksumAlgorithm: [],
      Size: 58355708,
      StorageClass: 'STANDARD',
      Owner: [Object]
    },
    {
      Key: 'testfile',
      LastModified: 2023-02-13T20:28:01.178Z,
      ETag: '"0ea76c859582d95d2c2c0caf28e6d747"',
      ChecksumAlgorithm: [],
      Size: 18,
      StorageClass: 'STANDARD',
      Owner: [Object]
    }
  ],
  Name: 'vol1',
  Prefix: '',
  MaxKeys: 1000,
  CommonPrefixes: []
}
Adjust the config with your own creds/data center. Hope this helps.
Note: if you want to upload files larger than 1 GB, you'll want to use the multipart upload feature. It's a bit more complex, but this should get you started; any AWS S3 code example should do, and there are plenty out there. A rough sketch follows.
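For example, here is a minimal sketch assuming the same aws-sdk v2 client as above: s3.upload already performs multipart uploads under the hood, and its optional second argument lets you tune part size and concurrency. The file name and sizes below are placeholders:
function uploadLargeFile() {
  // Hypothetical large file; stream it rather than reading it into memory.
  const fileStream = fs.createReadStream('./bigfile.bin')
  const params = { Bucket: 'vol1', Key: 'bigfile.bin', Body: fileStream }
  // partSize: bytes per part (minimum 5 MB); queueSize: parts uploaded in parallel.
  const options = { partSize: 64 * 1024 * 1024, queueSize: 4 }
  s3.upload(params, options, function(err, data) {
    if (err) console.error("Error uploading large file", err)
    else console.info("Large file uploaded ", data.Location)
  })
}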

Javascript || AWS S3 SDK & croppie file upload errors

I am trying to upload the cropped results of croppie to an S3 bucket. I am currently getting a blank error when I successfully crop and then try to upload the cropped results.
I have followed Amazon docs including setting up the S3 bucket, identity pools, and configuring my CORS.
I believe the error has something to do with how croppie is packaging the cropped results. I have included my app.js file (where I handle the upload) and the code where the addPhoto function is being called. Resp is the response from croppie.
The expected outcome is that I can successfully crop a photo and then upload it to my S3 bucket.
$('.crop').on('click', function (ev) {
  $uploadCrop.croppie('result', {
    type: 'canvas',
    size: 'original'
  }).then(function (resp) {
    Swal.fire({
      imageUrl: resp,
      showCancelButton: true,
      confirmButtonText: "Upload",
      reverseButtons: true,
      showCloseButton: true
    }).then((result) => {
      if (result.value) {
        addPhoto(resp);
      }
    });
  });
});
app.js
var albumBucketName = "colorsort";
var bucketRegion = "xxx";
var IdentityPoolId = "xxx";

AWS.config.update({
  region: bucketRegion,
  credentials: new AWS.CognitoIdentityCredentials({
    IdentityPoolId: IdentityPoolId
  })
});

var s3 = new AWS.S3({
  apiVersion: "2006-03-01",
  params: { Bucket: albumBucketName }
});

function addPhoto(resp) {
  var file = resp;
  var fileName = file.name;
  console.log(resp.type);
  var photoKey = fileName;
  // Use S3 ManagedUpload class as it supports multipart uploads
  var upload = new AWS.S3.ManagedUpload({
    params: {
      Bucket: albumBucketName,
      Key: photoKey,
      Body: file,
      ACL: "public-read"
    }
  });
  var promise = upload.promise();
  promise.then(
    function(data) {
      alert("Successfully uploaded photo.");
    },
    function(err) {
      return alert("There was an error uploading your photo: ", err.message);
    }
  );
}
The solution I found involved adding the following snippet to my CORS config, as well as changing the croppie result type from 'canvas' to 'base64'.
<AllowedHeader>*</AllowedHeader>
Useful resources: Upload missing ETag, Uploading base64 image to Amazon with Node.js
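If you go the base64 route, keep in mind that the croppie result is then a data URL string rather than a File, so file.name and resp.type in addPhoto won't exist. One common approach (a sketch, not taken from the original post) is to convert the data URL into a Blob and upload that as the Body, with a key you choose yourself:
// Hypothetical helper: turn a "data:image/png;base64,..." data URL into a Blob
// so S3 stores binary image data instead of a base64 string.
function dataURLToBlob(dataURL) {
  var parts = dataURL.split(',');
  var contentType = parts[0].match(/data:(.*);base64/)[1];
  var byteString = atob(parts[1]);
  var bytes = new Uint8Array(byteString.length);
  for (var i = 0; i < byteString.length; i++) {
    bytes[i] = byteString.charCodeAt(i);
  }
  return new Blob([bytes], { type: contentType });
}

// In the crop handler you would then call something like:
//   addPhoto(dataURLToBlob(resp));
// and pass your own photoKey (e.g. 'cropped-' + Date.now() + '.png') instead of file.name.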

Access Denied Exception when Using AWS Elastic Transcoder on website

I am getting a 403 error when trying to use AWS Elastic Transcoder on my test site and I can't seem to figure out where the problem lies. I have checked my IAM policies, identity pools, etc. to no avail.
The error:
GET https://elastictranscoder.us-east-1.amazonaws.com/2012-09-25/pipelines/xxxxxxxxxxxxx-xxxxxx 403 (Forbidden)
index.html:xxx AccessDeniedException: User: arn:aws:sts::xxxxxxxxxxxx:assumed-role/Cognito_Unauth_Role/CognitoIdentityCredentials is not authorized to perform: elastictranscoder:CreateJob on resource: arn:aws:elastictranscoder:us-east-1:xxxxxxxxxxxxx:pipeline/xxxxxxxxxxxxx-xxxxxx
AWS.config.region = 'xx-xxx-x'; // Region
AWS.config.credentials = new AWS.CognitoIdentityCredentials({
  IdentityPoolId: "xx-xxx-x:xxxxx-xxxxx-xxxx-xxxx-xxxxxxxxx",
});

var elastictranscoder = new AWS.ElasticTranscoder();

var button = document.getElementById('button');
button.addEventListener('click', function() {
  var params = {
    PipelineId: 'xxxxxxxxxxxxxx', /* required */
    Input: {
      Key: 'xxxxxxxxxx/xxxxxxx.xxx'
    },
    OutputKeyPrefix: 'xxxxxxx/',
    Outputs: [{
      Key: 'xxxx.xxx',
      PresetId: 'xxxxxxxxxxxx',
    }],
  };
  elastictranscoder.createJob(params, function(err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log(data); // successful response
  });
});
Solution! It turns out I needed to include the AWS access key and secret key in the options object of the Elastic Transcoder constructor.
var elastictranscoder = new AWS.ElasticTranscoder({
  accessKeyId: 'xxxxxxxxxxxxxx',
  secretAccessKey: 'xxxxxxxxxxxxxx'
});

Node.js script works once, then fails subsequently

I need a Node.js script that does the following:
1. Triggers when an image is added to a specified S3 bucket.
2. Creates a thumbnail of that image (360x203 pixels).
3. Saves a copy of that thumbnail inside a separate S3 folder.
4. Uploads the thumbnail to a specified FTP server, six (6) times, using a "FILENAME-X" naming convention.
The code works just as expected at first: the sample event pulls the image, creates a thumbnail, saves it to the other S3 bucket, then uploads it to the FTP server.
The problem: It works for the test file HappyFace.jpg once, but then each subsequent test fails. Also, I tried doing it with a different file, but was unsuccessful.
Also: If I could get some help writing a loop to name the different files that get uploaded, it would be very much appreciated. I usually code in PHP, so it'd probably take me longer than I hope to write.
Note: I removed my FTP credentials for privacy.
Problem Code Snippet:
function upload(contentType, data, next) {
  // Upload test file to FTP server
  c.append(data, 'testing.jpg', function(err) {
    console.log("CONNECTION SUCCESS!");
    if (err) throw err;
    c.end();
  });
  // Connect to ftp
  c.connect({
    host: "",
    port: 21, // defaults to 21
    user: "", // defaults to "anonymous"
    password: "" // defaults to "#anonymous"
  });
  // S3 Bucket Upload Function Goes Here
}
Full Code:
// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var util = require('util');
var Client = require('ftp');
var fs = require('fs');
var gm = require('gm')
  .subClass({ imageMagick: true }); // Enable ImageMagick integration.

// get reference to FTP client
var c = new Client();

// get reference to S3 client
var s3 = new AWS.S3();

exports.handler = function(event, context) {
  // Read options from the event.
  console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));

  // Get source bucket
  var srcBucket = event.Records[0].s3.bucket.name;

  // Get source object key
  // Object key may have spaces or unicode non-ASCII characters.
  var srcKey =
    decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
  var url = 'http://' + srcBucket + ".s3.amazonaws.com/" + srcKey;

  // Set destination bucket
  var dstBucket = srcBucket + "-thumbs";

  // Set destination object key
  var dstKey = "resized-" + srcKey;

  // Infer the image type.
  var typeMatch = srcKey.match(/\.([^.]*)$/);
  if (!typeMatch) {
    console.error('unable to infer image type for key ' + srcKey);
    return;
  }
  var imageType = typeMatch[1];
  if (imageType != "jpg" && imageType != "png") {
    console.log('skipping non-image ' + srcKey);
    return;
  }

  // Download the image from S3, transform, and upload to a different S3 bucket.
  async.waterfall([
    function download(next) {
      // Download the image from S3 into a buffer.
      s3.getObject({
        Bucket: srcBucket,
        Key: srcKey
      },
      next);
    },
    function transform(response, next) {
      gm(response.Body).size(function(err, size) {
        // Transform the image buffer in memory.
        this.toBuffer(imageType, function(err, buffer) {
          if (err) {
            next(err);
          } else {
            next(null, response.ContentType, buffer);
          }
        });
      });
    },
    function upload(contentType, data, next) {
      // Upload test file to FTP server
      c.append(data, 'testing.jpg', function(err) {
        console.log("CONNECTION SUCCESS!");
        if (err) throw err;
        c.end();
      });
      // Connect to ftp
      c.connect({
        host: "",
        port: 21, // defaults to 21
        user: "", // defaults to "anonymous"
        password: "" // defaults to "#anonymous"
      });
      // Stream the thumb image to a different S3 bucket.
      s3.putObject({
        Bucket: dstBucket,
        Key: dstKey,
        Body: data,
        ContentType: contentType
      },
      next);
    }
  ], function (err) {
    if (err) {
      console.error(
        'Unable to resize ' + srcBucket + '/' + srcKey +
        ' and upload to ' + dstBucket + '/' + dstKey +
        ' due to an error: ' + err
      );
    } else {
      console.log(
        'Successfully resized ' + srcBucket + '/' + srcKey +
        ' and uploaded to ' + dstBucket + '/' + dstKey
      );
    }
    // context.done();
  });
};
The logs:
START RequestId: edc808c1-712b-11e5-aa8a-ed7c188ee86c Version: $LATEST
2015-10-12T21:55:20.481Z edc808c1-712b-11e5-aa8a-ed7c188ee86c Reading options from event: { Records: [ { eventVersion: '2.0', eventTime: '1970-01-01T00:00:00.000Z', requestParameters: { sourceIPAddress: '127.0.0.1' }, s3: { configurationId: 'testConfigRule', object: { eTag: '0123456789abcdef0123456789abcdef', sequencer: '0A1B2C3D4E5F678901', key: 'HappyFace.jpg', size: 1024 }, bucket: { arn: 'arn:aws:s3:::images', name: 'images', ownerIdentity: { principalId: 'EXAMPLE' } }, s3SchemaVersion: '1.0' }, responseElements: { 'x-amz-id-2': 'EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH', 'x-amz-request-id': 'EXAMPLE123456789' }, awsRegion: 'us-east-1', eventName: 'ObjectCreated:Put', userIdentity: { principalId: 'EXAMPLE' }, eventSource: 'aws:s3' } ] }
2015-10-12T21:55:22.411Z edc808c1-712b-11e5-aa8a-ed7c188ee86c Successfully resized images/HappyFace.jpg and uploaded to images-thumbs/resized-HappyFace.jpg
2015-10-12T21:55:23.432Z edc808c1-712b-11e5-aa8a-ed7c188ee86c CONNECTION SUCCESS!
END RequestId: edc808c1-712b-11e5-aa8a-ed7c188ee86c
REPORT RequestId: edc808c1-712b-11e5-aa8a-ed7c188ee86c Duration: 3003.76 ms Billed Duration: 3000 ms Memory Size: 128 MB Max Memory Used: 43 MB
Task timed out after 3.00 seconds
START RequestId: d347e7e3-712d-11e5-bfdf-05baa36d50fd Version: $LATEST
2015-10-12T22:08:55.910Z d347e7e3-712d-11e5-bfdf-05baa36d50fd Reading options from event: { Records: [ { eventVersion: '2.0', eventTime: '1970-01-01T00:00:00.000Z', requestParameters: { sourceIPAddress: '127.0.0.1' }, s3: { configurationId: 'testConfigRule', object: { eTag: '0123456789abcdef0123456789abcdef', sequencer: '0A1B2C3D4E5F678901', key: 'HappyFace.jpg', size: 1024 }, bucket: { arn: 'arn:aws:s3:::images', name: 'images', ownerIdentity: { principalId: 'EXAMPLE' } }, s3SchemaVersion: '1.0' }, responseElements: { 'x-amz-id-2': 'EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH', 'x-amz-request-id': 'EXAMPLE123456789' }, awsRegion: 'us-east-1', eventName: 'ObjectCreated:Put', userIdentity: { principalId: 'EXAMPLE' }, eventSource: 'aws:s3' } ] }
END RequestId: d347e7e3-712d-11e5-bfdf-05baa36d50fd
REPORT RequestId: d347e7e3-712d-11e5-bfdf-05baa36d50fd Duration: 3003.33 ms Billed Duration: 3000 ms Memory Size: 128 MB Max Memory Used: 17 MB
Task timed out after 3.00 seconds
The line:
var c = new Client();
is only going to get executed once; all calls to your handler() function will use the same instance of your FTP client.
If there could be multiple overlapping calls to handler()—and in an async world it sure seems likely—then the calls to the FTP client, including c.connect(…) and c.end() will be invoked multiple times against the same FTP client, which may already have an upload in progress, leading to a scenario like this:
Call to handler(). Upload begins.
Call to handler(). Second upload begins.
First upload completes and calls c.end().
Second upload is canceled.
The solution is to create a new FTP client instance for each upload or, if your FTP server has a problem with that (limits the number of client connections), you’ll need to serialize your uploads somehow. One way to do that, since you’re using the async library, would be to use async.queue.
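Here is a minimal sketch of the per-upload client approach, assuming the same ftp package and the waterfall structure from the question (the connection settings remain the redacted placeholders):
function upload(contentType, data, next) {
  var ftpClient = new Client(); // fresh client for this invocation only

  ftpClient.on('error', next);
  ftpClient.on('ready', function() {
    // Upload the thumbnail to the FTP server, then close this connection.
    ftpClient.append(data, 'testing.jpg', function(err) {
      ftpClient.end();
      if (err) return next(err);
      // Stream the thumb image to a different S3 bucket.
      s3.putObject({
        Bucket: dstBucket,
        Key: dstKey,
        Body: data,
        ContentType: contentType
      }, next);
    });
  });

  ftpClient.connect({
    host: "",
    port: 21,
    user: "",
    password: ""
  });
}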

How do I delete an object on AWS S3 using JavaScript?

I want to delete a file from Amazon S3 using JavaScript. I have already uploaded the file using JavaScript. How can I delete it?
You can use the deleteObject method from the S3 client:
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials-ehl.json');

var s3 = new AWS.S3();
var params = { Bucket: 'your bucket', Key: 'your object' };

s3.deleteObject(params, function(err, data) {
  if (err) console.log(err, err.stack); // error
  else console.log(); // deleted
});
Be aware that deleteObject does not report whether the object actually existed; S3 returns success either way once the object is gone.
You have to check for it before or after with getObject, headObject, waitFor, etc.
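For example, here is a minimal sketch (assuming the same aws-sdk v2 client) that confirms the delete by waiting until the key no longer exists:
// waitFor polls headObject until the object is gone (or the retry attempts run out).
s3.waitFor('objectNotExists', { Bucket: 'your bucket', Key: 'your object' }, function(err, data) {
  if (err) console.log(err, err.stack); // still present, or the check itself failed
  else console.log('object confirmed deleted');
});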
You can use a construction like this:
var params = {
  Bucket: 'yourBucketName',
  Key: 'fileName'
  /*
  where the value for 'Key' equals 'pathName1/pathName2/.../pathNameN/fileName.ext'
  - the full path name to your file, without '/' at the beginning
  */
};

s3.deleteObject(params, function(err, data) {
  if (err) console.log(err, err.stack); // an error occurred
  else console.log(data); // successful response
});
And don't forget to wrap it in a Promise.
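For instance, a minimal sketch of the promise form, assuming aws-sdk v2's .promise() helper:
s3.deleteObject(params).promise()
  .then(function(data) { console.log('deleted', data); }) // successful response
  .catch(function(err) { console.error(err, err.stack); }); // an error occurred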
Before deleting the file you have to check: 1) whether the file is actually in the bucket, because if the file is not there the deleteObject API does not throw any error, and 2) the CORS configuration of the bucket. The headObject API gives you the file's status in the bucket.
AWS.config.update({
  accessKeyId: "*****",
  secretAccessKey: "****",
  region: region,
  version: "****"
});
const s3 = new AWS.S3();

const params = {
  Bucket: s3BucketName,
  Key: "filename" // if in any sub folder -> path/of/the/folder.ext
}

try {
  await s3.headObject(params).promise()
  console.log("File Found in S3")
  try {
    await s3.deleteObject(params).promise()
    console.log("file deleted Successfully")
  } catch (err) {
    console.log("ERROR in file Deleting : " + JSON.stringify(err))
  }
} catch (err) {
  console.log("File not Found ERROR : " + err.code)
}
Since params is constant, it is best declared with const. If the file is not found in S3, the headObject call throws a NotFound error.
If you want to perform operations on the bucket, you have to change the CORS configuration of the respective bucket in AWS. To change it, go to Bucket -> Permissions -> CORS Configuration and add this code:
<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>*</AllowedOrigin>
    <AllowedMethod>PUT</AllowedMethod>
    <AllowedMethod>POST</AllowedMethod>
    <AllowedMethod>DELETE</AllowedMethod>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>HEAD</AllowedMethod>
    <AllowedHeader>*</AllowedHeader>
  </CORSRule>
</CORSConfiguration>
For more information about CORS configuration, see https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
You can use the deleteObjects API to delete multiple objects at once instead of calling the API for each key. It helps save time and network bandwidth.
You can do the following:
var deleteParam = {
  Bucket: 'bucket-name',
  Delete: {
    Objects: [
      { Key: 'a.txt' },
      { Key: 'b.txt' },
      { Key: 'c.txt' }
    ]
  }
};

s3.deleteObjects(deleteParam, function(err, data) {
  if (err) console.log(err, err.stack);
  else console.log('delete', data);
});
For reference see - https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#deleteObjects-property
You can follow this GitHub gist link https://gist.github.com/jeonghwan-kim/9597478.
delete-aws-s3.js:
var aws = require('aws-sdk');
var BUCKET = 'node-sdk-sample-7271';

aws.config.loadFromPath(require('path').join(__dirname, './aws-config.json'));
var s3 = new aws.S3();

var params = {
  Bucket: 'node-sdk-sample-7271',
  Delete: { // required
    Objects: [ // required
      {
        Key: 'foo.jpg' // required
      },
      {
        Key: 'sample-image--10.jpg'
      }
    ],
  },
};

s3.deleteObjects(params, function(err, data) {
  if (err) console.log(err, err.stack); // an error occurred
  else console.log(data); // successful response
});
Very straightforward.
First, create an S3 instance and configure it with credentials:
const S3 = require('aws-sdk').S3;
const s3 = new S3({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  region: process.env.AWS_REGION
});
Afterward, follow the docs:
var params = {
  Bucket: "ExampleBucket",
  Key: "HappyFace.jpg"
};

s3.deleteObject(params, function(err, data) {
  if (err) console.log(err, err.stack); // an error occurred
  else console.log(data); // successful response
  /*
  data = {
  }
  */
});
