I'm trying to upload files to my Amazon S3 bucket. S3 and Amazon are set up.
This is the error message from Amazon:
Conflicting query string parameters: acl, policy
The policy and signature are encoded with Node's crypto module:
var crypto=Npm.require("crypto");
I'm trying to build the POST request with Meteor's HTTP.post method. This could be wrong as well.
var BucketName="mybucket";
var AWSAccessKeyId="MY_ACCESS_KEY";
var AWSSecretKey="MY_SECRET_KEY";
//create policy
var POLICY_JSON={
"expiration": "2009-01-01T00:00:00Z",
"conditions": [
{"bucket": BucketName},
["starts-with", "$key", "uploads/"],
{"acl": 'public-read'},
["starts-with", "$Content-Type", ""],
["content-length-range", 0, 1048576],
]
}
var policyBase64=encodePolicy(POLICY_JSON);
//create signature
var SIGNATURE = encodeSignature(policyBase64,AWSSecretKey);
console.log('signature: ', SIGNATURE);
This is the POST request I'm using with Meteor:
//Send data----------
var options={
"params":{
"key":file.name,
'AWSAccessKeyId':AWSAccessKeyId,
'acl':'public-read',
'policy':policyBase64,
'signature':SIGNATURE,
'Content-Type':file.type,
'file':file,
"enctype":"multipart/form-data",
}
}
HTTP.call('POST','https://'+BucketName+'.s3.amazonaws.com/',options,function(error,result){
if(error){
console.log("and HTTP ERROR:",error);
}else{
console.log("result:",result);
}
});
And here I'm encoding the policy and the signature:
encodePolicy=function(jsonPolicy){
// stringify the policy, store it in a NodeJS Buffer object
var buffer=new Buffer(JSON.stringify(jsonPolicy));
// convert it to base64
var policy=buffer.toString("base64");
// replace "/" and "+" so that it is URL-safe.
return policy.replace(/\//g,"_").replace(/\+/g,"-");
}
encodeSignature=function(policy,secret){
var hmac=crypto.createHmac("sha256",secret);
hmac.update(policy);
return hmac.digest("hex");
}
I can't figure out what's going on. There might already be a problem with the POST method, or with the signing, because I don't know these methods too well. If someone could point me in the right direction on how to encode the policy or send the POST request to Amazon S3 properly, it would help a lot.
(I'd rather not use filepicker.io, because I don't want to force the client to sign up there as well.)
Thanks in advance!!!
For direct uploads to S3 you can use the slingshot package:
meteor add edgee:slingshot
On the server side declare your directive:
Slingshot.createDirective("myFileUploads", Slingshot.S3Storage, {
bucket: "mybucket",
allowedFileTypes: ["image/png", "image/jpeg", "image/gif"],
acl: "public-read",
authorize: function () {
//You can add user restrictions here
return true;
},
key: function (file) {
return file.name;
}
});
This directive will generate the policy and signature automatically.
And then just upload it like this:
var uploader = new Slingshot.Upload("myFileUploads");
uploader.send(document.getElementById('input').files[0], function (error, url) {
Meteor.users.update(Meteor.userId(), {$push: {"profile.files": url}});
});
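If you also want the same limits enforced on the client before the upload starts, the package provides Slingshot.fileRestrictions; here is a short sketch where the restrictions simply mirror the server-side directive above:
// Client side: mirror the server-side directive's restrictions so invalid
// files are rejected before any request is made. The name must match the
// directive declared on the server ("myFileUploads").
Slingshot.fileRestrictions("myFileUploads", {
  allowedFileTypes: ["image/png", "image/jpeg", "image/gif"],
  maxSize: 1 * 1024 * 1024 // 1 MB, an example limit; null means no size limit
});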
Why don't you use the aws-sdk package? It packs all the methods you need. For example, here's the basic call for adding a file to a bucket:
s3.putObject({
Bucket: ...,
ACL: ...,
Key: ...,
Metadata: ...,
ContentType: ...,
Body: ...,
}, function(err, data) {
...
});
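For illustration, a filled-in call might look like this (a sketch only; the bucket name, key, and body below are hypothetical, not taken from the question):
// Hypothetical values for illustration; s3 is an AWS.S3 instance.
s3.putObject({
  Bucket: 'mybucket',
  ACL: 'public-read',
  Key: 'uploads/' + file.name,
  ContentType: file.type,
  Body: fileBuffer // a Buffer, string, or readable stream with the file contents
}, function(err, data) {
  if (err) {
    console.error('Upload failed:', err);
  } else {
    console.log('Upload succeeded, ETag:', data.ETag);
  }
});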
Check out the S3 Meteor package. The readme has a very comprehensive walkthrough of how to get started.
The first thing is to add the package for S3 file upload.
For installation, add the AWS SDK smart package:
$ meteor add peerlibrary:aws-sdk
1. Create a directive upload.js and paste this code.
angular.module('techno')
.directive("fileupload", [function () {
return {
scope: {
fileupload: "="
},
link: function(scope,element, attributes){
$('.button-collapse').sideNav();
element.bind("change", function (event) {
scope.$apply(function () {
scope.fileupload = event.target.files[0];
});
})
}};
}]);
2. Get your access key and paste it into your fileUpload.js file.
AWS.config.update({
  accessKeyId: 'YOUR_ACCESS_KEY_ID',
  secretAccessKey: 'YOUR_SECRET_ACCESS_KEY'
});
AWS.config.region = 'us-east-1';
let bucket = new AWS.S3();
3. Now put this upload code in your fileUpload.js directive.
vm.upload = (Obj) =>{
vm.loadingButton = true;
let name = Obj.name;
let params = {
Bucket: 'technodheeraj',
Key: name,
ContentType: 'application/pdf',
Body: Obj,
ServerSideEncryption: 'AES256'
};
bucket.putObject(params, (err, data) => {
if (err) {
console.log('---err------->', err);
}
else {
vm.fileObject = {
userId: Meteor.userId(),
eventId: id,
fileName: name,
fileSize: Obj.size,
};
vm.call("saveFile", vm.fileObject, (error, result) => {
if (!error){
console.log('File saved successfully');
}
})
}
})
};
4. Now define the "saveFile" method with this code.
Meteor.methods({
  saveFile: function (file) {
    if (file) {
      return Files.insert(file);
    }
  }
});
5. In your HTML, paste this code.
<input type="file" name="file" fileupload="file">
<button type="button" class="btn btn-info " ng-click="vm.upload(file)"> Upload File</button>
I have a project that uses FilePond to upload files, and I need it to load a file from the server.
I already followed the docs but it doesn't work. FilePond gives the error "Error during load 400" and it doesn't even send the request to load the file from the server.
This is my JavaScript:
let pond = FilePond.create(value, {
files: [
{
// the server file reference
source: 'e958818e-92de-4953-960a-d8157467b766',
// set type to local to indicate an already uploaded file
options: {
type: 'local'
}
}
]
});
FilePond.setOptions({
labelFileProcessingError: (error) => {
return error.body;
},
server: {
headers: {
'#tokenSet.HeaderName' : '#tokenSet.RequestToken'
},
url: window.location.origin,
process: (fieldName, file, metadata, load, error, progress, abort) => {
// We ignore the metadata property and only send the file
fieldName = "File";
const formData = new FormData();
formData.append(fieldName, file, file.name);
const request = new XMLHttpRequest();
request.open('POST', '/UploadFileTemp/Process');
request.setRequestHeader('#tokenSet.HeaderName', '#tokenSet.RequestToken');
request.upload.onprogress = (e) => {
progress(e.lengthComputable, e.loaded, e.total);
};
request.onload = function () {
if (request.status >= 200 && request.status < 300) {
load(request.responseText);
}
else {
let errorMessageFromServer = request.responseText;
error('oh no');
}
};
request.send(formData);
},
revert: "/UploadFileTemp/revert/",
load: "/UploadFileTemp/load"
}
})
This is my controller:
public async Task<IActionResult> Load(string p_fileId)
{
//Code to get the files
//Return the file
Response.Headers.Add("Content-Disposition", cd.ToString());
Response.Headers.Add("X-Content-Type-Options", "nosniff");
return PhysicalFile(filePath, "text/plain");
}
NB: I already tested my controller via Postman and it works. I also checked the Content-Disposition header.
I'd advise setting all the options first and then setting the files property.
You're setting the files and then telling FilePond where to find them; it's probably already trying to load them but doesn't have an endpoint (yet).
Restructuring the code to look like this should do the trick.
let pond = FilePond.create(value, {
server: {
headers: {
'#tokenSet.HeaderName': '#tokenSet.RequestToken',
},
url: window.location.origin,
process: (fieldName, file, metadata, load, error, progress, abort) => {
// your processing method
},
revert: '/UploadFileTemp/revert',
load: '/UploadFileTemp/load',
},
files: [
{
// the server file reference
source: 'e958818e-92de-4953-960a-d8157467b766',
// set type to local to indicate an already uploaded file
options: {
type: 'local',
},
},
],
});
I'm having a problem with the Google Drive API.
I'm trying to upload an Excel file with this API, but it's not working. Even copying the code from the Google API documentation doesn't work.
Here is a sample of my code:
@Get('teste')
async teste(){
const keys = require(path.resolve('src', 'files', 'api', 'keys'))
const client = new google.auth.JWT(
keys.client_email,
null,
keys.private_key,
['https://www.googleapis.com/auth/drive.metadata.readonly']
)
client.authorize((err, tokens) =>{
if(err){
console.log(err)
return;
} else{
this.gdrun(client)
}
})
}
gdrun(client){
const drive = google.drive({version: 'v3', auth: client});
var fileMetadata = {
name: 'My Report',
mimeType: 'application/vnd.google-apps.spreadsheet'
};
var media = {
mimeType: 'application/vnd.ms-excel',
body: require(path.resolve('src', 'files', 'excel', 'solargroup.xlsx'))
};
drive.files.create({
resource: fileMetadata,
media: media,
fields: 'id'
}, function (err, file: any) {
if (err) {
// Handle error
console.error(err);
} else {
console.log('File Id:', file.id);
}
});
}
I received this error:
I believe your goal is as follows.
You want to upload a file (XLSX file) to Google Drive of the service account.
You want to achieve this using the service account with googleapis for Node.js.
From your script, I thought that you might want to upload an XLSX file as a Google Spreadsheet by converting it.
Modification points:
When you want to upload a file to Google Drive, in this case, please use the scope of https://www.googleapis.com/auth/drive instead of https://www.googleapis.com/auth/drive.metadata.readonly.
When you want to upload the XLSX file as an XLSX file, the mimeType is application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.
When you want to upload a file, body: require(path.resolve('src', 'files', 'excel', 'solargroup.xlsx')) cannot be used. In this case, please use body: fs.createReadStream(path.resolve('src', 'files', 'excel', 'solargroup.xlsx')). I thought that your error message might be due to this.
When you want to retrieve the file ID of the uploaded file, please modify file.id to file.data.id.
When the above points are reflected in your script, it becomes as follows.
Modified script:
From:
const client = new google.auth.JWT(
keys.client_email,
null,
keys.private_key,
['https://www.googleapis.com/auth/drive.metadata.readonly']
)
To:
const client = new google.auth.JWT(
keys.client_email,
null,
keys.private_key,
['https://www.googleapis.com/auth/drive'] // Modified
)
And also, please modify your gdrun() as follows.
gdrun(client){
const drive = google.drive({ version: "v3", auth: client });
var fileMetadata = {
name: "My Report",
mimeType: "application/vnd.google-apps.spreadsheet",
};
var media = {
mimeType: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", // Modified
body: fs.createReadStream(path.resolve('src', 'files', 'excel', 'solargroup.xlsx')), // Modified
};
drive.files.create(
{
resource: fileMetadata,
media: media,
fields: "id",
},
function (err, file) {
if (err) {
console.error(err);
} else {
console.log("File Id:", file.data.id); // Modified
}
}
);
}
In this case, please use const fs = require("fs").
Result:
When the above script is run, the following result is obtained.
File Id: ###fileId###
Note:
Your script uploads an XLSX file to the Google Drive of the service account as a Google Spreadsheet. In this case, you cannot directly see the uploaded file in your own Google Drive, because the service account's Drive is different from yours. When you want to see the uploaded file in your Google Drive, create a folder there and share it with the email of the service account, then upload the file to that shared folder. This way, you can see the uploaded file in your Google Drive with your browser. For this, please modify fileMetadata as follows.
var fileMetadata = {
name: "My Report",
mimeType: "application/vnd.google-apps.spreadsheet",
parents: ["### folderId ###"], // Please set the folder ID of the folder shared with the service account.
};
In the above script, the maximum file size is 5 MB. Please be careful about this. When you want to upload a file larger than 5 MB, please use a resumable upload. Ref
References:
Upload file data
Files: create
I'm trying to generate a presigned POST to give the browser privileges to upload/delete a specific file in a bucket, but it seems createPresignedPost is not generating some of the required fields. getSignedUrl works.
const signedUrl = await new Promise<PresignedPost>( (resolve, reject) => {
this.s3.createPresignedPost({
Bucket: this.env.config.s3.buckets.images,
Fields: { key },
Conditions: [
["content-length-range", 0, 10 * 1024 * 1024]
],
Expires: 3600,
}, (err, preSigned) => { if (err) { reject(err) } else { resolve(preSigned) }});
});
// This works, but doesn't allow the object to be deleted, and does not allow setting a maximum file size
//
// const rawUrl = new URL(await this.s3.getSignedUrlPromise('putObject', {
// Bucket: this.env.config.s3.buckets.images,
// Key: key,
// Expires: 3600,
// }));
//
// const signedUrl = {
// url: rawUrl.origin + rawUrl.pathname,
// fields: Object.fromEntries(Array.from(rawUrl.searchParams.entries()))
// };
The createPresignedPost generates:
{
url: 'https://s3.eu-west-3.amazonaws.com/xxx-images',
fields: {
key: 'incoming/ae83pfxu7kf4dfdv4hbvorsxq31hadtjcp97ehwt30ds5',
bucket: 'xxx-images',
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': 'xxx',
'X-Amz-Date': '20200509T145014Z',
Policy:
'eyJleHBpcmF0aW9uIjoiMjAyMC0wNS0wOVQxNTo1MDoxNFoiLCJjb25kaXRpb25zIjpbWyJjb250ZW50LWxlbmd0aC1yYW5nZSIsMCwxMDQ4NTc2MF0seyJrZXkiOiJpbmNvbWluZy9hZTgzcGZ4dTdrZjRkZmR2NGhidm9yc3hxMzFoYWR0amNwOTdlaHd0MzBkczUifSx7ImJ1Y2tldCI6InByZWZsaWdodGVtYWlsLWltYWdlcyJ9LHsiWC1BbXotQWxnb3JpdGhtIjoiQVdTNC1ITUFDLVNIQTI1NiJ9LHsiWC1BbXotQ3JlZGVudGlhbCI6IkFLSUE1RE5UN0lOWjJKTU5TQVhILzIwMjAwNTA5L2V1LXdlc3QtMy9zMy9hd3M0X3JlcXVlc3QifSx7IlgtQW16LURhdGUiOiIyMDIwMDUwOVQxNDUwMTRaIn1dfQ==',
'X-Amz-Signature': 'xxx' } }
Trying to PUT a file with those parameters gives:
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AuthorizationQueryParametersError</Code>
<Message>Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.</Message>
<RequestId>xxx</RequestId>
<HostId>xxx</HostId>
</Error>
The older API call generates the missing 'X-Amz-SignedHeaders' and 'X-Amz-Expires' parameters too.
Can anyone tell me what I'm doing wrong?
You should use POST instead of PUT since you are using createPresignedPost to generate the URL.
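For reference, the object returned by createPresignedPost (signedUrl in the question) is meant to be consumed by POSTing multipart form data that contains all of the returned fields plus the file. A minimal browser-side sketch, assuming file is a File taken from an <input type="file"> element:
// Append every field returned by createPresignedPost, then the file itself;
// the "file" field must come last in the form data.
const formData = new FormData();
Object.entries(signedUrl.fields).forEach(([name, value]) => {
  formData.append(name, value);
});
formData.append('file', file);

// POST (not PUT) the form to the presigned URL; S3 responds with 204 on success.
fetch(signedUrl.url, { method: 'POST', body: formData })
  .then((response) => {
    if (!response.ok) {
      throw new Error('Upload failed with status ' + response.status);
    }
    console.log('Upload succeeded');
  })
  .catch((err) => console.error(err));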
I have an application using Node and the AWS-SDK package. I am copying objects from one bucket to another using the copyObject method. I'm getting an error that says SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. Check your key and signing method.
I've been able to successfully run the code on my local machine and it copies the files from one bucket to another. The error occurs on our AWS server, which I deployed the application to. The full error is:
{ [SignatureDoesNotMatch: The request signature we calculated does not
match the signature you provided. Check your key and signing method.]
message: 'The request signature we calculated does not match the signature you provided. Check your key and signing method.',
code: 'SignatureDoesNotMatch',
region: null,
time: Mon Jul 11 2016 12:11:36 GMT-0400 (EDT),
requestId: <requestId>,
extendedRequestId: <extendedRequestId>,
cfId: undefined,
statusCode: 403,
retryable: false,
retryDelay: 66.48076744750142 }
Also, I'm able to perform the listObjects command. The error is only happening on copyObject.
So far, I've tried:
setting correctClockSkew to true
checking the server's time (same as my local computer)
checking the key/secret (loading from a config file and working locally)
checking the file names (there are no strange characters, only alphanumeric, '.', '-' and '/')
Here is the code causing the problem:
AWS.config.update({
accessKeyId: <accessKeyId>,
secretAccessKey: <secretAccessKey>,
correctClockSkew: true
});
var s3 = new AWS.S3();
var params = {
Bucket: <bucket>,
Prefix: <prefix>
};
s3.listObjects(params, function(err, data) {
if (data.Contents.length) {
async.each(data.Contents, function(file, cb) {
var file_name = file.Key.substr(file.Key.indexOf('/')+1);
var copy_params = {
Bucket: <bucket2>,
CopySource: <bucket> + '/' + file.Key,
Key: file_name,
ACL: 'public-read'
};
s3.copyObject(copy_params, function(copyErr, copyData){
if (copyErr) {
console.log('Error:', copyErr);
}
else {
cb();
}
});
}, function(err){
...
}
});
} else {
...
}
});
Not sure if you've found a solution to this or not, but this was an issue raised on GitHub, and the solution seems to be to simply URL-encode your CopySource parameter with encodeURI():
https://github.com/aws/aws-sdk-js/issues/1949
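Applied to the copy loop from the question, only the CopySource line needs to change; a sketch, keeping the question's placeholder bucket names:
var copy_params = {
  Bucket: <bucket2>,
  // URL-encode the source path so keys containing special characters are
  // signed exactly as they are sent, avoiding SignatureDoesNotMatch.
  CopySource: encodeURI(<bucket> + '/' + file.Key),
  Key: file_name,
  ACL: 'public-read'
};
s3.copyObject(copy_params, function(copyErr, copyData) {
  if (copyErr) {
    console.log('Error:', copyErr);
  } else {
    cb();
  }
});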
I can retrieve basic information using "listObjects" but would like to get each object's metadata without doing another request. At the moment I'm using:
var bucket = new AWS.S3({
params: {
Bucket: 'Bucketname',
Prefix: req.body.params.objectId + '/',
Delimiter: '/'
}
});
bucket.listObjects(function(err, data) {
if (err) {
console.log('Could not load objects from S3');
} else {
res.json(data);
}
});
This returns:
CommonPrefixes: Array[0]
Contents: Array[1]
Delimiter: "/"
IsTruncated: false
Marker: ""
MaxKeys: 1000
Name: "encore-storage"
Prefix: "Music/Demos/song.mp3"
But I would love to get the metadata, such as the "ContentType", without making another call for each object.
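For context, listObjects does not include per-object metadata, so the extra request being avoided here would be a headObject call per key. A minimal sketch of that per-object call (the bucket and key below reuse the example values from the listing above):
var s3 = new AWS.S3();

// headObject returns the ContentType and user metadata for a single key;
// this is the per-object request the question is trying to avoid.
s3.headObject({
  Bucket: 'Bucketname',
  Key: 'Music/Demos/song.mp3' // example key taken from the listing above
}, function(err, data) {
  if (err) {
    console.log('Could not load object metadata from S3');
  } else {
    console.log(data.ContentType, data.Metadata);
  }
});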