AWS S3 multipart / uploadPart silent fail from browser JS SDK - javascript

I'm trying a multipart S3 upload from the browser with the JS SDK. I have no trouble with createMultipartUpload, but I get no data back from uploadPart. I can't call completeMultipartUpload because I don't get any ETags back. I get only the $response part of the object, which indicates a 200 status and that all the parameters I passed were defined and of the proper data types. I can't see any of the parts in my bucket, although I don't know if they're going to a special "parts" place that I can't access.
Here's my code:
const createParams = {
  Bucket,
  Key: `${uuid()}.${getExtension(file.type)}`,
  ContentType: file.type,
  ContentDisposition: 'attachment'
};

return s3.createMultipartUpload(createParams).promise()
  .then(result => {
    console.log(result);
    console.log('chunking...');
    let chunkArr = chunker(file);
    // Promise.map is Bluebird's concurrent map
    let chunkMap = Promise.map(chunkArr, (chunk, index) => {
      const chunkParams = {
        Body: chunk,
        Bucket: result.Bucket,
        Key: result.Key,
        PartNumber: index + 1, // part numbers are 1-based
        UploadId: result.UploadId,
        ContentLength: chunk.size
      };
      console.log(chunkParams);
      return s3.uploadPart(chunkParams).promise();
    });
    return Promise.all(chunkMap);
  })
  .then(result => {
    console.log(result);
    return Promise.resolve(true);
    // let stopParams = {
    //
    // }
    // return s3.completeMultipartUpload(stopParams).promise();
  })
  .catch(err => {
    throw err; // rethrow for callers; the Promise.reject after this was unreachable
  });
The s3 instance looks like this:
import AWS from 'aws-sdk';

AWS.config.setPromisesDependency(Promise);

const s3 = new AWS.S3({
  apiVersion: '2006-03-01',
  accessKeyId: credentials.credentials.AccessKeyId,
  secretAccessKey: credentials.credentials.SecretAccessKey,
  sessionToken: credentials.credentials.SessionToken,
  sslEnabled: true,
  s3ForcePathStyle: true,
  httpOptions: {
    xhrAsync: true,
    xhrWithCredentials: true
  }
});
The chunker function looks like this:
const chunkFile = (file) => {
  console.log(typeof file);
  const fileSize = file.size;
  const chunkSize = 5242881; // bytes; just over the 5 MiB minimum part size
  let offset = 0;
  let chunkArr = [];

  const chunkReaderBlock = (_offset, _file) => {
    console.log(_offset);
    if (_offset >= fileSize) {
      console.log("Done reading file");
      return chunkArr;
    }
    let blob = _file.slice(_offset, chunkSize + _offset);
    console.log(blob);
    console.log(typeof blob);
    chunkArr.push(blob);
    return chunkReaderBlock(chunkSize + _offset, _file);
  };

  return chunkReaderBlock(offset, file);
};
The response object I'm getting back looks like this:
(2) [{…}, {…}]
  0: {$response: Response}
  1:
    $response: Response
      cfId: undefined
      data: {$response: Response}
      error: null
      extendedRequestId: undefined
      httpResponse: HttpResponse
        body: Uint8Array []
        headers: {}
        statusCode: 200
        statusMessage: "OK"
        stream: EventEmitter {_events: {…}, _maxListeners: undefined, statusCode: 200, headers: {…}}
        streaming: false
        _abortCallback: ƒ callNextListener(err)
      maxRedirects: 10
      maxRetries: 3
      redirectCount: 0
      request: Request {domain: undefined, service: f… s.constructor, operation: "uploadPart", params: {…}, httpRequest: HttpRequest, …}
      retryCount: 0
  length: 2
Any ideas? This is in React and my test file is 9.xx MB. I also tried with callbacks, and uploading one part at a time, and got the same thing.

In a cross-origin context, you'd need this in your bucket's CORS configuration:
<ExposeHeader>ETag</ExposeHeader>
"ExposeHeader — Identifies the response headers ... that customers will be able to access from their applications (for example, from a JavaScript XMLHttpRequest object)."
https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
To clarify what's going on here, CORS isn't an access restriction mechanism -- it's a mechanism for giving the browser permission to do something that it otherwise assumes might not be something the user would want to happen. It tells the browser to give JavaScript permission to do and see things that would not otherwise be allowed.
From the Mozilla CORS documentation:
By default, only the 6 simple response headers are exposed:
Cache-Control Content-Language Content-Type Expires Last-Modified Pragma
If you want clients to be able to access other headers, you have to list them using the Access-Control-Expose-Headers header.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers
In S3, the way you set the Access-Control-Expose-Headers response header is by configuring <ExposeHeaders> (above). Otherwise, JavaScript can't see them.
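For illustration, a complete CORS rule might look like the sketch below (the AllowedOrigin value is a placeholder; adjust the origins, methods, and headers to your app):

<CORSConfiguration>
  <CORSRule>
    <AllowedOrigin>https://app.example.com</AllowedOrigin>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>PUT</AllowedMethod>
    <AllowedMethod>POST</AllowedMethod>
    <AllowedHeader>*</AllowedHeader>
    <ExposeHeader>ETag</ExposeHeader>
  </CORSRule>
</CORSConfiguration>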
I can't see any of the parts in my bucket, although I don't know if they're going to a special "parts" place that I can't access.
They are. Use listMultipartUploads to find abandoned uploads, and abortMultipartUpload to delete partial uploads and free the storage allocated to the parts you uploaded. Otherwise, uploads you never complete will linger indefinitely and you'll be billed for storage of the parts. You can also create a bucket lifecycle rule to dispose of them automatically after so many days -- almost always a good idea.
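Once ETag is exposed and visible to the SDK, the commented-out completion step above can be filled in. A minimal sketch, assuming result is the createMultipartUpload result and parts is the ordered array of uploadPart results from the code above:

const completeParams = {
  Bucket: result.Bucket,
  Key: result.Key,
  UploadId: result.UploadId,
  MultipartUpload: {
    // parts must be listed in ascending PartNumber order
    Parts: parts.map((part, index) => ({
      ETag: part.ETag,
      PartNumber: index + 1
    }))
  }
};
return s3.completeMultipartUpload(completeParams).promise();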

Related

API Gateway doesn't receive headers from AWS Lambda

I am using the AWS Lambda service to create a serverless function (triggered by API Gateway) that has to do the following:
Get the data from the DynamoDB table
Create a .docx based on that data
Return a .docx document (so that it automatically downloads when the function is triggered).
I managed to accomplish the first two tasks successfully, but no matter what I do, it returns a base64 string instead of a document. When I check the Network tab, I always get content-type: application/json in the Response, despite the fact that I specify the headers in the return of my Lambda function. Is there something I need to configure in my API Gateway to make it work? Or is there an issue with my code?
Update: the headers are now coming through and the document download is triggered successfully. But when I try to open it, I get an error: "Word found unreadable content". I opened the file in a text editor and its content is the base64 string instead of what I am passing to it. What can be causing this issue?
const AWS = require('aws-sdk');
const { encode } = require("js-base64");
const { Document, Packer, Paragraph, TextRun } = require("docx");

const dynamoDb = new AWS.DynamoDB.DocumentClient();

exports.handler = async (event) => {
  const params = {
    TableName: 'table-name',
    Key: {
      'id': 'item-key',
    },
  };
  const dynamoDbResult = await dynamoDb.get(params).promise();
  const data = dynamoDbResult.Item;

  const doc = new Document({
    sections: [
      {
        properties: {},
        children: [
          new Paragraph({
            children: [
              new TextRun(data.projectName),
              new TextRun({
                text: data.clientEmail1,
                bold: true
              }),
            ]
          })
        ]
      }
    ]
  });

  const buffer = await Packer.toBuffer(doc);
  return {
    statusCode: 200,
    headers: {
      'Content-Type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
      'Content-Disposition': 'attachment; filename="your_file_name.docx"'
    },
    body: encode(buffer),
    isBase64Encoded: true
  };
};
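A hedged note on the update: two things could produce exactly these symptoms. First, js-base64's encode() treats its input as a string, so passing a Node Buffer encodes a UTF-8 stringification rather than the raw bytes (js-base64 provides fromUint8Array for binary; plain buffer.toString('base64') also works). Second, for an API Gateway REST API to decode an isBase64Encoded response back to binary, the API's binaryMediaTypes must include the response's content type (or */*); otherwise the client receives the base64 text itself. A sketch of the return under those assumptions:

// assumes the REST API has binaryMediaTypes configured (e.g. "*/*")
const buffer = await Packer.toBuffer(doc);
return {
  statusCode: 200,
  headers: {
    'Content-Type': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    'Content-Disposition': 'attachment; filename="your_file_name.docx"'
  },
  body: buffer.toString('base64'), // base64 of the raw bytes, not of a stringified buffer
  isBase64Encoded: true
};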

Cannot get the contact attributes of Amazon Connect by aws-sdk Javascript

I am using call recording on Amazon Connect.
I am trying to get the contact attributes of Amazon Connect by using the metadata of the .wav file on S3 where the conversation was recorded.
This is my Lambda function.
Object.defineProperty(exports, "__esModule", { value: true });
const AWS = require("aws-sdk");
const connect = new AWS.Connect();
const s3 = new AWS.S3();

exports.handler = async (event, context) => {
  await Promise.all(event.Records.map(async (record) => {
    // use the record being mapped over, not event.Records[0]
    const bucket = record.s3.bucket.name;
    const key = decodeURIComponent(record.s3.object.key.replace(/\+/g, ' '));
    var params = {
      Bucket: bucket,
      Key: key,
    };
    const metadata = await s3.headObject(params).promise();
    console.log(metadata);
    const contactid = metadata.Metadata['contact-id'];
    const instanceid = metadata.Metadata['organization-id'];
    params = {
      InitialContactId: contactid,
      InstanceId: instanceid,
    };
    console.log(params);
    const connectdata = await connect.getContactAttributes(params).promise();
    console.log(connectdata);
  }));
};
This is the headObject result for the .wav file (I've hidden my personal information):
{
  AcceptRanges: 'bytes',
  LastModified: 2021-09-01TXX:XX:XX.000Z,
  ContentLength: 809644,
  ETag: '"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"',
  ContentType: 'audio/wav',
  ServerSideEncryption: 'aws:kms',
  Metadata: {
    'contact-id': 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX',
    'aws-account-id': 'XXXXXXXXXXXX',
    'organization-id': 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
  },
  SSEKMSKeyId: 'arn:aws:kms:ap-northeast-1:XXXXXXXXXXXX:key/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
}
However, when I called Connect's getContactAttributes method via the aws-sdk, there were no values in the result, even though the parameters I passed were definitely populated.
console.log(params)
{
  InitialContactId: 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX',
  InstanceId: 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
}
console.log(connectdata)
{ Attributes: {} }
I want to know what { Attributes: {} } means. Is there something wrong with the arguments to getContactAttributes, or with how I'm logging the output?
Or can't I get the contact attributes from the metadata of the .wav file in the first place?
I'm a beginner, so there may be many mistakes, but I would appreciate any advice.
Thanks.
This problem has been self-solved.
The connect.getContactAttributes method seems to return only the values of Attributes set in the contact flow; I had misunderstood it as returning the JSON itself sent from the contact flow.
I found that the Attributes values are set by adding key-value pairs in the "Set contact attributes" block of the Amazon Connect contact flow.
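For illustration (greeting is a hypothetical attribute name): if the flow's "Set contact attributes" block sets greeting = hello, the same call then returns it:

const connectdata = await connect.getContactAttributes({
  InitialContactId: contactid,
  InstanceId: instanceid,
}).promise();
console.log(connectdata); // { Attributes: { greeting: 'hello' } }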

Sometimes data is not getting written in the AWS S3 bucket by Lambda

I'm facing a freaking issue with my Lambda code, written in TypeScript, that creates an S3 bucket and writes an array of JSON data into it. This Lambda is triggered by messages arriving in an SQS queue; sometimes many messages arrive all at once.
When there is just one message, my Lambda works fine: it first creates the S3 bucket and then writes the array of JSON into it. However, when the messages grow to, say, 10 at a time, Lambda creates the bucket but fails to write the contents into it; as a result I just get an empty JSON like {}.
I'm not sure whether it's due to the number of messages, since every message has to do the same task (creating the same bucket if it doesn't already exist and writing similar contents into it), or whether it has something to do with the CacheControl property shown below.
Below is my code snippet:
exports.createBucketAndUploadToS3 = async (s3Client, bucket, prefix, contents) => {
  const params = {
    Bucket: bucket,
    Key: `${prefix}/data.json`,
    Body: JSON.stringify(contents),
    ContentType: 'application/json; charset=utf-8',
    CacheControl: 'max-age=60'
  };
  await s3Client.createBucket({ Bucket: bucket }).promise();
  return await s3Client.putObject(params).promise();
};
I would propose checking whether the bucket exists before calling createBucket; trying to create an already-existing bucket often throws an exception. You can do this using the following code:
const checkBucketExists = async bucket => {
  const s3 = new AWS.S3();
  const options = {
    Bucket: bucket,
  };
  try {
    await s3.headBucket(options).promise();
    return true;
  } catch (error) {
    if (error.statusCode === 404) {
      return false;
    }
    throw error;
  }
};

// in your code
let isBucketExisting = await checkBucketExists(bucket);
if (!isBucketExisting) {
  // only create the bucket when it does not exist yet
  await s3Client.createBucket({ Bucket: bucket }).promise();
}
return await s3Client.putObject(params).promise();
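Note that with many concurrent invocations, a check-then-create can still race: two invocations can both see "not existing" and both call createBucket. An alternative sketch that simply tolerates the error instead (using the S3 error codes BucketAlreadyOwnedByYou / BucketAlreadyExists):

try {
  await s3Client.createBucket({ Bucket: bucket }).promise();
} catch (error) {
  // another invocation may have created the bucket in the meantime
  if (error.code !== 'BucketAlreadyOwnedByYou' && error.code !== 'BucketAlreadyExists') {
    throw error;
  }
}
return await s3Client.putObject(params).promise();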

aws4 credentials not accepted when sent via pactjs

I am attempting to verify my pact.json that has been generated by my consumer. However, for verification I need to include AWS4 credentials in order to get a response from my provider. I am attempting to do this using customProviderHeaders. I am using the library aws4 (https://github.com/mhart/aws4) to generate the token. Below is my code:
const aws4 = require('aws4');
const path = require('path');
import { before, beforeEach, describe, it } from 'mocha';

const {
  Verifier
} = require('../../../node_modules/@pact-foundation/pact');

function getToken() {
  const opts: any = {
    method: 'GET',
    region: 'us-east-2',
    service: 'execute-api',
    path: '/qa/api/',
    host: '123456789.execute-api.us-east-2.amazonaws.com',
    headers: {
      'Content-Type': 'application/x-www-form-urlencoded'
    }
  };
  aws4.sign(opts, {accessKeyId: '$AWSACCESSKEY', secretAccessKey: '$AWSSECRETKEY'});
  return opts.headers;
}
describe('Pact Verification', () => {
  it('should validate the watchlist expectations', () => {
    let headers = getToken();
    let authToken = headers.Authorization;
    let date = headers['X-Amz-Date'];
    let opts = {
      provider: 'DealerBlock',
      providerBaseUrl: 'https://3ua1cprd53.execute-api.us-east-2.amazonaws.com',
      pactUrls: [path.resolve(process.cwd(), 'src/test/pact/path_to_my_json')],
      customProviderHeaders: [`Authorization: ${authToken}`, `X-Amz-Date: ${date}`]
    };
    return new Verifier().verifyProvider(opts)
      .then(output => {
        console.log('STARTED');
        console.log(opts.pactUrls);
        console.log('Pact Verification Complete');
        console.log(output);
      });
  });
});
The function getToken() generates a new token, and I then grab the token and date and insert them into my request using the custom provider headers.
I see the following:
INFO: Replacing header 'Authorization: ' with 'Authorization: AWS4-HMAC-SHA256 Credential=AKIAJ5FTCODVMSUTEST/2018908/us-east-2/execute-api/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=ceea9aac0303769da58357cb37cb849cb0bbfc13ff0a25cea977385368531349'
INFO: Replacing header 'X-Amz-Date: ' with 'X-Amz-Date: 20180528T184202Z'
However I get the following error:
Actual: {"message":"The request signature we calculated does not match the signature you provided.
Check your AWS Secret Access Key and signing method. Consult the service documentation for details."}
Am I using customProviderHeaders in the correct manner? Or does anyone have any suggestions as to what I should do differently? I am able to send a request using the same credentials via Postman, so I'm not sure what's going on here.
Thanks!
It looks OK to me.
Could it be that you're not interpolating the variables (which also appear not to be defined anywhere) in the following statement:
aws4.sign(opts, {accessKeyId: '$AWSACCESSKEY', secretAccessKey: '$AWSSECRETKEY'});
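For example (a sketch; the environment variable names are placeholders), reading real values instead of the literal strings:

aws4.sign(opts, {
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,        // a real key, not the literal '$AWSACCESSKEY'
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
});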
I was able to get this working when I passed a 'Content-Type': 'application/x-www-form-urlencoded' header via customProviderHeaders.
Even though this header was listed in my consumer-generated JSON contract, the pact provider did not seem to see it.
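That is, something like this in the verifier options (a sketch based on the code above):

customProviderHeaders: [
  `Authorization: ${authToken}`,
  `X-Amz-Date: ${date}`,
  'Content-Type: application/x-www-form-urlencoded' // ensure the signed header is actually sent
]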

Javascript - How to know the type of the response in Fetch API?

How to know the type of the response in Fetch API?
In XMLHttpRequest, there's the responseType property, which indicates the type of the returned response's body (json, text, blob, etc.). In the Fetch API response, even though there are useful methods to parse the body (json(), text(), blob(), etc.), I still haven't found any property like XMLHttpRequest's responseType to indicate what the type of the response is.
I think you can check the headers of the response for the content-type as below:
response.headers.get("content-type")
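For example (a minimal sketch; url is a placeholder), you can branch on that header before parsing:

const response = await fetch(url);
const contentType = response.headers.get('content-type') || '';
const data = contentType.includes('application/json')
  ? await response.json()
  : await response.text();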
I think orangespark is right. The content-type response header should be used.
If the content-type header is missing or invalid, it is OK to stop further processing of the response:
When extract a MIME type returns failure or a MIME type whose essence is incorrect for a given format, treat this as a fatal error. Existing web platform features have not always followed this pattern, which has been a major source of security vulnerabilities in those features over the years. In contrast, a MIME type’s parameters can typically be safely ignored.
https://fetch.spec.whatwg.org/#content-type-header
Since there are many valid media types, some libs lazily probe the Content-Type header, e.g. response.headers.get("content-type").includes('json') to test for JSON before response.json() is called.
There are 1500+ media types registered with the IANA which can be set as the Content-Type of a response.
If content-type is not set (or someone forgot to set it), a default of text/plain might be set by the server, which would 'break' your response handling.
If content-type is not one of the accepted media types, or the body does not match the content-type ...
Request:
accept: text/html, image/avif;q=0.9, image/apng;q=0.8
Response (bad):
content-type: application/json
... you could make your app more failsafe and clone() the response before reading the body, so you can still return JSON, e.g. an error object carrying the text of the response that could not be parsed as JSON.
const response2 = response.clone();
let data;
try {
  data = await response.json(); // may throw SyntaxError: Unexpected token ... in JSON
} catch (e) {
  const text = await response2.text(); // the clone's body can still be read as a fallback
  data = {
    error: e.message,
    invalidJson: text
  };
}
You could use response.blob() as an alternative for response.text(), response.json(),...
The returned Blob has a property blob.type that holds the content-type header value.
Here are some samples of how to handle those blobs:
1. Blob SVG: image/svg+xml content
2. Blob HTML: text/html content
3. Blob JSON: application/json content
4. Blob JSON: application/octet-stream content
5. Blob Bad: ???/??? content
(async () => {
  const c = document.body;

  const imageBlob = () => {
    const svg = `<svg viewBox="0 0 200 200" width="80" height="80" xmlns="http://www.w3.org/2000/svg"><path fill="#FF0066" d="M31.5,-16.8C35.9,3.2,31,19.6,16.3,32.8C1.5,46,-23.2,55.9,-36.6,46.9C-50.1,38,-52.3,10.1,-44.3,-14.8C-36.4,-39.8,-18.2,-61.9,-2.3,-61.2C13.6,-60.4,27.2,-36.8,31.5,-16.8Z" transform="translate(100 100)" /></svg>`;
    return new Blob([svg], { type: 'image/svg+xml' });
  };

  const htmlBlob = () => {
    return new Blob(['<span>HTML <b style="color: red">blob</b></span>'], { type: 'text/html' });
  };

  const jsonBlob = type => {
    const json = { a: 1, b: { c: 'val' } };
    return new Blob([JSON.stringify(json)], { type });
  };

  // Blob instances you might get from: await response.blob()
  // blob.type is set from the 'Content-Type' header of its response
  const blobs = [
    imageBlob(),                          // 1
    htmlBlob(),                           // 2
    jsonBlob('application/json'),         // 3
    jsonBlob('application/octet-stream'), // 4
    jsonBlob('???/???')                   // 5
  ];

  for (const [i, b] of Object.entries(blobs)) {
    c.append(Object.assign(document.createElement('h3'), {
      textContent: `${1 + parseInt(i)}. ${b.type}:` // b.type holds the 'Content-Type' value
    }));
    if (b.type.startsWith('image/')) { // 1
      c.append(Object.assign(document.createElement('img'), {
        src: URL.createObjectURL(b)
      }));
    } else if (b.type.startsWith('text/html')) { // 2
      const text = await b.text();
      c.append(Object.assign(document.createElement('div'), {
        innerHTML: text
      }));
    } else if (b.type.startsWith('application/json')) { // 3
      c.append(Object.assign(document.createElement('pre'), {
        textContent: JSON.stringify(JSON.parse(await b.text()), null, ' ')
      }));
    } else if (b.type.startsWith('application/octet-stream')) { // 4
      c.append(Object.assign(document.createElement('a'), {
        textContent: 'download json',
        href: URL.createObjectURL(b),
        download: 'data.json'
      }));
    } else { // 5
      // .... create a clone Response from the blob
      // -> response2 = new Response(await response1.blob())
      const response2 = new Response(b);
      const b2 = await response2.blob(); // .json(), .text(), ...
      const text2 = await b2.text();
      console.log('blob2', text2, b2.type);
      // the two blobs carry the same content
      const text = await b.text();
      console.log('blob1', text, b.type);
      console.log('blob2 === blob1', text === text2); // true
    }
  }

  c.append(Object.assign(document.createElement('h3'), {
    innerHTML: ` <br> `
  }));
})();
