Is it possible to upload stream on amazon s3 from browser? - javascript

I want to capture webcam video stream, and directly stream it to S3 storage.
I've learned that you can upload via stream to s3:
https://aws.amazon.com/blogs/aws/amazon-s3-multipart-upload/
I've learned that you can upload via browser:
http://docs.aws.amazon.com/AmazonS3/latest/dev/HTTPPOSTExamples.html#HTTPPOSTExamplesFileUpload
But I'm still lost on how to actually do it.
I need an example of someone uploading a getUserMedia stream to S3 like the above.
Buffer, binary data, multipart upload, stream... this is all beyond my knowledge. Stuff I wish I knew, but don't even know where to learn.

Currently, you cannot simply pass the media stream to any S3 method to do the multipart upload automatically.
But still, there is an event called dataavailable which produces the chunks of video each given time interval. So we can subscribe to dataavailable and do the S3 Multipart Upload manually.
This approach brings some complications: say chunks of video are generated each 1 second, but we don't know how long does it take to upload the chunk to S3. E.g. the upload can take 3 times longer due to the connection speed. So we can get stuck trying to make multiple PUT requests at the same time.
The potential solution would be to upload the chunks one by one and don't start uploading the next chunk until the prev. one is uploaded.
Here is a snippet of how this can be handled using Rx.js and AWS SDK. Please see my comments.
// Configure the AWS SDK. For simplicity this example embeds an access key and
// secret directly — never ship long-lived credentials to the browser in
// production; use Cognito / STS temporary credentials instead.
// Fix: `region` is a top-level AWS.Config option, not a credentials property.
// Nested inside `credentials` it was ignored, leaving the SDK without a region.
AWS.config.update({
  region: "us-east-1",
  credentials: {
    accessKeyId: "YOUR_ACCESS_KEY",
    secretAccessKey: "YOUR_SECRET_KEY"
  }
});
const s3 = new AWS.S3();
const BUCKET_NAME = "video-uploads-123";
// Populated asynchronously once getUserMedia resolves; the MediaRecorder is
// created from it when the user clicks "start".
let videoStream;
// We want to see what the camera is recording, so attach the stream to a
// <video> element. Note: getUserMedia requires a secure context (HTTPS or
// localhost) and the user's permission.
navigator.mediaDevices
.getUserMedia({
audio: true,
video: { width: 1280, height: 720 }
})
.then(stream => {
console.log("Successfully received user media.");
const $mirrorVideo = document.querySelector("video#mirror");
$mirrorVideo.srcObject = stream;
// Save the stream so the MediaRecorder can be created from it later.
videoStream = stream;
})
.catch(error => console.error("navigator.getUserMedia error: ", error));
let mediaRecorder;
const $startButton = document.querySelector("button#start");
$startButton.onclick = () => {
// Build the MediaRecorder options, falling back through progressively less
// specific mime types until a supported one (or the browser default) is found.
// Snippet adapted from: https://github.com/webrtc/samples/blob/gh-pages/src/content/getusermedia/record/js/main.js
let options = { mimeType: "video/webm;codecs=vp9" };
if (!MediaRecorder.isTypeSupported(options.mimeType)) {
console.log(options.mimeType + " is not Supported");
options = { mimeType: "video/webm;codecs=vp8" };
if (!MediaRecorder.isTypeSupported(options.mimeType)) {
console.log(options.mimeType + " is not Supported");
options = { mimeType: "video/webm" };
if (!MediaRecorder.isTypeSupported(options.mimeType)) {
console.log(options.mimeType + " is not Supported");
options = { mimeType: "" };
}
}
}
try {
mediaRecorder = new MediaRecorder(videoStream, options);
} catch (e) {
// videoStream may still be undefined if getUserMedia has not resolved yet.
console.error("Exception while creating MediaRecorder: " + e);
return;
}
// Generate the object key to upload. For simplicity we use the current date.
const s3Key = `video-file-${new Date().toISOString()}.webm`;
const params = {
Bucket: BUCKET_NAME,
Key: s3Key
};
let uploadId;
// Everything is handled as one chain of Observable operators.
Rx.Observable
// First create the multipart upload and wait until it's created.
.fromPromise(s3.createMultipartUpload(params).promise())
.switchMap(data => {
// Save the uploadId as we'll need it to complete the multipart upload.
uploadId = data.UploadId;
// Emit a 'dataavailable' blob every 15 s. Every part except the last must
// be > 5 MB, so the timeslice/resolution must be tuned accordingly.
mediaRecorder.start(15000);
// Then track all 'dataavailable' events. Each event brings a blob (binary data) with a part of video.
return Rx.Observable.fromEvent(mediaRecorder, "dataavailable");
})
// Track the 'dataavailable' events until the 'stop' event is fired.
// MediaRecorder emits "stop" only after it has emitted all pending
// "dataavailable" events, so no data is lost.
// Docs: https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/stop
.takeUntil(Rx.Observable.fromEvent(mediaRecorder, "stop"))
.map((event, index) => {
// Show how much binary data we have recorded.
const $bytesRecorded = document.querySelector("span#bytesRecorded");
$bytesRecorded.textContent =
parseInt($bytesRecorded.textContent) + event.data.size; // Use frameworks in prod. This is just an example.
// Pass the blob down together with its 1-based S3 part number.
return { blob: event.data, partNumber: index + 1 };
})
// concatMap serializes the uploads: the next part is not started until the
// previous one finishes, so a slow connection cannot pile up parallel PUTs.
// http://reactivex.io/rxjs/class/es6/Observable.js~Observable.html#instance-method-concatMap
.concatMap(({ blob, partNumber }) => {
return (
s3
.uploadPart({
Body: blob,
Bucket: BUCKET_NAME,
Key: s3Key,
PartNumber: partNumber,
UploadId: uploadId,
ContentLength: blob.size
})
.promise()
// Save the ETag as we'll need it to complete the multipart upload.
// The bucket CORS config must expose the ETag header for this to work.
.then(({ ETag }) => {
// Show how many bytes we have uploaded.
const $bytesUploaded = document.querySelector("span#bytesUploaded");
$bytesUploaded.textContent =
parseInt($bytesUploaded.textContent) + blob.size;
return { ETag, PartNumber: partNumber };
})
);
})
// Wait until all uploads are completed, then convert the results into an array.
.toArray()
// Complete the multipart upload, passing every part's number and ETag.
.switchMap(parts => {
return s3
.completeMultipartUpload({
Bucket: BUCKET_NAME,
Key: s3Key,
UploadId: uploadId,
MultipartUpload: {
Parts: parts
}
})
.promise();
})
.subscribe(
({ Location }) => {
// completeMultipartUpload returns the final object location, so show it.
const $location = document.querySelector("span#location");
$location.textContent = Location;
console.log("Uploaded successfully.");
},
err => {
console.error(err);
if (uploadId) {
// Abort the multipart upload on any failure — pending uploads
// keep accruing charges until completed or aborted.
s3
.abortMultipartUpload({
Bucket: BUCKET_NAME,
UploadId: uploadId,
Key: s3Key
})
.promise()
.then(() => console.log("Multipart upload aborted"))
.catch(e => console.error(e));
}
}
);
};
// Wire up the "stop" control. Calling MediaRecorder.stop() flushes any
// buffered data through a final 'dataavailable' event and then fires 'stop',
// which ends the upload Observable chain above without losing data.
const stopButton = document.querySelector("button#stop");
stopButton.onclick = function () {
  mediaRecorder.stop();
};
/* Layout for the start/stop controls. */
button {
margin: 0 3px 10px 0;
padding-left: 2px;
padding-right: 2px;
width: 99px;
}
button:last-of-type {
margin: 0;
}
p.borderBelow {
margin: 0 0 20px 0;
padding: 0 0 20px 0;
}
/* The mirror <video> element showing the live camera feed. */
video {
height: 232px;
margin: 0 12px 20px 0;
vertical-align: top;
width: calc(20em - 10px);
}
video:last-of-type {
margin: 0 0 20px 0;
}
<div id="container">
<!-- Live preview of the camera; muted to avoid audio feedback. -->
<video id="mirror" autoplay muted></video>
<div>
<button id="start">Start Streaming</button>
<button id="stop">Stop Streaming</button>
</div>
<!-- Progress counters updated by the upload pipeline. -->
<div>
<span>Recorded: <span id="bytesRecorded">0</span> bytes</span>;
<span>Uploaded: <span id="bytesUploaded">0</span> bytes</span>
</div>
<div>
<span id="location"></span>
</div>
</div>
<!-- include adapter for the srcObject shim -->
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/aws-sdk/2.175.0/aws-sdk.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/rxjs/5.5.6/Rx.js"></script>
Caveats:
All Multipart Uploads need to be either completed or aborted. You will be charged if you leave it pending forever. See the "Note" here.
Each chunk that you Upload (except the last one) must be larger than 5 MB. Or an error will be thrown. See the details here. So you need to adjust the timeframe/resolution.
When you are instantiating the SDK, make sure the credentials you use are backed by a policy that grants the s3:PutObject permission.
You need to expose the ETag in your bucket CORS configuration. Here is the example of CORS configuration:
<?xml version="1.0" encoding="UTF-8"?>
<!-- Bucket CORS configuration: ExposeHeader ETag is required so the browser
     can read each uploadPart response's ETag for completeMultipartUpload. -->
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedMethod>GET</AllowedMethod>
<AllowedMethod>POST</AllowedMethod>
<AllowedMethod>PUT</AllowedMethod>
<ExposeHeader>ETag</ExposeHeader>
<AllowedHeader>*</AllowedHeader>
</CORSRule>
</CORSConfiguration>
Limitations:
Be careful, as the MediaRecorder API is still not widely adopted. Make sure you check caniuse.com before using it in prod.

Related

Write or stream audio (live-voice) file to variable as a binary in Node.js

I am working with audio streams in Node.js. As of now, my code doesn't use util.promisify and has 3 stages. After the 2nd .pipe I write the file to disk in wav audio format with the required params.
Code example below:
import { FileWriter } from 'wav';
// One output file per user, timestamped to avoid collisions.
const filename = `./${Date.now()}-${userId}.wav`;
// 16 kHz mono — must match the FileWriter settings below.
const encoder = new OpusEncoder(16000, 1);
receiver
.subscribe(userId, {
end: {
// Close the stream once the speaker has been silent for 100 ms.
behavior: EndBehaviorType.AfterSilence,
duration: 100,
},
})
// OpusDecodingStream is a custom class which decodes the audio, like a gzip stage for a file.
.pipe(new OpusDecodingStream({}, encoder))
.pipe(
// Writes the wav file to disk; can also be replaced with FileRead, part of the wav module.
new FileWriter(filename, {
channels: 1,
sampleRate: 16000,
}),
);
The problem is: I need to transfer (not streaming!) resulting audio file in binary format via axios POST method. So I guess, it's a bit wrong to write file on disk instead of writing it in variable, and after stream ends, send it right to required URL. Something (by logic) which I'd like to see:
// other code
// NOTE(review): pseudo-code — illustrative only. The `return` inside the
// pipe() argument list is invalid syntax, and .pipe() returns the destination
// stream (not a promise), so it cannot be awaited like this.
const fileStringBinary = await receiver
.subscribe(userId, {
end: {
behavior: EndBehaviorType.AfterSilence,
duration: 100,
},
})
.pipe(new OpusDecodingStream({}, encoder))
.pipe(
return new FileWriter(filename, {
channels: 1,
sampleRate: 16000,
}),
);
await axios.post('https://url.com', {
data: fileStringBinary
});
Unfortunately I am not so good with streams and especially with audio one, so I am looking for a bit help or any useful advice will be welcome for me.
I understand that I could write my file to a directory, find it there, read it again with a read stream from node:stream/fs, and then POST it to the required URL. That is not what I need — I'd like to skip these useless writing-then-reading stages. I believe there is a way to collect the stream into binary data and store it in a JS variable.
That was a bit tricky after all, but I guess I figured it out:
const stream = receiver
.subscribe(userId, {
end: {
// Stop the capture after 100 ms of silence.
behavior: EndBehaviorType.AfterSilence,
duration: 100,
},
})
.pipe(
// Wrap the raw Opus packets into an Ogg container in memory.
new opus.OggLogicalBitstream({
opusHead: new opus.OpusHead({
channelCount: 2,
sampleRate: 48000,
}),
pageSizeControl: {
maxPackets: 10,
},
crc: false,
}),
);
// Collect the chunks in memory instead of writing them to disk.
const data = [];
stream.on('data', (chunk) => {
data.push(chunk);
});
// Once the stream ends, POST the whole recording as one Buffer.
stream.on('end', async () => {
try {
const response = await axios.post(
`https://url.com${postParams}`,
Buffer.concat(data),
{
headers: {
Authorization: `Api-Key ${token}`,
'Content-Type': 'application/x-www-form-urlencoded',
},
},
);
console.log(response);
} catch (e) {
console.log(e);
}
});
Unfortunately, I haven't found a better solution, then using old-school events model with data and on end. My working case is connected with Discord.js voice recording without file and using stream for voice recognition.
I will be glad if someone will provide a better-syntax solution, and in that case I'll accept this answer as solved.

LinkedIn Marketing API- Invalid complete multipartUpload request

I'm trying to upload a video to the Linkedin API as per the marketing API documentation. I've done the following so far:
Registered for a multi part asset upload and received the response containing an array of unique multipart URLs to push the corresponding chunks to
Fetched the chunks from an Amazon S3 bucket where they live using the Range header
Successfully uploaded all those chunks via PUT to their corresponding URLs, and stored their ETag and HTTP Status code values.
Created the finalise POST request body as per the link above using the etags and status codes.
Unfortunately my request fails with:
'com.linkedin.vector.utils.logic.LogicLayerInvalidException: Invalid complete multipartUpload request ...(stringified payload)'
The only part of the request body I haven't added from the example given is the "metadata" field- there's nothing in the documentation to explain what this is or where it's generated from. I'm assuming that's what is missing. Can anyone point me in the right direction please?
Code for the request in question is as follows:
// Loop over the chunked upload URLs and upload segments of the S3 file.
for(let i = 0, l = uploadDetails.partUploadRequests.length; i < l; i++) {
const item: PartUploadRequest = uploadDetails.partUploadRequests[i];
// Fetch only this part's byte range from the source object.
const partialParams: GetObjectRequest = { Bucket: video.dynamoData.mp4Bucket, Key: video.dynamoData.mp4Outputs[0], Range: `bytes=${item.byteRange.firstByte}-${item.byteRange.lastByte}` };
console.log(`Requesting bytes ${item.byteRange.firstByte}-${item.byteRange.lastByte}`);
const s3PartialObject = await s3Client.getObject(partialParams).promise();
// PUT the chunk to the LinkedIn-provided part URL with its required headers.
const response = await axios.put(item.url, s3PartialObject.Body, {
headers: {
...item.headers
}
});
const { status, headers } = response;
// completeMultiPartUpload needs each part's ETag and HTTP status code.
responses.push({
headers: {
ETag: headers.etag
},
httpStatusCode: status
});
};
// Send all chunk responses off and confirm the video upload.
// NOTE(review): the request below lacks the `metadata` field from the
// registerUpload response's
// uploadMechanism["com.linkedin.digitalmedia.uploading.MultipartUpload"],
// which the API requires even when it is an empty string.
const finaliseVideoPayload: LinkedinFinaliseVideoPostRequest = {
completeMultipartUploadRequest: {
mediaArtifact: registerVideoRequest.value.mediaArtifact,
partUploadResponses: responses
}
};
console.log(`Fetched all parts, readying finalise request with ${finaliseVideoPayload.completeMultipartUploadRequest.partUploadResponses.length} parts.`);
const json = await axios.post('https://api.linkedin.com/v2/assets?action=completeMultiPartUpload', finaliseVideoPayload, {
headers: {
'X-RestLi-Protocol-Version': '2.0.0',
'Authorization': 'Bearer ' + channel.token,
'Host': 'api.linkedin.com'
}
});
Thanks
I guess this has been already solved. Just in case if it is not.
There is a field called 'metadata' which comes back in the register API response for a multipart upload. It is found in this field:
uploadMechanism["com.linkedin.digitalmedia.uploading.MultipartUpload"].metadata
Apparently, this is required by the completeMultipart api.
So data will be
// Fragment of the corrected request body — note the added `metadata` field.
completeMultipartUploadRequest: {
mediaArtifact: registerVideoRequest.value.mediaArtifact,
// Copied from the registerUpload response; required even when it is an empty string.
metadata: registerVideoRequest.value.uploadMechanism["com.linkedin.digitalmedia.uploading.MultipartUpload"].metadata,
partUploadResponses: responses
}
It took me a day to figure out that the field which has no value(empty string) is required. Maybe a bug.
Hopefully this solves the issue.

Node.js using amazon transcoder to format video / audio files

My goal is to make sure that all videos that are being uploaded to my application is the right format and that they are formatted to fit minimum size.
I did this before using ffmpeg however i have recently moved my application to an amazon server.
This gives me the option to use Amazon Elastic Transcoder
However by the looks of it from the interface i am unable to set up automatic jobs that look for video or audio files and converts them.
For this i have been looking at their SDK / api references but i am not quite sure how to use that in my application.
My question is has anyone successfully started transcoding jobs in node.js and know how to convert videos from one format to another and / or down set the bitrate? I would really appreciate it if someone could point me in the right direction with some examples of how this might work.
However by the looks of it from the interface i am unable to set up
automatic jobs that look for video or audio files and converts them.
The Node.js SDK doesn't support it but you can do the followings: if you store the videos in S3 (if not move them to S3 because elastic transcoder uses S3) you can run a Lambda function on S3 putObject triggered by AWS.
http://docs.aws.amazon.com/lambda/latest/dg/with-s3.html
My question is has anyone successfully started transcoding jobs in
node.js and know how to convert videos from one format to another and
/ or down set the bitrate? I would really appreciate it if someone
could point me in the right direction with some examples of how this
might work.
We used AWS for video transcoding with node without any problem. It was time consuming to find out every parameter, but I hope these few line could help you:
// Node.js AWS SDK v2 setup for Elastic Transcoder.
const aws = require('aws-sdk');
aws.config.update({
accessKeyId: config.AWS.accessKeyId,
secretAccessKey: config.AWS.secretAccessKey,
region: config.AWS.region
});
// Shared client; the pipeline referenced in the job params ties the
// input/output S3 buckets together.
var transcoder = new aws.ElasticTranscoder();
// Start an Elastic Transcoder job for `key` and invoke `callback(err, data)`
// when the job completes — or fails.
// presets: http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/system-presets.html
let transcodeVideo = function (key, callback) {
  let params = {
    PipelineId: config.AWS.transcode.video.pipelineId, // specifies output/input buckets in S3
    Input: {
      Key: key,
    },
    OutputKeyPrefix: config.AWS.transcode.video.outputKeyPrefix,
    // One output per configured preset (e.g. 360p/480p variants).
    Outputs: config.AWS.transcode.video.presets.map(p => {
      return {Key: `${key}${p.suffix}`, PresetId: p.presetId};
    })
  };
  // Thumbnails are generated only for the first output.
  params.Outputs[0].ThumbnailPattern = `${key}-{count}`;
  transcoder.createJob(params, function (err, data) {
    if (err) {
      logger.err(err);
      // Bug fix: propagate the failure instead of swallowing it — previously
      // the callback was never invoked on error, leaving callers hanging.
      return callback(err);
    }
    let jobId = data.Job.Id;
    logger.info('AWS transcoder job created (' + jobId + ')');
    // Poll until the job completes, then hand the result to the caller.
    transcoder.waitFor('jobComplete', {Id: jobId}, callback);
  });
};
An example configuration file:
// Example configuration object. pipelineId and presetId values come from the
// AWS console.
let config = {
accessKeyId: '',
secretAccessKey: '',
region: '',
videoBucket: 'blabla-media',
transcode: {
video: {
pipelineId: '1450364128039-xcv57g',
outputKeyPrefix: 'transcoded/', // put the video into the transcoded folder
presets: [ // Comes from the AWS console
{presetId: '1351620000001-000040', suffix: '_360'},
{presetId: '1351620000001-000020', suffix: '_480'}
]
}
}
};
If you want to generate a master playlist you can do it like this.
Raw ".ts" segment files are not playable by HLS players on their own; generate an ".m3u8" playlist file as well:
/**
 * Kick off an Elastic Transcoder HLS job for an MP4 already in S3.
 * Produces two renditions plus a master .m3u8 playlist (raw .ts segments are
 * not playable on their own).
 *
 * @param {string} mp4Location    S3 key of the source MP4.
 * @param {string} outputLocation Output key prefix (a "/" is appended).
 * @returns {Promise<string>} The created job's Id.
 */
async function transcodeVideo(mp4Location, outputLocation) {
  // Two bitrate variants, each segmented into 10-second chunks.
  const renditions = [
    { Key: "hls2000", PresetId: "1351620000001-200010", SegmentDuration: "10" },
    { Key: "hls1500", PresetId: "1351620000001-200020", SegmentDuration: "10" }
  ];
  const jobParams = {
    PipelineId: elasticTranscoderPipelineId,
    // Let the transcoder detect every property of the source automatically.
    Input: {
      Key: mp4Location,
      AspectRatio: 'auto',
      FrameRate: 'auto',
      Resolution: 'auto',
      Container: 'auto',
      Interlaced: 'auto'
    },
    OutputKeyPrefix: outputLocation + "/",
    Outputs: renditions,
    // Master playlist referencing both renditions.
    Playlists: [
      {
        Format: 'HLSv3',
        Name: 'hls',
        OutputKeys: ["hls2000", "hls1500"]
      }
    ]
  };
  const job = await createJob(jobParams);
  return job.Job.Id;
}
// Promise wrapper around ElasticTranscoder.createJob's callback API.
// Resolves with the raw createJob response.
// Fixes: rejects with a real Error (previously a bare string, losing the
// stack), and always settles — the original promise hung forever when the
// callback delivered neither `err` nor `data`.
async function createJob(params) {
  return new Promise((resolve, reject) => {
    transcoder.createJob(params, function (err, data) {
      if (err) {
        // Keep the original "err: ..." message shape; attach the AWS error as cause.
        reject(new Error("err: " + err, { cause: err }));
        return;
      }
      resolve(data);
    });
  });
}

Integrating HTML5 EME video with edX platform: Why is "initDataType" empty?

I have been trying to integrate HTML5 EME videos with edX video xblock
In that I am facing this error:
Failed to generate a license request DOMException: The initData
parameter is empty.
<script >
'use strict';
// Define a key: hardcoded in this example (16 bytes, used with A128KW below).
// This corresponds to the key used for encryption.
var KEY = new Uint8Array([
0xeb, 0xdd, 0x62, 0xf1, 0x68, 0x14, 0xd2, 0x7b,
0x68, 0xef, 0x12, 0x2a, 0xfc, 0xe4, 0xae, 0x3c
]);
// Capabilities requested from the built-in ClearKey key system:
// WebM init data and VP8 video.
var config = [{
initDataTypes: ['webm'],
videoCapabilities: [{
contentType: 'video/webm; codecs="vp8"'
}]
}];
var video = document.getElementById('v');
// 'encrypted' fires when the media data contains encryption init data.
video.addEventListener('encrypted', handleEncrypted, false);
navigator.requestMediaKeySystemAccess('org.w3.clearkey', config).then(
function(keySystemAccess) {
return keySystemAccess.createMediaKeys();
}
).then(
function(createdMediaKeys) {
// Attach the MediaKeys to the video element so sessions can be created.
return video.setMediaKeys(createdMediaKeys);
}
).catch(
function(error) {
console.error('Failed to set up MediaKeys', error);
}
);
// Fired when the media element encounters encrypted init data in the stream.
// NOTE(review): the reported "initData parameter is empty" error means
// event.initData / event.initDataType arrive empty here when served through
// edX — presumably the asset delivered there is not the encrypted WebM
// variant (or the encryption header is stripped); confirm what the player
// actually loads.
function handleEncrypted(event) {
console.log('encrypted event:', event);
var session = video.mediaKeys.createSession();
session.addEventListener('message', handleMessage, false);
session.generateRequest(event.initDataType, event.initData).catch(
function(error) {
console.error('Failed to generate a license request', error);
}
);
}
// Handle the CDM's 'message' event by producing a license for the session.
function handleMessage(event) {
console.log('message event: ', event);
// If you had a license server, you would make an asynchronous XMLHttpRequest
// with event.message as the body. The response from the server, as a
// Uint8Array, would then be passed to session.update().
// Instead, we generate the license synchronously on the client, using
// the hard-coded KEY at the top.
var license = generateLicense(event.message);
console.log('license: ', license);
var session = event.target;
session.update(license).catch(
function(error) {
console.error('Failed to update the session', error);
}
);
}
// Convert a Uint8Array into base64 using the base64url alphabet, without padding.
// Fix: builds the intermediate binary string in 32 KiB chunks —
// String.fromCharCode.apply(null, u8arr) passes every byte as a separate
// argument and overflows the engine's argument limit / call stack for large
// arrays.
function toBase64(u8arr) {
  var CHUNK = 0x8000; // bytes per fromCharCode call; safely under arg limits
  var binary = '';
  for (var i = 0; i < u8arr.length; i += CHUNK) {
    binary += String.fromCharCode.apply(null, u8arr.subarray(i, i + CHUNK));
  }
  return btoa(binary).
    replace(/\+/g, '-').replace(/\//g, '_').replace(/=*$/, '');
}
// This takes the place of a license server: it parses the ClearKey license
// request and answers it with the single hard-coded KEY as a JWK key set
// (kids/keys are base64-encoded key IDs / keys).
function generateLicense(message) {
  // The ClearKey request body is UTF-8 JSON listing the requested key IDs.
  var request = JSON.parse(new TextDecoder().decode(message));
  // Only one key is known here, so exactly one key ID is expected.
  // A real license server could easily serve multiple keys.
  console.assert(request.kids.length === 1);
  var response = {
    keys: [{
      kty: 'oct',
      alg: 'A128KW',
      kid: request.kids[0],
      k: toBase64(KEY)
    }]
  };
  return new TextEncoder().encode(JSON.stringify(response));
}
</script>
<%page expression_filter="h"/>
<%! from django.utils.translation import ugettext as _ %>
<%inherit file="../main.html" />
<%block name="pagetitle">${_("About")}</%block>
<main id="main" aria-label="Content" tabindex="-1">
<section class="container about">
<h1>${_("About")}</h1>
<p>${_("This page left intentionally blank. Feel free to add your own content.")}</p>
<!-- EME demo player: 'v' is the element the script attaches the 'encrypted'
     listener and MediaKeys to. The source must be the ClearKey-encrypted
     WebM for the 'encrypted' event to fire with non-empty initData. -->
<video autoplay controls id='v'>
<source src="http://simpl.info/eme/video/Chrome_44-enc_av.webm">
</video>
</section>
</main>
this is the function from where the error occurs.
The thing is when I do it simply without edX integration it works fine and there event.initDataType is "webm" whereas with edX integration it is empty.
Can anyone please help ?

Client side compression with HTML5 and Javascript

I am working on a web application and we allow users to upload files to our server. I am trying to do client-side compression before uploading files to the server. What would be the best way to achieve this using HTML5 and JavaScript?
Thanks.
The common mechanism to do what you want is using FileReader and a JavaScript client-side compression library (i.e. compressjs).
In 2022 it's almost too simple, if the browser supports CompressionStream, FormData and Response.
In the example below I use FormData to collect all the fields from the form.
Then I use the readable stream from the file, and pipe it though the compression stream. Then I use Response to read everything from the compressed stream and return it in a blob.
/**
 * Compress a File/Blob with the requested Content-Encoding via CompressionStream.
 *
 * @param {Blob} file Source file (its type is preserved on the result).
 * @param {string} [encoding='gzip'] 'gzip' | 'deflate' | 'deflate-raw'.
 * @returns {Promise<{data: Blob, encoding: string|null}>} Compressed blob and
 *   the encoding used, or the original file with encoding null on failure.
 */
async function compress(file, encoding = 'gzip') {
  try {
    // Pipe the file's readable stream through the compressor, then drain the
    // compressed stream into a Blob via Response.
    const compressedStream = file.stream().pipeThrough(new CompressionStream(encoding));
    const blob = await new Response(compressedStream, {
      headers: {
        'Content-Type': file.type
      },
    }).blob();
    return { data: blob, encoding };
  } catch (error) {
    // Unsupported encoding (or any other failure): fall back to the raw file.
    console.error(error.message);
    return { data: file, encoding: null };
  }
}
// Prevent a real submission; compression runs on every 'input' event instead.
theForm.addEventListener(
'submit',
(event) => event.preventDefault()
)
theForm.addEventListener(
'input',
async function(event) {
// collect all fields
const fd = new FormData(theForm);
// Get the File handle from the input element
const file = fd.get('theFile');
if (!file) return
const encoding = fd.get('theEncoding');
const compressed = await compress(file, encoding);
// Report original vs. compressed sizes and the savings percentage.
theMessage.value = [
'Compressed with', compressed.encoding,
'Source file was', file.size, 'bytes',
'and the compressed file', compressed.data.size,
'saving', ((1 - compressed.data.size / file.size) * 100)
.toFixed(0),
'%.'
].join(' ')
}
)
/* Stack every form control full-width. */
form>* {
display: block;
width: 100%;
}
<form id="theForm">
<!-- Encodings supported by CompressionStream. -->
<select name="theEncoding">
<option>gzip</option>
<option>deflate</option>
<option>deflate-raw</option>
</select>
<input type="file" name="theFile" id="theFile">
</form>
<!-- Result summary written by the 'input' handler. -->
<output id="theMessage"></output>

Categories