Decode audio from ArrayBuffer received over WebSocket in Node - javascript

I am trying to send audio over WebSockets to a Node server, where I want to store it in the local file system. I am able to append the received data to a file, but it is not playable.
Most of the solutions I found are for the client side and use AudioContext.decodeAudioData().
client
localAudioStreamRecorder = getMediaStreamRecording(
  localAudioStream,
  'audio/webm;codecs=opus',
  (data: ArrayBuffer) => {
    handleSendRecordingChunk(socket, {
      ...getIdentityPayload({ sessionId, userId, role }),
      data,
      streamType: 'audio',
    })
  }
)
server
const audioStreamPass = fs.createWriteStream(audioFilePath, { flags: 'a' });
const newData = async (socket, eventData, cb) => {
  const { sessionId } = eventData.body;
  if (eventData.body.streamType === 'audio') {
    // Need help here
    audioStreamPass.write(Buffer.from(new Uint8Array(eventData.body.data)));
  }
};
I just want to know how I can decode this data into something that is playable.
Thanks.

Try this on the server:
const { Readable } = require('stream');
const audioStreamPass = fs.createWriteStream(audioFilePath, { flags: 'a' });
const newData = async (socket, eventData, cb) => {
  const { sessionId } = eventData.body;
  if (eventData.body.streamType === 'audio') {
    // Create a readable stream from the received chunk
    const readableStream = Readable.from(Buffer.from(eventData.body.data));
    // Pipe into the writable without closing it,
    // so later chunks can still be appended
    readableStream.pipe(audioStreamPass, { end: false });
  }
};
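For the file to be playable, the chunks also have to land in the file in the same order the client produced them: MediaRecorder's first chunk carries the WebM/EBML headers, and every later chunk is only meaningful after it. Below is a minimal sketch of that idea, assuming the chunks arrive in order over the socket and that ffmpeg is available on the server for an optional remux (file names are illustrative):
const { execFile } = require('child_process');
const fs = require('fs');

// Append each incoming chunk exactly as received, in order.
const audioFilePath = 'recording.webm'; // assumed output path
const audioStreamPass = fs.createWriteStream(audioFilePath, { flags: 'a' });

const appendChunk = (arrayBuffer) => {
  audioStreamPass.write(Buffer.from(arrayBuffer));
};

// When the client stops recording, close the file and optionally remux it,
// which rewrites the missing duration metadata some players expect
// (assumes ffmpeg is installed on the server).
const finalize = () => {
  audioStreamPass.end(() => {
    execFile(
      'ffmpeg',
      ['-y', '-i', audioFilePath, '-c', 'copy', 'recording-fixed.webm'],
      (err) => {
        if (err) console.error('remux failed', err);
      }
    );
  });
};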

Related

string to bufferstream not always writing data

I have a cloud function receiving a json string in a pubsub topic.
The goal is to extract some data into a new JSON string.
Next parse it as JSONL.
And finally stream it to Google Cloud Storage.
I notice that sometimes the files seem to contain data and sometimes they do not.
The pubsub is working fine and data is coming into this cloud function just fine.
I tried adding async/await where it seemed to fit, but I am afraid it has to do with the bufferstream. Both are topics I have trouble getting my head around.
What could be the issue?
const stream = require('stream');
const { Storage } = require('@google-cloud/storage');
// Initiate the source
const bufferStream = new stream.PassThrough();
// Creates a client
const storage = new Storage();
// save stream to bucket
const toBucket = (message, filename) => {
// Write your buffer
bufferStream.end(Buffer.from(message));
const myBucket = storage.bucket(process.env.BUCKET);
const file = myBucket.file(filename);
// Pipe the 'bufferStream' into a 'file.createWriteStream' method.
bufferStream.pipe(file.createWriteStream({
validation: 'md5',
}))
.on('error', (err) => { console.error(err); })
.on('finish', () => {
// The file upload is complete.
console.log(`${filename} is uploaded`);
});
};
// extract correct fields
const extract = (entry) => ({
id: entry.id,
status: entry.status,
date_created: entry.date_created,
discount_total: entry.discount_total,
discount_tax: entry.discount_tax,
shipping_total: entry.shipping_total,
shipping_tax: entry.shipping_tax,
total: entry.total,
total_tax: entry.total_tax,
customer_id: entry.customer_id,
payment_method: entry.payment_method,
payment_method_title: entry.payment_method_title,
transaction_id: entry.transaction_id,
date_completed: entry.date_completed,
billing_city: entry.billing.city,
billing_state: entry.billing.state,
billing_postcode: entry.billing.postcode,
coupon_lines_id: entry.coupon_lines.id,
coupon_lines_code: entry.coupon_lines.code,
coupon_lines_discount: entry.coupon_lines.discount,
coupon_lines_discount_tax: entry.coupon_lines.discount_tax,
});
// format json to jsonl
const format = async (message) => {
let jsonl;
try {
// extract only the necessary
const jsonMessage = await JSON.parse(message);
const rows = await jsonMessage.map((row) => {
const extractedRow = extract(row);
return `${JSON.stringify(extractedRow)}\n`;
});
// join all lines as one string with no join symbol
jsonl = rows.join('');
console.log(jsonl);
} catch (e) {
console.error('jsonl conversion failed');
}
return jsonl;
};
exports.jsonToBq = async (event, context) => {
const message = Buffer.from(event.data, 'base64').toString();
const { filename } = event.attributes;
console.log(filename);
const jsonl = await format(message, filename);
toBucket(jsonl, filename);
};
It's fixed by moving the bufferStream const into the toBucket function.
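For clarity, here is a sketch of that fix, reusing the names from the question: the PassThrough is created inside toBucket so every invocation gets a fresh stream instead of reusing one that has already been ended, and the upload is additionally wrapped in a Promise so the Cloud Function can await it before returning.
const stream = require('stream');
const { Storage } = require('@google-cloud/storage');

// Creates a client
const storage = new Storage();

// save stream to bucket
const toBucket = (message, filename) => {
  // A new PassThrough per call; a stream that has already ended cannot be reused
  const bufferStream = new stream.PassThrough();
  bufferStream.end(Buffer.from(message));

  const file = storage.bucket(process.env.BUCKET).file(filename);
  return new Promise((resolve, reject) => {
    bufferStream
      .pipe(file.createWriteStream({ validation: 'md5' }))
      .on('error', reject)
      .on('finish', () => {
        // The file upload is complete.
        console.log(`${filename} is uploaded`);
        resolve();
      });
  });
};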

Upload Image from form-data to S3 using a Lambda

So I am writing a Lambda that will take in some form data via a straight POST through API Gateway (testing using Postman for now) and then send that image to S3 for storage. Every time I run it, the image uploaded to S3 is corrupted and won't open properly. I have seen people having to decode/encode the incoming data but I feel like I have tried everything using Buffer.from. I am only looking to store either .png or .jpg. The below code does not reflect my attempts using Base64 encoding/decoding seeing they all failed. Here is what I have so far -
Sample Request in postman
{
image: (uploaded .jpg/.png),
metadata: {tag: 'iPhone'}
}
Lambda
const AWS = require('aws-sdk')
const multipart = require('aws-lambda-multipart-parser')
const s3 = new AWS.S3();
exports.handler = async (event) => {
const form = multipart.parse(event, false)
const s3_response = await upload_s3(form)
return {
statusCode: '200',
body: JSON.stringify({ data: s3_response })
}
};
const upload_s3 = async (form) => {
const uniqueId = Math.random().toString(36).substr(2, 9);
const key = `${uniqueId}_${form.image.filename}`
const request = {
Bucket: 'bucket-name',
Key: key,
Body: form.image.content,
ContentType: form.image.contentType,
}
try {
const data = await s3.putObject(request).promise()
return data
} catch (e) {
console.log('Error uploading to S3: ', e)
return e
}
}
EDIT:
I am now attempting to save the image into the /tmp directory and then use a read stream to upload it to S3. Here is some code for that
s3 upload function
const AWS = require('aws-sdk')
const fs = require('fs')
const s3 = new AWS.S3()
module.exports = {
upload: (file) => {
return new Promise((resolve, reject) => {
const key = `${Date.now()}.${file.extension}`
const bodyStream = fs.createReadStream(file.path)
const params = {
Bucket: process.env.S3_BucketName,
Key: key,
Body: bodyStream,
ContentType: file.type
}
s3.upload(params, (err, data) => {
if (err) {
return reject(err)
}
return resolve(data)
}
)
})
}
}
form parser function
const busboy = require('busboy')
module.exports = {
parse: (req, temp) => {
const ctype = req.headers['Content-Type'] || req.headers['content-type']
let parsed_file = {}
return new Promise((resolve) => {
try {
const bb = new busboy({
headers: { 'content-type': ctype },
limits: {
fileSize: 31457280,
files: 1,
}
})
bb.on('file', function (fieldname, file, filename, encoding, mimetype) {
const stream = temp.createWriteStream()
const ext = filename.split('.')[1]
console.log('parser -- ext ', ext)
parsed_file = { name: filename, path: stream.path, f: file, type: mimetype, extension: ext }
file.pipe(stream)
}).on('finish', () => {
resolve(parsed_file)
}).on('error', err => {
console.error(err)
resolve({ err: 'Form data is invalid: parsing error' })
})
if (req.end) {
req.pipe(bb)
} else {
bb.write(req.body, req.isBase64Encoded ? 'base64' : 'binary')
}
return bb.end()
} catch (e) {
console.error(e)
return resolve({ err: 'Form data is invalid: parsing error' })
}
})
}
}
handler
const form_parser = require('./form-parser').parse
const s3_upload = require('./s3-upload').upload
const temp = require('temp')
exports.handler = async (event, context) => {
temp.track()
const parsed_file = await form_parser(event, temp)
console.log('index -- parsed form', parsed_file)
const result = await s3_upload(parsed_file)
console.log('index -- s3 result', result)
temp.cleanup()
return {
statusCode: '200',
body: JSON.stringify(result)
}
}
The above edited code is a combination of other code and a GitHub repo I found that is trying to achieve the same result. Even with this solution the file is still corrupted.
Figured out this issue. The code works perfectly fine - it was an issue with API Gateway. You need to go into the API Gateway settings, add multipart/form-data under Binary Media Types, and then re-deploy the API. Hope this helps someone else who is banging their head against the wall trying to send images via form data to a Lambda.
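If you prefer to script that console change, a sketch with the AWS SDK v2 might look like the following (the restApiId and stage name are assumptions; note that the slash in the media type is escaped as ~1 in the patch path, and the API still has to be re-deployed for the change to take effect):
const AWS = require('aws-sdk');
const apigateway = new AWS.APIGateway();

const enableMultipartFormData = async (restApiId) => {
  // Register multipart/form-data as a binary media type on the REST API
  await apigateway.updateRestApi({
    restApiId,
    patchOperations: [
      { op: 'add', path: '/binaryMediaTypes/multipart~1form-data' }
    ]
  }).promise();

  // The change only takes effect after the API is deployed again
  await apigateway.createDeployment({ restApiId, stageName: 'prod' }).promise();
};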

How to read the content of a JSON file uploaded to Google Cloud Storage using Node.js

I manually uploaded the JSON file to Google Cloud Storage after creating a new project. I am able to read the metadata for a file, but I don't know how to read the JSON content.
The code I used to read the metadata is:
var Storage = require('@google-cloud/storage');
const storage = Storage({
keyFilename: 'service-account-file-path',
projectId: 'project-id'
});
storage
.bucket('project-name')
.file('file-name')
.getMetadata()
.then(results => {
console.log("results is", results[0])
})
.catch(err => {
console.error('ERROR:', err);
});
Can someone guide me to the way to read the JSON file content?
I've used the following code to read a json file from Cloud Storage:
'use strict';
const Storage = require('@google-cloud/storage');
const storage = Storage();
exports.readFile = (req, res) => {
console.log('Reading File');
var archivo = storage.bucket('your-bucket').file('your-JSON-file').createReadStream();
console.log('Concat Data');
var buf = '';
archivo.on('data', function(d) {
buf += d;
}).on('end', function() {
console.log(buf);
console.log("End");
res.send(buf);
});
};
I'm reading from a stream and concatenating all the data in the file into the buf variable.
Hope it helps.
UPDATE
To read multiple files:
'use strict';
const {Storage} = require('@google-cloud/storage');
const storage = new Storage();
listFiles();
async function listFiles() {
const bucketName = 'your-bucket'
console.log('Listing objects in a Bucket');
const [files] = await storage.bucket(bucketName).getFiles();
files.forEach(file => {
console.log('Reading: '+file.name);
var archivo = file.createReadStream();
console.log('Concat Data');
var buf = '';
archivo.on('data', function(d) {
buf += d;
}).on('end', function() {
console.log(buf);
console.log("End");
});
});
};
I was using the createReadStream method like the other answers, but I had a problem with the output in that it randomly contained invalid characters (�) for some characters in a string. I thought it could be an encoding problem.
I came up with a workaround that uses the download method. The download method returns a DownloadResponse that contains an array of Buffers. We then call the Buffer.toString() method with an encoding of utf8 and parse the result with JSON.parse().
const downloadAsJson = async (bucket, path) => {
const file = await new Storage()
.bucket(bucket)
.file(path)
.download();
return JSON.parse(file[0].toString('utf8'));
}
There is a convenient method, download, that downloads a file into memory or to a local destination. You can use it as follows:
const { Storage } = require('@google-cloud/storage');
const bucketName = 'bucket name here';
const fileName = 'file name here';
const storage = new Storage();
const file = storage.bucket(bucketName).file(fileName);
file.download(function(err, contents) {
console.log("file err: "+err);
console.log("file data: "+contents);
});
A modern version of this:
const { Storage } = require('@google-cloud/storage')
const storage = new Storage()
const bucket = storage.bucket('my-bucket')
// The function that returns a JSON string
const readJsonFromFile = async remoteFilePath => new Promise((resolve, reject) => {
let buf = ''
bucket.file(remoteFilePath)
.createReadStream()
.on('data', d => (buf += d))
.on('end', () => resolve(buf))
.on('error', e => reject(e))
})
// Example usage
;(async () => {
try {
const json = await readJsonFromFile('path/to/json-file.json')
console.log(json)
} catch (e) {
console.error(e)
}
})()
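If the caller needs an object rather than the raw JSON string, the result can be passed straight to JSON.parse inside the same async block, for example (path reused from the example above):
const json = await readJsonFromFile('path/to/json-file.json')
const obj = JSON.parse(json)
console.log(obj)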

how to pipe an archive (zip) to an S3 bucket

I'm a bit confused about how to proceed. I am using archiver (a Node.js module) as a means to write data to a zip file. Currently, I have my code working when I write to a file (local storage).
var fs = require('fs');
var archiver = require('archiver');
var output = fs.createWriteStream(__dirname + '/example.zip');
var archive = archiver('zip', {
zlib: { level: 9 }
});
archive.pipe(output);
archive.append(mybuffer, {name: 'msg001.txt'});
I’d like to modify the code so that the archive target file is an AWS S3 bucket. Looking at the code examples, I can specify the bucket name and key (and body) when I create the bucket object as in:
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'myMsgArchive.zip', Body: myStream};
s3.upload( params, function(err,data){
…
});
Or
s3 = new AWS.S3({ params: {Bucket: 'myBucket', Key: 'myMsgArchive.zip'}});
s3.upload( {Body: myStream})
.send(function(err,data) {
…
});
With regard to my S3 example(s), myStream appears to be a readable stream, and I am confused as to how to make this work, since archive.pipe requires a writable stream. Is this something where we need to use a pass-through stream? I've found an example where someone created a pass-through stream, but the example is too terse to gain a proper understanding from. The specific example I am referring to is:
Pipe a stream to s3.upload()
Any help someone can give me would greatly be appreciated. Thanks.
This could be useful for anyone else wondering how to use pipe.
Since you correctly referenced the example using the pass-through stream, here's my working code:
1 - The routine itself, zipping files with node-archiver
exports.downloadFromS3AndZipToS3 = () => {
// These are my input files I'm willing to read from S3 to ZIP them
const files = [
`${s3Folder}/myFile.pdf`,
`${s3Folder}/anotherFile.xml`
]
// Just in case you like to rename them as they have a different name in the final ZIP
const fileNames = [
'finalPDFName.pdf',
'finalXMLName.xml'
]
// Use promises to get them all
const promises = []
files.map((file) => {
promises.push(s3client.getObject({
Bucket: yourBucket,
Key: file
}).promise())
})
// Define the ZIP target archive
let archive = archiver('zip', {
zlib: { level: 9 } // Sets the compression level.
})
// Pipe!
archive.pipe(uploadFromStream(s3client, 'someDestinationFolderPathOnS3', 'zipFileName.zip'))
archive.on('warning', function(err) {
if (err.code === 'ENOENT') {
// log warning
} else {
// throw error
throw err;
}
})
// Good practice to catch this error explicitly
archive.on('error', function(err) {
throw err;
})
// The actual archive is populated here
return Promise
.all(promises)
.then((data) => {
data.map((thisFile, index) => {
archive.append(thisFile.Body, { name: fileNames[index] })
})
archive.finalize()
})
}
2 - The helper method
const uploadFromStream = (s3client, someFolder, aFilename) => {
const pass = new stream.PassThrough()
const s3params = {
Bucket: yourBucket,
Key: `${someFolder}/${aFilename}`,
Body: pass,
ContentType: 'application/zip'
}
s3client.upload(s3params, (err, data) => {
if (err)
console.log(err)
if (data)
console.log('Success')
})
return pass
}
The following example takes the accepted answer and makes it work with local files as requested.
const archiver = require("archiver")
const fs = require("fs")
const AWS = require("aws-sdk")
const s3 = new AWS.S3()
const stream = require("stream")
const zipAndUpload = async () => {
const files = [`test1.txt`, `test2.txt`]
const fileNames = [`test1target.txt`, `test2target.txt`]
const archive = archiver("zip", {
zlib: { level: 9 } // Sets the compression level.
})
files.map((thisFile, index) => {
archive.append(fs.createReadStream(thisFile), { name: fileNames[index] })
})
const uploadStream = new stream.PassThrough()
archive.pipe(uploadStream)
archive.finalize()
archive.on("warning", function (err) {
if (err.code === "ENOENT") {
console.log(err)
} else {
throw err
}
})
archive.on("error", function (err) {
throw err
})
archive.on("end", function () {
console.log("archive end")
})
await uploadFromStream(uploadStream)
console.log("all done")
}
const uploadFromStream = async pass => {
const s3params = {
Bucket: "bucket-name",
Key: `streamtest.zip`,
Body: pass,
ContentType: "application/zip"
}
return s3.upload(s3params).promise()
}
zipAndUpload()

React Native. MP3 Binary String (Uint8Array(9549)) to stream or file

I am trying to play an audio file in the binary string format that Amazon Polly returns.
For that, I am using 'react-native-fetch-blob' and reading a stream, but I just keep getting errors from the bridge saying 'Invalid data message - all must be length: 8'.
It happens when I try to open the stream: ifstream.open()
This is the code:
//polly config
const params = {
LexiconNames: [],
OutputFormat: "mp3",
SampleRate: "8000",
Text: "All Gaul is divided into three parts",
TextType: "text",
VoiceId: "Joanna"
};
Polly.synthesizeSpeech(params, function(err, data) {
let _data = "";
RNFetchBlob.fs.readStream(
// file path
data.AudioStream,
// encoding, should be one of `base64`, `utf8`, `ascii`
'ascii'
)
.then((ifstream) => {
ifstream.open()
ifstream.onData((chunk) => {
_data += chunk
})
ifstream.onError((err) => {
console.log('oops', err.toString())
})
ifstream.onEnd(() => {
//pasing _data to streaming player or normal audio player
ReactNativeAudioStreaming.play(_data, {showIniOSMediaCenter: true, showInAndroidNotifications: true});
})
})
});
Another solution I tried was to save the stream into a file to load it later, but I got similar errors.
RNFetchBlob.fs.createFile("myfile.mp3", dataG.AudioStream, 'ascii');
Huge thanks in advance
You could use the getSynthesizeSpeechUrl method from AWS.Polly.Presigner. I’m doing this and using react-native-sound to play the mp3. I ran into an issue where the mp3 wouldn’t play because my presigned URL contained special characters, but there’s a fix here.
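For reference, a minimal sketch of that presigned-URL approach, assuming the AWS SDK v2 for JavaScript and react-native-sound are installed and configured (the region, credentials, and 3600-second expiry are illustrative):
const AWS = require('aws-sdk')
const Sound = require('react-native-sound')

const polly = new AWS.Polly({ region: 'us-east-1' })
const presigner = new AWS.Polly.Presigner({}, polly)

const params = {
  OutputFormat: 'mp3',
  SampleRate: '8000',
  Text: 'All Gaul is divided into three parts',
  TextType: 'text',
  VoiceId: 'Joanna'
}

// Build a presigned GET URL instead of handling the raw AudioStream bytes
presigner.getSynthesizeSpeechUrl(params, 3600, (err, url) => {
  if (err) return console.log(err)
  // react-native-sound can load a remote URL directly (second arg is the basePath, unused here)
  const track = new Sound(url, null, (e) => {
    if (e) return console.log(e)
    track.play()
  })
})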
You can use fetch() to request one or more media resources and return response.body.getReader() from .then() to get a reader for the response's ReadableStream. Read the Uint8Array values returned by the reader's .read() method and append each value to a SourceBuffer of a MediaSource to stream the media through an HTMLMediaElement.
For example, to output the audio of two requested audio resources, in sequence
window.addEventListener("load", () => {
const audio = document.createElement("audio");
audio.controls = "controls";
document.body.appendChild(audio);
audio.addEventListener("canplay", e => {
audio.play();
});
const words = ["hello", "world"];
const mediaSource = new MediaSource();
const mimeCodec = "audio/mpeg";
const mediaType = ".mp3";
const url = "https://ssl.gstatic.com/dictionary/static/sounds/de/0/";
Promise.all(
words.map(word =>
fetch(`https://query.yahooapis.com/v1/public/yql?q=select * from data.uri where url="${url}${word}${mediaType}"&format=json&callback=`)
.then(response => response.json())
.then(({
query: {
results: {
url
}
}
}) =>
fetch(url).then(response => response.body.getReader())
.then(readers => readers)
)
)
)
.then(readers => {
audio.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener("sourceopen", sourceOpen);
async function sourceOpen() {
var sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
// set `sourceBuffer` `.mode` to `"sequence"`
sourceBuffer.mode = "segments";
const processStream = ({
done,
value
}) => {
if (done) {
return;
}
// append chunk of stream to `sourceBuffer`
sourceBuffer.appendBuffer(value);
}
// at `sourceBuffer` `updateend` call `reader.read()`,
// to read next chunk of stream, append chunk to
// `sourceBuffer`
for (let [index, reader] of Object.entries(readers)) {
sourceBuffer.addEventListener("updateend", function() {
reader.read().then(processStream);
});
let stream = await reader.read().then(processStream)
.then(() => reader.closed)
.then(() => "done reading stream " + index);
console.log(stream);
}
}
})
})
plnkr http://plnkr.co/edit/9zHwmcdG3UKYMghD0w3q?p=preview
