I can successfully upload a single image, converted to a blob, to Firebase 3. However, when I try to upload multiple images from my Ionic 1 app, it fails: the console logs that the data was uploaded successfully, but in the Firebase Storage UI I only see the first image I selected.
This is the code that gets the images:
$scope.getImages = function () {
  var options = {
    maximumImagesCount: 10,
    width: 1000,
    height: 1000,
    quality: 100
  };

  $cordovaImagePicker.getPictures(options)
    .then(function (results) {
      for (var i = 0; i < results.length; i++) {
        $scope.selectedImages.push(results[i]);
        var fileName = results[i].replace(/^.*[\\\/]/, '');

        // the image storage path is different on android
        var path = '';
        if ($ionicPlatform.is("android")) {
          path = cordova.file.cacheDirectory;
        } else {
          path = cordova.file.tempDirectory;
        } // end of android image rectification

        $cordovaFile.readAsArrayBuffer(path, fileName)
          .then(function (realImage) {
            var imageBlob = new Blob([realImage], { type: "image/jpeg" });
            imgUpload(imageBlob, fileName);
          });
      }
    }, function (error) {
      // error getting photos
      console.log(error.name);
    });
};
Below is the code for the Firebase service:
function imgUpload(_imgBlob, _filename) {
  var uploadsMetadata = {
    cacheControl: "max-age=" + (60 * 60 * 24 * 365) // One year of seconds
  };

  // create the storage reference and use it to access
  var storeRef = firebase.storage().ref();
  var uploadTask = storeRef.child('images/' + _filename).put(_imgBlob, uploadsMetadata);

  return new Promise(function (resolve, reject) {
    uploadTask.on('state_changed', function (snap) {
      console.log('Progress: ', snap.bytesTransferred, '/', snap.totalBytes, ' bytes');
    }, function (err) {
      console.log('upload error', err);
      reject(err);
    }, function () {
      var metadata = uploadTask.snapshot.metadata;
      var key = metadata.md5Hash.replace(/\//g, ':');
      var fileRecord = {
        downloadURL: uploadTask.snapshot.downloadURL,
        key: key,
        metadata: {
          fullPath: metadata.fullPath,
          md5Hash: metadata.md5Hash,
          name: metadata.name
        }
      };
      // uploadsRef.child(key).set(fileRecord).then(resolve, reject);
    });
  }); // end of Promise

  // return snapshot;
} // end of function imgUpload
[Edited 2/15/2017]
Padrian, without knowing what your specific error(s) were in the code above, I can only assume that your issue(s) are the same as what I was dealing with, namely that metadata.md5Hash was failing because the metadata wasn't defined. My code and your code are nearly identical, barring the UI framework differences.
My first refactoring to remove the error was to drop the event listening and just use a callback on .put(), like so:
storageRef.child(uploadFile.name).put(uploadFile).then(snap => cb(snap)).catch(err => errCB(err))
I further refactored my code, and just as mysteriously as the issue had appeared, it went away. Below is my full code for processing the upload file. I placed the code inside an async.queue so I could limit the uploads to 4 files at a time.
const q = async.queue(function (file, callback) {
  let reader = new window.FileReader()
  reader.onload = function (e) {
    const tags = ExifReader.load(e.target.result)
    if (tags['Orientation'].description === 'left-bottom') {
      file.rotation = 'rotate(-90deg)'
    }
  }
  reader.readAsArrayBuffer(file.file.slice(0, 128 * 1024))

  let uploadTask = storageRef.child(file.file.name).put(file.file, uploadsMetadata)

  file.uploadSuccess = false
  file.uploadError = false
  file.active = 'active'

  uploadTask.on('state_changed',
    function (snap) {
      file.progress = snap.bytesTransferred / snap.totalBytes * 100
    },
    function (err) {
      file.uploadError = true
      file.errorMessage = err
      callback(err)
    },
    function () {
      let metadata = uploadTask.snapshot.metadata
      let key = metadata.md5Hash.replace(/\//g, ':')
      let pendingInventoryRecord = {
        downloadURL: uploadTask.snapshot.downloadURL,
        key: key,
        metadata: {
          fullPath: metadata.fullPath,
          md5Hash: metadata.md5Hash,
          name: metadata.name
        },
        style: file.invStyle,
        size: file.invSize,
        count: file.invCount,
        rotate: file.rotation || ''
      }

      uploadRef.child(key).set(pendingInventoryRecord)
        .then(function () {
          pendingInventoryCountRef.child('counter').transaction(function (currentVal) {
            return (currentVal || 0) + 1
          })
          callback(null, file)
        })
        .catch(function (err) { console.log(err) })
    })
}, 4)
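For reference, a minimal sketch of how files might be pushed onto that queue; the files array and its item shape are assumptions based on the worker above, and the per-item and drain callbacks are illustrative only:

// Hypothetical usage of the queue above; `files` is assumed to be an array of
// objects shaped like the worker expects ({ file, invStyle, invSize, invCount }).
files.forEach(function (f) {
  q.push(f, function (err, uploadedFile) {
    if (err) return console.error('upload failed for', f.file.name, err)
    console.log('uploaded', uploadedFile.file.name)
  })
})

// async v2-style drain hook; in async v3 this would be q.drain(function () { ... })
q.drain = function () {
  console.log('all queued uploads have finished')
}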
I was using this code to upload files to Dropbox with JavaScript (Vue.js); however, I have not been able to get files larger than 350 MB to upload. I have been trying to upload the file in chunks. The code itself doesn't throw an error, but I get a 400 error when the Dropbox API returns the result:
Dropbox-sdk.min.js?0032:1 POST https://content.dropboxapi.com/2/files/upload_session/append_v2 400 (Bad Request)
I am wondering whether there is something wrong with the code, or whether something needs to be changed in the Dropbox settings. I have been using this code as a guide: https://github.com/dropbox/dropbox-sdk-js/blob/master/examples/javascript/upload/index.html#L2
uploadToDropbox: function (path, file) {
  var dbx = this.dropbox()
  console.log("File upload .. " + path)
  console.log("File upload .. " + file)
  console.log(UPLOAD_FILE_SIZE_LIMIT)

  if (file.size < UPLOAD_FILE_SIZE_LIMIT) {
    this.dropbox().filesUpload({ path: path, contents: file })
      //__PIPELINE\assets\test
      .then(response => {
        console.log(response)
        //this.structure = response.result.entries;
        console.log("This was successful")
      })
      .catch(error => {
        console.log(error)
        console.log("This is an error")
      });
  }
  else {
    // File is bigger than 150 Mb - use filesUploadSession* API
    const maxBlob = 8 * 1000 * 1000; // 8Mb - Dropbox JavaScript API suggested max file / chunk size

    var workItems = [];
    var offset = 0;
    while (offset < file.size) {
      var chunkSize = Math.min(maxBlob, file.size - offset);
      workItems.push(file.slice(offset, offset + chunkSize));
      offset += chunkSize;
    }

    console.log("Work Items : ")
    console.log(workItems)

    const task = workItems.reduce((acc, blob, idx, items) => {
      if (idx == 0) {
        // Starting multipart upload of file
        console.log("idx is 0")
        return acc.then(function () {
          return dbx.filesUploadSessionStart({ close: false, contents: blob })
            .then(response => response.session_id)
        });
      } else if (idx < items.length - 1) {
        console.log("idx is less than items.length")
        // Append part to the upload session
        return acc.then(function (sessionId) {
          var cursor = { session_id: sessionId, offset: idx * maxBlob };
          return dbx.filesUploadSessionAppendV2({ cursor: cursor, close: false, contents: blob }).then(() => sessionId);
        });
      } else {
        // Last chunk of data, close session
        console.log("finishing session")
        return acc.then(function (sessionId) {
          var cursor = { session_id: sessionId, offset: file.size - blob.size };
          var commit = { path: '/' + file.name, mode: 'add', autorename: true, mute: false };
          return dbx.filesUploadSessionFinish({ cursor: cursor, commit: commit, contents: blob });
        });
      }
    }, Promise.resolve());

    task.then(function (result) {
      console.log(result)
      //var results = document.getElementById('results');
      //results.appendChild(document.createTextNode('File uploaded!'));
    }).catch(function (error) {
      console.error(error);
    });
  }
},
The session id was missing in the code. The response object was updated in a new version of the SDK, so the example code doesn't work anymore:
https://github.com/dropbox/dropbox-sdk-js/blob/master/UPGRADING.md#4-updating-the-response-object
The fix is changing this line:
.then(response => response.result.session_id)
Here is a link to a thread on github with the same issue:
https://github.com/dropbox/dropbox-sdk-js/issues/351
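In context, a minimal sketch of starting the session with the new response shape; the helper name startUploadSession is illustrative, and dbx and blob are assumed to come from the surrounding code:

// Sketch: start an upload session and extract the id with the new response shape.
// Assumes `dbx` is an authenticated Dropbox client and `blob` is the first chunk.
function startUploadSession(dbx, blob) {
  return dbx.filesUploadSessionStart({ close: false, contents: blob })
    .then(response => response.result.session_id); // was response.session_id in older SDK versions
}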
I've written an application in node.js consisting of a server and a client for storing/uploading files.
For reproduction purposes, here's a proof of concept using a null write stream in the server and a random read stream in the client.
Using node.js 12.19.0 on Ubuntu 18.04. The client depends on node-fetch v2.6.1.
The issue I have is that after 60 seconds the connection is reset, and I haven't found a way to make this work.
Any ideas are appreciated.
Thank you.
testServer.js
// -- DevNull Start --
var util = require('util')
  , stream = require('stream')
  , Writable = stream.Writable
  , setImmediate = setImmediate || function (fn) { setTimeout(fn, 0) }
  ;

util.inherits(DevNull, Writable);

function DevNull (opts) {
  if (!(this instanceof DevNull)) return new DevNull(opts);
  opts = opts || {};
  Writable.call(this, opts);
}

DevNull.prototype._write = function (chunk, encoding, cb) {
  setImmediate(cb);
}
// -- DevNull End --

const http = require('http');
const server = http.createServer();

server.on('request', async (req, res) => {
  try {
    req.socket.on('end', function() {
      console.log('SOCKET END: other end of the socket sends a FIN packet');
    });
    req.socket.on('timeout', function() {
      console.log('SOCKET TIMEOUT');
    });
    req.socket.on('error', function(error) {
      console.log('SOCKET ERROR: ' + JSON.stringify(error));
    });
    req.socket.on('close', function(had_error) {
      console.log('SOCKET CLOSED. IT WAS ERROR: ' + had_error);
    });

    const writeStream = DevNull();
    const promise = new Promise((resolve, reject) => {
      req.on('end', resolve);
      req.on('error', reject);
    });

    req.pipe(writeStream);
    await promise;

    res.writeHead(200);
    res.end('OK');
  } catch (err) {
    res.writeHead(500);
    res.end(err.message);
  }
});

server.listen(8081)
  .on('listening', () => { console.log('Listening on port', server.address().port); });
testClient.js
// -- RandomStream Start --
var crypto = require('crypto');
var stream = require('stream');
var util = require('util');
var Readable = stream.Readable;

function RandomStream(length, options) {
  // allow calling with or without new
  if (!(this instanceof RandomStream)) {
    return new RandomStream(length, options);
  }
  // init Readable
  Readable.call(this, options);
  // save the length to generate
  this.lenToGenerate = length;
}

util.inherits(RandomStream, Readable);

RandomStream.prototype._read = function (size) {
  if (!size) size = 1024; // default size
  var ready = true;
  while (ready) { // only cont while push returns true
    if (size > this.lenToGenerate) { // only this left
      size = this.lenToGenerate;
    }
    if (size) {
      ready = this.push(crypto.randomBytes(size));
      this.lenToGenerate -= size;
    }
    // when done, push null and exit loop
    if (!this.lenToGenerate) {
      this.push(null);
      ready = false;
    }
  }
};
// -- RandomStream End --

const fetch = require('node-fetch');

const runSuccess = async () => { // Runs in ~35 seconds
  const t = Date.now();
  try {
    const resp = await fetch('http://localhost:8081/test', {
      method: 'PUT',
      body: new RandomStream(256e6) // new RandomStream(1024e6)
    });
    const data = await resp.text();
    console.log(Date.now() - t, data);
  } catch (err) {
    console.warn(Date.now() - t, err);
  }
};

const runFail = async () => { // Fails after 60 seconds
  const t = Date.now();
  try {
    const resp = await fetch('http://localhost:8081/test', {
      method: 'PUT',
      body: new RandomStream(1024e6)
    });
    const data = await resp.text();
    console.log(Date.now() - t, data);
  } catch (err) {
    console.warn(Date.now() - t, err);
  }
};

// runSuccess().then(() => process.exit(0));
runFail().then(() => process.exit(0));
I tried (unsuccessfully) to reproduce what you are seeing based on your code example: neither does the success call complete in ~35 seconds, nor is the error thrown after 60 seconds.
However, that being said, I think what is happening here is that your client is terminating the request.
You can increase the timeout by adding an http agent to the fetch PUT call. You can then set a timeout on that agent.
const http = require('http');
...
const runFail = async () => { // Fails after 60 seconds
  const t = Date.now();
  try {
    const resp = await fetch('http://localhost:8081/test', {
      method: 'PUT',
      body: new RandomStream(1024e6),
      agent: new http.Agent({ keepAlive: true, timeout: 300000 })
    });
    const data = await resp.text();
    console.log(Date.now() - t, data);
  } catch (err) {
    console.warn(Date.now() - t, err);
  }
};
See the fetch docs for adding a custom http(s) agent here.
See the options for creating an http(s) agent here.
This turned out to be a bug in node.js
Discussion here: https://github.com/nodejs/node/issues/35661
I've got a script that adds JSON data from a file to a DynamoDB table. The script uses the "fs" module to open a read stream to the JSON file and retrieve the data line by line. As the data is returned, it's inserted into a DynamoDB table. When the operation ends, an execution summary is given with the number of records processed, successfully inserted, and unsuccessfully inserted. The problem is that the summary executes before the file has been completely processed, so the numbers are wrong.
The script...
ddb_table_has_records(table_name, (err, dat) => {
  if (dat.Count === 0 || force) {
    const transformStream = JSONStream.parse("*");
    const inputStream = fs.createReadStream(import_file);

    let record_position = 0;
    let count_imported_successful = 0;
    let count_imported_fail = 0;

    inputStream.pipe(transformStream).on("data", (Item) => {
      const params = {
        TableName: table_name,
        Item
      }
      ddb_client.put(params, (err, data) => {
        ++record_position;
        if (err) {
          console.error("Unable to add mapping for record " + record_position + ", error = " + err);
          ++count_imported_fail;
        } else {
          console.log("PutItem succeeded " + record_position);
          ++count_imported_successful;
        }
      });
    }).on("close", () => {
      console.log("=".repeat(70));
      console.log(`'Completed: ${import_file}' has been loaded into '${table_name}'.`);
      console.log(`  Record Count: ${record_position}`);
      console.log(`  Imported Record Count: ${count_imported_successful}`);
      console.log(`  Rejected Record Count: ${count_imported_fail}`);
    });
  } else {
    console.log("=".repeat(70));
    console.log(`Completed: Skipping import of '${import_file}' into '${table_name}'.`);
  };
});
When this runs, it looks like the following
PS C:\> node --max-old-space-size=8192 .\try.js 'foo' 'us-west-2' 'development' '.\data.json' true
Target Profile: development
Target Region: us-west-2
Target Table: foo
Source File: .\data.json
Force Import: true
Confirming Table's State...
======================================================================
'Completed: .\data.json' has been loaded into 'foo'.
Record Count: 0
Imported Record Count: 0
Rejected Record Count: 0
PutItem succeeded 1
PutItem succeeded 2
PutItem succeeded 3
PutItem succeeded 4
...
The portion of the code that gets the record counts runs before the inserts complete, so the imported and rejected record counts are always wrong. It looks like the file stream closes while the inserts are still occurring. I've tried changing from the "close" to the "end" event, with the same result.
Test this script with the following call...
node --max-old-space-size=8192 .\data.load.js 'foo' 'us-west-1' 'dev' '.\foo.default.json' true
Here is the content for the script I ultimately used...
'use strict'

if (process.argv.length < 6) {
  throw new Error('Please pass the table-name, aws-Region, aws-Profile, and file-path to the script.');
}

let [, , TableName, Region, Profile, ImportFile, Force] = process.argv;

process.env.AWS_SDK_LOAD_CONFIG = true;
process.env.AWS_PROFILE = Profile;
Force = typeof(Force) !== 'undefined' ? Force : false;

const AWS = require('aws-sdk');
const fs = require('fs');
const JSONStream = require('JSONStream');

AWS.config.update({ region: Region });
const ddbc = new AWS.DynamoDB.DocumentClient();

console.log('Target Profile: ', Profile);
console.log('Target Region: ', Region);
console.log('Target Table: ', TableName);
console.log('Source File: ', ImportFile);
console.log('Force Import: ', Force);

// Returns the number of records in a specified table
const ddb_table_has_items = (TableName) => {
  return new Promise((resolve, reject) => {
    const ddb_query_parameters = { TableName, Select: 'COUNT' }
    ddbc.scan(ddb_query_parameters, (error, data) => {
      (error) ? reject(error) : resolve(data);
    });
  });
};

const ddb_table_upsert_items = (TableName, Item) => {
  return new Promise((resolve, reject) => {
    const ddb_insert_payload = { TableName, Item };
    ddbc.put(ddb_insert_payload, (error, data) => {
      (error) ? reject(error) : resolve(data);
    });
  });
};

const ddb_bulk_load = (TableName, ImportFile) => {
  return new Promise((resolve, reject) => {
    let count_succeeded = 0;
    let count_failed = 0;
    let count_attempted = 0;
    let inserts = [];

    const json_stream = JSONStream.parse("*");
    const source_data_stream = fs.createReadStream(ImportFile);
    const ddb_source_item = source_data_stream.pipe(json_stream);

    ddb_source_item.on("data", (source_data_item) => {
      count_attempted++;

      let ddb_insert = ddb_table_upsert_items(TableName, source_data_item)
        .then((data) => count_succeeded++)
        .catch((error) => count_failed++);

      inserts.push(ddb_insert);
    });

    ddb_source_item.on("end", () => {
      Promise.all(inserts)
        .then(() => {
          resolve({ count_succeeded, count_failed, count_attempted });
        })
        .catch((error) => {
          console.log(error);
          reject(error);
        });
    });

    ddb_source_item.on("error", (error) => {
      reject(error);
    });
  });
};

(async () => {
  try {
    let proceed_with_import = false;

    if (Force.toString().toLowerCase() === 'true') {
      proceed_with_import = true;
    } else {
      const table_scan = await ddb_table_has_items(TableName);
      proceed_with_import = (table_scan.Count === 0);
    }

    if (proceed_with_import) {
      let ddb_inserts = await ddb_bulk_load(TableName, ImportFile);

      console.log("=".repeat(75));
      console.log("Completed: '%s' has been loaded into '%s'.", ImportFile, TableName);
      console.log(" Insert Attempted: %s", ddb_inserts.count_attempted);
      console.log(" Insert Succeeded: %s", ddb_inserts.count_succeeded);
      console.log(" Insert Failed   : %s", ddb_inserts.count_failed);
    }
  } catch (error) {
    console.log(error);
  }
})();
Wrapping each insert in a promise, pushing the insert promises into an array, and using Promise.all on that array did the trick. I execute the Promise.all once we're finished reading from the file, i.e. once the "end" event is emitted on the ddb_source_item stream.
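Stripped of the DynamoDB specifics, the pattern boils down to collecting one promise per insert while the stream is flowing and only settling once Promise.all resolves on the "end" event. A minimal sketch of just that pattern (stream and doInsert are placeholder names, not the actual script above):

// Minimal sketch of the pattern only; `stream` and `doInsert` are placeholders.
function loadAll(stream, doInsert) {
  return new Promise((resolve, reject) => {
    const pending = [];
    // queue one promise per record as it arrives
    stream.on('data', (item) => pending.push(doInsert(item)));
    // only settle once every queued insert has finished
    stream.on('end', () => Promise.all(pending).then(resolve, reject));
    stream.on('error', reject);
  });
}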
I'm reading, bit by bit, all the PNG files inside a directory, and I have to summarize some data in a JSON.
The problem is that, if I understand correctly, the PNG reader emits an async "parsed" event when it finishes. That causes the function to exit before the JSON is populated...
I'm using Node 6.11.5, so I cannot use async/await.
var fs = require('fs'),
    PNG = require('pngjs').PNG;

exports.movie = functions.https.onRequest((req, res) => {
  console.log('********** START FUNCTION ************');

  var movieFolder = 1;
  if (req.query.id) movieFolder = '../movies/' + req.query.id + '/png/';

  var exitJson = [];

  fs.readdir(movieFolder, (err, files) => {
    files.forEach((file) => {
      fs.createReadStream(movieFolder + file)
        .pipe(new PNG({
          filterType: 1
        }))
        .on('parsed', function () {
          console.log('Parsing: ' + movieFolder + file);
          exitJson.push({
            width: this.width,
            height: this.height,
            data: []
          });
        });
    });
  });

  console.log('************* FINISHED *************');
  res.status(200).json(exitJson);
});
You can use a simple itemsProcessed counter to detect when all of your callbacks have resolved.
var movieFolder = 1;
if (req.query.id) movieFolder = '../movies/' + req.query.id + '/png/';

var exitJson = [];
var itemsProcessed = 0;

fs.readdir(movieFolder, (err, files) => {
  files.forEach((file) => {
    fs.createReadStream(movieFolder + file)
      .pipe(new PNG({
        filterType: 1
      }))
      .on('parsed', function () {
        console.log('Parsing: ' + movieFolder + file);
        exitJson.push({
          width: this.width,
          height: this.height,
          data: []
        });
        itemsProcessed++;
        if (itemsProcessed === files.length) {
          console.log('************* FINISHED *************');
          res.status(200).json(exitJson);
        }
      });
  });
});
You can load the files one by one using recursive calls.
Don't forget to check for errors.
exports.movie = functions.https.onRequest((req, res) => {
  var movieFolder = 1;
  if (req.query.id)
    movieFolder = '../movies/' + req.query.id + '/png/';

  var exitJson = [];

  fs.readdir(movieFolder, function (err, files) {
    var sendError = (err) => res.status(500).send(err.message);
    if (err) return sendError(err);

    function loadFile (i) {
      if (i == files.length)
        return res.status(200).json(exitJson); // !!!DONE!!!

      var file = files[i];
      fs.createReadStream(movieFolder + file)
        .pipe(new PNG({ filterType: 1 }))
        .on('parsed', function () {
          console.log('Parsing: ' + movieFolder + file);
          exitJson.push({ width: this.width, height: this.height, data: [] });
          loadFile(i + 1); // go to next file
        })
        .on('error', sendError);
    }

    loadFile(0); // start recursion
  });
});
const fs = require('fs');
const PNG = require('pngjs').PNG;
const functions = require('firebase-functions');
/**
 * Using a new Promise, we can perform multiple async tasks all contained
 * within that one Promise, which can be resolved or rejected. We read the
 * folder directory for its files and pass them on to our promised 'readFiles'.
 */
function readMovieFiles(folder) { console.log('readMovieFiles', folder)
  return new Promise((res, rej) => {
    fs.readdir(folder, (err, files) => {
      readFiles(files, folder).then(res).catch(rej)
    });
  });
}

/**
 * Given an array of file names within a folder, we map each name to a file
 * promise and let Promise.all collect the results once every file has been
 * read and parsed.
 */
function readFiles(files, folder) { console.log('readFiles', folder, files)
  return Promise.all(files.map(name => readFile(folder + name)));
}

/**
 * We read a file and, in the 'parsed' callback, call res() with an object
 * holding the parsed file's width and height.
 */
function readFile(path) { console.log('readFile', path)
  return new Promise((res, rej) => {
    fs.createReadStream(path)
      .pipe(new PNG({ filterType: 1 }))
      .on('parsed', function() {
        console.log('parsedFile', path)
        res({
          data: [],
          width: this.width,
          height: this.height
        });
      });
  });
}

exports.movie = functions.https.onRequest((req, res) => {
  console.log('********** START FUNCTION ************');
  if (!req.query.id) req.query.id = 1;

  readMovieFiles(`../movies/${req.query.id}/png/`).then(exitJson => {
    res.status(200).json(exitJson);
  }).catch(error => {
    res.status(500).json(error);
  });

  console.log('************* FINISHED *************');
});
Initially, I implemented the file loading like this:
export function сonvertFilesToByteArray(e) {
  const MAX_FILE_SIZE = 1024 * 1024 * 50; // 50MB
  const files = Object.keys(e.target.files);
  const asyncReadFile = eachFile =>
    new Promise((resolve, reject) => {
      if (e.target.files[eachFile].size > MAX_FILE_SIZE) {
        return reject([{ message: `File ${e.target.files[eachFile].name} too large` }]);
      }
      const reader = new FileReader();
      const targetFileInfo = {
        contentType: e.target.files[eachFile].type,
        filename: e.target.files[eachFile].name,
      };
      reader.readAsArrayBuffer(e.target.files[eachFile]);
      reader.onload = () => {
        resolve({ ...targetFileInfo, body: Array.from(new Uint8Array(reader.result)) });
      };
      reader.onerror = error => reject(error);
    });
  return Promise.all(files.map(asyncReadFile));
}
Here the files constant holds the keys of the selected files, and I apply the read function to each of them.
Then I receive the file(s) in the component:
handleFileUpload = (e) => {
  сonvertFilesToByteArray(e)
    .then((result) => {
      runInAction(() => {
        this.files = [
          ...this.files,
          ...result,
        ];
      });
    })
    .catch(err => runInAction(() => {
      this.errors = [...this.errors, err[0].message];
    }));
}
The results end up in this.files, so this.files finally looks like [{ contentType: 'plain/text', filename: 'blabla', body: [123, 456, 23, ...] }], where [123, 456, 23, ...] is my ArrayBuffer.
But with this approach, despite the fact that I use Promise.all, the page freezes when loading a file (or files) heavier than roughly 2 MB; it becomes impossible to interact with it in any way (although I can still scroll). The only fix I've been able to think of is to split each file into chunks.
OK, so I tried to rewrite the code to use chunks:
export function сonvertFilesToByteArray(e) {
  const MAX_FILE_SIZE = 1024 * 1024 * 50; // 50MB
  const files = Object.keys(e.target.files);
  const asyncReadFile = eachFile =>
    new Promise((resolve, reject) => {
      if (e.target.files[eachFile].size > MAX_FILE_SIZE) {
        return reject([{ message: `File ${e.target.files[eachFile].name} too large` }]);
      }
      const file = e.target.files[eachFile];
      let offset = 0;
      console.log(offset, 'offset', file.size, 'size');
      const defaultChunkSize = 64 * 1024; // bytes
      const fileReader = new FileReader();
      const blob = file.slice(offset, offset + defaultChunkSize);
      const isEndOfFile = () => offset >= file.size;
      const testEndOfFile = () => {
        if (isEndOfFile()) {
          console.log('Done reading file');
        }
      };
      fileReader.readAsArrayBuffer(blob);
      fileReader.onloadend = (event) => {
        const target = (event.target);
        if (target.error == null) {
          const result = target.result;
          offset += result.length;
          testEndOfFile();
          console.log(result, 'result');
          resolve(result);
        } else {
          reject(target.error);
        }
      };
    });
  return Promise.all(files.map(asyncReadFile));
}
Here I receive the file and slice it. But the problem is that if the file is larger than one chunk, I have to put it back together from the chunks again, and I can't work out how to do that in my case...
Please help me :) What do I need to do to read the file in chunks and receive it as an ArrayBuffer?
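For illustration, here is a rough sketch of one way to read a file chunk by chunk and stitch the slices back into a single ArrayBuffer; the helper name (readFileInChunks) and the chunk size are assumptions, not part of the component above:

// Rough sketch: read `file` in 64 KB slices and reassemble them into one ArrayBuffer.
// Note that an ArrayBuffer exposes `byteLength`, not `length`.
function readFileInChunks(file, chunkSize = 64 * 1024) {
  return new Promise((resolve, reject) => {
    const chunks = [];
    let offset = 0;
    const reader = new FileReader();

    const readNext = () => {
      reader.readAsArrayBuffer(file.slice(offset, offset + chunkSize));
    };

    reader.onerror = () => reject(reader.error);
    reader.onload = (event) => {
      const chunk = new Uint8Array(event.target.result);
      chunks.push(chunk);
      offset += chunk.byteLength;
      if (offset < file.size) {
        readNext(); // keep reading the next slice
      } else {
        // concatenate every chunk into a single Uint8Array and hand back its buffer
        const total = chunks.reduce((sum, c) => sum + c.byteLength, 0);
        const merged = new Uint8Array(total);
        let position = 0;
        chunks.forEach((c) => { merged.set(c, position); position += c.byteLength; });
        resolve(merged.buffer);
      }
    };

    readNext();
  });
}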