Azure Blob Storage multiple simultaneous uploads with progress - JavaScript

I'm trying to upload multiple files simultaneously to Azure Blob Storage from JavaScript. I'm not sure how the parallelism is handled, but I want a separate progress bar for each file/upload/promise.
The progress callback fires, but it only reports loadedBytes, so I need a way to know which progress bar to update. Someone suggested passing an identifier in onload, but there doesn't seem to be an onload event. With the code below, fileIndex is always the last index in the loop.
try {
    console.log("Uploading files…");
    var inputElement = document.getElementById('fileSelector');
    const promises = [];
    for (var fileIndex = 0; fileIndex < inputElement.files.length; fileIndex++) {
        const file = inputElement.files[fileIndex];
        var thisToken = await this.Instance.invokeMethodAsync('jsGetSASToken', file.name);
        var containerURL = new azblob.ContainerURL(thisToken, azblob.StorageURL.newPipeline(new azblob.AnonymousCredential));
        const blockBlobURL = azblob.BlockBlobURL.fromContainerURL(containerURL, file.name);
        var blobUploadOptions = {
            blockSize: 4 * 1024 * 1024, // 4MB block size
            parallelism: 20, // 20 concurrency
            metadata: { 'testindex': fileIndex.toString() },
            progress: function (ev) {
                var percentdone = ((ev.loadedBytes / file.size) * 100);
                // Jumps around because loadedBytes is different for each upload
                document.getElementById('percentdone-' + fileIndex).innerHTML = percentdone.toFixed(2) + "%";
                // fileIndex is always the last item in the loop
            }
        };
        promises.push(
            azblob.uploadBrowserDataToBlockBlob(
                azblob.Aborter.none,
                file,
                blockBlobURL,
                blobUploadOptions
            )
        );
    }
    await Promise.all(promises);
    console.log('Done.');
} catch (error) {
    console.log("File Upload Error");
    console.log(error);
}

It seems this issue is caused by fileIndex. If I use file.name as the identifier instead, everything works as expected. Try the code below:
<html>
<body>
    <button id="select-button">Select and upload files</button>
    <input type="file" id="file-input" multiple style="display: none;" />
    <div id="showProgress"></div>
    <p><b>Status:</b></p>
    <p id="status" style="height:160px; width: 593px; overflow: scroll;"></p>
</body>
<script src="./azure-storage-blob.js" charset="utf-8"></script>
<script>
    const selectButton = document.getElementById("select-button");
    const fileInput = document.getElementById("file-input");
    const status = document.getElementById("status");
    const reportStatus = message => {
        status.innerHTML += `${message}<br/>`;
        status.scrollTop = status.scrollHeight;
    }

    const accountName = "storage account";
    const sasString = "sas token";
    const containerName = "container";
    const containerURL = new azblob.ContainerURL(
        `https://${accountName}.blob.core.windows.net/${containerName}?${sasString}`,
        azblob.StorageURL.newPipeline(new azblob.AnonymousCredential));

    const uploadFiles = async () => {
        try {
            reportStatus("Uploading files...");
            const promises = [];
            for (var fileIndex = 0; fileIndex < fileInput.files.length; fileIndex++) {
                const file = fileInput.files[fileIndex];
                const blockBlobURL = azblob.BlockBlobURL.fromContainerURL(containerURL, file.name);
                document.getElementById('showProgress').innerHTML += file.name + ":<div id='progress-" + file.name + "'></div>";
                var blobUploadOptions = {
                    blockSize: 4 * 1024 * 1024, // 4MB block size
                    parallelism: 20, // 20 concurrency
                    metadata: { 'testindex': fileIndex.toString() },
                    progress: function (ev) {
                        var percentdone = ((ev.loadedBytes / file.size) * 100);
                        var progressItem = document.getElementById('progress-' + file.name);
                        progressItem.innerHTML = percentdone.toFixed(2) + "%";
                    }
                };
                var promise = azblob.uploadBrowserDataToBlockBlob(
                    azblob.Aborter.none, file, blockBlobURL, blobUploadOptions);
                promise.then((result) => {
                    var progressItem = document.getElementById('progress-' + file.name);
                    progressItem.innerHTML += " file link";
                });
                promises.push(promise);
            }
            await Promise.all(promises);
            reportStatus("Done.");
        } catch (error) {
            console.log(error);
        }
    }

    selectButton.addEventListener("click", () => fileInput.click());
    fileInput.addEventListener("change", uploadFiles);
</script>
</html>
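Note that the root cause in the original snippet is the closure over var fileIndex: every progress callback sees the same variable, which already holds the last index by the time any callback runs. Keying the element IDs by file.name sidesteps that, but declaring the loop variable with let also keeps the original index-based IDs working. A minimal sketch of that variant, reusing inputElement, promises, the SAS-token call, and the percentdone-<index> elements from the question:
// Sketch only: `let` gives each iteration its own fileIndex binding,
// so each progress callback updates its own element.
for (let fileIndex = 0; fileIndex < inputElement.files.length; fileIndex++) {
    const file = inputElement.files[fileIndex];
    const progressElement = document.getElementById('percentdone-' + fileIndex); // element ids from the question
    const token = await this.Instance.invokeMethodAsync('jsGetSASToken', file.name);
    const containerURL = new azblob.ContainerURL(token, azblob.StorageURL.newPipeline(new azblob.AnonymousCredential()));
    const blockBlobURL = azblob.BlockBlobURL.fromContainerURL(containerURL, file.name);
    promises.push(azblob.uploadBrowserDataToBlockBlob(azblob.Aborter.none, file, blockBlobURL, {
        blockSize: 4 * 1024 * 1024,
        parallelism: 20,
        progress: (ev) => {
            progressElement.innerHTML = ((ev.loadedBytes / file.size) * 100).toFixed(2) + "%";
        }
    }));
}
await Promise.all(promises);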

Related

Import two attachments into the same Drive folder (created if it doesn't exist)

I am currently developing an application to make requests. Each request can be accompanied by up to two attachments; they are not mandatory, so there can be none, one, or two.
If there are any, they are saved in a specific folder on Drive. This folder is a child folder created automatically and named after the id of the request.
So I created a script to import the attachments when a button is clicked, since I need the id and url of each attachment to add it to the Sheets file.
However, I have a problem when there are two attachments: it creates two folders with the same name (the id of the request), whereas I would like the attachments to be grouped in the same folder.
Here is the HTML code for the attachments:
<div class="form-row divHtml" id="divPhotos">
    <div class="form-group col-4">
        <label for="photoPres" class="txtPhoto"><b>Photo de près :</b></label><br>
        <input type="file" id="photoPres">
    </div>
    <div class="form-group col-4">
        <label for="photoLoin" class="txtPhoto"><b>Photo de loin :</b></label><br>
        <input type="file" id="photoLoin">
    </div>
</div>
<div class="form-row divHtml">
    <div class="form-group col-8">
        <input type="button" id="btnFichiers" value="IMPORTER LES PHOTOS" onclick="getFiles()">
        <p id="idPhotoPres" class="invisible"></p>
        <p id="idPhotoLoin" class="invisible"></p>
    </div>
</div>
Here is the corresponding JavaScript:
function getFiles() {
    const f = document.getElementById('photoPres');
    if (f.files.length > 0) {
        [...f.files].forEach((file, i) => {
            const fr = new FileReader();
            fr.onload = (e) => {
                const data = e.target.result.split(",");
                const obj = { fileName: f.files[i].name, mimeType: data[0].match(/:(\w.+);/)[1], data: data[1] };
                google.script.run.withSuccessHandler(id => {
                    document.getElementById("idPhotoPres").innerHTML = id;
                }).saveFile(obj);
            }
            fr.readAsDataURL(file);
        });
    }
    const f2 = document.getElementById('photoLoin');
    if (f2.files.length > 0) {
        [...f2.files].forEach((file2, i) => {
            const fr2 = new FileReader();
            fr2.onload = (e) => {
                const data = e.target.result.split(",");
                const obj = { fileName: f2.files[i].name, mimeType: data[0].match(/:(\w.+);/)[1], data: data[1] };
                google.script.run.withSuccessHandler((id) => {
                    document.getElementById("idPhotoLoin").innerHTML = id;
                }).saveFile(obj);
            }
            fr2.readAsDataURL(file2);
        });
    }
}
And here is the Apps Script code for the back-end part:
function saveFile(obj) {
    var pieceJointe = Utilities.newBlob(Utilities.base64Decode(obj.data), obj.mimeType, obj.fileName);
    const dossierDrive = DriveApp.getFolderById('1KRwd79dc8k8EArwHU_1RnkDHd-Sdv0DJ');
    const idDemande = generateId().toString();
    var dossierPieceJointe;
    var idDosssier = folderExists(idDemande);
    if (idDosssier !== false) {
        dossierPieceJointe = DriveApp.getFolderById(idDosssier);
        const pieceJointeAjoutee = dossierPieceJointe.createFile(pieceJointe);
        const informationsPJ = {
            'url': pieceJointeAjoutee.getUrl(),
            'id': pieceJointeAjoutee.getId()
        };
        return informationsPJ;
    } else {
        dossierPieceJointe = dossierDrive.createFolder(idDemande);
        const pieceJointeAjoutee = dossierPieceJointe.createFile(pieceJointe);
        const informationsPJ = {
            'url': pieceJointeAjoutee.getUrl(),
        };
        return informationsPJ;
    }
}

function generateId() {
    const classeur = SpreadsheetApp.getActiveSpreadsheet();
    const webAppSheet = classeur.getSheetByName("Demandes");
    var lastRow = webAppSheet.getLastRow();
    var splitId = []; var id; var lastDemande = "";
    if (lastRow == 1) {
        return (new Date().getFullYear()) + "_1";
    } else {
        lastDemande = webAppSheet.getRange(lastRow, 1).getValue();
        splitId = lastDemande.split('_');
        if (splitId[0] == new Date().getFullYear()) {
            id = lastId + 1;
            feuilleBDD.getRange("I2").setValue(id);
            return (new Date().getFullYear()) + "_" + id;
        } else {
            feuilleBDD.getRange("I2").setValue(1);
            return (new Date().getFullYear()) + "_1";
        }
    }
}

function folderExists(nomDossierPJ) {
    const dossierDrive = DriveApp.getFolderById('1KRwd79dc8k8EArwHU_1RnkDHd-Sdv0DJ');
    var folders = dossierDrive.getFolders();
    var theReturn = false, folderName = "", folder;
    while (folders.hasNext()) {
        folder = folders.next();
        folderName = folder.getName();
        if (folderName === nomDossierPJ) {
            theReturn = folder.getId();
            break;
        }
    }
    return theReturn;
}
Could you please help me to fix this problem? Thanks in advance for your help.
Looking at your script, I suspect the reason for your issue is that FileReader and google.script.run both run asynchronously, so the two saveFile calls can overlap on the server. If my understanding is correct, how about the following modification?
In this modification, your JavaScript is modified as follows.
Modified script:
function getResponse(file) {
    return new Promise((resolve, reject) => {
        const fr = new FileReader();
        fr.onload = e => {
            const data = e.target.result.split(",");
            const obj = { fileName: file.name, mimeType: data[0].match(/:(\w.+);/)[1], data: data[1] };
            google.script.run.withFailureHandler(reject).withSuccessHandler(resolve).saveFile(obj);
        }
        if (file) {
            fr.readAsDataURL(file);
        } else {
            reject("No file");
        }
    });
}

async function getFiles() {
    const f1 = document.getElementById('photoPres');
    const f2 = document.getElementById('photoLoin');
    const files = [{ f: f1.files[0], disp: "idPhotoPres" }, { f: f2.files[0], disp: "idPhotoLoin" }];
    for (let i = 0; i < files.length; i++) {
        if (!files[i].f) continue;
        const { url } = await getResponse(files[i].f).catch(err => console.log(err));
        document.getElementById(files[i].disp).innerHTML = url;
    }
}
With this modification, the files are uploaded sequentially: the second saveFile call only starts after the first has returned, so it finds the folder the first call created instead of creating a duplicate. I think this should resolve your issue.
Note:
I'm not sure about your actual situation. If you are using Web Apps, please redeploy after modifying the script so that the Web App reflects the latest version.
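If the two uploads ever need to run in parallel again, the duplicate-folder race can also be guarded on the server side with Apps Script's LockService. The following is only a sketch of that idea (not part of the answer above), reusing the generateId and folderExists helpers from the question:
// Sketch: serialize the folder check/creation so two concurrent saveFile calls
// cannot both create a folder with the same request id.
function saveFileLocked(obj) {
    const lock = LockService.getScriptLock();
    lock.waitLock(30000); // wait up to 30 s for any other saveFile call to finish
    try {
        const dossierDrive = DriveApp.getFolderById('1KRwd79dc8k8EArwHU_1RnkDHd-Sdv0DJ');
        const idDemande = generateId().toString();
        const idDossier = folderExists(idDemande);
        const dossierPieceJointe = idDossier !== false
            ? DriveApp.getFolderById(idDossier)
            : dossierDrive.createFolder(idDemande);
        const pieceJointe = Utilities.newBlob(Utilities.base64Decode(obj.data), obj.mimeType, obj.fileName);
        const pieceJointeAjoutee = dossierPieceJointe.createFile(pieceJointe);
        return { url: pieceJointeAjoutee.getUrl(), id: pieceJointeAjoutee.getId() };
    } finally {
        lock.releaseLock();
    }
}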

Why is writing the same data to IndexedDB a second time consistently slower?

I stored some JPEG files (exactly 350, same files, same size, 336.14 MB total) as Blobs in IndexedDB. The transaction took around 1 second to complete. Then I read all the data from IndexedDB into an array and stored it to IndexedDB again, but this second write takes around 15 seconds. The behavior is consistent. Is anything wrong here? I used performance.now() to measure the time difference.
Files: 350,
Size of each: 937 KB,
Browser: Chrome and Chromium Edge
// Open
var dbOpen = indexedDB.open(INDEXED_DB_NAME, INDEXED_DB_VERSION);
dbOpen.onupgradeneeded = function (e) {
    console.log("onupgradeneeded");
    var store = e.currentTarget.result.createObjectStore(
        IMAGE_DATA_STORE, { autoIncrement: true });
};
dbOpen.onsuccess = function (e) {
    image_data_db = dbOpen.result;
    console.log("indexed DB opened");
};

// Initial write
var inputFiles = document.getElementById('inputFiles');
for (var i = 0; i < inputFiles.files.length; i++) {
    let file = inputFiles.files[i];
    var b = new Blob([file], { type: file.type });
    fileblobs.push(b);
}
StoreIdb(fileblobs); // << First write

// StoreIdb()
t0 = performance.now();
var trx = image_data_db.transaction(IMAGE_DATA_STORE, 'readwrite');
var imagestore = trx.objectStore(IMAGE_DATA_STORE);
for (i = 0; i < fileblobs.length; i++) {
    request = imagestore.add(fileblobs[i]);
    request.onsuccess = function (e) {
        console.log('added');
    };
    request.onerror = function (e) {
        console.error("Request Error", this.error);
    };
}
trx.onabort = function (e) {
    console.error("Exception:", this.error, this.error.name);
};
trx.oncomplete = function (e) {
    console.log('completed');
    t1 = performance.now();
    timetaken = t1 - t0;
};

// Read
var objectStore = image_data_db.transaction(IMAGE_DATA_STORE).objectStore(IMAGE_DATA_STORE);
objectStore.openCursor().onsuccess = function (e) {
    var cursor = e.target.result;
    if (cursor) {
        blobArray.push(cursor.value.blob);
        cursor.continue();
    } else {
        // completed
    }
};
// blobArray is what gets written the second time << Second write
I figured it out. The first write was storing Blobs still backed by the File instances on disk. I changed them to Blobs built from an ArrayBuffer so that the data type is the same in both cases, and now both writes take the same time.
for (var i = 0; i < inputFiles.files.length; i++) {
    let file = inputFiles.files[i];
    file.arrayBuffer().then((arrayBuffer) => {
        let blob = new Blob([new Uint8Array(arrayBuffer)], { type: file.type });
        blobs.push(blob);
        if (blobs.length == inputFiles.files.length) {
            callback(blobs);
        }
    });
}
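Since Blob.prototype.arrayBuffer() returns a promise, the same conversion can be written more compactly with Promise.all instead of counting callbacks. A minimal sketch, assuming inputFiles is the same file input element as above:
// Sketch: convert every selected File into an in-memory Blob in one await.
async function filesToMemoryBlobs(inputFiles) {
    return Promise.all([...inputFiles.files].map(async (file) => {
        const buffer = await file.arrayBuffer();          // read the file's bytes into memory
        return new Blob([buffer], { type: file.type });   // Blob now backed by the in-memory buffer
    }));
}
// Usage: filesToMemoryBlobs(inputFiles).then(StoreIdb);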

js can I read a portion of a file with FileReader? [duplicate]

I have a long file I need to parse. Because it's very long, I need to do it chunk by chunk. I tried this:
function parseFile(file) {
    var chunkSize = 2000;
    var fileSize = (file.size - 1);
    var foo = function (e) {
        console.log(e.target.result);
    };
    for (var i = 0; i < fileSize; i += chunkSize) {
        (function (fil, start) {
            var reader = new FileReader();
            var blob = fil.slice(start, chunkSize + 1);
            reader.onload = foo;
            reader.readAsText(blob);
        })(file, i);
    }
}
After running it I see only the first chunk in the console. If I change console.log to a jQuery append to some div, I see only the first chunk in that div. What about the other chunks? How do I make it work?
The FileReader API is asynchronous, so you should chain the reads in callbacks rather than firing them all from a loop: a plain for loop won't wait for each read to complete before starting the next chunk.
Here's a working approach.
function parseFile(file, callback) {
    var fileSize = file.size;
    var chunkSize = 64 * 1024; // bytes
    var offset = 0;
    var self = this; // we need a reference to the current object
    var chunkReaderBlock = null;

    var readEventHandler = function (evt) {
        if (evt.target.error == null) {
            offset += evt.target.result.length;
            callback(evt.target.result); // callback for handling the read chunk
        } else {
            console.log("Read error: " + evt.target.error);
            return;
        }
        if (offset >= fileSize) {
            console.log("Done reading file");
            return;
        }
        // off to the next chunk
        chunkReaderBlock(offset, chunkSize, file);
    }

    chunkReaderBlock = function (_offset, length, _file) {
        var r = new FileReader();
        var blob = _file.slice(_offset, length + _offset);
        r.onload = readEventHandler;
        r.readAsText(blob);
    }

    // now let's start the read with the first block
    chunkReaderBlock(offset, chunkSize, file);
}
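For reference, a hypothetical call site (not part of the original answer), assuming a file input with id fileInput:
// Sketch: feed each decoded text chunk to whatever parser you have.
const fileInput = document.getElementById('fileInput'); // hypothetical element id
fileInput.addEventListener('change', () => {
    const file = fileInput.files[0];
    if (!file) return;
    parseFile(file, (chunkText) => {
        console.log('Got chunk of length', chunkText.length);
    });
});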
You can take advantage of Response (part of the Fetch API) to convert most things to almost anything else (blob, text, json), and also to get a ReadableStream that lets you read the blob in chunks 👍
var dest = new WritableStream({
    write(str) {
        console.log(str)
    }
})

var blob = new Blob(['bloby']);

(blob.stream ? blob.stream() : new Response(blob).body)
    // Decode the binary-encoded response to a string
    .pipeThrough(new TextDecoderStream())
    .pipeTo(dest)
    .then(() => {
        console.log('done')
    })
Old answer (from before WritableStream's pipeTo and pipeThrough were implemented)
I came up with an interesting idea that is probably very fast, since it converts the blob to a ReadableStream reader. It's probably much easier too, since you don't need to handle things like chunk size and offset and then do it all recursively in a loop.
function streamBlob(blob) {
    const reader = new Response(blob).body.getReader()
    const pump = reader => reader.read()
        .then(({ value, done }) => {
            if (done) return
            // uint8array chunk (use TextDecoder to read as text)
            console.log(value)
            return pump(reader)
        })
    return pump(reader)
}

streamBlob(new Blob(['bloby'])).then(() => {
    console.log('done')
})
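The same reader loop works for a user-selected File, since a File is a Blob. A small sketch of my own (assuming a File object named file) that decodes each chunk to text as it arrives:
// Sketch: stream a File chunk by chunk and decode the bytes to text.
async function streamFileAsText(file) {
    const reader = (file.stream ? file.stream() : new Response(file).body).getReader();
    const decoder = new TextDecoder();
    while (true) {
        const { value, done } = await reader.read();
        if (done) break;
        // stream: true keeps multi-byte characters intact across chunk boundaries
        console.log(decoder.decode(value, { stream: true }));
    }
    console.log('done');
}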
The second argument of slice is actually the end byte. Your code should look something like:
function parseFile(file) {
    var chunkSize = 2000;
    var fileSize = (file.size - 1);
    var foo = function (e) {
        console.log(e.target.result);
    };
    for (var i = 0; i < fileSize; i += chunkSize) {
        (function (fil, start) {
            var reader = new FileReader();
            var blob = fil.slice(start, chunkSize + start);
            reader.onload = foo;
            reader.readAsText(blob);
        })(file, i);
    }
}
Or you can use the BlobReader library for an easier interface:
BlobReader(blob)
    .readText(function (text) {
        console.log('The text in the blob is', text);
    });
More information:
README.md
Docs
Revamped @alediaferia's answer as a class (TypeScript version here), returning the result in a promise. Brave coders would even have wrapped it into an async iterator…
class FileStreamer {
    constructor(file) {
        this.file = file;
        this.offset = 0;
        this.defaultChunkSize = 64 * 1024; // bytes
        this.rewind();
    }

    rewind() {
        this.offset = 0;
    }

    isEndOfFile() {
        return this.offset >= this.getFileSize();
    }

    readBlockAsText(length = this.defaultChunkSize) {
        const fileReader = new FileReader();
        const blob = this.file.slice(this.offset, this.offset + length);
        return new Promise((resolve, reject) => {
            fileReader.onloadend = (event) => {
                const target = (event.target);
                if (target.error == null) {
                    const result = target.result;
                    this.offset += result.length;
                    this.testEndOfFile();
                    resolve(result);
                } else {
                    reject(target.error);
                }
            };
            fileReader.readAsText(blob);
        });
    }

    testEndOfFile() {
        if (this.isEndOfFile()) {
            console.log('Done reading file');
        }
    }

    getFileSize() {
        return this.file.size;
    }
}
Example printing a whole file in the console (within an async context)
const fileStreamer = new FileStreamer(aFile);

while (!fileStreamer.isEndOfFile()) {
    const data = await fileStreamer.readBlockAsText();
    console.log(data);
}
Parsing a large file into small chunks using a simple method:
// Parse a large file into small chunks
var parseFile = function (file) {
    var chunkSize = 1024 * 1024 * 16; // 16MB chunk size
    var fileSize = file.size;
    var currentChunk = 1;
    var totalChunks = Math.ceil(fileSize / chunkSize);

    while (currentChunk <= totalChunks) {
        var offset = (currentChunk - 1) * chunkSize;
        var currentFilePart = file.slice(offset, (offset + chunkSize));
        console.log('Current chunk number is ', currentChunk);
        console.log('Current chunk data', currentFilePart);
        currentChunk++;
    }
};
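Note that file.slice() only creates Blob references to byte ranges; it doesn't read any data. To actually get each chunk's contents you still need to read the slice, for example with the promise-based Blob API. A minimal sketch of that extra step:
// Sketch: read each 16MB slice's text inside an async function.
async function readChunks(file, chunkSize = 1024 * 1024 * 16) {
    for (let offset = 0; offset < file.size; offset += chunkSize) {
        const chunkText = await file.slice(offset, offset + chunkSize).text();
        console.log('Chunk at byte', offset, 'has length', chunkText.length);
    }
}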

Multiple progress bars for multiple files upload firebase

I am working with Firebase Storage and trying to upload multiple files at a time. So far, I get the files and upload them one by one to Firebase, and there is only one progress bar, which shows the progress of each uploading file in turn. What I need is: when I select the files, I want to create as many progress bars as there are files, start the uploads, and have each progress bar show its own file's progress.
What I have done so far is:
var up = document.getElementById("fileUpload"),
    pr = document.getElementsByClassName("progress")[0],
    list = document.getElementsByClassName("list")[0];

// Listen for file selection
up.addEventListener('change', function (e) {
    // Get files
    for (var i = 0; i < e.target.files.length; i++) {
        var imageFile = e.target.files[i];
        uploadImageAsPromise(imageFile, i);
    }
});

// Handle waiting to upload each file using a promise
function uploadImageAsPromise(imageFile, i) {
    return new Promise(function (resolve, reject) {
        var storageRef = firebase.storage().ref($.cookie("_lo") + "/" + imageFile.name);
        // Upload file
        var task = storageRef.put(imageFile);
        // Update progress bar
        task.on('state_changed',
            function progress(snapshot) {
                var percentage = snapshot.bytesTransferred / snapshot.totalBytes * 100;
                pr.value = percentage;
            },
            function error(err) {
            },
            function complete() {
                var downloadURL = task.snapshot.downloadURL;
                console.log("file " + (i + 1) + " Uploaded");
                console.log(downloadURL);
            }
        );
    });
}
This is the quickest solution I came up with:
var up = document.getElementById("fileUpload"),
    pr = document.getElementsByClassName("progress"),
    list = document.getElementsByClassName("list")[0];

// Listen for file selection
up.addEventListener('change', function (e) {
    // Get files
    for (var i = 0; i < e.target.files.length; i++) {
        var imageFile = e.target.files[i];
        list.insertAdjacentHTML('afterbegin', '<li class="row">' +
            '<div class="col-4">' + imageFile.name + '</div>' +
            '<div class="col-4">' + imageFile.size + '</div>' +
            '<progress value="0" max="100" class="progress"></progress>' +
            '</li>');
        uploadImageAsPromise(imageFile, i);
    }
});

// Handle waiting to upload each file using a promise
function uploadImageAsPromise(imageFile, i) {
    return new Promise(function (resolve, reject) {
        var storageRef = firebase.storage().ref($.cookie("_lo") + "/" + imageFile.name);
        // Upload file
        var task = storageRef.put(imageFile);
        // Update progress bar
        task.on('state_changed',
            function progress(snapshot) {
                var percentage = snapshot.bytesTransferred / snapshot.totalBytes * 100;
                pr[i].value = percentage;
            },
            function error(err) {
            },
            function complete() {
                var downloadURL = task.snapshot.downloadURL;
                console.log("file " + (i + 1) + " Uploaded");
                console.log(downloadURL);
            }
        );
    });
}
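A variant that avoids index bookkeeping entirely (my own sketch, not part of the answer above, assuming the same Firebase namespaced SDK and the up/list elements from the question) is to create each row's progress element up front and hand it to the upload helper, so the progress callback updates exactly that element:
// Sketch: one dedicated <progress> element per upload, passed straight to the helper.
function uploadWithProgress(imageFile, progressEl) {
    return new Promise(function (resolve, reject) {
        var storageRef = firebase.storage().ref($.cookie("_lo") + "/" + imageFile.name);
        var task = storageRef.put(imageFile);
        task.on('state_changed',
            function (snapshot) {
                progressEl.value = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
            },
            reject,                                   // upload failed
            function () { resolve(task.snapshot); }   // upload finished
        );
    });
}

up.addEventListener('change', function (e) {
    for (var i = 0; i < e.target.files.length; i++) {
        var imageFile = e.target.files[i];
        var progressEl = document.createElement('progress');
        progressEl.max = 100;
        list.appendChild(progressEl);
        uploadWithProgress(imageFile, progressEl);
    }
});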

