Okay, so I'm converting HTML into PDFs. The PDF returned from my backend I convert to a new Blob with type: 'application/pdf', and this all works fine. I now want to merge multiple Blobs into one, and I'm using the following function to do so.
function ConcatenateBlobs(blobs, type, callback) {
    var buffers = [];
    var index = 0;

    function readAsArrayBuffer() {
        if (!blobs[index]) {
            return concatenateBuffers();
        }
        var reader = new FileReader();
        reader.onload = function (event) {
            buffers.push(event.target.result);
            index++;
            readAsArrayBuffer();
        };
        reader.readAsArrayBuffer(blobs[index]);
    }
    readAsArrayBuffer();

    function concatenateBuffers() {
        var byteLength = 0;
        buffers.forEach(function (buffer) {
            byteLength += buffer.byteLength;
        });

        var tmp = new Uint16Array(byteLength);
        var lastOffset = 0;
        buffers.forEach(function (buffer) {
            // BYTES_PER_ELEMENT == 2 for Uint16Array
            var reusableByteLength = buffer.byteLength;
            if (reusableByteLength % 2 != 0) {
                buffer = buffer.slice(0, reusableByteLength - 1)
            }
            tmp.set(new Uint16Array(buffer), lastOffset);
            lastOffset += reusableByteLength;
        });

        var blob = new Blob([tmp.buffer], {
            type: type
        });
        callback(blob);
    }
}
But for some reason only the last PDF in the array shows up in the result.
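For what it's worth, the bug is in the typed-array bookkeeping: Uint16Array.prototype.set takes an offset in elements, but lastOffset accumulates bytes, and new Uint16Array(byteLength) allocates byteLength two-byte elements rather than byteLength bytes, so every buffer after the first lands in the wrong place (and odd-length buffers silently lose their last byte). A minimal sketch of a fix, assuming plain byte concatenation is the goal; the Blob constructor already accepts an array of Blobs, so the FileReader round-trip isn't needed at all:

function concatenateBlobs(blobs, type, callback) {
    // Blob accepts an array of Blob parts and concatenates their bytes
    callback(new Blob(blobs, { type: type }));
}

(Also worth noting: byte-concatenating PDFs does not produce one valid merged PDF document; a real merge needs a PDF library.)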
Related
I have a long file I need to parse. Because it's very long, I need to do it chunk by chunk. I tried this:
function parseFile(file) {
    var chunkSize = 2000;
    var fileSize = (file.size - 1);

    var foo = function (e) {
        console.log(e.target.result);
    };

    for (var i = 0; i < fileSize; i += chunkSize) {
        (function (fil, start) {
            var reader = new FileReader();
            var blob = fil.slice(start, chunkSize + 1);
            reader.onload = foo;
            reader.readAsText(blob);
        })(file, i);
    }
}
After running it I see only the first chunk in the console. If I change 'console.log' to a jQuery append to some div, I see only the first chunk in that div. What about the other chunks? How can I make it work?
The FileReader API is asynchronous, so you should read the file block by block, chaining the calls. A for loop won't do the trick, since it doesn't wait for each read to complete before kicking off the next one.
Here's a working approach.
function parseFile(file, callback) {
    var fileSize = file.size;
    var chunkSize = 64 * 1024; // bytes
    var offset = 0;
    var chunkReaderBlock = null;

    var readEventHandler = function (evt) {
        if (evt.target.error == null) {
            // NOTE: result is a string here, so this counts characters,
            // which only equals bytes for single-byte encodings
            offset += evt.target.result.length;
            callback(evt.target.result); // callback for handling read chunk
        } else {
            console.log("Read error: " + evt.target.error);
            return;
        }
        if (offset >= fileSize) {
            console.log("Done reading file");
            return;
        }
        // off to the next chunk
        chunkReaderBlock(offset, chunkSize, file);
    }

    chunkReaderBlock = function (_offset, length, _file) {
        var r = new FileReader();
        var blob = _file.slice(_offset, length + _offset);
        r.onload = readEventHandler;
        r.readAsText(blob);
    }

    // now let's start the read with the first block
    chunkReaderBlock(offset, chunkSize, file);
}
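A hypothetical usage sketch (the '#fileInput' element and the logging callback are assumptions, not part of the answer):

var input = document.querySelector('#fileInput'); // assumed <input type="file">
input.addEventListener('change', function () {
    parseFile(this.files[0], function (chunk) {
        console.log('read chunk of length', chunk.length);
    });
});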
You can take advantage of Response (part of the Fetch API) to convert most things to anything else (blob, text, json) and also to get a ReadableStream that can help you read the blob in chunks 👍
var dest = new WritableStream({
    write(str) {
        console.log(str)
    }
})

var blob = new Blob(['bloby']);

(blob.stream ? blob.stream() : new Response(blob).body)
    // Decode the binary-encoded response to string
    .pipeThrough(new TextDecoderStream())
    .pipeTo(dest)
    .then(() => {
        console.log('done')
    })
Old answer (from before WritableStream's pipeTo and pipeThrough were implemented)
I came up with an interesting idea that is probably very fast, since it converts the blob to a ReadableByteStreamReader. It's probably much easier too, since you don't need to handle things like chunk size and offset and then do it all recursively in a loop.
function streamBlob(blob) {
    const reader = new Response(blob).body.getReader()
    const pump = reader => reader.read()
        .then(({ value, done }) => {
            if (done) return
            // uint8array chunk (use TextDecoder to read as text)
            console.log(value)
            return pump(reader)
        })
    return pump(reader)
}

streamBlob(new Blob(['bloby'])).then(() => {
    console.log('done')
})
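For comparison, here is a minimal sketch of the same pump written with async/await instead of recursion; streamBlobAwait is an assumed name, and it assumes an environment with TextDecoder:

async function streamBlobAwait(blob) {
    const reader = new Response(blob).body.getReader()
    const decoder = new TextDecoder()
    while (true) {
        const { value, done } = await reader.read()
        if (done) break
        // decode the Uint8Array chunk to text; stream: true handles
        // multi-byte characters split across chunk boundaries
        console.log(decoder.decode(value, { stream: true }))
    }
}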
The second argument of slice is actually the end byte (exclusive), not the length. Your code should look something like:
function parseFile(file) {
    var chunkSize = 2000;
    var fileSize = (file.size - 1);

    var foo = function (e) {
        console.log(e.target.result);
    };

    for (var i = 0; i < fileSize; i += chunkSize) {
        (function (fil, start) {
            var reader = new FileReader();
            var blob = fil.slice(start, chunkSize + start);
            reader.onload = foo;
            reader.readAsText(blob);
        })(file, i);
    }
}
Or you can use BlobReader for an easier interface:
BlobReader(blob)
    .readText(function (text) {
        console.log('The text in the blob is', text);
    });
More information is in the project's README.md and docs.
Revamped @alediaferia's answer as a class (TypeScript version here), returning the result in a promise. Brave coders would even have wrapped it into an async iterator…
class FileStreamer {
    constructor(file) {
        this.file = file;
        this.offset = 0;
        this.defaultChunkSize = 64 * 1024; // bytes
        this.rewind();
    }

    rewind() {
        this.offset = 0;
    }

    isEndOfFile() {
        return this.offset >= this.getFileSize();
    }

    readBlockAsText(length = this.defaultChunkSize) {
        const fileReader = new FileReader();
        const blob = this.file.slice(this.offset, this.offset + length);

        return new Promise((resolve, reject) => {
            fileReader.onloadend = (event) => {
                const target = event.target;
                if (target.error == null) {
                    const result = target.result;
                    this.offset += result.length;
                    this.testEndOfFile();
                    resolve(result);
                } else {
                    reject(target.error);
                }
            };
            fileReader.readAsText(blob);
        });
    }

    testEndOfFile() {
        if (this.isEndOfFile()) {
            console.log('Done reading file');
        }
    }

    getFileSize() {
        return this.file.size;
    }
}
Example printing a whole file in the console (within an async context):

const fileStreamer = new FileStreamer(aFile);

while (!fileStreamer.isEndOfFile()) {
    const data = await fileStreamer.readBlockAsText();
    console.log(data);
}
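A sketch of the async-iterator wrapping hinted at above, assuming attaching it to the prototype is acceptable; it makes a FileStreamer usable with for await...of:

FileStreamer.prototype[Symbol.asyncIterator] = async function* () {
    this.rewind();
    while (!this.isEndOfFile()) {
        yield await this.readBlockAsText();
    }
};

// usage (within an async context):
// for await (const chunk of new FileStreamer(aFile)) console.log(chunk);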
Parsing a large file into small chunks using a simple method:
// Parse large file into small chunks
var parseFile = function (file) {
    var chunkSize = 1024 * 1024 * 16; // 16MB chunk size
    var fileSize = file.size;
    var currentChunk = 1;
    var totalChunks = Math.ceil(fileSize / chunkSize); // Math.ceil takes a single argument

    while (currentChunk <= totalChunks) {
        var offset = (currentChunk - 1) * chunkSize;
        var currentFilePart = file.slice(offset, offset + chunkSize);
        console.log('Current chunk number is ', currentChunk);
        console.log('Current chunk data', currentFilePart);
        currentChunk++;
    }
};
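Note that file.slice() only creates lightweight Blob references; nothing is actually read until each part is consumed. A sketch of reading one part, assuming a browser where Blob#arrayBuffer() is available (readChunk is an assumed helper name, otherwise fall back to FileReader):

async function readChunk(filePart) {
    const buffer = await filePart.arrayBuffer(); // this is where the bytes are read
    console.log('chunk holds', buffer.byteLength, 'bytes');
}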
I am trying to read an Excel file in JavaScript with the xlsx.js plugin. I can read all cell values except the first one. Here is my code:
let numArr = [];

selectFile(event) {
    let reader = new FileReader();
    reader.onload = () => {
        const data = reader.result;
        const workbook = XLSX.read(data, {
            type: 'binary'
        });
        workbook.SheetNames.forEach((sheetName) => {
            // Here is your object
            const XL_row_object = XLSX.utils.sheet_to_json(workbook.Sheets[sheetName]);
            // var json_object = JSON.stringify(XL_row_object);
            for (let i = 0; i < XL_row_object.length; i++) {
                // this.groupNumsArr.push(XL_row_object[i]['number']);
                const theNum = Object.values(XL_row_object[i])[0].toString();
                numArr.push(theNum);
            }
        });
    };
    reader.readAsBinaryString(event.target.files[0]);
}
The API uses the first row as the keys, so you can try this:
for (let i = 0; i < XL_row_object.length; i++) {
    var theNum = '';
    if (i == 0) {
        // the first row's values became the keys, so recover them from the keys
        theNum = Object.keys(XL_row_object[i])[0].toString();
        numArr.push(theNum);
    }
    // this.groupNumsArr.push(XL_row_object[i]['number']);
    theNum = Object.values(XL_row_object[i])[0].toString();
    numArr.push(theNum);
}
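Alternatively, and this is just a sketch assuming the numbers live in the first column: sheet_to_json accepts { header: 1 }, which returns an array of row arrays, so the first row stays data instead of becoming keys:

const rows = XLSX.utils.sheet_to_json(workbook.Sheets[sheetName], { header: 1 });
rows.forEach((row) => {
    numArr.push(row[0].toString()); // first column of every row, header row included
});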
I am trying to capture images using cordovaCamera and imagePicker, then remove the EXIF data and create a copy of the image after the metadata is removed.
In the following code I get the image file in Blob format, which I pass to resolveLocalFileSystemURL, but I am unable to proceed because resolveLocalFileSystemURL does not accept Blob or base64 data. Is it possible to convert Blob/base64 data to the fileURI format, or is there any alternative way to solve this?
In HTML:
<input id="erd" type="file"/>
In controllers.js:
var input = document.querySelector('#erd');
input.addEventListener('change', load);

function load() {
    var fr = new FileReader();
    fr.onload = process;
    fr.readAsArrayBuffer(this.files[0]);
    console.log(" onload " + URL.createObjectURL(this.files[0]));
}
function process() {
    var dv = new DataView(this.result);
    var offset = 0, recess = 0;
    var pieces = [];
    var i = 0;
    if (dv.getUint16(offset) == 0xffd8) {
        offset += 2;
        var app1 = dv.getUint16(offset);
        offset += 2;
        while (offset < dv.byteLength) {
            if (app1 == 0xffe1) {
                pieces[i] = { recess: recess, offset: offset - 2 };
                recess = offset + dv.getUint16(offset);
                i++;
            } else if (app1 == 0xffda) {
                break;
            }
            offset += dv.getUint16(offset);
            app1 = dv.getUint16(offset);
            offset += 2;
        }
        if (pieces.length > 0) {
            var newPieces = [];
            pieces.forEach(function (v) {
                newPieces.push(this.result.slice(v.recess, v.offset));
            }, this);
            newPieces.push(this.result.slice(recess));
            var br = new Blob(newPieces, { type: 'image/jpeg' });
            console.log(" pieces.length " + URL.createObjectURL(br));
            $scope.strimg = URL.createObjectURL(br).toString();
            //var file = new File(URL.createObjectURL(br), "uploaded_file.jpg", { type: "image/jpeg", lastModified: Date.now() });
            var myFile = blobToFile(br, "my-image.jpg");
            console.log('myFile' + JSON.stringify(myFile));
            $scope.strimg = myFile;
            createFileEntry($scope.strimg);
        }
    }
}

function blobToFile(theBlob, fileName) {
    console.log('theBlob' + theBlob + fileName);
    var b = theBlob;
    b.lastModifiedDate = new Date();
    b.name = fileName; // note: this only decorates the Blob; it does not create a real File
    return theBlob;
}

function createFileEntry(fileURI) {
    window.resolveLocalFileSystemURL(fileURI, copyFile, fail);
}

function copyFile(fileEntry) {
    console.log(" copyFile " + fileEntry);
    var name = fileEntry.fullPath.substr(fileEntry.fullPath.lastIndexOf('/') + 1);
    var newName = name;
    $scope.filepathnew = "file:///storage/emulated/0/Android/data/com.offermanagement.system/files/";
    window.resolveLocalFileSystemURL($scope.filepathnew, function (fileSystem2) {
        fileEntry.copyTo(
            fileSystem2,
            newName,
            onCopySuccess,
            fail
        );
    }, fail);
}

function onCopySuccess(entry) {
    $scope.$apply(function () {
        $rootScope.profilepic = entry.nativeURL;
        $rootScope.images.push(entry.nativeURL);
        $rootScope.items.push({ src: entry.nativeURL, sub: '' });
    });
}

function fail(error) {
    console.log("fail: " + error.code);
}
I get the following error for the above code
"Uncaught TypeError: Wrong type for parameter "uri" of resolveLocalFileSystemURI: Expected String, but got Blob."
I have something like this.
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKQAAACkCAYAAAAZtYVBAAAgAElEQ…TFHIWRhe2sii/S5zA+N/ZF6gI+RiSKspPUBiI1UQf9/wDVxYvlSggfZqgAAAABJRU5Erkaa==="
Is there a way to create a file object with this?
I see the method URL.createObjectURL, but it seems like I need to create a Blob object first, and I don't really know how.
The following function will convert a data URI to a blob.
function dataURIToBlob(dataURI) {
    // Split the data URI up into its parts:
    // data:[<mediatype>][;<charset>],(data)
    var parts = /data:([^;]+)(?:;([^,]+))?,(.+)/.exec(dataURI),
        mime = parts[1],
        charset = parts[2] || 'charset=US-ASCII',
        encodedData = parts[3];

    var data;
    if (charset === 'base64') {
        // If base64, convert to a Uint8Array of character codes
        var decodedData = atob(encodedData);
        data = new Uint8Array(decodedData.length);
        for (var i = 0; i < decodedData.length; i++) {
            data[i] = decodedData.charCodeAt(i);
        }
    } else {
        data = decodeURIComponent(encodedData);
    }
    return new Blob([data], {
        type: mime
    });
}
You can see it working in this snippet, which takes a data URI, converts it into a blob, then creates a blob URI that you can use to access the file. A sample image and a CSV file are provided.
testData = {};
testData['image/gif'] = 'data:image/gif;base64,R0lGODlhEAAQAMQAAORHHOVSKudfOulrSOp3WOyDZu6QdvCchPGolfO0o/XBs/fNwfjZ0frl3/zy7////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAkAABAALAAAAAAQABAAAAVVICSOZGlCQAosJ6mu7fiyZeKqNKToQGDsM8hBADgUXoGAiqhSvp5QAnQKGIgUhwFUYLCVDFCrKUE1lBavAViFIDlTImbKC5Gm2hB0SlBCBMQiB0UjIQA7';
testData['text/csv'] = 'data:text/csv;charset=utf-8,ID%2CScore%2CAssignee%2CCreated%2CComment%0Aid_value0%2Cscore_value0%2Cassignee_value0%2Ccreated_value0%2Ccomment_value0%0Aid_value1%2Cscore_value1%2Cassignee_value1%2Ccreated_value1%2Ccomment_value1%0Aid_value2%2Cscore_value2%2Cassignee_value2%2Ccreated_value2%2Ccomment_value2%0A';

function dataURIToBlob(dataURI) {
    // Split the data URI up into its parts:
    // data:[<mediatype>][;<charset>],(data)
    var parts = /data:([^;]+)(?:;([^,]+))?,(.+)/.exec(dataURI),
        mime = parts[1],
        charset = parts[2] || 'charset=US-ASCII',
        encodedData = parts[3];

    var data;
    if (charset === 'base64') {
        // If base64, convert to a Uint8Array of character codes
        var decodedData = atob(encodedData);
        data = new Uint8Array(decodedData.length);
        for (var i = 0; i < decodedData.length; i++) {
            data[i] = decodedData.charCodeAt(i);
        }
    } else {
        data = decodeURIComponent(encodedData);
    }
    return new Blob([data], {
        type: mime
    });
}

function createBlobURI() {
    var blob = dataURIToBlob(dataURIInput.value);
    var blobURI = URL.createObjectURL(blob);
    blobURIAnchor.href = blobURI;
    blobURIAnchor.innerHTML = blobURI;
    blobURIAnchor.style.display = 'block';
    blobURIAnchor.download = 'blob.' + blob.type.split('/')[1];
}

function insertTestData(mime) {
    dataURIInput.value = testData[mime];
}

insertTestData('image/gif');
<button onclick="insertTestData('image/gif')">Image test</button>
<button onclick="insertTestData('text/csv')">CSV test</button>
<input id="dataURIInput" type="text"/>
<button onclick='createBlobURI()'>Create blob URI</button>
<a id="blobURIAnchor" href="#" style="display:none"></a>
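Incidentally, in modern environments the data-URI-to-Blob conversion is nearly a one-liner, since fetch() accepts data URIs; a sketch, assuming fetch is available (dataURI is the same input as above):

fetch(dataURI)
    .then(function (res) { return res.blob(); })
    .then(function (blob) { console.log(blob.type, blob.size); });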