I was trying to convert a blob to base64 and I found my way around, but while waiting for the result from the function displayBase64String, the map call in submitOffre returns an empty array even though console.log prints some data.
I'd appreciate any solution.
Here is my code:
submitOffre = (saleData) => {
  debugger;
  var result = base64Service.displayBase64String(saleData);
  console.log("========", result);
  const rs = result.map(value => value.file); // Doesn't work.
  console.log(rs); // rs is empty
}
class Base64Service {
  blobToBase64 = (blob, callback) => {
    var reader = new FileReader();
    var data = '';
    reader.onload = function () {
      var dataUrl = reader.result;
      var base64 = dataUrl.split(',')[1];
      callback(base64);
    };
    reader.readAsDataURL(blob);
  }

  displayBase64String(formProps) {
    const result = [];
    const outbut = Object.entries(formProps.imageToUpload).map(([key, value]) => {
      this.blobToBase64(value, (data) => {
        result.push({ "file": `data:${value.type};base64,${data}` })
      })
    });
    return result;
  };
}

export default new Base64Service();
Something like this might help.
I've modified your code a bit, just to show you the basic pattern.
If you're converting more than one image at a time, you will need Promise.all to keep track of several promises at once (see the sketch after the Promise-based helper below).
submitOffre = async (saleData) => { // SEE THE async KEYWORD
  debugger;
  var result = await blobToBase64(saleData); // SEE THE await KEYWORD
  console.log("========", result); // result is now the base64 string for the single image
}
Here I'll treat it as if you were converting only one image.
blobToBase64 = (blob, callback) => new Promise((resolve, reject) => {
  var reader = new FileReader();
  reader.onload = function () {
    var dataUrl = reader.result;
    var base64 = dataUrl.split(',')[1];
    if (callback) callback(base64); // keep supporting the old callback style if one is passed
    resolve(base64); // NOTE THE resolve() FUNCTION TO RETURN SOME VALUE TO THE await
  };
  reader.onerror = reject;
  reader.readAsDataURL(blob);
});
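For several images at once, a minimal sketch of the Promise.all variant (assuming the same formProps.imageToUpload shape as in your question) could look like this:

const displayBase64String = (formProps) =>
  Promise.all(
    Object.values(formProps.imageToUpload).map(async (blob) => {
      const base64 = await blobToBase64(blob);
      return { file: `data:${blob.type};base64,${base64}` };
    })
  );

// Usage inside submitOffre:
// const result = await displayBase64String(saleData);
// const rs = result.map(value => value.file);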
I'm uploading and then reading a CSV file, but I'm facing an issue while splitting it. Some column values in the CSV contain ',', so when I split the columns on ',' I don't get the full value. Please suggest a proper way to handle this. Thanks
const readCsv = (file) => {
  const reader = new FileReader();
  const filetext = reader.readAsBinaryString(file);
  reader.addEventListener('load', function (e) {
    const data = e.target.result;
    let parsedata = [];
    let newLinebrk = data.split('\n');
    for (let i = 0; i < newLinebrk.length; i++) {
      parsedata.push(newLinebrk[i].split(','));
    }
    console.log("parsedData: ", parsedata);
  });
};
CSV:

column1    column2
test       lorem, ipsum, dummy/text

After splitting:

['test', 'lorem', 'ipsum', 'dummy/text']

So by splitting on ',' I'm unable to get the full value of a column that contains commas in its string.
In my case, I used Papa Parse, which fulfills all my requirements.
// assuming Papa Parse has been imported, e.g.: import Papaparse from 'papaparse';
const readCsv = (file) => {
  const reader = new FileReader();
  reader.readAsBinaryString(file);
  reader.addEventListener('load', function (e) {
    const data = e.target.result;
    Papaparse.parse(data, {
      complete: function (results) {
        console.log("results: ", results.data);
      },
    });
  });
};
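A quick illustration (with made-up inline data, not from the question) of why this fixes the comma problem: Papa Parse keeps quoted fields intact instead of splitting them on every comma.

Papaparse.parse('column1,column2\ntest,"lorem, ipsum, dummy/text"', {
  header: true,
  complete: function (results) {
    // [{ column1: 'test', column2: 'lorem, ipsum, dummy/text' }]
    console.log(results.data);
  },
});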
I am attempting to convert an array of localhost URLs to base64 strings.
let uploadedBase64Images = [];
for (let i = 0; i < urls.length; i++) {
  let img = await fetch(urls[i]);
  let imgBlob = await img.blob();
  let reader = new FileReader();
  let base64;
  reader.readAsDataURL(imgBlob);
  reader.onloadend = () => {
    console.log(reader.result)
    base64 = reader.result;
    console.log(base64)
    uploadedBase64Images.push(base64)
  };
}
console.log(uploadedBase64Images)
One thing I noticed is that the console.log(uploadedBase64Images) always prints before console.log(base64). This code block is wrapped in an async function as well. I've tried many other ways but at the end of the day, uploadedBase64Images is always empty.
When I move uploadedBase64Images.push(base64) outside of reader.onloadend, i.e.:
reader.onloadend = () => {
  console.log(reader.result)
  base64 = reader.result;
  console.log(base64)
};
uploadedBase64Images.push(base64)
uploadedBase64Images is [undefined], which leads me to believe that the Promise isn't being resolved?
I appreciate any help on this, thanks in advance!
From what I see, the problem is reader.onloadend: it's a callback that runs later, in its own turn of the event loop, so it doesn't follow the async/await flow of the surrounding function.
So wrapping the reader in a Promise and waiting for its result before doing anything else should solve your problem:
// wrapping reader in a Promise
const convertImageToBase64 = async (imgBlob) => {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.readAsDataURL(imgBlob);
    reader.onloadend = () => {
      resolve(reader.result);
    };
    reader.onerror = reject;
  });
}
const uploadedBase64Images = [];
for (let i = 0; i < urls.length; i++) {
  const img = await fetch(urls[i]);
  const imgBlob = await img.blob();
  const base64 = await convertImageToBase64(imgBlob);
  uploadedBase64Images.push(base64);
}
console.log(uploadedBase64Images);
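The loop above converts the images one at a time; if you'd rather run the conversions in parallel, a sketch with Promise.all (reusing the same convertImageToBase64 helper) could look like:

const uploadedBase64Images = await Promise.all(
  urls.map(async (url) => {
    const response = await fetch(url);
    const imgBlob = await response.blob();
    return convertImageToBase64(imgBlob);
  })
);
console.log(uploadedBase64Images);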
I have a long file I need to parse. Because it's very long I need to do it chunk by chunk. I tried this:
function parseFile(file) {
  var chunkSize = 2000;
  var fileSize = (file.size - 1);
  var foo = function (e) {
    console.log(e.target.result);
  };
  for (var i = 0; i < fileSize; i += chunkSize) {
    (function (fil, start) {
      var reader = new FileReader();
      var blob = fil.slice(start, chunkSize + 1);
      reader.onload = foo;
      reader.readAsText(blob);
    })(file, i);
  }
}
After running it I see only the first chunk in the console. If I change console.log to a jQuery append to some div, I still see only the first chunk in that div. What about the other chunks? How do I make this work?
The FileReader API is asynchronous, so you should handle it block by block: a plain for loop won't do the trick, since it doesn't wait for each read to complete before reading the next chunk.
Here's a working approach.
function parseFile(file, callback) {
  var fileSize = file.size;
  var chunkSize = 64 * 1024; // bytes
  var offset = 0;
  var chunkReaderBlock = null;

  var readEventHandler = function (evt) {
    if (evt.target.error == null) {
      offset += evt.target.result.length;
      callback(evt.target.result); // callback for handling the read chunk
    } else {
      console.log("Read error: " + evt.target.error);
      return;
    }
    if (offset >= fileSize) {
      console.log("Done reading file");
      return;
    }
    // off to the next chunk
    chunkReaderBlock(offset, chunkSize, file);
  }

  chunkReaderBlock = function (_offset, length, _file) {
    var r = new FileReader();
    var blob = _file.slice(_offset, length + _offset);
    r.onload = readEventHandler;
    r.readAsText(blob);
  }

  // now let's start the read with the first block
  chunkReaderBlock(offset, chunkSize, file);
}
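A usage sketch (the #file-input element is hypothetical): feed parseFile a File from an <input type="file"> and handle each chunk in the callback.

document.querySelector('#file-input').addEventListener('change', function (e) {
  parseFile(e.target.files[0], function (chunk) {
    console.log('got a chunk of', chunk.length, 'characters');
  });
});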
You can take advantage of Response (part of the Fetch API) to convert most things to anything else (blob, text, json), and also to get a ReadableStream that helps you read the blob in chunks 👍
var dest = new WritableStream({
  write (str) {
    console.log(str)
  }
})

var blob = new Blob(['bloby']);

(blob.stream ? blob.stream() : new Response(blob).body)
  // Decode the binary-encoded response to string
  .pipeThrough(new TextDecoderStream())
  .pipeTo(dest)
  .then(() => {
    console.log('done')
  })
Old answer (from before WritableStream's pipeTo and pipeThrough were implemented):
I came up with an interesting idea that is probably very fast, since it converts the blob to a ReadableStream reader. It's probably much easier too, since you don't need to handle things like chunk size and offset and then do it all recursively in a loop.
function streamBlob(blob) {
  const reader = new Response(blob).body.getReader()
  const pump = reader => reader.read()
    .then(({ value, done }) => {
      if (done) return
      // uint8array chunk (use TextDecoder to read as text)
      console.log(value)
      return pump(reader)
    })
  return pump(reader)
}

streamBlob(new Blob(['bloby'])).then(() => {
  console.log('done')
})
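If you want the chunks as text instead of Uint8Array, a small variation (my sketch, not part of the original answer) decodes each chunk with a shared TextDecoder:

function streamBlobAsText(blob) {
  const decoder = new TextDecoder();
  const reader = new Response(blob).body.getReader();
  const pump = () => reader.read().then(({ value, done }) => {
    if (done) return;
    console.log(decoder.decode(value, { stream: true })); // text chunk
    return pump();
  });
  return pump();
}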
The second argument of slice is actually the end byte. Your code should look something like:
function parseFile(file) {
  var chunkSize = 2000;
  var fileSize = (file.size - 1);
  var foo = function (e) {
    console.log(e.target.result);
  };
  for (var i = 0; i < fileSize; i += chunkSize) {
    (function (fil, start) {
      var reader = new FileReader();
      var blob = fil.slice(start, chunkSize + start);
      reader.onload = foo;
      reader.readAsText(blob);
    })(file, i);
  }
}
Or you can use BlobReader for an easier interface:
BlobReader(blob)
  .readText(function (text) {
    console.log('The text in the blob is', text);
  });
Revamped @alediaferia's answer in a class (TypeScript version here), returning the result in a promise. Brave coders would even wrap it into an async iterator… (a sketch of that follows the usage example below).
class FileStreamer {
  constructor(file) {
    this.file = file;
    this.offset = 0;
    this.defaultChunkSize = 64 * 1024; // bytes
    this.rewind();
  }

  rewind() {
    this.offset = 0;
  }

  isEndOfFile() {
    return this.offset >= this.getFileSize();
  }

  readBlockAsText(length = this.defaultChunkSize) {
    const fileReader = new FileReader();
    const blob = this.file.slice(this.offset, this.offset + length);
    return new Promise((resolve, reject) => {
      fileReader.onloadend = (event) => {
        const target = event.target;
        if (target.error == null) {
          const result = target.result;
          this.offset += result.length;
          this.testEndOfFile();
          resolve(result);
        } else {
          reject(target.error);
        }
      };
      fileReader.readAsText(blob);
    });
  }

  testEndOfFile() {
    if (this.isEndOfFile()) {
      console.log('Done reading file');
    }
  }

  getFileSize() {
    return this.file.size;
  }
}
Example printing a whole file in the console (within an async context):
const fileStreamer = new FileStreamer(aFile);
while (!fileStreamer.isEndOfFile()) {
  const data = await fileStreamer.readBlockAsText();
  console.log(data);
}
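The async iterator hinted at above could look roughly like this sketch, reusing the FileStreamer class as-is:

async function* streamFileAsText(file, chunkSize) {
  const fileStreamer = new FileStreamer(file);
  while (!fileStreamer.isEndOfFile()) {
    yield await fileStreamer.readBlockAsText(chunkSize);
  }
}

// Usage (within an async context):
// for await (const chunk of streamFileAsText(aFile)) { console.log(chunk); }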
Parsing the large file into small chunks using a simple method:
// Parse a large file into small chunks
var parseFile = function (file) {
  var chunkSize = 1024 * 1024 * 16; // 16MB chunk size
  var fileSize = file.size;
  var currentChunk = 1;
  var totalChunks = Math.ceil(fileSize / chunkSize);

  while (currentChunk <= totalChunks) {
    var offset = (currentChunk - 1) * chunkSize;
    var currentFilePart = file.slice(offset, offset + chunkSize);
    console.log('Current chunk number is', currentChunk);
    console.log('Current chunk data', currentFilePart);
    currentChunk++;
  }
};
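Note that file.slice() only creates Blob slices without reading their contents; to actually get the data out of a chunk you would still read it, for example (within an async context, assuming the file contains text):

const text = await currentFilePart.text(); // or pass the slice to a FileReader as in the answers above
console.log('Current chunk text', text);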
In JavaScript the FileReader object doesn't seem to have support for reading just the first line of a file (up to the newline '\n'). I don't want to read in the whole file, to save memory.
Is there a way to do it?
My code (note that the readLine() function does not exist):
self.loadFirstLineFromFile = function (options, callback) {
  var hiddenElement = document.createElement('input');
  hiddenElement.id = 'hidden-tsv-file-loader';
  hiddenElement.type = 'file';
  hiddenElement.accept = options.extension;
  hiddenElement.style.display = 'none';
  hiddenElement.addEventListener('change', function (event) {
    var file = event.target.files[0];
    var reader = new FileReader(file);
    var firstLine;
    firstLine = reader.readLine();
    callback(firstLine);
  });
  document.body.appendChild(hiddenElement);
  hiddenElement.click();
};
There's nothing built in for that, but it's simple to implement:
var file = event.target.files[0];
var sliced = file.slice(0, 2048); // Pick a size that you're ok with

// NOTE: the `await` keyword requires transpiling (Babel) for IE11,
// and must be inside an async function. An alternative is:
// sliced.text().then(function (text) { console.log(text); });
var text = await sliced.text();
console.log(text);
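Since those first 2048 bytes may contain more than one line (or, for a very long line, less than one), a minimal way to pull out just the first line from that text, assuming the slice does contain a newline:

var firstLine = text.split('\n')[0];
callback(firstLine);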
Here's an interface that reads the data from the Blob decoded as text and chunked by a delimiter:
async function* readLines (blob, encoding = 'utf-8', delimiter = /\r?\n/g) {
  const reader = blob.stream().getReader();
  const decoder = new TextDecoder(encoding);

  try {
    let text = '';

    while (true) {
      const { value, done } = await reader.read();
      if (done) break;

      text += decoder.decode(value, { stream: true });
      const lines = text.split(delimiter);
      text = lines.pop();
      yield* lines;
    }

    yield text;
  } finally {
    reader.cancel();
  }
}
We can use this to read a single line and discard the rest without reading the entire file:
hiddenElement.addEventListener('change', async function (event) {
  const file = event.target.files[0];

  for await (const line of readLines(file, 'utf-8', '\n')) {
    callback(line);
    return; // signals reader.cancel() to the async iterator
  }
});
Since I use JavaScript with Knockout, I refactored Patrick's solution into this:
self.loadStream = function (options, callback) {
  var hiddenElement = document.createElement('input');
  hiddenElement.id = 'hidden-tsv-file-loader';
  hiddenElement.type = 'file';
  hiddenElement.accept = options.extension;
  hiddenElement.style.display = 'none';
  hiddenElement.addEventListener('change', function (event) {
    var file = event.target.files[0];
    var reader = file.stream().getReader();
    var decoder = new TextDecoder('utf-8');
    var data;
    var readNextChunk = function () {
      data = reader.read();
      data.then(function (result) {
        if (!result.value) {
          callback({ chunk: '', done: true, shouldStop: true }, file);
        } else {
          var chunk = decoder.decode(result.value, { stream: true });
          var args = {
            chunk: chunk,
            done: result.done,
            shouldStop: true
          };
          callback(args, file);
          if (!result.done && !args.shouldStop) {
            readNextChunk();
          }
        }
      });
    };
    readNextChunk();
    hiddenElement.remove();
  });
  document.body.appendChild(hiddenElement);
  hiddenElement.click();
};
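A usage sketch (the options shape follows the loadFirstLineFromFile code from the question): read only the first chunk, take its first line, and let the default shouldStop: true end the read.

self.loadStream({ extension: '.tsv' }, function (args, file) {
  var firstLine = args.chunk.split('\n')[0];
  console.log('First line of', file.name, 'is', firstLine);
  // leaving args.shouldStop as true means no further chunks are read
});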