JavaScript: fetch an image and create base64

I'm using Cloudflare Workers and I want to fetch an image and then generate a base64 representation of it.
So something like this:
const res = await fetch('https://cdn.cnn.com/cnnnext/dam/assets/211010073527-tyson-fury-exlarge-169.jpg')
const blob = await res.blob();
console.log(blob)
console.log(btoa(blob))
This of course doesn't work. Any ideas how to get this resolved?
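btoa expects a (binary) string, so passing a Blob just encodes the literal text "[object Blob]". The fix is to read the bytes first and build a binary string from them; a minimal sketch of the same idea the worker script below uses:
const res = await fetch('https://cdn.cnn.com/cnnnext/dam/assets/211010073527-tyson-fury-exlarge-169.jpg');
const buf = await res.arrayBuffer();
let binary = '';
new Uint8Array(buf).forEach(byte => { binary += String.fromCharCode(byte); });
const base64 = btoa(binary); // base64 representation of the image bytes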

Complete Cloudflare Worker script using btoa:
addEventListener('fetch', event => {
  event.respondWith(handleRequest(event.request))
})

const imageUrl =
  'https://upload.wikimedia.org/wikipedia/commons/thumb/7/72/' +
  'Cat_playing_with_a_lizard.jpg/1200px-Cat_playing_with_a_lizard.jpg';

function base64Encode (buf) {
  let string = '';
  (new Uint8Array(buf)).forEach(
    (byte) => { string += String.fromCharCode(byte) }
  )
  return btoa(string)
}

function base64Decode (string) {
  string = atob(string);
  const
    length = string.length,
    buf = new ArrayBuffer(length),
    bufView = new Uint8Array(buf);
  for (var i = 0; i < length; i++) { bufView[i] = string.charCodeAt(i) }
  return buf
}

async function handleRequest(request) {
  const
    upstreamResponse = await fetch(imageUrl),
    text = base64Encode(await upstreamResponse.arrayBuffer()),
    bin = base64Decode(text);
  return new Response(bin, {status: 200, headers: {
    'Content-type': upstreamResponse.headers.get('Content-type')
  }})
}
You can refer to this discussion as well.
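If what you actually need is the base64 string itself (for example as a data URL) rather than round-tripping back to binary, here is a sketch of a variant handler built on the same base64Encode helper (the image/jpeg fallback is an assumption):
async function handleRequest(request) {
  const upstreamResponse = await fetch(imageUrl);
  const contentType = upstreamResponse.headers.get('Content-type') || 'image/jpeg';
  const base64 = base64Encode(await upstreamResponse.arrayBuffer());
  // Return the image as a data URL string instead of raw bytes
  return new Response(`data:${contentType};base64,${base64}`, {
    status: 200,
    headers: { 'Content-type': 'text/plain' }
  });
}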

Related

Parsing multipart result

I am receiving a JSON payload and an image from the backend as multipart/form-data, and I have to parse it on the frontend.
--47f5d03f-8473-428e-a4e7-11e2a0710dc2
�PNG
gAMA���\#�!:����O-Z�h���c�}�Յ��+.���* (etc with this data)
--47f5d03f-8473-428e-a4e7-11e2a0710dc2
{"CustomerId":"123","Title":"This is title","Name":"This is name"}
--47f5d03f-8473-428e-a4e7-11e2a0710dc2--
The JSON is parsed perfectly, but I have a problem with creating the image. The Blob always gives me an error when I try to use it to create an image element. This is how I tried to do it:
API.post(`api/endpoint`, data).then((resp) => {
  const boundIndex = resp.headers['content-type'].indexOf('boundary="') + 'boundary='.length;
  const contentType = resp.headers['content-type'];
  const bound = contentType.slice(boundIndex, contentType.length);
  const boundary = '--' + bound.slice(1, bound.length - 1);
  const splitedResponseData = resp.data.split(boundary);
  const jsonResponse = JSON.parse(splitedResponseData[2]);
  const blob = new Blob([splitedResponseData[1]], { type: 'image/png' });
  const blob1 = new Blob([stringToUint(splitedResponseData[1])], { type: 'image/png' });
});
blob1 uses the function stringToUint() to convert the string to a Uint8Array:
function stringToUint(stringData) {
  const string1 = btoa(unescape(encodeURIComponent(stringData))),
    charList = string1.split(''),
    uintArray = [];

  for (let i = 0; i < charList.length; i++) {
    uintArray.push(charList[i].charCodeAt(0));
  }

  return new Uint8Array(uintArray);
}
Am I doing something wrong, or is there another way I can create the image from the data I get?
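One thing worth noting: once the body has been read into a JavaScript string (axios' default), the PNG bytes are already mangled, so no Blob built from that string can produce a valid image. Here is a sketch that avoids the manual splitting by letting the browser parse the multipart body instead, assuming fetch can be used here and that the parts are named 'data' and 'image' (both names are assumptions about the backend), inside an async function:
const resp = await fetch('api/endpoint', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(data)
});
// formData() understands multipart/form-data and keeps binary parts intact
const form = await resp.formData();
const jsonResponse = JSON.parse(form.get('data')); // hypothetical text part name
const imageBlob = form.get('image');               // hypothetical file part name
const img = document.createElement('img');
img.src = URL.createObjectURL(imageBlob);
document.body.appendChild(img);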

Web Serial API - problem when the stream consists of a mix of ASCII and raw data

I'm trying to parse the following serial input raw data:
[51,45,51,30,33,30,33,00,00,00,C0,50,17,FE,05,00,00,58,02,9A,0E,00,00,00,00,02,00,0D,0A]
It consists of some ASCII chars (0x51, 0x45, 0x51, ...) and some raw data (0x00, 0x00, 0xC0, ...), and it ends with '\r\n' (0x0D, 0x0A).
The end result I'm getting is ('\r\n' is not included):
['51','45','51','30','33','30','33','0','0','0','fffd','50','17','fffd','5','0','0','58','2','fffd','e','0','0','0','0','2','0']
The values at indexes 10, 13 and 19 (C0, FE, 9A) are not displayed correctly (because they are not part of the ASCII table?).
To get the data I use TextDecoderStream, and I'm not sure it's the right decoder, even though the data is partially composed of ASCII chars and ends with '\r\n'.
Is there a more suitable decoder, or is there an error in my code?
Thanks in advance.
// request & open port here.
port = await navigator.serial.requestPort();
await port.open({
  baudrate: 115200,
  baudRate: 115200,
  dataBits: 8,
  stopBits: 1,
  parity: "none",
  flowControl: "none",
});

// setup the output stream here.
const encoder = new TextEncoderStream();
outputDone = encoder.readable.pipeTo(port.writable);
outputStream = encoder.writable;

/**********************************/

// code to read the stream here.
let decoder = new TextDecoderStream();
inputDone = port.readable.pipeTo(decoder.writable);
inputStream = decoder.readable.pipeThrough(
  new TransformStream(new LineBreakTransformer())
);

reader = inputStream.getReader();
readLoop();
}
/**
* Reads data from the input stream and displays it on screen.
*/
async function readLoop() {
  let i = 0;
  while (true) {
    const { value, done } = await reader.read();
    if (value) {
      let arrValue = [...value];
      ascii_to_hexa(arrValue);
      console.log(arrValue);
      if (done) {
        console.log("[readLoop] DONE", done);
        reader.releaseLock();
        break;
      }
    }
  }
}
class LineBreakTransformer {
  constructor() {
    // A container for holding stream data until a new line.
    this.container = "";
  }

  transform(chunk, controller) {
    // Handle incoming chunk
    this.container += chunk;
    const lines = this.container.split("\r\n");
    this.container = lines.pop();
    lines.forEach((line) => controller.enqueue(line));
  }

  flush(controller) {
    // Flush the stream.
    controller.enqueue(this.container);
  }
}

function ascii_to_hexa(arr) {
  for (let i = 0; i < arr.length; i++) {
    arr[i] = Number(arr[i].charCodeAt(0)).toString(16);
  }
}
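Since the frames are a mix of ASCII and raw binary bytes, a text decoder is the wrong tool here: every byte that isn't valid UTF-8 is replaced with U+FFFD, which is exactly the 'fffd' showing up at indexes 10, 13 and 19. A minimal sketch that skips TextDecoderStream and reads the raw bytes, formatting them as hex (inside an async function; splitting frames on 0x0D 0x0A is left out for brevity):
const reader = port.readable.getReader();
while (true) {
  // value is a Uint8Array of raw bytes, untouched by any text decoding
  const { value, done } = await reader.read();
  if (done) {
    reader.releaseLock();
    break;
  }
  const hex = [...value].map(b => b.toString(16).padStart(2, '0'));
  console.log(hex);
}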

js can I read a portion of a file with FileReader? [duplicate]

I have a long file I need to parse. Because it's very long, I need to do it chunk by chunk. I tried this:
function parseFile(file){
  var chunkSize = 2000;
  var fileSize = (file.size - 1);

  var foo = function(e){
    console.log(e.target.result);
  };

  for(var i = 0; i < fileSize; i += chunkSize) {
    (function(fil, start) {
      var reader = new FileReader();
      var blob = fil.slice(start, chunkSize + 1);
      reader.onload = foo;
      reader.readAsText(blob);
    })(file, i);
  }
}
After running it I see only the first chunk in the console. If I change console.log to a jQuery append to some div, I see only the first chunk in that div as well. What about the other chunks? How do I make it work?
The FileReader API is asynchronous, so you should handle it with chunk-by-chunk callbacks. A for loop wouldn't do the trick, since it wouldn't wait for each read to complete before starting the next one.
Here's a working approach.
function parseFile(file, callback) {
  var fileSize = file.size;
  var chunkSize = 64 * 1024; // bytes
  var offset = 0;
  var self = this; // we need a reference to the current object
  var chunkReaderBlock = null;

  var readEventHandler = function(evt) {
    if (evt.target.error == null) {
      offset += evt.target.result.length;
      callback(evt.target.result); // callback for handling read chunk
    } else {
      console.log("Read error: " + evt.target.error);
      return;
    }
    if (offset >= fileSize) {
      console.log("Done reading file");
      return;
    }

    // off to the next chunk
    chunkReaderBlock(offset, chunkSize, file);
  }

  chunkReaderBlock = function(_offset, length, _file) {
    var r = new FileReader();
    var blob = _file.slice(_offset, length + _offset);
    r.onload = readEventHandler;
    r.readAsText(blob);
  }

  // now let's start the read with the first block
  chunkReaderBlock(offset, chunkSize, file);
}
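For example, wiring it up to a file input could look like this (the element id and what you do with each chunk are placeholders):
document.getElementById('file-input').addEventListener('change', function (e) {
  parseFile(e.target.files[0], function (chunk) {
    console.log('read a chunk of', chunk.length, 'characters');
  });
});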
You can take advantage of Response (part of the Fetch API) to convert most things to anything else (blob, text, json) and also to get a ReadableStream that helps you read the blob in chunks 👍
var dest = new WritableStream({
  write (str) {
    console.log(str)
  }
})

var blob = new Blob(['bloby']);

(blob.stream ? blob.stream() : new Response(blob).body)
  // Decode the binary-encoded response to string
  .pipeThrough(new TextDecoderStream())
  .pipeTo(dest)
  .then(() => {
    console.log('done')
  })
Old answer (WritableStream's pipeTo and pipeThrough were not implemented at the time):
I came up with an interesting idea that is probably very fast, since it converts the blob to a ReadableByteStreamReader. It is probably much easier too, since you don't need to handle things like chunk size and offset and then do it all recursively in a loop.
function streamBlob(blob) {
  const reader = new Response(blob).body.getReader()
  const pump = reader => reader.read()
    .then(({ value, done }) => {
      if (done) return
      // uint8array chunk (use TextDecoder to read as text)
      console.log(value)
      return pump(reader)
    })
  return pump(reader)
}

streamBlob(new Blob(['bloby'])).then(() => {
  console.log('done')
})
The second argument of slice is actually the end byte. Your code should look something like:
function parseFile(file){
  var chunkSize = 2000;
  var fileSize = (file.size - 1);

  var foo = function(e){
    console.log(e.target.result);
  };

  for(var i = 0; i < fileSize; i += chunkSize) {
    (function(fil, start) {
      var reader = new FileReader();
      var blob = fil.slice(start, chunkSize + start);
      reader.onload = foo;
      reader.readAsText(blob);
    })(file, i);
  }
}
Or you can use this BlobReader for an easier interface:
BlobReader(blob)
.readText(function (text) {
console.log('The text in the blob is', text);
});
More information:
README.md
Docs
Revamped @alediaferia's answer as a class (TypeScript version here), returning the result in a promise. Brave coders would even have wrapped it into an async iterator (a sketch follows the usage example below)…
class FileStreamer {
  constructor(file) {
    this.file = file;
    this.offset = 0;
    this.defaultChunkSize = 64 * 1024; // bytes
    this.rewind();
  }

  rewind() {
    this.offset = 0;
  }

  isEndOfFile() {
    return this.offset >= this.getFileSize();
  }

  readBlockAsText(length = this.defaultChunkSize) {
    const fileReader = new FileReader();
    const blob = this.file.slice(this.offset, this.offset + length);

    return new Promise((resolve, reject) => {
      fileReader.onloadend = (event) => {
        const target = (event.target);
        if (target.error == null) {
          const result = target.result;
          this.offset += result.length;
          this.testEndOfFile();
          resolve(result);
        }
        else {
          reject(target.error);
        }
      };

      fileReader.readAsText(blob);
    });
  }

  testEndOfFile() {
    if (this.isEndOfFile()) {
      console.log('Done reading file');
    }
  }

  getFileSize() {
    return this.file.size;
  }
}
Example printing a whole file in the console (within an async context)
const fileStreamer = new FileStreamer(aFile);

while (!fileStreamer.isEndOfFile()) {
  const data = await fileStreamer.readBlockAsText();
  console.log(data);
}
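And the async-iterator wrapper hinted at above, as a sketch on top of the same FileStreamer:
async function* streamChunks(file, chunkSize) {
  const streamer = new FileStreamer(file);
  while (!streamer.isEndOfFile()) {
    yield await streamer.readBlockAsText(chunkSize);
  }
}

// Usage (within an async context):
for await (const chunk of streamChunks(aFile, 64 * 1024)) {
  console.log(chunk);
}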
Parsing a large file into small chunks using a simple method:
// Parse a large file into small chunks
var parseFile = function (file) {
  var chunkSize = 1024 * 1024 * 16; // 16 MB chunk size
  var fileSize = file.size;
  var currentChunk = 1;
  var totalChunks = Math.ceil(fileSize / chunkSize);

  while (currentChunk <= totalChunks) {
    var offset = (currentChunk - 1) * chunkSize;
    var currentFilePart = file.slice(offset, (offset + chunkSize));

    console.log('Current chunk number is ', currentChunk);
    console.log('Current chunk data', currentFilePart);

    currentChunk++;
  }
};
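Note that file.slice only creates Blob references; nothing is actually read until you ask for the bytes. With the promise-based Blob methods this stays compact (a sketch, assuming text content and an async context):
async function parseFileChunks(file, chunkSize = 1024 * 1024 * 16) {
  for (let offset = 0; offset < file.size; offset += chunkSize) {
    // text() (or arrayBuffer()) is what actually reads the sliced chunk
    const chunk = await file.slice(offset, offset + chunkSize).text();
    console.log('chunk starting at', offset, 'has length', chunk.length);
  }
}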

Map function returns an empty array

I was trying to convert a blob to base64 and I found my way around, but while waiting for the result from the function displayBase64String, the map call in submitOffre returns an empty array even though console.log prints some data.
I'll appreciate any solution.
Here is my code:
submitOffre = (saleData) => {
  debugger;
  var result = base64Service.displayBase64String(saleData);
  console.log("========", result);
  const rs = result.map(value => value.file); // Doesn't work.
  console.log(rs); // rs is empty
}
class Base64Service {
  blobToBase64 = (blob, callback) => {
    var reader = new FileReader();
    var data = '';
    reader.onload = function () {
      var dataUrl = reader.result;
      var base64 = dataUrl.split(',')[1];
      callback(base64);
    };
    reader.readAsDataURL(blob);
  }

  displayBase64String(formProps) {
    const result = [];
    const outbut = Object.entries(formProps.imageToUpload).map(([key, value]) => {
      this.blobToBase64(value, (data) => {
        result.push({ "file": `data:${value.type};base64,${data}` })
      })
    });
    return result;
  };
}

export default new Base64Service();
Something like this might help:
I've modified your code a bit, just to show you the basic pattern.
If you're converting more than one image at a time, you will need Promise.all to keep track of more than one promise at once (a sketch follows the answer code below).
submitOffre = async (saleData) => { // SEE THE async KEYWORD
  debugger;
  var result = await blobToBase64(saleData); // SEE THE await KEYWORD
  console.log("========", result);
  const rs = result.map(value => value.file); // Doesn't work.
  console.log(rs); // rs is empty
}
I'll treat this as if you were converting only one image.
blobToBase64 = (blob, callback) => new Promise((resolve, reject) => {
  var reader = new FileReader();
  var data = '';
  reader.onload = function () {
    var dataUrl = reader.result;
    var base64 = dataUrl.split(',')[1];
    callback(base64);
    resolve(base64); // NOTE THE resolve() FUNCTION TO RETURN SOME VALUE TO THE await
  };
  reader.readAsDataURL(blob);
});
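And the multi-image case mentioned above, as a sketch of displayBase64String using Promise.all and relying on the promise-returning blobToBase64 just shown (imageToUpload is the field name from the question):
async function displayBase64String(formProps) {
  const entries = Object.entries(formProps.imageToUpload);
  return Promise.all(entries.map(async ([key, value]) => {
    // pass a no-op callback; the promise carries the result
    const data = await blobToBase64(value, () => {});
    return { file: `data:${value.type};base64,${data}` };
  }));
}

// Usage:
// const result = await displayBase64String(saleData);
// const rs = result.map(value => value.file);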
