I am setting up a canvas and then drawing an image into that canvas, followed by calling canvas.toBlob(function(blob)...), but I am finding the blob argument inside the toBlob is sometimes null.
Why would this be? Should I be waiting for something after drawImage (even though, as you can see in the snippet, I wait for the image to be loaded before proceeding)?
//--------------------------------------------------
function doImageFileInsert(fileinput) {
    var newImg = document.createElement('img');
    var img = fileinput.files[0];
    let reader = new FileReader();
    reader.onload = (e) => {
        let base64 = e.target.result;
        newImg.src = base64;
        doTest(newImg);
    };
    reader.readAsDataURL(img);
    fileinput.value = ''; // reset ready for another file
}
//--------------------------------------------------
function doTest(imgElem) {
    console.log('doTest');
    var canvas = document.createElement("canvas");
    var w = imgElem.width;
    var h = imgElem.height;
    canvas.width = w;
    canvas.height = h;
    var ctx = canvas.getContext("2d");
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    ctx.drawImage(imgElem, 0, 0, w, h);
    canvas.toBlob(function(blob) {
        if (blob) {
            console.log('blob is good');
        } else {
            console.log('blob is null');
            alert('blob is null');
        }
    }, 'image/jpeg', 0.9);
}
canvas, div {
    border: solid 1px grey;
    padding: 10px;
    margin: 5px;
    border-radius: 9px;
}
img {
    width: 100%;
    height: auto;
}
<input type='file' value='Add' onChange='doImageFileInsert(this)'>
Also available at https://jsfiddle.net/Abeeee/rtwcge5h/24/.
If you add images via the Choose File button enough times, you eventually get the problem (alert('blob is null')).
There are only a few reasons why toBlob would produce null:
A bug in the browser's encoder (never seen it myself).
A canvas whose area is bigger than the maximum supported by the UA.
A canvas whose width or height is 0.
Since you are not waiting for the image to load, its width and height properties are still 0, and you fall into the third case above, since you set the canvas's size from these values.
So to fix your error, wait until the image has loaded before doing anything with it.
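For instance, a minimal sketch of that wait, reusing the newImg from your snippet (either the classic load event or, in browsers that support it, the promise-based decode()):

newImg.onload = () => doTest(newImg);
// or, promise-based:
newImg.decode().then(() => doTest(newImg));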
Also, note that you should almost never use FileReader.readAsDataURL(), and certainly not to display media files from a Blob; instead, generate a blob: URL from these Blobs using URL.createObjectURL().
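As a rough sketch of that swap, using the variables from the question's code (img is the File, newImg the image element):

// instead of reader.readAsDataURL(img) + newImg.src = base64:
newImg.src = URL.createObjectURL(img); // a direct pointer to the file's data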
But in your case, you can go one step further and use the createImageBitmap API, which will take care of loading the image in your Blob and will generate a memory-friendly ImageBitmap object that is ready to be drawn by the GPU without any further computation.
Only Safari hasn't implemented the basics of this API yet, but support should land soon (it's exposed behind flags, and unflagged in the Technology Preview version), and I wrote a polyfill you can use to fill the holes in various implementations.
const input = document.querySelector("input");
input.oninput = async (evt) => {
    const img = await createImageBitmap(input.files[0]);
    const canvas = document.createElement("canvas");
    canvas.width = img.width;
    canvas.height = img.height;
    const ctx = canvas.getContext("2d");
    ctx.drawImage(img, 0, 0);
    // I hope you'll do something more here:
    // re-encoding an image to JPEG through the Canvas API is a bad idea,
    // canvas encoders aim at speed, not quality
    canvas.toBlob((blob) => {
        console.log(blob);
    }, "image/jpeg", 0.9);
};
<!-- createImageBitmap polyfill for Safari -->
<script src="https://cdn.jsdelivr.net/gh/Kaiido/createImageBitmap/dist/createImageBitmap.js"></script>
<input type="file" accept="image/*">
Okay, it looks like the caller of the canvas code was the problem: the image had (probably) not been fully loaded by the time drawImage ran.
The original call to doTest() was
function doImageFileInsert(fileinput) {
    var contenteditable = document.getElementById('mydiv');
    var newImg = document.createElement('img');
    //contenteditable.appendChild(newImg);
    var img = fileinput.files[0];
    let reader = new FileReader();
    reader.onload = (e) => {
        let base64 = e.target.result;
        newImg.src = base64;
        doTest(newImg); // <-----
    };
    reader.readAsDataURL(img);
}
but it was that call that was at fault. Changing it to
function doImageFileInsert(fileinput) {
    var contenteditable = document.getElementById('mydiv');
    var newImg = document.createElement('img');
    //contenteditable.appendChild(newImg);
    var img = fileinput.files[0];
    let reader = new FileReader();
    reader.onload = (e) => {
        let base64 = e.target.result;
        newImg.src = base64;
        //doTest(newImg);
        newImg.onload = (e) => { doTest(newImg); }; // <-----
    };
    reader.readAsDataURL(img);
}
seems to fix it. A working version can be seen at https://jsfiddle.net/Abeeee/rtwcge5h/25/.
Here is a more modern approach, if you wish to use it:
/**
 * @param {HTMLInputElement} fileInput
 */
async function doImageFileInsert (fileInput) {
    const img = new Image()
    img.src = URL.createObjectURL(fileInput.files[0])
    await img.decode()
    doTest(img)
}
or this, which doesn't work in all browsers (it would require a polyfill):
/**
 * @param {HTMLInputElement} fileInput
 */
function doImageFileInsert (fileInput) {
    createImageBitmap(fileInput.files[0]).then(doTest)
}
FileReader is such a legacy pattern nowadays: there are new promise-based read methods on the Blob itself, and you can also use URL.createObjectURL(file) instead of wasting time encoding a file to a base64 URL (only for it to be decoded back to binary again by the <img>) - a waste of time, processing, and RAM. See the short sketch after the links below.
img.decode()
Blob#instance_methods
createObjectURL
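To illustrate those promise-based read methods, a small sketch (assuming file comes from a file input's files list):

// read the raw bytes with no base64 round-trip
file.arrayBuffer().then(buffer => {
    const bytes = new Uint8Array(buffer)
    console.log(bytes.length, 'bytes read')
})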
I'm trying to use the merge-images script (https://github.com/lukechilds/merge-images) to merge some images into a single one using nodejs.
I'm having trouble understanding what exactly I'm supposed to provide in the then() method of mergeImages.
This is what I have so far:
const mergeImages = require('merge-images');
const { createCanvas, Canvas, Image } = require('canvas');

const width = 2880;
const height = 2880;

var canvas = createCanvas(width, height);
const ctx = canvas.getContext('2d');

var img = new Image();
img.onload = () => ctx.drawImage(img, 0, 0);
img.onerror = err => { throw err };
img.src = './result.png';

mergeImages([
    './lunas_parts/1.png', './lunas_parts/2.png', './lunas_parts/3.png',
], {
    Canvas: Canvas,
    Image: Image
})
.then(b64 => img = b64);
I do have an empty result.png image in the right location, as well as the 1, 2 and 3 .png files. The console isn't showing any error when I execute the above script, but result.png remains empty after the execution.
Is the canvas image source I'm using in then() not correct? What am I supposed to pass there exactly?
Thanks in advance for any help.
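For what it's worth, a hedged sketch based on the merge-images README: the promise resolves with a base64 data URI, not a file, so assigning it to the img variable never writes anything to disk. To actually update result.png you would decode the data URI and write it yourself, something like:

const fs = require('fs');
const mergeImages = require('merge-images');
const { Canvas, Image } = require('canvas');

mergeImages([
    './lunas_parts/1.png', './lunas_parts/2.png', './lunas_parts/3.png',
], {
    Canvas: Canvas,
    Image: Image
})
.then(b64 => {
    // strip the "data:image/png;base64," prefix and write the raw bytes
    const data = b64.replace(/^data:image\/png;base64,/, '');
    fs.writeFileSync('./result.png', data, 'base64');
});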
I have a web application that allows users to upload tons of images. My goal is to reduce the hosting fees AWS is going to charge me by shrinking the size of the images users upload.
Currently I set the limit to 1MB using simple JS. I am not really a frontend developer, so how hard is it to create a JavaScript solution that automatically shrinks the image size? Can this be done easily?
If not, my other two options are Django and AWS. The backend is Django, but Django can only process the image once it reaches the backend, so if this happens, would my AWS cost increase?
Finally, there's the AWS solution. I don't know how to do it yet, maybe using something called Lambda? Is it completely free? If it keeps my cost down and is easy to set up, then this is it?
Or would it be wise just to use simple JavaScript? Can this be easily implemented, like copy-and-paste code of fewer than 50 lines, and then it's automatically solved?
HTML5 Canvas Resize (Downscale) Image High Quality?
ImageMagick on js
JavaScript library similar to Imagemagick
Sorry if this bothers you, I can't use comments yet.
Here's a working example based on my comment:
const toBase64 = file => new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.readAsDataURL(file);
    reader.onload = () => resolve(reader.result);
    reader.onerror = error => reject(error);
});
async function Main() {
    const file = document.querySelector('.file').files[0];
    const newFile = await toBase64(file);
    document.body.append("Before: ");
    let before = document.createElement("img");
    before.src = newFile;
    document.body.appendChild(before);
    resizeBase64Img(newFile, 200, 200).then(resized => {
        let img = document.createElement("img");
        img.src = resized;
        document.body.appendChild(img);
    });
}
/**
 * Resize a base64 image
 * @param {String} base64 - The base64 string (must include MIME type)
 * @param {Number} newWidth - The width of the image in pixels
 * @param {Number} newHeight - The height of the image in pixels
 */
function resizeBase64Img(base64, newWidth, newHeight) {
    return new Promise((resolve, reject) => {
        var canvas = document.createElement("canvas");
        // set the canvas's bitmap size, not just its CSS size,
        // otherwise it stays at the default 300x150
        canvas.width = newWidth;
        canvas.height = newHeight;
        let context = canvas.getContext("2d");
        let img = document.createElement("img");
        img.src = base64;
        img.onload = function() {
            context.scale(newWidth / img.width, newHeight / img.height);
            context.drawImage(img, 0, 0);
            resolve(canvas.toDataURL());
        };
        img.onerror = reject;
    });
}
document.querySelector(".file").addEventListener("input", Main);
<input class="file" type="file">
I want to access and manipulate an image's binary data using JavaScript. As far as I could research, it can be done using the Canvas API - creating a canvas and a context in memory, and then using their built-in methods. What I've come up with looks like this:
function fileToBase64(file: File): Promise<string> {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = () => resolve(<string>reader.result);
        reader.onerror = () => reject(reader.error);
        reader.readAsDataURL(file);
    });
}
function base64ToImageData(base64: string): Promise<ImageData> {
    return new Promise((resolve, reject) => {
        const img = new Image();
        const canvas = document.createElement('canvas');
        const ctx = canvas.getContext('2d');
        img.onload = () => {
            canvas.width = img.width;
            canvas.height = img.height;
            ctx.drawImage(img, 0, 0);
            const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
            resolve(imageData);
        };
        img.onerror = () => reject();
        img.src = base64;
    });
}
And what I don't like about this approach is that it is verbose and probably uses more resources than it should.
So the question is: Is there a way to get a binary representation of an image without using canvas API?
You could construct an ImageData manually, but for that you need a Uint8ClampedArray of pixel values, which means you would have to decode the binary file to pixel data yourself. So you probably want to use a canvas or an OffscreenCanvas for that anyway.
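A minimal sketch of that manual route (the pixel values here are made up purely for illustration):

// four RGBA bytes per pixel, all initialized to transparent black
const width = 2, height = 2
const pixels = new Uint8ClampedArray(width * height * 4)
pixels.set([255, 0, 0, 255]) // make the first pixel opaque red
const imageData = new ImageData(pixels, width, height)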
But there are some tools to ease the way to getting ImageData from a Blob, using createImageBitmap.
The resulting ImageBitmap contains the pixel data, but you are not able to read it directly; at least it will be faster to paint it to a canvas element.
(async () => {
    // just to simulate a file you would get from file input
    const res = await fetch('https://httpbin.org/image/png')
    const blob = await res.blob()
    const file = new File([blob], 'image.png', blob)

    async function getImageData (blob) {
        const bitmap = await createImageBitmap(blob)
        const canvas = document.createElement('canvas')
        const ctx = canvas.getContext('2d')
        canvas.width = bitmap.width
        canvas.height = bitmap.height
        ctx.drawImage(bitmap, 0, 0)
        return ctx.getImageData(0, 0, canvas.width, canvas.height)
    }

    getImageData(blob).then(imageData => console.log(imageData.width))
})()
But you might not be able to use createImageBitmap due to compatibility issues, so in that case I suggest that, instead of going from file input -> base64 -> image with the FileReader, you go straight from file to image using a pointer reference with object URLs:
const img = new Image()
img.src = URL.createObjectURL(file)
This will use fewer resources because you don't need to encode the file to base64 and decode it back to binary again, and it will use less memory since it will just load the image directly from your disk.
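One caveat worth adding here (my note, not from the original answer): the object URL keeps a reference to the file alive until it is revoked, so a slightly fuller sketch is:

const img = new Image()
const url = URL.createObjectURL(file)
img.onload = () => URL.revokeObjectURL(url) // release the reference once loaded
img.src = url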
I am currently working on a webapp which will be used to upload files after a resize. When I am working on localhost:3000, the resize process works like a charm, but when I am working on 192.168.0.5:3000 for example (or another domain), canvas.toDataURL returns an empty string instead of the base64, and the resize never ends.
This issue only occurs when the Brave shields are enabled, and the reason given for the blocked element is cross-site device recognition.
What can I do for my canvas to be able to resize an image I am uploading to the browser and get its dataURL and Blob when I am not using localhost?
The idea is to get a file from an input, resize it to 16MP via a canvas, and get the blob of this canvas to send to the backend.
I have a PictureCompress function which reads the file with new FileReader() in order to get the base64 of this file. After that I create a new Image() and set its src to my reader's event.target.result (the b64), and onload I am supposed to create the canvas to resize the image via ctx.drawImage, get the new base64 via canvas.toDataURL, and then the blob.
This process works perfectly on my webapp when running it locally, but when I change the URL to access my machine's IP (I expect it to be the same on a domain name), it no longer works.
For example, there is a live version of this code on CodeBox, and the issue seems to occur there too.
EDIT:
This issue only occurs when the URL has a port (192.168.0.5:3000 is not working, but 192.168.0.5 is working). Why?
On Firefox, the above fix does not work: Blocked http://192.168.0.13/ from extracting canvas data because no user input was detected. Is it because it's not HTTPS? To be continued...
function Uploader(props) {
    function PictureCompress(file, callback) {
        const fileName = file.name;
        const reader = new FileReader();
        reader.readAsDataURL(file);
        reader.onerror = error => console.warn(error);
        reader.onload = event => {
            const img = new Image();
            img.name = fileName;
            img.onerror = error => console.warn(error);
            img.onload = e => {
                const imageOriginalWidth = e.target.width;
                const imageOriginalHeight = e.target.height;
                const hvRatio = imageOriginalWidth / imageOriginalHeight;
                const vhRatio = imageOriginalHeight / imageOriginalWidth;
                const imageHvRatio = 16000000 * hvRatio;
                const imageVhRatio = 16000000 * vhRatio;
                const newWidth = Math.sqrt(imageHvRatio);
                const newHeight = Math.sqrt(imageVhRatio);
                const canvas = document.createElement("canvas");
                canvas.width = newWidth;
                canvas.height = newHeight;
                const ctx = canvas.getContext("2d");
                ctx.drawImage(img, 0, 0, newWidth, newHeight);
                const base64 = canvas.toDataURL(file.type);
                console.log(base64, newWidth, newHeight, file.type);
                canvas.toBlob(
                    blob => {
                        console.log(blob);
                        callback(blob, base64);
                    },
                    file.type,
                    0.85
                );
            };
            img.src = event.target.result;
        };
    }

    function recursiveUpload(index, files) {
        if (index >= files.length) {
            return;
        }
        const file = files[index];
        PictureCompress(file, resizedFile => {
            resizedFile.name = file.name;
            resizedFile.lastModified = file.lastModified;
            // MY API CALL, NOT THE ISSUE
        });
    }

    return (
        <input type={"file"} onChange={event => recursiveUpload(0, event.target.files)} />
    );
}
Thank you for your help!
I've got the proof-of-concept I need for loading in multiple images with FileReader.readAsDataURL().
However, my clients' workflow is such that they load in hundreds of images at a time which crashes the browser.
Is there any way to load these images in as actual thumbnails (16 KB vs 16 MB)?
First, don't use a FileReader at all.
Instead, to display any data from a Blob, use the URL.createObjectURL method.
FileReader will load the binary data into memory three times (once when reading the Blob for conversion, once as a Base64 string, and once more when it's passed as the src of your HTMLElement).
In the case of a file stored on the user's disk, a blob URL will load the data into memory only once, from the HTMLElement. A blob URL is indeed a direct pointer to the Blob's data.
So this will already free a lot of memory for you.
inp.onchange = e => {
    for (let file of inp.files) {
        let url = URL.createObjectURL(file);
        let img = new Image(200);
        img.src = url;
        document.body.appendChild(img);
    }
};
<input type="file" webkitdirectory accept="image/*" id="inp">
Now, if that is not enough and you need to generate real thumbnails, you could draw all these images on canvases and reduce their size as needed.
But keep in mind that to do so, you'd have to load the original image's data first anyway, and you cannot be sure how the browser will clean up that memory afterwards. So you might do more harm than good by creating these thumbnail versions.
Anyway, here is a basic example of how it could be implemented:
inp.onchange = e => {
    Promise.all([...inp.files].map(toThumbnail))
        .then(imgs => document.body.append.apply(document.body, imgs.filter(v => v)))
        .catch(console.error);
};

function toThumbnail(file) {
    return loadImage(file)
        .then(drawToCanvas)
        .catch(_ => {});
}

function loadImage(file) {
    const url = URL.createObjectURL(file);
    const img = new Image();
    return new Promise((res, rej) => {
        img.onload = e => res(img);
        img.onerror = rej;
        img.src = url;
    });
}

function drawToCanvas(img) {
    const w = Math.min(img.naturalWidth, 200);
    const r = w / img.naturalWidth;
    const h = img.naturalHeight * r;
    const canvas = Object.assign(
        document.createElement('canvas'), {
            width: w,
            height: h
        }
    );
    canvas.getContext('2d').drawImage(img, 0, 0, w, h);
    return canvas;
}
<input type="file" webkitdirectory accept="image/*" id="inp">