I have a problem when using html2canvas. I need to download a div as an image, so I convert it to a canvas. Below is my code:
const handleDownload = () => {
  // custom div to download
  let containerImage = document.createElement('div');
  containerImage.style.width = 1080
  containerImage.style.height = 1080
  for (let i = 0; i < 3; i++) {
    let tmpDiv = document.createElement('div')
    tmpDiv.style.width = 1080
    tmpDiv.style.height = 300
    tmpDiv.style.background = 'red'
    containerImage.appendChild(tmpDiv)
  }
  document.body.appendChild(containerImage)
  downloadImageFromCanvas(containerImage, 12, () => console.log('1'))
}
// download image from canvas
const downloadImageFromCanvas = (Ref, id, callback) => {
  html2canvas(Ref)
    .then(async (canvas) => {
      const photo = await getImage({ canvas, width: 1360, height: 675 });
      const element = document.createElement('a');
      const blobUrl = URL.createObjectURL(photo);
      element.href = blobUrl;
      element.download = `Image ${id}`;
      element.click();
      callback()
    })
    .catch((e) => console.log('error', e));
};
// convert canvas to blob
const getImage = async ({
  canvas,
  width,
  height,
  mime = 'image/png',
  quality = 0.8,
}) => {
  return new Promise((resolve) => {
    const tmpCanvas = document.createElement('canvas');
    tmpCanvas.width = width * 0.99;
    tmpCanvas.height = height;
    const ctx = tmpCanvas.getContext('2d');
    ctx.drawImage(
      canvas,
      0,
      0,
      canvas.width,
      canvas.height,
      0,
      0,
      width,
      height
    );
    tmpCanvas.toBlob(resolve, mime, quality);
  });
};
The issue is that I already set the width for the div, but I always get this error:
error DOMException: Failed to execute 'drawImage' on 'CanvasRenderingContext2D': The image argument is a canvas element with a width or height of 0
Does anyone have a solution for this problem? Thank you.
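A likely cause (an assumption, since the surrounding CSS isn't shown): element.style.width and element.style.height expect CSS length strings, so assigning a bare number like 1080 is ignored, the container collapses to zero height, and html2canvas produces a 0x0 canvas that drawImage then rejects. A minimal sketch of that fix:
// use unit strings so the styles actually apply (bare numbers are ignored)
containerImage.style.width = '1080px'
containerImage.style.height = '1080px'
// ...and likewise for each tmpDiv
tmpDiv.style.width = '1080px'
tmpDiv.style.height = '300px'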
I'm using the face-api.js JavaScript API to develop a web app where the user uploads a picture and we want to detect the faces in it. I also use a VGGFace 16 model (exported as JSON) to predict which celebrity the uploaded image most resembles.
The following is my JavaScript code:
const MODEL_URL = '../faceapi_models'

Promise.all([
  faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL),
  faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
  // faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
])
  .then((val) => {
    console.log('val')
  })
  .catch((err) => {
    console.log(err)
  })

let model
async function loadModel() {
  model = await tf.loadLayersModel('../web_model/vgg_model.json');
}
loadModel()
  .then((val) => {
    console.log('Model is Loaded');
  })
  .catch((err) => {
    console.log('Model Not Load : ' + err)
  })
let croppedImage = null;

const user_pic = document.getElementById('user_pic')
const preview = document.getElementById('preview')
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');

window.onload = function() {
  canvas.width = preview.width;
  canvas.height = preview.height;
  ctx.drawImage(preview, 0, 0);
};

preview.onclick = () => user_pic.click()

user_pic.addEventListener('change', () => {
  const reader = new FileReader()
  reader.onload = (e) => {
    const img = new Image();
    img.onload = function() {
      canvas.width = img.width;
      canvas.height = img.height;
      ctx.drawImage(img, 0, 0);
    };
    img.src = e.target.result;
  }
  reader.readAsDataURL(user_pic.files[0]);
  detectFaces(user_pic.files[0])
})
async function detectFaces(input) {
  let imgURL = URL.createObjectURL(input)
  const imgElement = new Image()
  imgElement.src = imgURL
  const results = await faceapi.detectAllFaces(imgElement)
    .then(results => {
      if (Array.isArray(results) && results.forEach) {
        results.forEach(result => {
          const { x, y, width, height } = result.box;
          const crop = ctx.getImageData(x, y, width, height);
          croppedImage = new ImageData(crop.data, width, height);
          const input = tf.browser.fromPixels(croppedImage);
          const resizedImage = tf.image.resizeBilinear(input, [224, 224]);
          const inputTensor = resizedImage.expandDims(0);
          const predictions = model.predict(inputTensor).data();
          const celebrityIndex = predictions.indexOf(Math.max(...predictions));
          console.log(celebrityIndex)
          // const celebrityName = celebrityLabels[celebrityIndex];
          // Display the results
          // const resultDisplay = document.getElementById('result');
          // resultDisplay.innerHTML = `Most similar celebrity: ${celebrityName}`;
        });
      } else {
        console.error('Results is not an array or does not have a forEach function.');
      }
    });
}
I have solved many problems so far, but I do not know how to handle this one, or why it arises.
This is the complete error:
I found that the problem is with the x, y, width, height constants, which had float values. A simple fix is to convert them to integers like this:
const {x, y, width, height} = result.box;
const xInt = Math.floor(x);
const yInt = Math.floor(y);
const widthInt = Math.floor(width);
const heightInt = Math.floor(height);
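Presumably those integer values then replace the float ones in the cropping step from the question, roughly like this:
const crop = ctx.getImageData(xInt, yInt, widthInt, heightInt);
croppedImage = new ImageData(crop.data, widthInt, heightInt);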
I'm currently trying to convert a base64-encoded string (the representation of an image) into an ImageData object in JavaScript. However, this comes back with an error:
Uncaught InvalidStateError: Failed to construct 'ImageData': The input data length is not a multiple of 4.
The encoded image is 940 x 740.
What am I missing? Any help appreciated.
JSFiddle link with full code
function _base64ToArrayBuffer(base64) {
  var binary_string = window.atob(base64);
  var len = binary_string.length;
  var bytes = new Uint8Array(len);
  for (var i = 0; i < len; i++) {
    bytes[i] = binary_string.charCodeAt(i);
  }
  return bytes.buffer;
}

const base64String = ""; // truncated because stackoverflow question too long.
const arrBuffer = _base64ToArrayBuffer(base64String);
var array = new Uint8ClampedArray(arrBuffer);
console.log(array);
var image = new ImageData(array, 900, 740);
console.log(image);
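For context: the ImageData constructor requires raw RGBA pixel data, so the array length must be exactly width * height * 4; the bytes decoded from a base64-encoded PNG are the compressed file contents, not raw pixels, which is why the length check fails. A quick sanity check (using the dimensions from the question):
console.log(array.length === 940 * 740 * 4); // must be true for new ImageData(array, 940, 740)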
You can get the ImageData from a Canvas
function _base64ToImageData(buffer, width, height) {
  return new Promise(resolve => {
    var image = new Image();
    image.addEventListener('load', function (e) {
      var canvasElement = document.createElement('canvas');
      canvasElement.width = width;
      canvasElement.height = height;
      var context = canvasElement.getContext('2d');
      context.drawImage(e.target, 0, 0, width, height);
      resolve(context.getImageData(0, 0, width, height));
    });
    image.src = 'data:image/png;base64,' + buffer;
    document.body.appendChild(image);
    setTimeout(() => {
      document.body.removeChild(image);
    }, 1);
  });
}

window.addEventListener('load', () => {
  _base64ToImageData(base64String, 2056, 1236).then(data => {
    console.log(data);
  });
});
I am using a JavaScript library called face-api.js.
I need to extract the face from the video frame when face-api detects a face. Could anyone help me with that part?
const video = document.getElementById('video');

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}

video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);
  const displaySize = { width: video.width, height: video.height };
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    console.log('Box: ', detections[0].detection._box);
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
  }, 5000)
})
Add an extractFaceFromBox function to your code; it extracts a face from a video frame given a bounding box and displays the result in outputImage.
Try this code and enjoy.
// This is your code
video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);
  const displaySize = { width: video.width, height: video.height };
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    // Call this function to extract and display the face
    extractFaceFromBox(video, detections[0].detection.box)
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
  }, 5000)
})

let outputImage = document.getElementById('outputImage')

// This function extracts a face from a video frame given a bounding box and displays the result in outputImage
async function extractFaceFromBox(inputImage, box) {
  const regionsToExtract = [
    new faceapi.Rect(box.x, box.y, box.width, box.height)
  ]
  let faceImages = await faceapi.extractFaces(inputImage, regionsToExtract)
  if (faceImages.length == 0) {
    console.log('Face not found')
  } else {
    faceImages.forEach(cnv => {
      outputImage.src = cnv.toDataURL();
    })
  }
}
This is not specific to face-api.js, but you can use a canvas to extract an image from a video. Here is a little function I wrote for my case.
const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  const context = canvas.getContext("2d");
  // Take a screenshot of the video
  context?.drawImage(video, 0, 0, canvas.width, canvas.height);
  const dataUrl = canvas.toDataURL("image/jpeg");
  const image = new Image();
  image.src = dataUrl;
  const canvasImg = document.createElement("canvas");
  canvasImg.width = width;
  canvasImg.height = height;
  const ctx = canvasImg.getContext("2d");
  image.onload = () => {
    // Crop the face region out of the screenshot
    ctx?.drawImage(image, x, y, width, height, 0, 0, width, height);
    canvasImg.toBlob((blob) => {
      // Do something with the blob. Alternatively, you can convert it to a data URL like the video screenshot
      // I was using React, so I just called my handler
      handSavePhoto(blob);
    }, "image/jpeg");
  };
};
You don't have to take the screenshot first; you can just go ahead and crop directly from the video, but I found out after testing that cropping from an image gives more consistent results. Here is how you would achieve it in that case.
const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = width;
  canvas.height = height;
  const context = canvas.getContext("2d");
  // Crop the face region directly from the video frame
  context?.drawImage(video, x, y, width, height, 0, 0, width, height);
  canvas.toBlob((blob) => {
    handSavePhoto(blob);
  }, "image/jpeg");
};
With that out of the way, you can now use face-api data to get the face you want.
// assuming your video element is stored in the video variable
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
const {x, y, width, height} = detections[0].detection.box;
extractFace(video, x, y, width, height);
You can read more about drawImage from here.
Check whether detections.length is greater than 0; that means the detector found something in front of it.
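A minimal sketch of that guard, using the names from the snippet above:
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
if (detections.length > 0) {
  // only extract a face when the detector actually found one
  const { x, y, width, height } = detections[0].detection.box;
  extractFace(video, x, y, width, height);
}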
The method below is used as a parameter for another variable, but the problem is that it does not return the file after it has been compressed.
async CompressImage(imageFile: File): Promise<File> {
  return new Promise<File>((resolve, reject) => {
    const cReader = new FileReader();
    const image = new Image();
    cReader.onload = () => {
      image.src = cReader.result.toString();
      image.onload = () => {
        const canvas = document.createElement("canvas");
        const context = canvas.getContext("2d");
        context.drawImage(image, 0, 0);
        // width & height initialization
        canvas.width = width;
        canvas.height = height;
        var ctx = canvas.getContext("2d");
        ctx.drawImage(image, 0, 0, width, height);
        const convertedFile = canvas.toBlob((blob) => {
          const scaledDown = new File([blob], imageFile.name);
        });
        resolve(convertedFile);
      }
    };
    cReader.readAsDataURL(mediaFile);
  });
}
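One likely reason nothing comes back is that canvas.toBlob does not return the blob; it only passes it to its callback asynchronously, so convertedFile is undefined by the time resolve runs. A minimal sketch of that change (an assumption, keeping the rest of the method as is and assuming width and height are initialized as in the original):
// resolve inside the toBlob callback, since toBlob itself returns undefined
canvas.toBlob((blob) => {
  const scaledDown = new File([blob], imageFile.name, { type: blob.type });
  resolve(scaledDown);
}, imageFile.type);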
const image = new CanvasImage(canvas);
canvas.width = 100;
canvas.height = 100;
const context = canvas.getContext('2d');
image.src = 'http://i.imgur.com/c2wRzfD.jpg';
image.addEventListener('load', async () => {
  context.drawImage(image, 0, 0, 100, 100);
  let dataURL = await canvas.toDataURL("image/png")
});
This code gives me an error like:
"Cannot read property 'constructor' of undefined"
This happens only when I call drawImage() on the canvas; it works properly when I draw rectangles, circles, etc.
Android Studio is 3.0.1
node -v reports a stable version
The problem is with this code inside the listener:
let dataURL = await canvas.toDataURL("image/png")
It seems to break the constructor definition for drawImage.
This can be solved by using it as follows:
handleImageRect = async (canvas) => {
  const image = new CanvasImage(canvas);
  canvas.width = 100;
  canvas.height = 100;
  const context = canvas.getContext('2d');
  image.src = 'https://image.freepik.com/free-vector/unicorn-background-design_1324-79.jpg';
  image.addEventListener('load', () => {
    console.log('image is loaded');
    context.drawImage(image, 0, 0, 100, 100);
  });
  // Move it outside
  let dataURL = await canvas.toDataURL("image/png")
  console.log(dataURL)
}