I am using a JavaScript library called face-api.js.
I need to extract the face from the video frame when face-api detects a face. Could anyone help me with that part?
const video = document.getElementById('video');
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  navigator.mediaDevices.getUserMedia({ video: {} })
    .then(stream => video.srcObject = stream)
    .catch(err => console.error(err))
}
video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);
  const displaySize = { width: video.width, height: video.height };
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    console.log('Box: ', detections[0].detection._box);
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
  }, 5000)
})
Add the extractFaceFromBox function below to your code. It extracts a face from a video frame, given a bounding box, and displays the result in outputImage.
Try this code and enjoy.
// This is your code
video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);
  const displaySize = { width: video.width, height: video.height };
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    // Call this function to extract and display the face
    extractFaceFromBox(video, detections[0].detection.box)
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
  }, 5000)
})
let outputImage = document.getElementById('outputImage')

// This function extracts a face from a video frame, given a bounding box, and displays the result in outputImage
async function extractFaceFromBox(inputImage, box) {
  const regionsToExtract = [
    new faceapi.Rect(box.x, box.y, box.width, box.height)
  ]
  let faceImages = await faceapi.extractFaces(inputImage, regionsToExtract)
  if (faceImages.length == 0) {
    console.log('Face not found')
  } else {
    faceImages.forEach(cnv => {
      outputImage.src = cnv.toDataURL();
    })
  }
}
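This assumes your page already has an image element with the id outputImage. If it doesn't, a minimal sketch to create one before the script above runs:

// Create the output <img> element used by extractFaceFromBox
const img = document.createElement('img');
img.id = 'outputImage';
document.body.append(img);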
This is not specific to face-api.js, but you can use a canvas to extract an image from a video. Here is a little function I wrote for my case.
const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  const context = canvas.getContext("2d");
  // Get a screenshot from the video
  context?.drawImage(video, 0, 0, canvas.width, canvas.height);
  const dataUrl = canvas.toDataURL("image/jpeg");
  const image = new Image();
  image.src = dataUrl;
  const canvasImg = document.createElement("canvas");
  canvasImg.width = width;
  canvasImg.height = height;
  const ctx = canvasImg.getContext("2d");
  image.onload = () => {
    // Crop the face region out of the screenshot
    ctx?.drawImage(image, x, y, width, height, 0, 0, width, height);
    canvasImg.toBlob((blob) => {
      // Do something with the blob. Alternatively, you can convert it to a data URL like the video screenshot
      // I was using React, so I just called my handler
      handSavePhoto(blob);
    }, "image/jpeg");
  };
};
You don't have to take the screenshot first; you can crop straight from the video, though I found after testing that cropping from an image gives more consistent results. Here is how you would achieve it in that case.
const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = width;
  canvas.height = height;
  const context = canvas.getContext("2d");
  // Crop the face region directly from the video frame
  context?.drawImage(video, x, y, width, height, 0, 0, width, height);
  canvas.toBlob((blob) => {
    handSavePhoto(blob);
  }, "image/jpeg");
};
With that out of the way, you can now use the face-api data to get the face you want.

// assuming your video element is stored in the video variable
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
const { x, y, width, height } = detections[0].detection.box;
extractFace(video, x, y, width, height);
You can read more about drawImage in the MDN documentation.
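For reference, the nine-argument form of drawImage used above copies a source rectangle into a destination rectangle; a minimal annotated sketch:

// drawImage(source, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight)
// (sx, sy, sWidth, sHeight) selects the region to copy from the source;
// (dx, dy, dWidth, dHeight) is where, and at what size, it lands on the canvas
context.drawImage(video, x, y, width, height, 0, 0, width, height);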
Check whether detections.length is greater than 0 before using detections[0]; it means a face was actually detected in front of the camera.
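A minimal sketch of that guard, following the detection shape used in the answers above:

const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
if (detections.length > 0) {
  extractFaceFromBox(video, detections[0].detection.box);
} else {
  console.log('No face detected');
}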
I'm using the face-api.js JavaScript library to develop a web app where the user uploads a picture and we detect the faces in it.
On the other hand, I used a VGGface 16 model in JSON format to predict which celebrity the uploaded image most resembles.
The following is my JavaScript code:
const MODEL_URL = '../faceapi_models'

Promise.all([
  faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL),
  faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
  // faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
])
  .then((val) => {
    console.log('val')
  })
  .catch((err) => {
    console.log(err)
  })

let model

async function loadModel() {
  model = await tf.loadLayersModel('../web_model/vgg_model.json');
}

loadModel()
  .then((val) => {
    console.log('Model is Loaded');
  })
  .catch((err) => {
    console.log('Model Not Loaded: ' + err)
  })
let croppedImage = null;
const user_pic = document.getElementById('user_pic')
const preview = document.getElementById('preview')
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');

window.onload = function() {
  canvas.width = preview.width;
  canvas.height = preview.height;
  ctx.drawImage(preview, 0, 0);
};

preview.onclick = () => user_pic.click()

user_pic.addEventListener('change', () => {
  const reader = new FileReader()
  reader.onload = (e) => {
    const img = new Image();
    img.onload = function() {
      canvas.width = img.width;
      canvas.height = img.height;
      ctx.drawImage(img, 0, 0);
    };
    img.src = e.target.result;
  }
  reader.readAsDataURL(user_pic.files[0]);
  detectFaces(user_pic.files[0])
})
async function detectFaces(input) {
  const imgURL = URL.createObjectURL(input)
  const imgElement = new Image()
  imgElement.src = imgURL
  const results = await faceapi.detectAllFaces(imgElement)
  if (Array.isArray(results)) {
    for (const result of results) {
      const { x, y, width, height } = result.box;
      const crop = ctx.getImageData(x, y, width, height);
      croppedImage = new ImageData(crop.data, width, height);
      const input = tf.browser.fromPixels(croppedImage);
      const resizedImage = tf.image.resizeBilinear(input, [224, 224]);
      const inputTensor = resizedImage.expandDims(0);
      // predict() returns a tensor; data() resolves to a typed array
      const predictions = await model.predict(inputTensor).data();
      const celebrityIndex = predictions.indexOf(Math.max(...predictions));
      console.log(celebrityIndex)
      // const celebrityName = celebrityLabels[celebrityIndex];
      // Display the results
      // const resultDisplay = document.getElementById('result');
      // resultDisplay.innerHTML = `Most similar celebrity: ${celebrityName}`;
    }
  } else {
    console.error('Results is not an array.');
  }
}
I have solved many problems so far, but I don't know how to handle this one, or why it arises.
This is the complete error:
I found that the problem is with the x, y, width, height constants, which had float values. A simple fix is converting them to integers like this:
const {x, y, width, height} = result.box;
const xInt = Math.floor(x);
const yInt = Math.floor(y);
const widthInt = Math.floor(width);
const heightInt = Math.floor(height);
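Then use the integer values for the crop, e.g. (a sketch based on the detectFaces code above):

const crop = ctx.getImageData(xInt, yInt, widthInt, heightInt);
croppedImage = new ImageData(crop.data, widthInt, heightInt);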
I want to record multiple layered canvases using MediaRecorder, but I don't know how to achieve it. Could anyone help?
This is my pseudo code:
const RECORD_FRAME = 30;
const canvasVideoTrack = canvas.captureStream(RECORD_FRAME).getVideoTracks()[0];
const waterMarkCanvasTrack = waterMarkCanvas.captureStream(RECORD_FRAME).getVideoTracks()[0];

const stream = new MediaStream();
const mediaRecorder = new MediaRecorder(stream);
mediaRecorder.stream.addTrack(canvasVideoTrack)
mediaRecorder.stream.addTrack(waterMarkCanvasTrack)
// .... recording
You need to draw all your canvases onto a single one.
Even if we could record multiple video streams (which we can't yet), what you need is to composite these video streams, and for that you use a canvas:
const prepareCanvasAnim = (color, offset) => {
  const canvas = document.createElement("canvas");
  const ctx = canvas.getContext("2d");
  ctx.fillStyle = color;
  let x = offset;
  const anim = () => {
    x = (x + 1) % canvas.width;
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    ctx.fillRect(x, offset, 50, 50);
    requestAnimationFrame(anim);
  }
  anim();
  return canvas;
}
const canvas1 = prepareCanvasAnim("red", 20);
const canvas2 = prepareCanvasAnim("green", 80);
document.querySelector(".container").append(canvas1, canvas2);

const btn = document.querySelector("button");
const record = (evt) => {
  btn.textContent = "Stop Recording";
  btn.disabled = true;
  setTimeout(() => btn.disabled = false, 5000); // at least 5s recording
  // prepare our merger canvas
  const canvas = canvas1.cloneNode();
  const ctx = canvas.getContext("2d");
  ctx.fillStyle = "#FFF";
  const toMerge = [canvas1, canvas2];
  const anim = () => {
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    toMerge.forEach(layer => ctx.drawImage(layer, 0, 0));
    requestAnimationFrame(anim);
  };
  anim();
  const stream = canvas.captureStream();
  const chunks = [];
  const recorder = new MediaRecorder(stream);
  recorder.ondataavailable = (evt) => chunks.push(evt.data);
  recorder.onstop = (evt) => exportVid(chunks);
  btn.onclick = (evt) => recorder.stop();
  recorder.start();
};
function exportVid(chunks) {
  const vid = document.createElement("video");
  vid.controls = true;
  vid.src = URL.createObjectURL(new Blob(chunks));
  document.body.append(vid);
  btn.onclick = record;
  btn.textContent = "Start Recording";
}
btn.onclick = record;
canvas { border: 1px solid }
.container canvas { position: absolute }
.container:hover canvas { position: relative }
.container { height: 180px }
<div class="container">Hover here to "untangle" the canvases<br></div>
<button>Start Recording</button><br>
But if you're going this way anyway, you might just as well do the merging from the get-go and append a single canvas to the document; the browser's compositor will have less work to do.
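A minimal sketch of that single-canvas approach (drawScene and drawWaterMark are hypothetical draw callbacks standing in for your layers):

// One visible canvas; each "layer" is just a draw function called in order
const merged = document.createElement("canvas");
const mergedCtx = merged.getContext("2d");
document.querySelector(".container").append(merged);

const layers = [drawScene, drawWaterMark]; // hypothetical callbacks
const anim = () => {
  mergedCtx.clearRect(0, 0, merged.width, merged.height);
  layers.forEach(draw => draw(mergedCtx));
  requestAnimationFrame(anim);
};
anim();

// Record the single merged canvas as before
const recorder = new MediaRecorder(merged.captureStream(30));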
I have a problem when using html2canvas. I need to download a div, so I convert it to a canvas. Below is my code:
const handleDownload = () => {
  // custom div that needs to be downloaded
  let containerImage = document.createElement('div');
  containerImage.style.width = 1080
  containerImage.style.height = 1080
  for (let i = 0; i < 3; i++) {
    let tmpDiv = document.createElement('div')
    tmpDiv.style.width = 1080
    tmpDiv.style.height = 300
    tmpDiv.style.background = 'red'
    containerImage.appendChild(tmpDiv)
  }
  document.body.appendChild(containerImage)
  downloadImageFromCanvas(containerImage, 12, () => console.log('1'))
}
// download image from canvas
const downloadImageFromCanvas = (Ref, id, callback) => {
  html2canvas(Ref)
    .then(async (canvas) => {
      const photo = await getImage({ canvas, width: 1360, height: 675 });
      const element = document.createElement('a');
      const blobUrl = URL.createObjectURL(photo);
      element.href = blobUrl;
      element.download = `Image ${id}`;
      element.click();
      callback()
    })
    .catch((e) => console.log('error', e));
};
// convert canvas to blob
const getImage = async ({ canvas, width, height, mime = 'image/png', quality = 0.8 }) => {
  return new Promise((resolve) => {
    const tmpCanvas = document.createElement('canvas');
    tmpCanvas.width = width * 0.99;
    tmpCanvas.height = height;
    const ctx = tmpCanvas.getContext('2d');
    ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, width, height);
    tmpCanvas.toBlob(resolve, mime, quality);
  });
};
The issue is that I already set the width for the div, but I always get this error:

error DOMException: Failed to execute 'drawImage' on 'CanvasRenderingContext2D': The image argument is a canvas element with a width or height of 0

Does anyone have a solution for this problem? Thank you.
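One thing worth checking (an observation on the snippet above, not a confirmed fix): assigning a bare number to element.style.width is silently ignored, since CSS lengths need units, so the div may still have zero rendered size when html2canvas snapshots it. A sketch:

// style.width expects a CSS length string; a bare number is ignored
containerImage.style.width = '1080px'
containerImage.style.height = '1080px'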
The method below is used as a parameter for another variable. The problem is that it does not return the file after it has been compressed.
async CompressImage(imageFile: File): Promise<File> {
  return new Promise<File>((resolve, reject) => {
    const cReader = new FileReader();
    const image = new Image();
    cReader.onload = () => {
      image.src = cReader.result.toString();
      image.onload = () => {
        const canvas = document.createElement("canvas");
        const context = canvas.getContext("2d");
        context.drawImage(image, 0, 0);
        // width & height initialization
        canvas.width = width;
        canvas.height = height;
        var ctx = canvas.getContext("2d");
        ctx.drawImage(image, 0, 0, width, height);
        const convertedFile = canvas.toBlob((blob) => {
          const scaledDown = new File([blob], imageFile.name);
        });
        resolve(convertedFile);
      }
    };
    cReader.readAsDataURL(mediaFile);
  });
}
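A detail worth noting about the code above (an observation, since the calling code isn't shown): canvas.toBlob returns undefined and hands the blob to its callback asynchronously, so resolve(convertedFile) resolves the promise with undefined. A minimal sketch that resolves inside the callback instead:

canvas.toBlob((blob) => {
  const scaledDown = new File([blob], imageFile.name);
  resolve(scaledDown);
});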
const image = new CanvasImage(canvas);
canvas.width = 100;
canvas.height = 100;
const context = canvas.getContext('2d');
image.src = 'http://i.imgur.com/c2wRzfD.jpg';
image.addEventListener('load', async () => {
  context.drawImage(image, 0, 0, 100, 100);
  let dataURL = await canvas.toDataURL("image/png")
});
This code gives me an error like:
"Cannot read property 'constructor' of undefined"
It happens only when I call drawImage() on the canvas; it works properly when I draw a rectangle, circle, etc.
Android Studio is 3.0.1.
node -v is stable.
The problem is with this code inside the listener:
let dataURL = await canvas.toDataURL("image/png")
It seems to break your constructor definition for drawImage.
This can be solved by using it as follows:
handleImageRect = async (canvas) => {
  const image = new CanvasImage(canvas);
  canvas.width = 100;
  canvas.height = 100;
  const context = canvas.getContext('2d');
  image.src = 'https://image.freepik.com/free-vector/unicorn-background-design_1324-79.jpg';
  image.addEventListener('load', () => {
    console.log('image is loaded');
    context.drawImage(image, 0, 0, 100, 100);
  });
  // Move it outside
  let dataURL = await canvas.toDataURL("image/png")
  console.log(dataURL)
}
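Note that with this ordering the data URL is captured before the load handler has drawn the image. If the capture needs to include the drawn image, one option (a sketch that keeps the await outside the listener, as above) is to wait for the draw to finish first:

handleImageRect = async (canvas) => {
  const image = new CanvasImage(canvas);
  canvas.width = 100;
  canvas.height = 100;
  const context = canvas.getContext('2d');
  image.src = 'https://image.freepik.com/free-vector/unicorn-background-design_1324-79.jpg';
  // Resolve only after the image has been drawn onto the canvas
  await new Promise((resolve) => {
    image.addEventListener('load', () => {
      context.drawImage(image, 0, 0, 100, 100);
      resolve();
    });
  });
  let dataURL = await canvas.toDataURL("image/png")
  console.log(dataURL)
}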