Try this fiddle in Chrome and in Firefox.
https://jsfiddle.net/lvl5hm/xosan6w9/29/
In Chrome it takes about 0.5-2 ms to draw a video frame to the canvas, but Firefox for some reason takes 20-40 ms, which is pretty extreme.
Is there anything that can help me improve the Firefox performance?
const canvas = document.getElementById('canvas')
canvas.width = 500
canvas.height = 300
const ctx = canvas.getContext('2d')

const video = document.createElement('video')
video.src = 'https://static.beeline.ru/upload/images/business/delo/newmain/main.mp4'
video.muted = true
video.loop = true
video.oncanplay = () => {
  video.play()
}

const frameCounterElement = document.getElementById('frameCounter')
let duration = 0
setInterval(() => {
  frameCounterElement.innerHTML = duration.toFixed(2) + 'ms to render'
}, 400)

function loop() {
  const startTime = performance.now()
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height)
  duration = performance.now() - startTime
  requestAnimationFrame(loop)
}
loop()
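As a side note, the snippet reports only the cost of the single most recent drawImage call. If you want a steadier number to compare the two browsers with, you could average the cost over each reporting interval instead; here is a small sketch of that variant, reusing the same variables:

let total = 0
let frames = 0

setInterval(() => {
  // report the average drawImage cost since the last update
  frameCounterElement.innerHTML = (total / Math.max(frames, 1)).toFixed(2) + 'ms to render (avg)'
  total = 0
  frames = 0
}, 400)

function loop() {
  const startTime = performance.now()
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height)
  total += performance.now() - startTime
  frames++
  requestAnimationFrame(loop)
}
loop()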
Related
I want to record multiple layered canvases using MediaRecorder, but I don't know how to achieve it.
This is my pseudo code:
const RECORD_FRAME = 30;
const canvasVideoTrack = canvas.captureStream(RECORD_FRAME).getVideoTracks()[0];
const waterMarkCanvasTrack = waterMarkCanvas.captureStream(RECORD_FRAME).getVideoTracks()[0];
const stream = new MediaStream();
const mediaRecorder = new MediaRecorder(stream);
mediaRecorder.stream.addTrack(canvasVideoTrack)
mediaRecorder.stream.addTrack(waterMarkCanvasTrack)
// .... recording
You need to draw all your canvases on a single one.
Even if we could record multiple video streams (which we can't yet), what you need here is to composite these video streams, and for that you use a canvas:
const prepareCanvasAnim = (color, offset) => {
  const canvas = document.createElement("canvas");
  const ctx = canvas.getContext("2d");
  ctx.fillStyle = color;
  let x = offset;
  const anim = () => {
    x = (x + 1) % canvas.width;
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    ctx.fillRect(x, offset, 50, 50);
    requestAnimationFrame(anim);
  };
  anim();
  return canvas;
};
const canvas1 = prepareCanvasAnim("red", 20);
const canvas2 = prepareCanvasAnim("green", 80);
document.querySelector(".container").append(canvas1, canvas2);

const btn = document.querySelector("button");
const record = (evt) => {
  btn.textContent = "Stop Recording";
  btn.disabled = true;
  setTimeout(() => btn.disabled = false, 5000); // at least 5s recording
  // prepare our merger canvas
  const canvas = canvas1.cloneNode();
  const ctx = canvas.getContext("2d");
  ctx.fillStyle = "#FFF";
  const toMerge = [canvas1, canvas2];
  const anim = () => {
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    toMerge.forEach(layer => ctx.drawImage(layer, 0, 0));
    requestAnimationFrame(anim);
  };
  anim();

  const stream = canvas.captureStream();
  const chunks = [];
  const recorder = new MediaRecorder(stream);
  recorder.ondataavailable = (evt) => chunks.push(evt.data);
  recorder.onstop = (evt) => exportVid(chunks);
  btn.onclick = (evt) => recorder.stop();
  recorder.start();
};
function exportVid(chunks) {
  const vid = document.createElement("video");
  vid.controls = true;
  vid.src = URL.createObjectURL(new Blob(chunks));
  document.body.append(vid);
  btn.onclick = record;
  btn.textContent = "Start Recording";
}
btn.onclick = record;
canvas { border: 1px solid }
.container canvas { position: absolute }
.container:hover canvas { position: relative }
.container { height: 180px }
<div class="container">Hover here to "untangle" the canvases<br></div>
<button>Start Recording</button><br>
But if you're going this way anyway, you might just as well do the merging from the get-go and append a single canvas to the document; the browser's compositor will have less work to do.
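For completeness, here is a minimal sketch of that single-canvas approach, drawing both moving squares straight onto one canvas instead of compositing two separate ones (same .container and animation idea as above):

const merged = document.createElement("canvas");
const mCtx = merged.getContext("2d");
document.querySelector(".container").append(merged);

let x1 = 20, x2 = 80;
const drawAll = () => {
  mCtx.clearRect(0, 0, merged.width, merged.height);
  mCtx.fillStyle = "red";
  mCtx.fillRect(x1 = (x1 + 1) % merged.width, 20, 50, 50);
  mCtx.fillStyle = "green";
  mCtx.fillRect(x2 = (x2 + 1) % merged.width, 80, 50, 50);
  requestAnimationFrame(drawAll);
};
drawAll();

// recording then works exactly as before, with only this one canvas
const recorder = new MediaRecorder(merged.captureStream());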
I'm creating a program to interact with music, and I was trying to synchronize the drum beats or the bass sounds with CSS, so that on every beat the background would change just in time.
My first idea was to create a spectrum of the audio frequencies, to understand how it relates to the sound being played. In this case, I'm using the Web Audio API's analyserNode.fftSize to get all the frequency points of an audio file and draw the spectrum with <canvas>:
const audioCtx = new AudioContext();
//Create audio source
//Here, we use an audio file, but this could also be e.g. microphone input
const audioEle = new Audio();
audioEle.src = "good-day.mp3"; //insert file name here
audioEle.autoplay = true;
audioEle.preload = "auto";
const audioSourceNode = audioCtx.createMediaElementSource(audioEle);
//Create analyser node
const analyserNode = audioCtx.createAnalyser();
analyserNode.fftSize = 256;
const bufferLength = analyserNode.frequencyBinCount;
const dataArray = new Float32Array(bufferLength); // this is where all the frequencies are stored
Then, using this dataArray with all the frequencies at that moment, I run a condition that says: if the loudest value is above X, change the background to blue:
if (Math.max(...dataArray) + 100 > 70) { // +- 70 is a good number for that specific song
  await changeBgColor();
}
The final result seems to work but isn't perfect, since the if condition runs 60 times per second and sometimes misses the bass because a voice or another sound in the audio file interferes with its frequencies.
I don't know if there is something that would really help with this. I tried ToneJS and wadJS, but I couldn't get anywhere with those libs.
I wonder what the best way to drive these CSS changes from the sound would be.
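For what it's worth, Math.max(...dataArray) looks at every bin, so vocals and hi-hats can trigger it just as easily as the bass. A rough, hedged idea is to restrict the check to the lowest bins, which hold most of the kick/bass energy; assuming the analyser setup above and a 44.1 kHz sample rate, each bin covers roughly 172 Hz at fftSize = 256, so something like this:

// only look at the first couple of bins (~0-350 Hz) instead of the whole spectrum
const bassBins = dataArray.slice(0, 2);
if (Math.max(...bassBins) + 100 > 70) { // same kind of per-song threshold as before
  await changeBgColor();
}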
The song used to test this code was: Ice Cube - It Was A Good Day (good-day.mp3)
Full code:
index.html
<!DOCTYPE html>
<body id="bd">
  <script>
    const audioCtx = new AudioContext();

    // Create audio source
    // Here, we use an audio file, but this could also be e.g. microphone input
    const audioEle = new Audio();
    audioEle.src = "good-day.mp3"; // insert file name here
    audioEle.autoplay = true;
    audioEle.preload = "auto";
    const audioSourceNode = audioCtx.createMediaElementSource(audioEle);

    // Create analyser node
    const analyserNode = audioCtx.createAnalyser();
    analyserNode.fftSize = 256;
    const bufferLength = analyserNode.frequencyBinCount;
    const dataArray = new Float32Array(bufferLength);

    // Set up audio node network
    audioSourceNode.connect(analyserNode);
    analyserNode.connect(audioCtx.destination);

    // Create 2D canvas
    const canvas = document.createElement("canvas");
    canvas.style.position = "absolute";
    canvas.style.top = 0;
    canvas.style.left = 0;
    canvas.width = window.innerWidth;
    canvas.height = 300;
    document.body.appendChild(canvas);
    const canvasCtx = canvas.getContext("2d");
    canvasCtx.clearRect(0, 0, canvas.width, canvas.height);

    async function draw() {
      const changeBgColor = () => {
        document.getElementById("bd").style["background-color"] = "blue";
      };
      const sleep = () => {
        // setTimeout(() => null, 1000)
        console.log("bass sound");
      };

      // Schedule next redraw
      requestAnimationFrame(draw);

      // Get spectrum data
      analyserNode.getFloatFrequencyData(dataArray);

      // Draw black background
      canvasCtx.fillStyle = "rgb(0, 0, 0)";
      canvasCtx.fillRect(0, 0, canvas.width, canvas.height);

      // Draw spectrum
      const barWidth = (canvas.width / bufferLength) * 2.5;
      let posX = 0;
      document.getElementById("bd").style["background-color"] = "red";
      if (Math.max(...dataArray) + 100 > 70) { // +- 70 is a good number for that specific song
        await changeBgColor();
      }
      for (let i = 0; i < bufferLength; i++) {
        const barHeight = (dataArray[i] + 100) * 2;
        // console.log(barHeight)
        canvasCtx.fillStyle = `rgb(${Math.floor(
          barHeight + 100
        )}, 255, ${Math.floor(barHeight + 200)})`;
        canvasCtx.fillRect(
          posX,
          canvas.height - barHeight / 2,
          barWidth,
          barHeight / 2
        );
        posX += barWidth + 1;
      }
    }
    draw();
  </script>
</body>
My reference for this code was the MDN documentation on AnalyserNode.
Here is a quick demo of how to retrieve the music frequencies into an array and pass them to the CSS.
It's very basic and there are many problems, but it may be a good start for further, more proper animations.
const audio = document.getElementById('music');
audio.load();
audio.play();

const ctx = new AudioContext();
const audioSrc = ctx.createMediaElementSource(audio);
const analyser = ctx.createAnalyser();
audioSrc.connect(analyser);
analyser.connect(ctx.destination);
analyser.fftSize = 256;
const bufferLength = analyser.frequencyBinCount;
const frequencyData = new Uint8Array(bufferLength);

setInterval(() => {
  analyser.getByteFrequencyData(frequencyData);
  let dataArray = Object.values(frequencyData);
  //if (Math.max(...dataArray) + 100 > 70) { // +- 70 is a good number for that specific song
  window.requestAnimationFrame(() => changeBgColor(dataArray));
  //}
}, 100);

const changeBgColor = (dataArray) => {
  console.log(dataArray);
  let color1 = dataArray.slice(0, 3).map((a) => a / 100); // Smaller numbers are darker
  let color2 = dataArray.slice(3, 6);
  let color3 = dataArray.slice(6, 9);
  document.body.style.background = `linear-gradient(90deg, rgba(${color1}, 1) 0%, rgba(${color2}, 0.2) 35%, rgba(${color3}, 1) 100%)`;
};
<audio id="music" src="https://cdn.glitch.com/02dcea11-9bd2-4462-ac38-eeb6a5ad9530%2F331_full_beautiful-minds_0171_preview.mp3?1522829295082" crossorigin="use-URL-credentials" controls="true"></audio>
<hr>
<span id="console"></span>
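A variation worth considering (a sketch, assuming the same analyser and frequencyData as above) is to expose the measured level as a CSS custom property, so the actual styling stays in the stylesheet:

setInterval(() => {
  analyser.getByteFrequencyData(frequencyData);
  // rough "bass" level from the first few bins, normalised to 0..1
  const bass = frequencyData.slice(0, 8).reduce((sum, v) => sum + v, 0) / (8 * 255);
  document.documentElement.style.setProperty('--bass', bass.toFixed(3));
}, 100);

body {
  /* --bass goes from 0 to 1; map it onto any property you like */
  background: hsl(220, 80%, calc(var(--bass, 0) * 60%));
}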
I am using a JavaScript library called face-api.js.
I need to extract the face from the video frame when face-api detects a face. Could anyone help me with that part?
const video = document.getElementById('video');

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  navigator.getUserMedia(
    {video: {}},
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}

video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);
  const displaySize = {width: video.width, height: video.height};
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    console.log('Box: ', detections[0].detection._box);
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
  }, 5000)
})
Add the extractFaceFromBox function to your code; it extracts a face from the video frame using the given bounding box and displays the result in outputImage.
Try this code and enjoy.
// This is your code
video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);
  const displaySize = {width: video.width, height: video.height};
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    // Call this function to extract and display the face
    extractFaceFromBox(video, detections[0].detection.box)
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
  }, 5000)
})

let outputImage = document.getElementById('outputImage')

// This function extracts a face from the video frame using the given bounding box
// and displays the result in outputImage
async function extractFaceFromBox(inputImage, box) {
  const regionsToExtract = [
    new faceapi.Rect(box.x, box.y, box.width, box.height)
  ]
  let faceImages = await faceapi.extractFaces(inputImage, regionsToExtract)
  if (faceImages.length == 0) {
    console.log('Face not found')
  } else {
    faceImages.forEach(cnv => {
      outputImage.src = cnv.toDataURL();
    })
  }
}
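Note that this assumes an image element with the id outputImage already exists somewhere in the page, for example:

<img id="outputImage" alt="extracted face">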
This is not specific to face-api.js, but you can use a canvas to extract an image from a video. Here is a little function I wrote for my case.
const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  const context = canvas.getContext("2d");

  // Get a screenshot from the video
  context?.drawImage(video, 0, 0, canvas.width, canvas.height);
  const dataUrl = canvas.toDataURL("image/jpeg");
  const image = new Image();
  image.src = dataUrl;

  const canvasImg = document.createElement("canvas");
  canvasImg.width = width;
  canvasImg.height = height;
  const ctx = canvasImg.getContext("2d");

  image.onload = () => {
    // Crop the image
    ctx?.drawImage(image, x, y, width, height, 0, 0, width, height);
    canvasImg.toBlob((blob) => {
      // Do something with the blob. Alternatively, you can convert it to a DataUrl like the video screenshot
      // I was using React so I just called my handler
      handSavePhoto(blob);
    }, "image/jpeg");
  };
};
You don't have to take the screenshot first; you can just go ahead and crop directly from the video, but I found after testing that cropping from an image gives more consistent results. Here is how you would achieve it in that case.
const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = width;
  canvas.height = height;
  const context = canvas.getContext("2d");

  // Crop the face directly from the current video frame
  context?.drawImage(video, x, y, width, height, 0, 0, width, height);
  canvas.toBlob((blob) => {
    handSavePhoto(blob);
  }, "image/jpeg");
};
With that out of the way, you can now use face-api data to get the face you want.
// assuming your video element is stored in the video variable
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
const {x, y, width, height} = detections[0].detection.box;
extractFace(video, x, y, width, height);
You can read more about drawImage from here.
Check if detections.length is greater than 0. That means something was actually detected in front of the camera.
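In other words, a guard like this (using the same calls as above) avoids reading detections[0] when nothing was found:

const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
if (detections.length > 0) {
  extractFaceFromBox(video, detections[0].detection.box);
}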
I'm trying to display a video inside a canvas and to loop the video. The issue sometimes happens when looping the video: before playing the video again, it flickers for one frame. It doesn't happen all the time and I don't get what's going on.
Here is the code
let canvas = document.querySelector("canvas");
let ctx = canvas.getContext("2d");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;

let src = "https://i.imgur.com/5ZiAeSX.mp4";
let video = document.createElement("video");
video.src = src;
video.muted = true;
video.play();
video.onended = () => {
  video.play();
};

function render() {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  ctx.drawImage(video, 0, 0);
  requestAnimationFrame(render);
}
render();
You can also try the fiddle here
This is because looping a MediaElement is never a seamless operation.
This is particularly audible when looping audio media.
Normally, when the media is played in a <video> element, we don't notice it because the browser just pauses the rendering for this short lapse of time, and our brain simply ignores the few paused frames and concentrates on all the ones that do move.
However, in your case it becomes very visible, because you clear the canvas anyway while there is no new video frame to draw on top of it, so it causes a big white flash.
A simple fix is to check whether the currentTime is 0 and to not redraw during this time:
let canvas = document.querySelector("canvas");
let ctx = canvas.getContext("2d");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;

let src = "https://i.imgur.com/5ZiAeSX.mp4";
let video = document.createElement("video");
video.src = src;
video.muted = true;
video.play();

let missed_frames = 0; // only for demo
video.onended = () => {
  video.play();
  // only for demo
  setTimeout(() => {
    _log.textContent = 'missed ' + missed_frames + ' frames while looping';
    missed_frames = 0;
  }, 200);
};

function render() {
  if (video.currentTime) {
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
  }
  // only for demo
  else {
    missed_frames++;
  }
  requestAnimationFrame(render);
}
render();
#_log { color: white; position: absolute; }
<pre id="_log"></pre>
<canvas></canvas>
A harder fix, if you really need a seamless loop, would be to use a MediaSource object, but if it's not really needed, that's a bit cumbersome to set up.
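For the curious, here is a very rough outline of what the MediaSource route could look like. Treat it as an assumption-laden sketch rather than a drop-in solution: it only works with a fragmented MP4 (a plain MP4 usually has to be re-muxed first), and the codec string and file name below are placeholders:

const video = document.createElement("video");
video.muted = true;
const mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);

mediaSource.addEventListener("sourceopen", async () => {
  const sb = mediaSource.addSourceBuffer('video/mp4; codecs="avc1.42E01E"'); // placeholder codec string
  sb.mode = "sequence"; // each append is placed right after the previous one
  const data = await fetch("clip-fragmented.mp4").then(r => r.arrayBuffer()); // placeholder URL

  const append = () => new Promise(resolve => {
    sb.addEventListener("updateend", resolve, { once: true });
    sb.appendBuffer(data);
  });

  await append();
  video.play();

  // whenever playback gets close to the end of the buffered data,
  // append the same segment again so the timeline never runs out
  setInterval(() => {
    if (!sb.updating && mediaSource.duration - video.currentTime < 2) {
      append();
    }
  }, 500);
});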
You can set video.loop to true and avoid the part where you ask your video to start playing again.
I did the trick this way:
let canvas = document.querySelector("canvas");
let ctx = canvas.getContext("2d");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;

let src = "https://i.imgur.com/5ZiAeSX.mp4";
let video = document.createElement("video");
video.src = src;
video.muted = true;
video.play();
video.loop = true;
/* video.onended = () => {
  video.play();
}; */

function render() {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  ctx.drawImage(video, 0, 0);
  requestAnimationFrame(render);
}
render();
This is why your video flickers: it doesn't loop, it just replays again and again.
Give it a try ;)
I'm trying to build a function that extracts frames from a video in JavaScript. Here is the code I came up with. The function receives a source and a callback. From there, I create a video with the source and I want to draw frames of the video to the canvas at a set interval.
Unfortunately, the frames returned are all transparent images.
I tried a few different things, but I can't make it work. Can someone help?
Thanks.
const extractFramesFromVideo = function(src, callback) {
  var video = document.createElement('video');
  video.src = src;

  video.addEventListener('loadeddata', function() {
    var canvas = document.createElement('canvas');
    var context = canvas.getContext('2d');
    canvas.setAttribute('width', video.videoWidth);
    canvas.setAttribute('height', video.videoHeight);

    var frames = [];
    var fps = 1; // Frames per second to extract
    var interval = 1 / fps; // Frame interval
    var maxDuration = 10; // 10 seconds max duration
    var currentTime = 0; // Start at 0

    while (currentTime < maxDuration) {
      video.currentTime = currentTime;
      context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
      var base64ImageData = canvas.toDataURL();
      frames.push(base64ImageData);
      currentTime += interval;

      if (currentTime >= maxDuration) {
        console.log(frames);
        callback(frames);
      }
    }
  });
}

export default extractFramesFromVideo;
After tweaking your code to wait for the seeked event, and fixing a few bits and pieces, it seems to work fine:
async function extractFramesFromVideo(videoUrl, fps = 25) {
  return new Promise(async (resolve) => {
    // fully download it first (no buffering):
    let videoBlob = await fetch(videoUrl).then(r => r.blob());
    let videoObjectUrl = URL.createObjectURL(videoBlob);
    let video = document.createElement("video");

    let seekResolve;
    video.addEventListener('seeked', async function() {
      if (seekResolve) seekResolve();
    });

    video.addEventListener('loadeddata', async function() {
      let canvas = document.createElement('canvas');
      let context = canvas.getContext('2d');
      let [w, h] = [video.videoWidth, video.videoHeight];
      canvas.width = w;
      canvas.height = h;

      let frames = [];
      let interval = 1 / fps;
      let currentTime = 0;
      let duration = video.duration;

      while (currentTime < duration) {
        video.currentTime = currentTime;
        await new Promise(r => seekResolve = r);

        context.drawImage(video, 0, 0, w, h);
        let base64ImageData = canvas.toDataURL();
        frames.push(base64ImageData);

        currentTime += interval;
      }
      resolve(frames);
    });

    // set video src *after* listening to events in case it loads so fast
    // that the events occur before we were listening.
    video.src = videoObjectUrl;
  });
}
Usage:
let frames = await extractFramesFromVideo("https://example.com/video.webm");
currentComponent.refs.videopreview refers to the <video ref="videopreview" autoPlay></video> element in the HTML page.
Below is the code to extract a frame from the video.
const getFrame = () => {
  const video = currentComponent.refs.videopreview;
  if (!video.srcObject) {
    return null;
  }
  const canvas = document.createElement('canvas');
  canvas.width = canvasWidth;
  canvas.height = canvasHeight;
  canvas.getContext('2d').drawImage(video, 0, 0);
  const data = canvas.toDataURL('image/jpeg');
  return data; // frame data
}
The getFrame function can be called at the required interval.
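For example, here is a minimal usage sketch polling it once per second (canvasWidth, canvasHeight and the preview image below are assumptions, defined elsewhere in your app):

setInterval(() => {
  const frame = getFrame(); // data URL (image/jpeg), or null if there is no stream yet
  if (frame) {
    // hypothetical: show the captured frame in an <img id="preview"> element
    document.getElementById('preview').src = frame;
  }
}, 1000);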