I'm building a cross-platform web app where audio is generated on-the-fly on the server and live streamed to a browser client, probably via the HTML5 audio element. On the browser, I'll have Javascript-driven animations that must precisely sync with the played audio. "Precise" means that the audio and animation must be within a second of each other, and hopefully within 250ms (think lip-syncing). For various reasons, I can't do the audio and animation on the server and live-stream the resulting video.
Ideally, there would be little or no latency between the audio generation on the server and the audio playback on the browser, but my understanding is that latency will be difficult to control and probably in the 3-7 second range (browser-, environment-, network- and phase-of-the-moon-dependent). I can handle that, though, if I can precisely measure the actual latency on-the-fly so that my browser Javascript knows when to present the proper animated frame.
So, I need to precisely measure the latency between my handing audio to the streaming server (Icecast?) and the audio coming out of the speakers on the computer hosting the browser. Some blue-sky possibilities:
Add metadata to the audio stream, and parse it from the playing audio (I understand this isn't possible using the standard audio element)
Add brief periods of pure silence to the audio, and then detect them on the browser (can audio elements yield the actual audio samples?)
Query the server and the browser as to the various buffer depths (see the sketch after this list)
Decode the streamed audio in Javascript and then grab the metadata
Any thoughts as to how I could do this?
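Regarding the buffer-depth idea: on the client side, the audio element already exposes its buffered ranges, so part of that measurement is straightforward. A minimal sketch of my own (clientBufferDepth is a hypothetical helper; the server-side buffer depth would still need to be reported out-of-band):
// How much audio the browser has buffered past the playhead, in seconds.
// `audio` is the playing HTMLAudioElement.
function clientBufferDepth(audio) {
  var buffered = audio.buffered; // a TimeRanges object
  if (buffered.length === 0) return 0;
  // end of the last buffered range minus the current playback position
  return buffered.end(buffered.length - 1) - audio.currentTime;
}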
Utilize the timeupdate event of the <audio> element, which is fired three to four times per second, to perform precise animations during streaming of media by checking the .currentTime of the <audio> element; animations or transitions can be started or stopped up to several times per second.
If available in the browser, you can use fetch() to request the audio resource and return response.body.getReader() at .then(), which yields a reader for the ReadableStream of the resource; create a new MediaSource object and set the .src of the <audio> element (or of new Audio()) to an object URL of the MediaSource; append the first stream chunk, in the .then() chained to .read(), to the sourceBuffer of the MediaSource with .mode set to "sequence"; append the remainder of the chunks to the sourceBuffer at the sourceBuffer's updateend events.
If fetch() response.body.getReader() is not available in the browser, you can still use the timeupdate or progress event of the <audio> element to check .currentTime, and start or stop animations or transitions at the required second of streaming media playback.
Use the canplay event of the <audio> element to play the media once the stream has accumulated adequate buffers at the MediaSource to proceed with playback.
To perform precise animations, you can use an object whose property names are numbers corresponding to the .currentTime of the <audio> at which an animation should occur, and whose values are the CSS property value the element should be animated to.
In the JavaScript below, animations occur at every twenty-second period, beginning at 0, and every sixty seconds until the media playback has concluded.
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title></title>
<style>
body {
width: 90vw;
height: 90vh;
background: #000;
transition: background 1s;
}
span {
font-family: Georgia;
font-size: 36px;
opacity: 0;
}
</style>
</head>
<body>
<audio controls></audio>
<br>
<span></span>
<script type="text/javascript">
window.onload = function() {
var url = "/path/to/audio";
// given 240 seconds total duration of audio
// 240/12 = 20
// properties correspond to `<audio>` `.currentTime`,
// values correspond to color to set at element
var colors = {
0: "red",
20: "blue",
40: "green",
60: "yellow",
80: "orange",
100: "purple",
120: "violet",
140: "brown",
160: "tan",
180: "gold",
200: "sienna",
220: "skyblue"
};
var body = document.querySelector("body");
var mediaSource = new MediaSource;
var audio = document.querySelector("audio");
var span = document.querySelector("span");
var color = window.getComputedStyle(body)
.getPropertyValue("background-color");
//console.log(mediaSource.readyState); // closed
var mimecodec = "audio/mpeg";
audio.oncanplay = function() {
this.play();
}
audio.ontimeupdate = function() {
// 240/12 = 20
var curr = Math.round(this.currentTime);
if (colors.hasOwnProperty(curr)) {
// set `color` to `colors[curr]`
color = colors[curr]
}
// animate `<span>` every 60 seconds
if (curr % 60 === 0 && span.innerHTML === "") {
var t = curr / 60;
span.innerHTML = t + " minute" + (t === 1 ? "" : "s")
+ " of " + Math.round(this.duration) / 60
+ " minutes of audio";
span.animate([{
opacity: 0
}, {
opacity: 1
}, {
opacity: 0
}], {
duration: 2500,
iterations: 1
})
.onfinish = function() {
span.innerHTML = ""
}
}
// change `background-color` of `body` every 20 seconds
body.style.backgroundColor = color;
console.log("current time:", curr
, "current background color:", color
, "duration:", this.duration);
}
// set `<audio>` `.src` to `mediaSource`
audio.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener("sourceopen", sourceOpen);
function sourceOpen(event) {
// if the media type is supported by `mediaSource`
// fetch resource, begin stream read,
// append stream to `sourceBuffer`
if (MediaSource.isTypeSupported(mimecodec)) {
var sourceBuffer = mediaSource.addSourceBuffer(mimecodec);
// set `sourceBuffer` `.mode` to `"sequence"`
sourceBuffer.mode = "sequence";
fetch(url)
// get a reader for the `ReadableStream` of `response.body`
.then(response => response.body.getReader())
.then(reader => {
var processStream = (data) => {
if (data.done) {
return;
}
// append chunk of stream to `sourceBuffer`
sourceBuffer.appendBuffer(data.value);
}
// at `sourceBuffer` `updateend` call `reader.read()`,
// to read next chunk of stream, append chunk to
// `sourceBuffer`
sourceBuffer.addEventListener("updateend", function() {
reader.read().then(processStream);
});
// start processing stream
reader.read().then(processStream);
// do stuff when `reader` is closed,
// read of stream is complete
return reader.closed.then(() => {
// signal end of stream to `mediaSource`
mediaSource.endOfStream();
return mediaSource.readyState;
})
})
// do stuff when `reader.closed`, `mediaSource` stream ended
.then(msg => console.log(msg))
}
// if `mimecodec` is not supported by `MediaSource`
else {
alert(mimecodec + " not supported");
}
};
}
</script>
</body>
</html>
plnkr http://plnkr.co/edit/fIm1Qp?p=preview
There is no way for you to measure the latency directly, but any audio element generates events like 'playing' when playback has just (re)started (fired quite often), 'stalled' when streaming has stopped, or 'waiting' while data is loading. So what you can do is manipulate your video based on these events.
So pause while 'stalled' or 'waiting' is fired, then continue playing the video when 'playing' is fired again.
But I advise you to check the other events that might affect your flow ('error', for example, would be important for you).
https://developer.mozilla.org/en-US/docs/Web/API/HTMLAudioElement
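A minimal sketch of wiring those events (my own illustration; pauseAnimation and resumeAnimation are hypothetical functions you would supply):
var audio = document.querySelector("audio");
// stalled / waiting: data flow is interrupted, so hold the animation
audio.addEventListener("stalled", pauseAnimation);
audio.addEventListener("waiting", pauseAnimation);
// playing: playback has (re)started, so resume the animation
audio.addEventListener("playing", resumeAnimation);
// error: inspect audio.error and recover or bail out
audio.addEventListener("error", function() {
  console.log("media error code:", audio.error && audio.error.code);
});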
What I would try first is to create a timestamp with performance.now, process the data, and record it in a blob with the new MediaRecorder API.
The recorder will ask the user for access to their audio hardware; this can be a problem for your app, but it looks mandatory for getting the real latency.
As soon as this is done, there are many ways to measure the actual latency between the generation and the actual rendering. Basically, a sound event.
For further reference and example:
Recorder demo
https://github.com/mdn/web-dictaphone/
https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder_API/Using_the_MediaRecorder_API
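A rough sketch of the measuring step (my own untested illustration; it swaps in the Web Audio API's AnalyserNode for the onset detection, since raw MediaRecorder blobs are awkward to inspect in real time):
async function measureOutputLatency(sentAt) {
  // sentAt: performance.now() taken when the audio was handed to the server
  const mic = await navigator.mediaDevices.getUserMedia({ audio: true });
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  ctx.createMediaStreamSource(mic).connect(analyser);
  const samples = new Float32Array(analyser.fftSize);
  return new Promise(resolve => {
    (function poll() {
      analyser.getFloatTimeDomainData(samples);
      // crude onset detection: any sample above an arbitrary threshold
      if (samples.some(v => Math.abs(v) > 0.1)) {
        resolve(performance.now() - sentAt);
      } else {
        requestAnimationFrame(poll);
      }
    })();
  });
}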
requestPictureInPicture is so amazing, but it looks like it only works with 1 video.
How can I get requestPictureInPicture to play multiple videos, so I can watch two videos at the same time?
Basically this only displays one video:
video
.requestPictureInPicture()
.catch(error => {
console.log(error) // Error handling
});
video2
.requestPictureInPicture()
.catch(error => {
console.log(error) // Error handling
});
https://codepen.io/zecheesy/pen/YzwBJMR
Thoughts: Maybe we could put two videos in a canvas? And have the pictureInPicture play both videos at the same time? https://googlechrome.github.io/samples/picture-in-picture/audio-playlist
I'm not sure if this is possible. Would love your help so much!
Regarding opening two Picture-in-Picture windows simultaneously, the specs have a paragraph just for it, where they explain they actually leave it as an implementation detail:
Operating systems with a Picture-in-Picture API usually restrict Picture-in-Picture mode to only one window. Whether only one window is allowed in Picture-in-Picture mode will be left to the implementation and the platform. However, because of the one Picture-in-Picture window limitation, the specification assumes that a given Document can only have one Picture-in-Picture window.
What happens when there is a Picture-in-Picture request while a window is already in Picture-in-Picture will be left as an implementation detail: the current Picture-in-Picture window could be closed, the Picture-in-Picture request could be rejected or even two Picture-in-Picture windows could be created. Regardless, the User Agent will have to fire the appropriate events in order to notify the website of the Picture-in-Picture status changes.
So the best we can say is that you should not expect it to open two windows simultaneously.
Now, if you really wish, you can indeed draw both videos on a canvas and pass this canvas to a PiP window, after piping its captureStream() to a third <video>, though this requires that both videos are served with the proper Access-Control-Allow-Origin headers, and moreover, it requires your browser to actually support the PiP API (current Firefox has a PiP feature which is not the PiP API).
Here is a proof of concept:
const vids = document.querySelectorAll( "video" );
const btn = document.querySelector( "button" );
// wait until both videos have their metadata
Promise.all( [ ...vids ].map( (vid) => {
return new Promise( (res) => vid.onloadedmetadata = () => res() );
} ) )
.then( () => {
if( !HTMLVideoElement.prototype.requestPictureInPicture ) {
return console.error( "Your browser doesn't support the PiP API" );
}
btn.onclick = async (evt) => {
const canvas = document.createElement( "canvas" );
// both videos share the same 16/9 ratio
// so in this case it's really easy to draw both on the same canvas
// to make it dynamic would require more maths
// but I'll leave that to the reader
const height = 720;
const width = 1280;
canvas.height = height * 2; // vertical disposition
canvas.width = width;
const ctx = canvas.getContext( "2d" );
const video = document.createElement( "video" );
video.srcObject = canvas.captureStream();
let began = false; // rPiP needs video's metadata
anim();
await video.play();
began = true;
video.requestPictureInPicture();
function anim() {
ctx.drawImage( vids[ 0 ], 0, 0, width, height );
ctx.drawImage( vids[ 1 ], 0, height, width, height );
// iff we are still in PiP mode
if( !began || document.pictureInPictureElement === video ) {
requestAnimationFrame( anim );
}
else {
// kill the stream
video.srcObject.getTracks().forEach( track => track.stop() );
}
}
}
} );
video { width: 300px }
<button>enter Picture in Picture</button><br>
<video crossorigin muted controls autoplay loop
src="https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm"></video>
<video crossorigin muted controls autoplay loop
src="https://upload.wikimedia.org/wikipedia/commons/a/a4/BBH_gravitational_lensing_of_gw150914.webm"></video>
And beware: since I muted the videos for SO, scrolling in a way that the original videos are out of sight will pause them.
I have built a visitor management system and have recently swapped to a Surface device as the driver. The HTML5 webcam stream is showing as blurry / out of focus on the front-facing camera. If I swap to the rear camera, however, it is fine. And if I use the front-facing camera on another public site that uses a webcam feature, it works absolutely fine.
Here is a capture of the camera element; it looks like some form of deliberate blurring, as opposed to the camera just being bad...
https://ibb.co/Dzf67nC/
I have tried scanning through the code and cannot find anything that scales the camera stream at all that may cause blurring or focus changes.
Below is my photo.js file that provides the stream to my visitor sign in page and also handles the capturing of screenshots.
// References to all the elements we will need.
var video = document.querySelector('#camera-stream'),
image = document.querySelector('#snap'),
my_photo = document.querySelector('#my-photo'),
container = document.querySelector('.camera-container'),
//start_camera = document.querySelector('#start-camera'),
controls = document.querySelector('.controls'),
take_photo_btn = document.querySelector('#take-photo'),
delete_photo_btn = document.querySelector('#delete-photo'),
download_photo_btn = document.querySelector('#download-photo'),
imgeurl = document.querySelector('#imagesource'),
open_camera = document.querySelector('#open-camera'),
error_message = document.querySelector('#error-message');
// The getUserMedia interface is used for handling camera input.
// Some browsers need a prefix so here we're covering all the options
navigator.getMedia = ( navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
// Mobile browsers cannot play video without user input,
// so here we're using a button to start it manually.
open_camera.addEventListener("click", function(e){
if($('#my-photo').attr('src') == '') {
e.preventDefault();
container.classList.add("visible");
// Start video playback manually.
//video.play();
//showVideo();
if(!navigator.getMedia){
displayErrorMessage("Your browser doesn't have support for the navigator.getUserMedia interface.");
}
else{
// Request the camera.
navigator.getMedia(
{
video: true
},
// Success Callback
function(stream){
// Set the camera stream as the srcObject
// of our HTML video element.
video.srcObject=stream;
// Play the video element to start the stream.
video.play();
video.onplay = function() {
showVideo();
};
},
// Error Callback
function(err){
displayErrorMessage("There was an error with accessing the camera stream: " + err.name, err);
}
);
}
}
});
open_camera.click();
take_photo_btn.addEventListener("click", function(e){
e.preventDefault();
var count=4;
var counter=setInterval(timer, 500); //1000 will run it every 1 second
$('.countdown-container').addClass('visible');
function timer(){
count=count-1;
if (count <= 0)
{
$('.countdown-number').html('<i class="far fa-smile"></i>');
clearInterval(counter);
//counter ended, do something here
return;
}
$('.countdown-number').text(count);
//Do code for showing the number of seconds here
}
setTimeout(function(){
$('.countdown-container').removeClass('visible');
$('.countdown-number').text('Get Ready');
},2500);
setTimeout(function(){
video.pause();
var snap = takeSnapshot();
// Show image.
image.setAttribute('src', snap);
imgeurl.value = snap;
image.classList.add("visible");
//tumbnail image
my_photo.setAttribute('src', snap);
my_photo.value = snap;
// Enable delete and save buttons
delete_photo_btn.classList.remove("disabled");
download_photo_btn.classList.remove("disabled");
take_photo_btn.classList.add("disabled");
},3000);
// Set the href attribute of the download button to the snap url.
// Pause video playback of stream.
});
delete_photo_btn.addEventListener("click", function(e){
e.preventDefault();
// Hide image.
image.setAttribute('src', "");
image.classList.remove("visible");
my_photo.setAttribute('src', "");
// Disable delete and save buttons
delete_photo_btn.classList.add("disabled");
download_photo_btn.classList.add("disabled");
take_photo_btn.classList.remove("disabled");
// Resume playback of stream.
video.play();
});
function showVideo(){
// Display the video stream and the controls.
//hideUI();
video.classList.add("visible");
controls.classList.add("visible");
}
function takeSnapshot(){
// Here we're using a trick that involves a hidden canvas element.
var hidden_canvas = document.querySelector('canvas'),
context = hidden_canvas.getContext('2d');
var width = video.videoWidth,
height = video.videoHeight;
if (width && height) {
// Setup a canvas with the same dimensions as the video.
hidden_canvas.width = width;
hidden_canvas.height = height;
// Make a copy of the current frame in the video on the canvas.
context.drawImage(video, 0, 0, width, height);
// Turn the canvas image into a dataURL that can be used as a src for our photo.
return hidden_canvas.toDataURL('image/png');
}
}
download_photo_btn.addEventListener("click", function(e){
e.preventDefault();
container.classList.remove("visible");
my_photo.classList.add("visible");
});
function displayErrorMessage(error_msg, error){
error = error || "";
if(error){
console.log(error);
}
error_message.innerText = error_msg;
hideUI();
error_message.classList.add("visible");
}
function hideUI(){
// Helper function for clearing the app UI.
controls.classList.remove("visible");
//start_camera.classList.remove("visible");
video.classList.remove("visible");
image.classList.remove("visible");
error_message.classList.remove("visible");
}
The camera should be a lot crisper than it is. No errors or anything in the console.
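One thing that may be worth trying (an assumption on my part, not a confirmed fix): with video: true, the browser is free to pick a low default capture resolution such as 640×480, which looks blurry once the element is scaled up. You can request a higher resolution from the front camera explicitly via constraints:
// Sketch: ask for Full HD from the front-facing camera; the browser
// falls back to the closest resolution the hardware supports.
navigator.mediaDevices.getUserMedia({
  video: {
    facingMode: "user",
    width: { ideal: 1920 },
    height: { ideal: 1080 }
  }
})
.then(function(stream) {
  video.srcObject = stream;
  video.play();
})
.catch(function(err) {
  displayErrorMessage("There was an error with accessing the camera stream: " + err.name, err);
});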
Hi, I would like to start playing a sound at a specific position. The API docs say that I can use .pos, but it doesn't start where I would like it to start.
<p>This sound last 13 sec.</p>
<h1>Audio</h1>
<h3>2:00</h3>
<h3>3:00</h3>
<h3>4:00</h3>
<h3>10:00</h3>
My JavaScript:
var son = [false, "0000"]
sound = new Howl({
urls: ['http://goldfirestudios.com/proj/howlerjs/sound.mp3'],
autoplay: false
});
function playSequence(events,valeur){
//if there is no sound play the track at a certain position
var playSound = function(valeur){
if(son[0] == true){
sound.stop();
son[0] = false;
}else{
son[0] = true;
sound.pos = parseInt(valeur[0]); // position at 2 sec
sound.play();
}
}
playSound(valeur);
}
//play the sound on click
$("a.val2").on('click', function(events){
valeur = $(this).text();
playSequence(events, valeur);
});
pos is a method, not a property. For best compatibility, you would want to play it as follows (fixed in the 2.0 beta branch):
sound.play(function(id){
sound.pos(valeur[0], id);
});
Here's how you would do it in 2.0 (check 2.0 branch at https://github.com/goldfire/howler.js):
var id = sound.play();
sound.seek(valeur[0], id);
If you are only playing one sound then there is no need to set the id, but it is best practice to change the position after playing when using 1.1.x.
I was having issues playing a sound at a specific time as well; it worked on the first play of a sound, but if I tried to go back and play that sound again, using seek immediately wouldn't work.
I just figured out a way to consistently make it work using howler v2:
function playSoundAtPosition(sound, pos) {
sound.once("play", () => {
sound.seek(pos);
});
sound.play();
}
This makes use of the once method, which fires the callback on the play event and then immediately removes itself. I believe this helps because it waits until it actually knows the sound is playing before seeking to the desired time.
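For instance (assuming sound is a Howl instance and you want to start 7.5 seconds in):
playSoundAtPosition(sound, 7.5); // seeks to 7.5 s once playback has actually started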
I know this is an old question but hopefully it's useful to someone if they come across this!
You can use Sprites!
var sound = new Howl({
src: ['sounds.webm', 'sounds.mp3'],
sprite: {
blast: [0, 3000],
laser: [4000, 1000],
winner: [6000, 5000]
}
});
// Shoot the laser!
sound.play('laser');
:)
I found it at this link: How to Create and use Sprites
I am attempting to create a thumbnail preview from a video file (mp4,3gp) from a form input type='file'. Many have said that this can be done server side only. I find this hard to believe since I just recently came across this Fiddle using HTML5 Canvas and Javascript.
Thumbnail Fiddle
The only problem is this requires the video to be present and the user to click play before they click a button to capture the thumbnail. I am wondering if there is a way to get the same results without the player being present and user clicking the button. For example: User click on file upload and selects video file and then thumbnail is generated. Any help/thoughts are welcome!
canvas.drawImage() must be given an HTML element (such as a <video>) as its image source.
source
Here is a simpler jsfiddle:
//and code
function capture(){
var canvas = document.getElementById('canvas');
var video = document.getElementById('video');
canvas.getContext('2d').drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
}
The advantage of this solution is that you can select the thumbnail you want based on the time of the video.
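For example, you could seek first and capture once the frame is actually ready (a sketch of mine building on the snippet above, reusing its canvas and video elements; seeking is asynchronous, so we wait for the seeked event):
function captureAt(time) {
  var video = document.getElementById('video');
  video.addEventListener('seeked', function onSeeked() {
    video.removeEventListener('seeked', onSeeked);
    capture(); // draw the now-current frame onto the canvas
  });
  video.currentTime = time; // fires the 'seeked' event when done
}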
Recently needed this, so I wrote a function that takes in a video file and a desired timestamp, and returns an image blob at that time of the video.
Sample Usage:
try {
// get the frame at 1.5 seconds of the video file
const cover = await getVideoCover(file, 1.5);
// print out the result image blob
console.log(cover);
} catch (ex) {
console.log("ERROR: ", ex);
}
Function:
function getVideoCover(file, seekTo = 0.0) {
console.log("getting video cover for file: ", file);
return new Promise((resolve, reject) => {
// load the file to a video player
const videoPlayer = document.createElement('video');
videoPlayer.setAttribute('src', URL.createObjectURL(file));
videoPlayer.load();
videoPlayer.addEventListener('error', (ex) => {
reject("error when loading video file", ex);
});
// load metadata of the video to get video duration and dimensions
videoPlayer.addEventListener('loadedmetadata', () => {
// seek to user defined timestamp (in seconds) if possible
if (videoPlayer.duration < seekTo) {
reject("video is too short.");
return;
}
// delay seeking or else 'seeked' event won't fire on Safari
setTimeout(() => {
videoPlayer.currentTime = seekTo;
}, 200);
// extract video thumbnail once seeking is complete
videoPlayer.addEventListener('seeked', () => {
console.log('video is now paused at %ss.', seekTo);
// define a canvas to have the same dimension as the video
const canvas = document.createElement("canvas");
canvas.width = videoPlayer.videoWidth;
canvas.height = videoPlayer.videoHeight;
// draw the video frame to canvas
const ctx = canvas.getContext("2d");
ctx.drawImage(videoPlayer, 0, 0, canvas.width, canvas.height);
// return the canvas image as a blob
ctx.canvas.toBlob(
blob => {
resolve(blob);
},
"image/jpeg",
0.75 /* quality */
);
});
});
});
}
Recently needed this and did quite some testing, boiling it down to the bare minimum; see https://codepen.io/aertmann/pen/mAVaPx
There are some limitations on where it works, but browser support is currently fairly good: Chrome, Firefox, Safari, Opera, IE10, IE11, Android (Chrome), iOS Safari (10+).
video.preload = 'metadata';
video.src = url;
// Load video in Safari / IE11
video.muted = true;
video.playsInline = true;
video.play();
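The linked pen has the full details; the capture step itself could then look roughly like this (my own continuation, not taken verbatim from the pen):
video.addEventListener('canplay', function() {
  var canvas = document.createElement('canvas');
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  canvas.getContext('2d').drawImage(video, 0, 0);
  var thumbnail = canvas.toDataURL('image/png'); // use as the src of an <img>
});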
You can use this function that I've written. You just need to pass the video file to it as an argument. It will return the data URL of the thumbnail (i.e. image preview) of that video. You can modify the return type according to your need.
const generateVideoThumbnail = (file: File) => {
return new Promise((resolve) => {
const canvas = document.createElement("canvas");
const video = document.createElement("video");
// this is important
video.autoplay = true;
video.muted = true;
video.src = URL.createObjectURL(file);
video.onloadeddata = () => {
let ctx = canvas.getContext("2d");
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
video.pause();
return resolve(canvas.toDataURL("image/png"));
};
});
};
Please keep in mind that this is an async function. So make sure to use it accordingly.
For instance:
const handleFileUpload = async (e) => {
const thumbnail = await generateVideoThumbnail(e.target.files[0]);
console.log(thumbnail)
}
The easiest way to display a thumbnail is using the <video> tag itself.
<video src="http://www.w3schools.com/html/mov_bbb.mp4"></video>
Use #t in the URL if you want the thumbnail at x seconds.
E.g.:
<video src="http://www.w3schools.com/html/mov_bbb.mp4#t=5"></video>
Make sure that it does not include any attributes like autoplay or controls, and that it does not have a <source> tag as a child element.
With a little bit of JavaScript, you may also be able to play the video, when the thumbnail has been clicked.
document.querySelector('video').addEventListener('click', (e) => {
if (!e.target.controls) { // Proceed, if there are no controls
e.target.src = e.target.src.replace(/#t=\d+/g, ''); // Remove the time, which is set in the URL
e.target.play(); // Play the video
e.target.controls = true; // Enable controls
}
});
<video src="http://www.w3schools.com/html/mov_bbb.mp4#t=5"></video>
You can use my code here; $video is a video DOM element (not a jQuery object, despite the $ prefix). This function will return a data-URL string:
function createPoster($video) {
//here you can set anytime you want
$video.currentTime = 5;
var canvas = document.createElement("canvas");
canvas.width = 350;
canvas.height = 200;
canvas.getContext("2d").drawImage($video, 0, 0, canvas.width, canvas.height);
return canvas.toDataURL("image/jpeg");;
}
Example usage:
$video.setAttribute("poster", createPoster($video));
I recently stumbled on the same issue and here is how I got around it.
Firstly, it will be easier if you have the video as an HTML element, so either have it in the HTML like this:
<video src="http://www.w3schools.com/html/mov_bbb.mp4"></video>
or take the file from the input and create an HTML element with it.
The trick is to set the start time in the video tag to the point you want to seek to and show as your thumbnail; you can do this by adding #t=1.5 to the end of the video source.
<video src="http://www.w3schools.com/html/mov_bbb.mp4#t=1.5"></video>
where 1.5 is the time you want to seek and get a thumbnail of.
This, however, makes the video start playing from that section, so to avoid that we add an event listener on the video's play button(s) and have the video start from the beginning by setting video.currentTime = 0.
const video = document.querySelector('video');
video.addEventListener('click', (e)=> {
video.currentTime = 0 ;
video.play();
})
I want to capture a frame from video every 5 seconds.
This is my JavaScript code:
video.addEventListener('loadeddata', function() {
var duration = video.duration;
var i = 0;
var interval = setInterval(function() {
video.currentTime = i;
generateThumbnail(i);
i = i+5;
if (i > duration) clearInterval(interval);
}, 300);
});
function generateThumbnail(i) {
//generate thumbnail URL data
var context = thecanvas.getContext('2d');
context.drawImage(video, 0, 0, 220, 150);
var dataURL = thecanvas.toDataURL();
//create img
var img = document.createElement('img');
img.setAttribute('src', dataURL);
//append img in container div
document.getElementById('thumbnailContainer').appendChild(img);
}
The problem I have is that the first two images generated are the same, and the image for the duration-5-second mark is not generated. I found out that the thumbnail is generated before the video frame at the specified time is displayed in the <video> tag.
For example, when video.currentTime = 5, the image of the frame at 0 s is generated. Then the video frame jumps to time 5 s. So when video.currentTime = 10, the image of the frame at 5 s is generated.
Cause
The problem is that seeking a video (by setting its currentTime) is asynchronous.
You need to listen to the seeked event, or else you risk capturing whatever frame is currently displayed, which is likely still at your old time value.
As seeking is asynchronous, you must not use setInterval(), as you would not be able to properly synchronize with when the next frame has been seeked to. There is no need for setInterval() anyway, as we will utilize the seeked event instead, which will keep everything in sync.
Solution
By re-writing the code a little you can use the seeked event to go through the video to capture the correct frame as this event ensures us that we are actually at the frame we requested by setting the currentTime property.
Example
// global or parent scope of handlers
var video = document.getElementById("video"); // added for clarity: this is needed
var i = 0;
video.addEventListener('loadeddata', function() {
this.currentTime = i;
});
Add this event handler to the party:
video.addEventListener('seeked', function() {
// now video has seeked and current frames will show
// at the time as we expect
generateThumbnail(i);
// when frame is captured, increase here by 5 seconds
i += 5;
// if we are not past end, seek to next interval
if (i <= this.duration) {
// this will trigger another seeked event
this.currentTime = i;
}
else {
// Done!, next action
}
});
If you'd like to extract all frames from a video, see this answer. The example below assumes that you want to extract a frame every 5 seconds, as OP requested.
This answer requires WebCodecs which is supported in Chrome and Edge as of writing.
<canvas id="canvasEl"></canvas>
<script type="module">
import getVideoFrames from "https://deno.land/x/get_video_frames@v0.0.8/mod.js"
let ctx = canvasEl.getContext("2d");
// `getVideoFrames` requires a video URL as input.
// If you have a file/blob instead of a videoUrl, turn it into a URL like this:
let videoUrl = URL.createObjectURL(fileOrBlob);
const saveFrameEverySeconds = 5;
let elapsedSinceLastSavedFrame = 0;
await getVideoFrames({
videoUrl,
onFrame(frame) { // `frame` is a VideoFrame object:
elapsedSinceLastSavedFrame += frame.duration / 1e6; // frame.duration is in microseconds, so we convert to seconds
if(elapsedSinceLastSavedFrame > saveFrameEverySeconds) {
ctx.drawImage(frame, 0, 0, canvasEl.width, canvasEl.height);
elapsedSinceLastSavedFrame = 0;
}
frame.close();
},
onConfig(config) {
canvasEl.width = config.codedWidth;
canvasEl.height = config.codedHeight;
},
});
URL.revokeObjectURL(videoUrl); // revoke the object URL to prevent a memory leak
</script>
Demo: https://jsbin.com/qovapeziqi/edit?html,output
Github: https://github.com/josephrocca/getVideoFrames.js