Animation rendering using Node.js on the backend server side - javascript

I have a simple animation created using CreateJS and ffmpegserver.js.
ffmpegserver.js is a simple Node.js server and library that sends canvas frames to the server and uses FFmpeg to compress the video. It can be used standalone or with CCapture.js.
Here is the repo: video rendering demo.
In the public folder I have demos, e.g. test3.html and test3.js.
test3.html
<!DOCTYPE html>
<html>
<head>
<title>TweenJS: Simple Tween Demo</title>
<style>
canvas {
border: 1px solid #08bf31;
justify-content: center;
display: flex;
align-items: center;
margin: 0px auto;
margin-bottom: 40px;
}
a {
width: 150px;
height: 45px;
background: red;
text-align: center;
display: flex;
justify-content: center;
align-items: center;
border-radius: 300px;
color: white;
}
#container{
flex-direction: column;
justify-content: center;
display: flex;
align-items: center;
margin: 0px auto;
}
#progress{
margin: 30px;
}
#preview{
margin: 40px;
width: 150px;
height: 45px;
background: deepskyblue;
color: white;
border: none;
border-radius: 300px;
}
</style>
</head>
<body onload="init();">
<div>
<div id="container">
<h1>Simple Tween Demo</h1>
<canvas id="testCanvas" width="500" height="400"></canvas>
<div id="progress"></div>
</div>
</div>
<script src="http://localhost:8081/ffmpegserver/CCapture.js"></script>
<script src="http://localhost:8081/ffmpegserver/ffmpegserver.js"></script>
<script src="https://code.createjs.com/1.0.0/createjs.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/tween.js/17.2.0/Tween.js"></script>
<script src="test3.js"></script>
</body>
</html>
test3.js
/* eslint-disable eol-last */
/* eslint-disable no-undef */
/* eslint-disable quotes */
var canvas, stage;
function init() {
var framesPerSecond = 60;
var numFrames = framesPerSecond * 5; // a 5 second 60fps video
var frameNum = 0;
var progressElem = document.getElementById("progress");
var progressNode = document.createTextNode("");
progressElem.appendChild(progressNode);
function onProgress(progress) {
progressNode.nodeValue = (progress * 100).toFixed(1) + "%";
}
function showVideoLink(url, size) {
size = size ? (" [size: " + (size / 1024 / 1024).toFixed(1) + "meg]") : " [unknown size]";
var a = document.createElement("a");
a.href = url;
var filename = url;
var slashNdx = filename.lastIndexOf("/");
if (slashNdx >= 0) {
filename = filename.substr(slashNdx + 1);
}
a.download = filename;
a.appendChild(document.createTextNode("Download"));
var container = document.getElementById("container").insertBefore(a, progressElem);
}
var capturer = new CCapture( {
format: 'ffmpegserver',
//workersPath: "3rdparty/",
//format: 'gif',
//verbose: true,
framerate: framesPerSecond,
onProgress: onProgress,
//extension: ".mp4",
//codec: "libx264",
} );
capturer.start();
canvas = document.getElementById("testCanvas");
stage = new createjs.Stage(canvas);
var ball = new createjs.Shape();
ball.graphics.setStrokeStyle(5, 'round', 'round');
// eslint-disable-next-line quotes
ball.graphics.beginStroke('#000000');
ball.graphics.beginFill("#FF0000").drawCircle(0, 0, 50);
ball.graphics.setStrokeStyle(1, 'round', 'round');
ball.graphics.beginStroke('#000000');
ball.graphics.moveTo(0, 0);
ball.graphics.lineTo(0, 50);
ball.graphics.endStroke();
ball.x = 200;
ball.y = -50;
createjs.Tween.get(ball, {loop: -1})
.to({x: ball.x, y: canvas.height - 55, rotation: -360}, 1500, createjs.Ease.bounceOut)
.wait(1000)
.to({x: canvas.width - 55, rotation: 360}, 2500, createjs.Ease.bounceOut)
.wait(1000)
.to({scaleX: 2, scaleY: 2}, 2500, createjs.Ease.quadOut)
.wait(1000)
stage.addChild(ball);
createjs.Ticker.addEventListener("tick", stage);
function render() {
requestAnimationFrame(render);
capturer.capture( canvas );
++frameNum;
if (frameNum < numFrames) {
progressNode.nodeValue = "rendered frame# " + frameNum + " of " + numFrames;
} else if (frameNum === numFrames) {
capturer.stop();
capturer.save(showVideoLink);
}
}
render();
}
Everything works fine; you can test it yourself by cloning the repo.
Right now the animation rendering happens on the client side. I would like it to happen on the backend instead.
What do I need to change to render this animation on the backend server side using Node.js? Any help or suggestions would be appreciated.

Since you do all your animation on the canvas, you can use node-canvas to do the same in Node.js. (You will have to double-check that CreateJS also works in Node.js, though; if not, find another library or write those routines yourself.)
Spawn FFmpeg as its own process accepting input through a pipe (ffmpeg -f rawvideo -pix_fmt rgba ... -i - etc.; the exact pipe mechanics will differ depending on your server environment). After each frame is drawn, extract the pixel array using canvas.getContext('2d').getImageData(0, 0, width, height).data and pipe the result to FFmpeg.
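To make that concrete, here is a minimal sketch of the idea, assuming node-canvas (npm install canvas) and an ffmpeg binary on the PATH. The drawing code is a stand-in for your CreateJS scene, and the output name out.mp4 is arbitrary:
// Sketch only: render frames with node-canvas and pipe raw RGBA bytes to FFmpeg.
const { createCanvas } = require('canvas');
const { spawn } = require('child_process');

const width = 500, height = 400, fps = 60;
const numFrames = fps * 5; // a 5 second 60fps video, as in test3.js
const canvas = createCanvas(width, height);
const ctx = canvas.getContext('2d');

// Input options (-f, -pix_fmt, -s, -r) must precede `-i -` so FFmpeg knows
// how to interpret the raw bytes arriving on stdin.
const ffmpeg = spawn('ffmpeg', [
  '-y',
  '-f', 'rawvideo', '-pix_fmt', 'rgba',
  '-s', `${width}x${height}`, '-r', String(fps),
  '-i', '-',
  '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
  'out.mp4',
]);
ffmpeg.stderr.pipe(process.stderr);

for (let frame = 0; frame < numFrames; frame++) {
  // Stand-in for the CreateJS drawing: a ball falling down the canvas.
  ctx.fillStyle = '#ffffff';
  ctx.fillRect(0, 0, width, height);
  ctx.fillStyle = '#FF0000';
  ctx.beginPath();
  ctx.arc(200, (frame / numFrames) * height, 50, 0, 2 * Math.PI);
  ctx.fill();

  // Extract the RGBA pixel array and write it to FFmpeg's stdin
  // (stream backpressure is ignored here for brevity).
  const pixels = ctx.getImageData(0, 0, width, height).data;
  ffmpeg.stdin.write(Buffer.from(pixels.buffer, pixels.byteOffset, pixels.byteLength));
}
ffmpeg.stdin.end();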

Related

Nothing Rendering on Canvas?

I am trying to render the key points that I am getting from BlazePose to a canvas, but I can't seem to get them drawn on the canvas. I know the x and y for each key point are being retrieved, but I can't get them to show up on the canvas. I have tried changing the styling, but no luck so far. Thanks for the help.
const video = document.getElementById('webcam');
const canvas = document.getElementById('output')
const liveView = document.getElementById('liveView');
const demosSection = document.getElementById('demos');
const enableWebcamButton = document.getElementById('webcamButton');
const ctx = canvas.getContext("2d");
let poses;
function getUserMediaSupported() {
return !!(navigator.mediaDevices &&
navigator.mediaDevices.getUserMedia);
}
if (getUserMediaSupported()) {
enableWebcamButton.addEventListener('click', enableCam);
} else {
console.warn('getUserMedia() is not supported by your browser');
}
// Enable the live webcam view and start classification.
function enableCam(event) {
if (!model) {
return;
}
// Hide the button once clicked.
event.target.classList.add('removed');
// getUsermedia parameters to force video but not audio.
const constraints = {
video: true
};
document.getElementById('output').style.zIndex = "6";
// Activate the webcam stream.
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
video.srcObject = stream;
video.addEventListener('loadeddata', predictWebcam);
});
}
async function predictWebcam() {
const videoHeight = video.videoHeight;
const videoWidth = video.videoWidth;
video.width = videoWidth;
video.height = videoHeight;
canvas.width = videoWidth;
canvas.height = videoHeight;
poses = await detector.estimatePoses(video)
//ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
if(poses && poses.length > 0){
for(const pose of poses){
if(pose.keypoints != null){
drawKeypoints(pose.keypoints);
}
}
}
window.requestAnimationFrame(predictWebcam);
}
function drawKeypoints(keypoints){
for(let i = 0; i < keypoints.length; i++){
drawKeypoint(keypoints[i]);
}
}
function drawKeypoint(keypoint){
ctx.fillStyle = 'Orange';
ctx.strokeStyle = 'Green';
ctx.lineWidth = 2;
const radius = 4;
const circle = new Path2D();
circle.arc(keypoint.x, keypoint.y, radius, 0, 2 * Math.PI)
ctx.fill(circle)
ctx.stroke(circle)
}
// Store the resulting model in the global scope of our app.
let model = undefined;
let detector = undefined;
// Before we can use the BlazePose class we must wait for it to finish loading.
async function loadModel(){
model = poseDetection.SupportedModels.BlazePose;
const detectorConfig = {
runtime: 'tfjs',
enableSmoothing: true,
modelType: 'full'
};
detector = await poseDetection.createDetector(model, detectorConfig);
demosSection.classList.remove('invisible');
}
loadModel();
<!DOCTYPE html>
<html lang="en">
<head>
<title>Measuring App</title>
<meta charset="utf-8">
<!-- Import the webpage's stylesheet -->
<link rel="stylesheet" href="style.css">
</head>
<body>
<h1>Measuring App</h1>
<p>Wait for the model to load before clicking the button to enable the webcam - at which point it will become visible to use.</p>
<section id="demos" class="invisible">
<div id="liveView" class="camView">
<button id="webcamButton">Enable Webcam</button>
<canvas id="output"></canvas>
<video id="webcam" autoplay muted width="640" height="480"></video>
</div>
</section>
<!-- Import TensorFlow.js library -->
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs/dist/tf.min.js" type="text/javascript"></script>
<!-- Load the coco-ssd model to use to recognize things in images -->
<script src="https://cdn.jsdelivr.net/npm/#tensorflow-models/pose-detection"></script>
<!-- Import the page's JavaScript to do some stuff -->
<script src="script.js" defer></script>
</body>
</html>
body {
font-family: helvetica, arial, sans-serif;
margin: 2em;
color: #3D3D3D;
}
h1 {
font-style: italic;
color: #FF6F00;
}
video {
display: block;
}
section {
opacity: 1;
transition: opacity 500ms ease-in-out;
}
.removed {
display: none;
z-index: -10;
}
.invisible {
opacity: 0.2;
}
.camView {
position: relative;
float: left;
width: calc(100% - 20px);
margin: 10px;
cursor: pointer;
}
.camView p {
position: absolute;
padding: 5px;
background-color: rgba(255, 111, 0, 0.85);
color: #FFF;
border: 1px dashed rgba(255, 255, 255, 0.7);
z-index: 1;
font-size: 12px;
}
#output {
position: absolute;
z-index: -100;
top: 0;
bottom: 0;
left: 0;
}

How to run a pixi.js app after page load to prevent it from increasing the page speed time?

I used pixi.js on my website to create a dynamic gradient animation with shape-changing blobs. The animation works perfectly fine; the only problem I am facing is that when I run page speed tests, they assume the page has not been rendered, since the animation is still running, and report astronomical loading times.
I assume part of the problem is that the page speed test runs until the JavaScript has finished executing, which it never will, since this is a continuous 2D animation.
If anyone has any idea how to solve this problem, please let me know.
import * as PIXI from "https://cdn.skypack.dev/pixi.js";
import { KawaseBlurFilter } from "https://cdn.skypack.dev/#pixi/filter-kawase-blur";
import SimplexNoise from "https://cdn.skypack.dev/simplex-noise";
import hsl from "https://cdn.skypack.dev/hsl-to-hex";
import debounce from "https://cdn.skypack.dev/debounce";
// return a random number within a range
function random(min, max) {
return Math.random() * (max - min) + min;
}
// map a number from 1 range to another
function map(n, start1, end1, start2, end2) {
return ((n - start1) / (end1 - start1)) * (end2 - start2) + start2;
}
// Create a new simplex noise instance
const simplex = new SimplexNoise();
// ColorPalette class
class ColorPalette {
constructor() {
this.setColors();
this.setCustomProperties();
}
setColors() {
// pick a random hue somewhere between 220 and 360
this.hue = ~~random(220, 360);
this.complimentaryHue1 = this.hue + 30;
this.complimentaryHue2 = this.hue + 60;
// define a fixed saturation and lightness
this.saturation = 95;
this.lightness = 50;
// define a base color
this.baseColor = hsl(this.hue, this.saturation, this.lightness);
// define a complimentary color, 30 degrees away from the base
this.complimentaryColor1 = hsl(
this.complimentaryHue1,
this.saturation,
this.lightness
);
// define a second complimentary color, 60 degrees away from the base
this.complimentaryColor2 = hsl(
this.complimentaryHue2,
this.saturation,
this.lightness
);
// store the color choices in an array so that a random one can be picked later
this.colorChoices = [
this.baseColor,
this.complimentaryColor1,
this.complimentaryColor2
];
}
randomColor() {
// pick a random color
return this.colorChoices[~~random(0, this.colorChoices.length)].replace(
"#",
"0x"
);
}
setCustomProperties() {
// set CSS custom properties so that the colors defined here can be used throughout the UI
document.documentElement.style.setProperty("--hue", this.hue);
document.documentElement.style.setProperty(
"--hue-complimentary1",
this.complimentaryHue1
);
document.documentElement.style.setProperty(
"--hue-complimentary2",
this.complimentaryHue2
);
}
}
// Orb class
class Orb {
// Pixi takes hex colors as hexidecimal literals (0x rather than a string with '#')
constructor(fill = 0x000000) {
// bounds = the area an orb is "allowed" to move within
this.bounds = this.setBounds();
// initialise the orb's { x, y } values to a random point within its bounds
this.x = random(this.bounds["x"].min, this.bounds["x"].max);
this.y = random(this.bounds["y"].min, this.bounds["y"].max);
// how large the orb is vs its original radius (this will modulate over time)
this.scale = 1;
// what color is the orb?
this.fill = fill;
// the original radius of the orb, set relative to window height
this.radius = random(window.innerHeight / 6, window.innerHeight / 3);
// starting points in "time" for the noise/self similar random values
this.xOff = random(0, 1000);
this.yOff = random(0, 1000);
// how quickly the noise/self similar random values step through time
this.inc = 0.002;
// PIXI.Graphics is used to draw 2d primitives (in this case a circle) to the canvas
this.graphics = new PIXI.Graphics();
this.graphics.alpha = 0.825;
// 250ms after the last window resize event, recalculate orb positions.
window.addEventListener(
"resize",
debounce(() => {
this.bounds = this.setBounds();
}, 250)
);
}
setBounds() {
// how far from the { x, y } origin can each orb move
const maxDist =
window.innerWidth < 1000 ? window.innerWidth / 3 : window.innerWidth / 5;
// the { x, y } origin for each orb (the bottom right of the screen)
const originX = window.innerWidth / 1.25;
const originY =
window.innerWidth < 1000
? window.innerHeight
: window.innerHeight / 1.375;
// allow each orb to move x distance away from its x / y origin
return {
x: {
min: originX - maxDist,
max: originX + maxDist
},
y: {
min: originY - maxDist,
max: originY + maxDist
}
};
}
update() {
// self similar "psuedo-random" or noise values at a given point in "time"
const xNoise = simplex.noise2D(this.xOff, this.xOff);
const yNoise = simplex.noise2D(this.yOff, this.yOff);
const scaleNoise = simplex.noise2D(this.xOff, this.yOff);
// map the xNoise/yNoise values (between -1 and 1) to a point within the orb's bounds
this.x = map(xNoise, -1, 1, this.bounds["x"].min, this.bounds["x"].max);
this.y = map(yNoise, -1, 1, this.bounds["y"].min, this.bounds["y"].max);
// map scaleNoise (between -1 and 1) to a scale value somewhere between half of the orb's original size, and 100% of its original size
this.scale = map(scaleNoise, -1, 1, 0.5, 1);
// step through "time"
this.xOff += this.inc;
this.yOff += this.inc;
}
render() {
// update the PIXI.Graphics position and scale values
this.graphics.x = this.x;
this.graphics.y = this.y;
this.graphics.scale.set(this.scale);
// clear anything currently drawn to graphics
this.graphics.clear();
// tell graphics to fill any shapes drawn after this with the orb's fill color
this.graphics.beginFill(this.fill);
// draw a circle at { 0, 0 } with its size set by this.radius
this.graphics.drawCircle(0, 0, this.radius);
// let graphics know we won't be filling in any more shapes
this.graphics.endFill();
}
}
// Create PixiJS app
const app = new PIXI.Application({
// render to <canvas class="orb-canvas"></canvas>
view: document.querySelector(".orb-canvas"),
// auto adjust size to fit the current window
resizeTo: window,
// transparent background, we will be creating a gradient background later using CSS
transparent: true
});
// Create colour palette
const colorPalette = new ColorPalette();
app.stage.filters = [new KawaseBlurFilter(30, 10, true)];
// Create orbs
const orbs = [];
for (let i = 0; i < 10; i++) {
const orb = new Orb(colorPalette.randomColor());
app.stage.addChild(orb.graphics);
orbs.push(orb);
}
// Animate!
if (!window.matchMedia("(prefers-reduced-motion: reduce)").matches) {
app.ticker.add(() => {
orbs.forEach((orb) => {
orb.update();
orb.render();
});
});
} else {
orbs.forEach((orb) => {
orb.update();
orb.render();
});
}
document
.querySelector(".overlay__btn--colors")
.addEventListener("click", () => {
colorPalette.setColors();
colorPalette.setCustomProperties();
orbs.forEach((orb) => {
orb.fill = colorPalette.randomColor();
});
});
:root {
--dark-color: hsl(var(--hue), 100%, 9%);
--light-color: hsl(var(--hue), 95%, 98%);
--base: hsl(var(--hue), 95%, 50%);
--complimentary1: hsl(var(--hue-complimentary1), 95%, 50%);
--complimentary2: hsl(var(--hue-complimentary2), 95%, 50%);
--font-family: "Poppins", system-ui;
--bg-gradient: linear-gradient(
to bottom,
hsl(var(--hue), 95%, 99%),
hsl(var(--hue), 95%, 84%)
);
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
html {
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
body {
max-width: 1920px;
min-height: 100vh;
display: grid;
place-items: center;
padding: 2rem;
font-family: var(--font-family);
color: var(--dark-color);
background: var(--bg-gradient);
}
.orb-canvas {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
pointer-events: none;
z-index: -1;
}
strong {
font-weight: 600;
}
.overlay {
width: 100%;
max-width: 1140px;
max-height: 640px;
padding: 8rem 6rem;
display: flex;
align-items: center;
background: rgba(255, 255, 255, 0.375);
box-shadow: 0 0.75rem 2rem 0 rgba(0, 0, 0, 0.1);
border-radius: 2rem;
border: 1px solid rgba(255, 255, 255, 0.125);
}
.overlay__inner {
max-width: 36rem;
}
.overlay__title {
font-size: 1.875rem;
line-height: 2.75rem;
font-weight: 700;
letter-spacing: -0.025em;
margin-bottom: 2rem;
}
.text-gradient {
background-image: linear-gradient(
45deg,
var(--base) 25%,
var(--complimentary2)
);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
-moz-background-clip: text;
-moz-text-fill-color: transparent;
}
.overlay__description {
font-size: 1rem;
line-height: 1.75rem;
margin-bottom: 3rem;
}
.overlay__btns {
width: 100%;
max-width: 30rem;
display: flex;
}
.overlay__btn {
width: 50%;
height: 2.5rem;
display: flex;
justify-content: center;
align-items: center;
font-size: 0.875rem;
font-weight: 600;
color: var(--light-color);
background: var(--dark-color);
border: none;
border-radius: 0.5rem;
transition: transform 150ms ease;
outline-color: hsl(var(--hue), 95%, 50%);
}
.overlay__btn:hover {
transform: scale(1.05);
cursor: pointer;
}
.overlay__btn--transparent {
background: transparent;
color: var(--dark-color);
border: 2px solid var(--dark-color);
border-width: 2px;
margin-right: 0.75rem;
}
.overlay__btn-emoji {
margin-left: 0.375rem;
}
a {
text-decoration: none;
color: var(--dark-color);
width: 100%;
height: 100%;
display: flex;
justify-content: center;
align-items: center;
}
/* Not too many browsers support this yet but it's good to add! */
@media (prefers-contrast: high) {
.orb-canvas {
display: none;
}
}
@media only screen and (max-width: 1140px) {
.overlay {
padding: 8rem 4rem;
}
}
@media only screen and (max-width: 840px) {
body {
padding: 1.5rem;
}
.overlay {
padding: 4rem;
height: auto;
}
.overlay__title {
font-size: 1.25rem;
line-height: 2rem;
margin-bottom: 1.5rem;
}
.overlay__description {
font-size: 0.875rem;
line-height: 1.5rem;
margin-bottom: 2.5rem;
}
}
@media only screen and (max-width: 600px) {
.overlay {
padding: 1.5rem;
}
.overlay__btns {
flex-wrap: wrap;
}
.overlay__btn {
width: 100%;
font-size: 0.75rem;
margin-right: 0;
}
.overlay__btn:first-child {
margin-bottom: 1rem;
}
}
<!-- Canvas -->
<canvas class="orb-canvas"></canvas>
<!-- Overlay -->
<div class="overlay">
<!-- Overlay inner wrapper -->
<div class="overlay__inner">
<!-- Title -->
<h1 class="overlay__title">
Hey, would you like to learn how to create a
<span class="text-gradient">generative</span> UI just like this?
</h1>
<!-- Description -->
<p class="overlay__description">
In this tutorial we will be creating a generative “orb” animation
using pixi.js, picking some lovely random colors and pulling it all
together in a nice frosty UI.
<strong>We're gonna talk accessibility, too.</strong>
</p>
<!-- Buttons -->
<div class="overlay__btns">
<button class="overlay__btn overlay__btn--transparent">
View
</a>
</button>
<button class="overlay__btn overlay__btn--colors">
<span>Randomise Colors</span>
<span class="overlay__btn-emoji">🎨</span>
</button>
</div>
</div>
</div>
Try executing your "pixi-related code" after the page is loaded. I mean this code:
// Create a new simplex noise instance
const simplex = new SimplexNoise();
...
// Create PixiJS app
const app = new PIXI.Application({
// render to <canvas class="orb-canvas"></canvas>
view: document.querySelector(".orb-canvas"),
// auto adjust size to fit the current window
resizeTo: window,
// transparent background, we will be creating a gradient background later using CSS
transparent: true
});
// Create colour palette
const colorPalette = new ColorPalette();
app.stage.filters = [new KawaseBlurFilter(30, 10, true)];
// Create orbs
const orbs = [];
for (let i = 0; i < 10; i++) {
const orb = new Orb(colorPalette.randomColor());
app.stage.addChild(orb.graphics);
orbs.push(orb);
}
// Animate!
if (!window.matchMedia("(prefers-reduced-motion: reduce)").matches) {
app.ticker.add(() => {
orbs.forEach((orb) => {
orb.update();
orb.render();
});
});
} else {
orbs.forEach((orb) => {
orb.update();
orb.render();
});
}
document
.querySelector(".overlay__btn--colors")
.addEventListener("click", () => {
colorPalette.setColors();
colorPalette.setCustomProperties();
orbs.forEach((orb) => {
orb.fill = colorPalette.randomColor();
});
});
Execute all of this not "as soon as possible" (as you do now, which causes the page speed test to count it as page load time), but after the page is loaded. Additionally, you can add a setTimeout there with a 1-second delay, so it becomes "after page load plus 1 second"; a sketch follows below.
See some proposed solutions like these: Run a certain script JS after x seconds that the page is loaded
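As a minimal sketch of that wrapping (initPixiApp here is a hypothetical wrapper name around all of the setup code above):
window.addEventListener('load', () => {
  // Wait one extra second after the load event before starting Pixi.
  setTimeout(initPixiApp, 1000);
});

function initPixiApp() {
  const app = new PIXI.Application({
    view: document.querySelector(".orb-canvas"),
    resizeTo: window,
    transparent: true
  });
  // ...create the ColorPalette, the orbs and the ticker here, exactly as above...
}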

Setting canvas size with jQuery blurs contents

I've got a simple resize script as such:
$(`#game`).css('width', '100%');
$(window).resize(function(){
$(`#game`).height($(`#game`).width() / 2.031);
})
(from https://stackoverflow.com/a/10750346/12359120)
The problem though, as cyberwombat said,
"This will cause the content to be blurred."
The other answers on that post don't really work for my situation, as they often make the contents go offscreen.
Right now this is how blurry it is: [screenshot omitted]
With the other answers: [screenshot omitted]
Is there any way to make it less blurry?
Edit: The canvas element's width and height properties always report 300 and 150 (the default drawing-buffer size) even though the styles say otherwise. Here is some HTML + CSS + JS that can mostly reproduce this.
const canvas = document.getElementById('game')
const ctx = canvas.getContext("2d");
$(`#game`).css('width', '100%');
$(window).resize(function() {
$(`#game`).height($(`#game`).width() / 2.031);
})
const imageAt = ((href, x, y, sizex, sizey) => {
var img = new Image();
img.addEventListener('load', function() {
ctx.drawImage(img, x, y, sizex, sizey);
}, false);
img.src = href
})
imageAt('https://b.thumbs.redditmedia.com/bZedgr0gq7RQBBnVYVc-Nmzdr-5vEUg4Dj8nTrMb7yA.png',0,0,25,25)
#game {
padding-left: 0;
padding-right: 0;
display: block;
padding: 0;
}
#vertical-center {
display: flex;
align-items: center;
justify-content: center;
height: 100%;
}
body {
background-color: black;
padding: 0;
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js"></script>
<div id="vertical-center">
<canvas id="game"></canvas>
</div>
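No answer was posted for this one here, but for reference: the blur usually comes from the canvas keeping its default 300×150 drawing buffer while CSS stretches it. A hedged sketch (not from the original post) of keeping the buffer in sync with the displayed size:
// Resize the drawing buffer itself, not just the CSS box.
function syncCanvasSize() {
  const c = document.getElementById('game');
  c.width = c.clientWidth;   // note: changing width/height clears the canvas,
  c.height = c.clientHeight; // so redraw the frame afterwards
}
window.addEventListener('resize', syncCanvasSize);
syncCanvasSize();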

How to make Animated Web Speech API UI in HTML

I have to create an animation like the Google.com desktop mic shows (i.e. scaling of the mic border according to the loudness of the voice). I have used the Web Speech API with reference from here (MDN), which shows how we can change the background colour of the webpage using our voice. It works fine, but I want to add an animation like Google's site (mentioned above). I have searched a lot for a way to achieve this animation but was unable to find one, so I am asking here. Thanks a lot in advance for helping me out with this.
I'm no expert in this area, but I followed the example on MDN and here is the result.
Besides the setup, the key point here is analyser.getByteFrequencyData, which gives us the decibel levels.
To simplify the code, I took the highest decibel level in the array (Math.max.apply(null, dataArray)), but you can fine-tune it with an average or any other calculation you like; see the sketch below.
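For example, averaging the bins instead of taking the max would be a one-line change (a sketch, replacing the Math.max.apply line in the demo below):
// Average decibel level across all frequency bins instead of the loudest bin.
const level = dataArray.reduce((sum, v) => sum + v, 0) / dataArray.length;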
Demo
let audioCtx = new (window.AudioContext || window.webkitAudioContext)();
let distortion = audioCtx.createWaveShaper();
let gainNode = audioCtx.createGain();
let biquadFilter = audioCtx.createBiquadFilter();
let analyser = audioCtx.createAnalyser();
analyser.minDecibels = -90;
analyser.maxDecibels = -10;
analyser.fftSize = 256;
const mic = document.querySelector('.mic');
let isListening = false;
let tracks = [];
if (!navigator.mediaDevices.getUserMedia) {
alert('getUserMedia not supported on your browser!');
}
mic.addEventListener('click', async () => {
if (!isListening) {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
isListening = true;
tracks = stream.getTracks();
const source = audioCtx.createMediaStreamSource(stream);
source.connect(distortion);
distortion.connect(biquadFilter);
biquadFilter.connect(gainNode);
gainNode.connect(analyser);
analyser.connect(audioCtx.destination);
requestAnimationFrame(function log() {
let bufferLength = analyser.frequencyBinCount;
let dataArray = new Uint8Array(bufferLength);
analyser.getByteFrequencyData(dataArray);
const level = Math.max.apply(null, dataArray);
document.querySelector('#level span').textContent = level;
mic.style.setProperty('--border', `${level / 5}px`);
requestAnimationFrame(log);
});
} catch (err) {
console.log('The following gUM error occurred: ' + err);
}
} else {
isListening = false;
tracks.forEach((track) => {
track.stop();
});
}
});
body {
margin: 0;
height: 100vh;
position: relative;
}
.content {
height: 100%;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
gap: 20px;
}
.mic {
background: #fff;
width: 50px;
height: 50px;
border: 1px solid #eee;
border-radius: 100%;
bottom: 0;
box-shadow: 0 2px 5px var(--border) rgb(0 0 0 / 10%);
cursor: pointer;
display: inline-flex;
align-items: center;
justify-content: center;
}
<html>
<head>
<meta charset="UTF-8" />
<link rel="stylesheet" type="text/css" href="styles.css" />
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css"
integrity="sha512-Fo3rlrZj/k7ujTnHg4CGR2D7kSs0v4LLanw2qksYuRlEzO+tcaEPQogQ0KaoGN26/zrn20ImR1DfuLWnOo7aBA=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
/>
</head>
<body>
<div class="content">
<div class="mic">
<i class="fas fa-microphone"></i>
</div>
<div id="level">Level: <span></span></div>
</div>
<script src="script.js"></script>
</body>
</html>

Make the video fill the screen width and remove the frame

I have created this frame for playing the movie, which resizes based on the user's screen size.
Now I want to be able to remove the frame and fill the whole width of the user's screen using another function, but each time I have failed.
Currently I'm using the injectViewportSizes() function. I want a new function to do this:
remove the frame completely without affecting anything else;
stretch the width of the movie until it fills the whole width of the screen.
Of course, stretching the movie is much simpler for me than removing the frame ... each time I've tried to remove it, the whole movie is removed or a distortion occurs for the rest of the elements.
Here is the code:
const clipSource = `https://langfox.ir/movie/movieclip/My_name_is_Edward_Bloom.mp4`;
const content = document.querySelector('.content');
const box = document.getElementById("box");
let video = document.createElement('video');
content.appendChild(video);
video.id = 'clip';
let clip = document.getElementById("clip");
clip.currentTime = 0;
let source = document.createElement('source');
source.src = clipSource;
source.type = 'video/mp4';
video.appendChild(source);
video.load();
setTimeout(() => {
injectViewportSizes(); // or goFull();
clip.play();
}, 3000);
function goFull(){
// Remove the frame and make the video fill the whole width of the screen
}
function injectViewportSizes(){
let screenWidth = screen.width;
let screenHeight = screen.height;
let vwPixels = screenWidth / 100;
let clipWidth = clip.videoWidth;
let clipHeight = clip.videoHeight;
screenWidth = screenWidth - (screenWidth * 0.10); // available space to put the clip inside
screenHeight = screenHeight - (screenHeight * 0.10);
let clipWidthNew;
let clipHeightNew;
if(clipWidth > clipHeight){
clipWidthNew = clipWidth;
let ratio = clipWidth / clipHeight;
while(screenWidth < clipWidthNew) {
clipWidthNew--;
}
clipHeightNew = clipWidthNew / ratio;
} else {
clipHeightNew = clipHeight;
let ratio = clipWidth / clipHeight;
while(screenHeight < clipHeightNew) {
clipHeightNew--;
}
clipWidthNew = clipHeightNew * ratio;
}
let viewPortClipWidth = clipWidthNew * (100 / document.body.clientWidth);
let viewPortClipHeight = clipHeightNew / vwPixels;
document.querySelector('.box .content').style.width = `${viewPortClipWidth}vw`;
document.querySelector('.box .content').style.height = `${viewPortClipHeight}vw`;
}
video {
position: absolute;
top: 0;
left: 0;
max-width: 100%;
max-height: 100%;
width: 100%;
height: 100%;
-o-object-fit: cover;
object-fit: cover;
}
body {
height: 100vh;
display: flex;
align-items: center;
justify-content: center;
background-color: #0b0e12;
}
.box {
border-radius: 0.31vh;
position: relative;
overflow: hidden;
}
.box::after {
content: '';
position: absolute;
z-index: -1;
top: 0;
right: 0;
bottom: 0;
left: 0;
background: repeating-linear-gradient(-45deg, white 0 0.48828125vw, #f00c36 0 0.9765625vw) 0 0/1.380859375vw 1.380859375vw;
width: calc(100vw + 1.380859375vw);
height: calc(100vh + 1.380859375vw);
}
.box .content {
position: relative;
max-width: 100vw;
max-height: 100vh;
box-shadow: 0 0 0.262vh black, 0 0 0.6553vh rgba(0, 0, 0, 1), inset 0 0 0.6553vh rgba(0, 0, 0, 1);
margin: 0.45vh;
}
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="style.css">
</head>
<body>
<div id='box' class="box">
<div class="content"></div>
</div>
</body>
</html>
Note: it seems this code snippet can't show the original frame correctly, but it works in a raw HTML file locally.
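No answer was posted here, but a possible goFull() sketch (hypothetical, assuming the "frame" is the striped .box::after layer plus the margin and shadow on .content):
function goFull() {
  const content = document.querySelector('.box .content');
  // Pseudo-elements can't be styled directly from JS, so hide the
  // striped ::after layer with an injected rule.
  const style = document.createElement('style');
  style.textContent = '.box::after { display: none; }';
  document.head.appendChild(style);
  // Drop the inner margin/shadow and stretch to the full viewport width,
  // deriving the height from the clip's aspect ratio.
  content.style.margin = '0';
  content.style.boxShadow = 'none';
  content.style.maxWidth = 'none';
  content.style.maxHeight = 'none';
  content.style.width = '100vw';
  content.style.height = `${(100 * clip.videoHeight) / clip.videoWidth}vw`;
}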
