I am trying to render the key points I get from BlazePose to a canvas, but I can't seem to get them to draw on it. I know the x and y for each key point are being retrieved, but I can't get them to show up on the canvas. I have tried changing the styling but no luck so far. Thanks for the help.
const video = document.getElementById('webcam');
const canvas = document.getElementById('output')
const liveView = document.getElementById('liveView');
const demosSection = document.getElementById('demos');
const enableWebcamButton = document.getElementById('webcamButton');
const ctx = canvas.getContext("2d");
let poses;
function getUserMediaSupported() {
return !!(navigator.mediaDevices &&
navigator.mediaDevices.getUserMedia);
}
if (getUserMediaSupported()) {
enableWebcamButton.addEventListener('click', enableCam);
} else {
console.warn('getUserMedia() is not supported by your browser');
}
// Enable the live webcam view and start pose detection.
function enableCam(event) {
if (!model) {
return;
}
// Hide the button once clicked.
event.target.classList.add('removed');
// getUsermedia parameters to force video but not audio.
const constraints = {
video: true
};
document.getElementById('output').style.zIndex = "6";
// Activate the webcam stream.
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
video.srcObject = stream;
video.addEventListener('loadeddata', predictWebcam);
});
}
async function predictWebcam() {
const videoHeight = video.videoHeight;
const videoWidth = video.videoWidth;
video.width = videoWidth;
video.height = videoHeight;
canvas.width = videoWidth;
canvas.height = videoHeight;
poses = await detector.estimatePoses(video)
//ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
if(poses && poses.length > 0){
for(const pose of poses){
if(pose.keypoints != null){
drawKeypoints(pose.keypoints);
}
}
}
window.requestAnimationFrame(predictWebcam);
}
function drawKeypoints(keypoints){
for(let i = 0; i < keypoints.length; i++){
drawKeypoint(keypoints[i]);
}
}
function drawKeypoint(keypoint){
ctx.fillStyle = 'Orange';
ctx.strokeStyle = 'Green';
ctx.lineWidth = 2;
const radius = 4;
const circle = new Path2D();
circle.arc(keypoint.x, keypoint.y, radius, 0, 2 * Math.PI)
ctx.fill(circle)
ctx.stroke(circle)
}
// Store the resulting model in the global scope of our app.
let model = undefined;
let detector = undefined;
// Before we can use the BlazePose model we must wait for it to finish loading.
async function loadModel(){
model = poseDetection.SupportedModels.BlazePose;
const detectorConfig = {
runtime: 'tfjs',
enableSmoothing: true,
modelType: 'full'
};
detector = await poseDetection.createDetector(model, detectorConfig);
demosSection.classList.remove('invisible');
}
loadModel();
<!DOCTYPE html>
<html lang="en">
<head>
<title>Measuring App</title>
<meta charset="utf-8">
<!-- Import the webpage's stylesheet -->
<link rel="stylesheet" href="style.css">
</head>
<body>
<h1>Measuring App</h1>
<p>Wait for the model to load before clicking the button to enable the webcam - at which point it will become visible to use.</p>
<section id="demos" class="invisible">
<div id="liveView" class="camView">
<button id="webcamButton">Enable Webcam</button>
<canvas id="output"></canvas>
<video id="webcam" autoplay muted width="640" height="480"></video>
</div>
</section>
<!-- Import TensorFlow.js library -->
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs/dist/tf.min.js" type="text/javascript"></script>
<!-- Load the coco-ssd model to use to recognize things in images -->
<script src="https://cdn.jsdelivr.net/npm/#tensorflow-models/pose-detection"></script>
<!-- Import the page's JavaScript to do some stuff -->
<script src="script.js" defer></script>
</body>
</html>
body {
font-family: helvetica, arial, sans-serif;
margin: 2em;
color: #3D3D3D;
}
h1 {
font-style: italic;
color: #FF6F00;
}
video {
display: block;
}
section {
opacity: 1;
transition: opacity 500ms ease-in-out;
}
.removed {
display: none;
z-index: -10;
}
.invisible {
opacity: 0.2;
}
.camView {
position: relative;
float: left;
width: calc(100% - 20px);
margin: 10px;
cursor: pointer;
}
.camView p {
position: absolute;
padding: 5px;
background-color: rgba(255, 111, 0, 0.85);
color: #FFF;
border: 1px dashed rgba(255, 255, 255, 0.7);
z-index: 1;
font-size: 12px;
}
#output {
position: absolute;
z-index: -100;
top: 0;
bottom: 0;
left: 0;
}
I have to create an animation like the Google.com desktop mic (i.e. the mic border scales according to the loudness of your voice). I have used the Web Speech API with reference from here (MDN), which shows how to change the background colour of the webpage using your voice. It works fine, but I want to add an animation like Google's site mentioned above. I have searched a lot for a way to achieve this animation but was unable to find one, so I am asking here as this is the best place to get my answer :) Thanks a lot in advance for helping me out with this.
I'm not an expert in this area, but I followed the example on MDN and here is the result.
Besides the setup, the key point here is analyser.getByteFrequencyData, which gives us the current level of each frequency bin (scaled between the analyser's minDecibels and maxDecibels).
To keep the code simple, I took the highest level in the array (Math.max.apply(null, dataArray)), but you can fine-tune it with an average or any other calculation you like.
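If you'd rather smooth the value, here is a quick sketch (my own addition, not part of the demo below) that averages the bins instead of taking the max:
// Average the byte frequency data (0-255 per bin) for a smoother level.
function averageLevel(analyser) {
  const dataArray = new Uint8Array(analyser.frequencyBinCount);
  analyser.getByteFrequencyData(dataArray);
  const sum = dataArray.reduce((acc, v) => acc + v, 0);
  return sum / dataArray.length;
}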
Demo
let audioCtx = new (window.AudioContext || window.webkitAudioContext)();
let distortion = audioCtx.createWaveShaper();
let gainNode = audioCtx.createGain();
let biquadFilter = audioCtx.createBiquadFilter();
let analyser = audioCtx.createAnalyser();
analyser.minDecibels = -90;
analyser.maxDecibels = -10;
analyser.fftSize = 256;
const mic = document.querySelector('.mic');
let isListening = false;
let tracks = [];
let source; // will hold the MediaStreamAudioSourceNode once the mic starts
if (!navigator.mediaDevices.getUserMedia) {
alert('getUserMedia not supported on your browser!');
}
mic.addEventListener('click', async () => {
if (!isListening) {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
isListening = true;
tracks = stream.getTracks();
source = audioCtx.createMediaStreamSource(stream);
source.connect(distortion);
distortion.connect(biquadFilter);
biquadFilter.connect(gainNode);
gainNode.connect(analyser);
analyser.connect(audioCtx.destination);
requestAnimationFrame(function log() {
let bufferLength = analyser.frequencyBinCount;
let dataArray = new Uint8Array(bufferLength);
analyser.getByteFrequencyData(dataArray);
const level = Math.max.apply(null, dataArray);
document.querySelector('#level span').textContent = level;
mic.style.setProperty('--border', `${level / 5}px`);
requestAnimationFrame(log);
});
} catch (err) {
console.log('The following gUM error occured: ' + err);
}
} else {
isListening = false;
tracks.forEach((track) => {
track.stop();
});
}
});
body {
margin: 0;
height: 100vh;
position: relative;
}
.content {
height: 100%;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
gap: 20px;
}
.mic {
background: #fff;
width: 50px;
height: 50px;
border: 1px solid #eee;
border-radius: 100%;
bottom: 0;
box-shadow: 0 2px 5px var(--border) rgb(0 0 0 / 10%);
cursor: pointer;
display: inline-flex;
align-items: center;
justify-content: center;
}
<html>
<head>
<meta charset="UTF-8" />
<link rel="stylesheet" type="text/css" href="styles.css" />
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css"
integrity="sha512-Fo3rlrZj/k7ujTnHg4CGR2D7kSs0v4LLanw2qksYuRlEzO+tcaEPQogQ0KaoGN26/zrn20ImR1DfuLWnOo7aBA=="
crossorigin="anonymous"
referrerpolicy="no-referrer"
/>
</head>
<body>
<div class="content">
<div class="mic">
<i class="fas fa-microphone"></i>
</div>
<div id="level">Level: <span></span></div>
</div>
<script src="script.js"></script>
</body>
</html>
How do I get my canvas animation of the rain to start when the play button is clicked, and vice versa (stop the rain when the pause button is clicked)?
I would really appreciate a helping hand.
So far the rain animation plays as soon as the window is loaded, and the audio in the background is the only thing that responds to the play button.
I'm not sure how to get the rain animation to start so that the audio plays in sync with the rain, and vice versa.
// Get the canvas and context and store in variables
var canvas = document.getElementById("canvas");
var context = canvas.getContext("2d");
// Set canvas dimensions to window height and width
canvas.width = window.innerHeight;
canvas.height = window.innerHeight;
// Generate the raindrops and apply attributes
var rainNum = 200; // max raindrops
var rainDrops = [];
let isClicked = true;
// Loop through the empty raindrops and apply attributes
for (var i = 0; i < rainNum; i++) {
rainDrops.push({
x: Math.random() * canvas.width,
y: Math.random() * canvas.height,
})
}
// Draw raindrops onto canvas
function draw() {
context.clearRect(0, 0, canvas.width, canvas.height);
context.lineWidth = 0.1;
context.strokeStyle = "white";
context.beginPath();
for (var i = 0; i < rainNum; i++) {
var r = rainDrops[i];
context.moveTo(r.x, r.y);
context.lineTo(r.x, r.y + 30);
rainDrops[i].y += 13;
context.stroke();
}
if (isClicked == true) {
moveRain();
} else {
return false
}
window.requestAnimationFrame(draw);
}
window.requestAnimationFrame(draw);
// Animate the raindrops
function moveRain() {
for (var i = 0; i < rainNum; i++) {
// Store current raindrops
var r = rainDrops[i];
// If the rain reaches the bottom, send a new one to the top
if (r.y > canvas.height) {
rainDrops[i] = {
x: Math.random() * canvas.width,
y: 0
};
}
}
}
// Create a reference to the audio
var audioOne = document.querySelector("#audio-1");
function playAudio() {
if (isClicked == true) {
isClicked = false
audioOne.pause();
btn.className = "play";
} else if (isClicked == false) {
isClicked = true
audioOne.play();
btn.className = "pause";
draw()
}
}
html {
height: 100%;
width: 100%;
}
body {
height: 100vh;
width: 100vw;
margin: 0;
padding: 0;
overflow: hidden;
}
canvas {
height: 100%;
width: 100%;
background-color: transparent;
position: absolute;
z-index: 10;
}
#sky-top {
height: 100%;
width: 100%;
position: absolute;
z-index: 1;
animation: lightning 20s ease-in-out infinite;
}
@keyframes lightning {
/****** This will create a lightning effect every 20 seconds ******/
0% {
background-color: rgb(46, 46, 46);
}
6.25% {
background-color: rgb(46, 46, 46);
}
8% {
background-color: rgb(255, 255, 255);
}
9% {
background-color: rgb(46, 46, 46);
}
11% {
background-color: rgb(255, 255, 255);
}
30% {
background-color: rgb(46, 46, 46);
}
100% {
background-color: rgb(46, 46, 46);
}
}
#sky-bottom {
height: 100%;
width: 100%;
position: absolute;
z-index: 2;
background: linear-gradient(rgba(255, 255, 255, 0), rgb(45, 45, 45));
}
.center-container {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
text-align: center;
z-index: 20;
background-color: transparent;
}
.button-center {
position: absolute;
top: 40%;
-webkit-transform: translateY(-50%);
-moz-transform: translateY(-50%);
-ms-transform: translateY(-50%);
-o-transform: translateY(-50%);
transform: translateY(-50%);
-webkit-transform: translateX(-50%);
-moz-transform: translateX(-50%);
-ms-transform: translateX(-50%);
-o-transform: translateX(-50%);
transform: translateX(-50%);
}
.center-container:after,
.button-center {
display: inline-block;
vertical-align: middle;
}
#btn {
height: 130px;
width: 130px;
border: none;
background-size: 100% 100%;
outline: none;
}
.play {
background: url('../image/play-button.png');
border-radius: 50%;
cursor: pointer;
-webkit-filter: drop-shadow(2px 2px 2px #666666);
filter: drop-shadow(2px 2px 2px #666666);
}
.pause {
background: url('../image/pause-button.png');
border-radius: 50%;
cursor: pointer;
-webkit-filter: drop-shadow(2px 2px 2px #666666);
filter: drop-shadow(2px 2px 2px #666666);
}
<!DOCTYPE html>
<html lang="en">
<head>
<title>Rain</title>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="Relax your mind with some rain and thunder.">
<link href="css/styles.css" type="text/css" rel="stylesheet">
</head>
<body>
<div id="sky-top"></div>
<div id="sky-bottom"></div>
<canvas id="canvas"></canvas>
<div class="center-container">
<div class="button-center">
<button id="btn" class="play" class="pause" onclick="playAudio()"></button>
</div>
<audio src="audio/rain-and-thunder.mp3" id="audio-1" loop="loop" type="audio/mp3"></audio>
</div>
<script src="js/script.js"></script>
</body>
</html>
First off, the audio file won't play for me locally (I downloaded my own mp3 and am still working on getting it to work), so I focused on playing/pausing the canvas animation and ignored the sound functionality. Let's start with the HTML:
<html>
<head>
<link rel = "stylesheet"
type = "text/css"
href = "index.css" />
</head>
<body>
<canvas id="canvas"></canvas>
<div class="center-container">
<div class="button-center">
<button id="btn" class="play" class="pause" onclick="playAudio()"></button>
</div>
<audio loop="loop" id="audio-1" width="100%" height="auto">
<source src="audio/rain-and-thunder.mp3" type="audio/mpeg">
</audio>
</div>
</body>
<script src='index.js'></script>
</html>
You'll see I did a few things to display the canvas properly. For whatever reason, I couldn't see your button and the canvas was showing up blank, so I inlined the width and height in the CSS and put the script inside the body.
Also, since we are ignoring the sound functionality, I renamed the function to startStopRain() so it makes more sense in your JS file. Let's talk about that next:
// Get the canvas and context and store in variables
var canvas = document.getElementById("canvas");
var context = canvas.getContext("2d");
var audioOne = document.getElementById("audio-1");
// Set canvas dimensions to window height and width
canvas.width = window.innerHeight;
canvas.height = window.innerHeight;
// Generate the raindrops and apply attributes
var rainNum = 200; // max raindrops
var rainDrops = [];
let isClicked = false
// Loop through the empty raindrops and apply attributes
for(var i = 0; i < rainNum; i ++)
{
rainDrops.push({
x: Math.random() * canvas.width,
y: Math.random() * canvas.height,
})
}
// Draw raindrops onto canvas
function draw()
{
context.clearRect(0, 0, canvas.width, canvas.height);
context.lineWidth = 0.1;
context.strokeStyle = "red";
context.beginPath();
for(var i = 0; i < rainNum; i ++)
{
var r = rainDrops[i];
context.moveTo(r.x, r.y);
context.lineTo(r.x, r.y + 30);
rainDrops[i].y += 13;
context.stroke();
}
if (isClicked == true) {
moveRain();
} else{
return false
}
document.getElementById("canvas").onclick = moveRain();
window.requestAnimationFrame(draw);
}
window.requestAnimationFrame(draw);
// Animate the raindrops
function moveRain(){
for(var i = 0; i < rainNum; i++)
{
// Store current raindrops
var r = rainDrops[i];
// If the rain reaches the bottom, send a new one to the top
if(r.y > canvas.height) {
rainDrops[i] = {x: Math.random() * canvas.width, y: 0};
}
}
}
// Create a reference to the audio
// need to figure out why the audio wont play - this is new to me :D
function playAudio(){
audioOne.play();
if (isClicked == true){
isClicked = false
audioOne.pause();
} else if (isClicked == false){
isClicked = true
audioOne.play();
draw()
}
}
Your original intent was to use two class names on the btn so you could distinguish between play and pause. While there are use cases for multiple class names, I don't personally believe this is one of them. Instead, I used a global isClicked variable in the script so I could control the flow of the functions.
Follow the isClicked variable from top to bottom to better understand how I used it to start and stop things. Aside from initializing the variable at the top, you'll notice it shows up in only two functions: draw() and startStopRain().
EDIT: The flow control for the sound should be no different. You can use the same isClicked boolean to determine when the sound should play or pause. If you want, I can update the code to reflect that, but truthfully, that would be good practice for you. Also, I now grab the audio element with getElementById("audio-1"), since the original code was selecting the element via that specific id.
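In outline, that combined handler would look something like the sketch below (my own illustration, reusing the names from the code above; the button's onclick would need to call startStopRain instead of playAudio):
// Sketch only: one handler gating both the audio and the rain loop
// with the same isClicked flag that draw() checks.
function startStopRain() {
  if (isClicked) {
    isClicked = false;   // draw() stops scheduling new frames
    audioOne.pause();
    btn.className = "play";
  } else {
    isClicked = true;
    audioOne.play();
    btn.className = "pause";
    draw();              // restart the animation loop
  }
}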
I want to make an input that is automatically filled in after scanning a QR code or barcode using a webcam or phone camera.
For the script, I imagined something like this:
<video autoplay = "true" id = "video-webcam">
</video>
<input type = "text" id = "scanresult">
<script>
// external or internal script to scan the QR code or barcode; save the result in a variable `result`
document.getElementById('scanresult').value = result;
</script>
I hope someone can give me suggestions or feedback for my problem.
Thank you.
First of all, thank you JaromandaX; I have found a script for a barcode scanner in HTML5 using the webcam.
This is my index.html:
<!DOCTYPE html>
<html>
<head>
<title>QR Code Scanner</title>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0; user-scalable=0;" />
<link rel="stylesheet" href="style.css" />
<script src="https://rawgit.com/sitepoint-editors/jsqrcode/master/src/qr_packed.js"></script>
</head>
<body>
<div id="container">
<h1>QR Code Scanner</h1>
<a id="btn-scan-qr">
<img src="https://dab1nmslvvntp.cloudfront.net/wp-content/uploads/2017/07/1499401426qr_icon.svg">
</a>
<canvas hidden="" id="qr-canvas"></canvas>
<div id="qr-result" hidden="">
<b>Data:</b> <span id="outputData"></span>
</div>
</div>
<script src="qrCodeScanner.js"></script>
</body>
</html>
qrCodeScanner.js
//const qrcode = window.qrcode;
const video = document.createElement("video");
const canvasElement = document.getElementById("qr-canvas");
const canvas = canvasElement.getContext("2d");
const qrResult = document.getElementById("qr-result");
const outputData = document.getElementById("outputData");
const btnScanQR = document.getElementById("btn-scan-qr");
let scanning = false;
qrcode.callback = res => {
if (res) {
outputData.innerText = res;
scanning = false;
video.srcObject.getTracks().forEach(track => {
track.stop();
});
qrResult.hidden = false;
canvasElement.hidden = true;
btnScanQR.hidden = false;
}
};
btnScanQR.onclick = () => {
navigator.mediaDevices
.getUserMedia({ video: { facingMode: "environment" } })
.then(function(stream) {
scanning = true;
qrResult.hidden = true;
btnScanQR.hidden = true;
canvasElement.hidden = false;
video.setAttribute("playsinline", true); // required to tell iOS safari we don't want fullscreen
video.srcObject = stream;
video.play();
tick();
scan();
});
};
function tick() {
canvasElement.height = video.videoHeight;
canvasElement.width = video.videoWidth;
canvas.drawImage(video, 0, 0, canvasElement.width, canvasElement.height);
scanning && requestAnimationFrame(tick);
}
function scan() {
try {
qrcode.decode();
} catch (e) {
setTimeout(scan, 300);
}
}
style.css
html {
height: 100%;
}
body {
font-family: sans-serif;
padding: 0 10px;
height: 100%;
background: black;
margin: 0;
}
h1 {
color: white;
margin: 0;
padding: 15px;
}
#container {
text-align: center;
margin: 0;
}
#qr-canvas {
margin: auto;
width: calc(100% - 20px);
max-width: 400px;
}
#btn-scan-qr {
cursor: pointer;
}
#btn-scan-qr img {
height: 10em;
padding: 15px;
margin: 15px;
background: white;
}
#qr-result {
font-size: 1.2em;
margin: 20px auto;
padding: 20px;
max-width: 700px;
background-color: white;
}
There are lots of good libraries out there for doing this, but I'd personally recommend QrScanner because of its simplicity and intuitiveness.
Your live QR code scanner would look like this...
scanner.html
<div id="holder">
<h3>Scan QR Code from Camera</h3>
<div class="make">
<video id="scan"></video>
</div>
<div>
<input type = "text" id = "scanresult"><br>
<button id="start">Start</button>
<button id="stop">Stop</button>
</div>
</div>
Then add a little CSS as:
style.css
#holder{
width: 30%;
margin:auto;
}
#holder .make {
width: 99%;
height: 30vh;
margin-bottom: 15px;
text-align: center;
}
video {
width: 99%;
margin:auto;
}
Then add your QrScanner code as:
<script type="module">
import QrScanner from "/path/to/qr-scanner.min.js";
QrScanner.WORKER_PATH = "/path/to/qr-scanner-worker.min.js";
// Scanner Object
const scanner = new QrScanner(
document.getElementById("scan"),
function(result){
document.getElementById("scanresult").value = result;
}
);
document.getElementById("start").onclick = e => scanner.start();
document.getElementById("stop").onclick = e => scanner.stop();
</script>
Then connect your camera and click the start button...
Below this text is my code. I am trying to create the Chrome dino game, and so far everything is going well, but I have one problem: after the red block jumps, the dino doesn't stop falling down, while it should. I don't understand where my problem is, as I wrote about the same code yesterday and everything worked just fine. The gravity function can be found in the JavaScript under // Gravity.
Notice how its code is very similar to the jump function, but it doesn't work as well as the jump function. Any help would be appreciated!
// HTML Elements + Other Variables
const floor1 = document.getElementById("floor1");
const floor2 = document.getElementById("floor2");
const floor3 = document.getElementById("floor3");
const floor4 = document.getElementById("floor4");
const floor5 = document.getElementById("floor5");
const floor6 = document.getElementById("floor6");
const floor7 = document.getElementById("floor7");
const dino = document.getElementById("dino");
const highBird = document.getElementById("highBird");
const lowBird = document.getElementById("lowBird");
const wideCactus = document.getElementById("wideCactus");
const thinCactus = document.getElementById("thinCactus");
let jump = 0;
// Floor Function
setTimeout(function () {
floor1.classList.add("floor1Animation");
}, 0);
setTimeout(function () {
floor2.classList.add("floor2Animation");
}, 1000);
setTimeout(function () {
floor3.classList.add("floor3Animation");
}, 2000);
setTimeout(function () {
floor4.classList.add("floor4Animation");
}, 3000);
setTimeout(function () {
floor5.classList.add("floor5Animation");
}, 4000);
setTimeout(function () {
floor6.classList.add("floor6Animation");
}, 5000);
setTimeout(function () {
floor7.classList.add("floor7Animation");
}, 6000);
// Jump
document.onkeydown = function (event) {
let key = event.key;
if (key == "ArrowUp") {
let jumpCount = 0;
if (dino.offsetTop == 95) {
let jumpInterval = setInterval(function () {
dino.style.top = (dino.offsetTop - 5) + "px";
jumpCount += 1;
jump = true;
if (jumpCount == 20) {
clearInterval(jumpInterval);
jump = false;
jumpCount = 0;
}
}, 10);
}
}
}
// Gravity
setInterval(function () {
if (jump == false) {
let jumpGravity = setInterval(function () {
dino.style.top = (dino.offsetTop + 5) + "px";
}, 10);
if (dino.offsetTop == 95) {
clearInterval(jumpGravity);
}
}
}, 10);
body {
margin: 0;
padding: 0;
justify-content: center;
align-items: center;
display: flex;
width: 100vw;
height: 100vh;
}
#gameBoard {
width: 1000px;
height: 150px;
border: 2px solid black;
overflow: hidden;
margin: auto;
position: relative;
background-color: white;
}
#dino {
width: 30px;
height: 50px;
background-color: red;
left: 10px;
top: 95px;
position: absolute;
}
.floorBackground {
position: relative;
height: 10px;
width: 200px;
display: flex;
justify-content: center;
align-items: center;
}
.floor {
position: absolute;
top: 140px;
height: 10px;
width: 200px;
}
#floor1 {
right: -200px;
background-color: red;
}
.floor1Animation {
animation: floorAnimation 6s infinite linear;
}
#floor2 {
right: -200px;
background-color: blue;
}
.floor2Animation {
animation: floorAnimation 6s infinite linear;
}
#floor3 {
right: -200px;
background-color: green;
}
.floor3Animation {
animation: floorAnimation 6s infinite linear;
}
#floor4 {
right: -200px;
background-color: purple;
}
.floor4Animation {
animation: floorAnimation 6s infinite linear;
}
#floor5 {
right: -200px;
background-color: brown;
}
.floor5Animation {
animation: floorAnimation 6s infinite linear;
}
#floor6 {
right: -200px;
background-color: orange;
}
.floor6Animation {
animation: floorAnimation 6s infinite linear;
}
#floor7 {
right: -200px;
background-color: yellow;
}
.floor7Animation {
animation: floorAnimation 6s infinite linear;
}
@keyframes floorAnimation {
from {
right: -200px;
}
to {
right: 1000px;
}
}
@keyframes jumping {
}
<!DOCTYPE html>
<html lang = "en">
<head>
<meta charset = "UTF-8">
<title>
Dino Game
</title>
<link rel = "stylesheet" type = "text/css" href = "DinoCSS.css">
</head>
<body>
<div id = "gameBoard">
<div id = "floor1" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "floor2" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "floor3" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "floor4" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "floor5" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "floor6" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "floor7" class = "floor">
<img src = "Pictures/dinoBackground.PNG" class = "floorBackground">
</div>
<div id = "dino"></div>
<div id = "highBird"></div>
<div id = "lowBird"></div>
<div id = "wideCactus"></div>
<div id = "thinCactus"></div>
</div>
<script type = "text/javascript" src = "DinoJS.js"></script>
</body>
</html>
You should try simplifying your code; it may make the issue easier to find, or it may even remove it entirely.
I'd suggest using a velocity system: every frame, adjust the dino's offsetTop by the dino's current velocity, and then add the gravity amount to the current velocity (screen y grows downward, so falling means the velocity keeps increasing).
This makes the velocity change at a constant rate but the dino's position change at an accelerating pace, mimicking real gravity.
Then, to make collision work, just test whether the dino is at or above the ground's offsetTop value before applying the velocity. If you find that the dino would end up below the ground, set the offsetTop to the ground level and clear the velocity, as in the rough sketch below.
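For reference, here is that idea applied directly to the existing #dino element (my own sketch, assuming the ground sits at top: 95px as in the question's CSS):
// Sketch only: gravity via a velocity that grows each frame.
let velocityY = 0;
const gravity = 0.5;
const groundTop = 95; // the dino's resting top from the CSS

setInterval(function () {
  let nextTop = dino.offsetTop + velocityY;
  velocityY += gravity;
  if (nextTop >= groundTop) {
    nextTop = groundTop; // landed: snap to the ground...
    velocityY = 0;       // ...and clear the velocity
  }
  dino.style.top = nextTop + "px";
}, 1000 / 60);

// A jump is then just a one-off upward (negative) velocity, e.g. velocityY = -10;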
I'd also suggest moving your game over to the HTML5 Canvas API, since then you just have to deal with properties of objects rather than DOM element style attributes.
Velocity example with canvas:
var canvas = document.getElementById('canvas');
var ctx = canvas.getContext('2d');
var x = 10;
var y = 10;
var VelocityX = 0;
var VelocityY = 0;
const gravity = 0.75;
// width and height of the square
var width = 10;
var height = 10;
function animate() {
ctx.clearRect(0, 0, canvas.width, canvas.height); // Clear the canvas
x += VelocityX;
if( x > 500) {
x = 0;
}
if ( y >= 450 ) {
y = 450
VelocityY = 0
} else {
y += VelocityY;
VelocityY += gravity; // Higher values = lower position on canvas
}
ctx.fillRect(x, y, width, height);
}
setInterval(animate, 1000 / 60); // 60 FPS
#canvas {
border: 1px solid black;
}
<canvas id="canvas" width="500" height="500"></canvas>
JSFiddle
I have a simple animation created using CreateJS and ffmpegserver.js.
ffmpegserver.js is a simple Node server and library that sends canvas frames to the server and uses FFmpeg to compress the video. It can be used standalone or with CCapture.js.
Here is the repo: video rendering demo.
In the public folder I have demos, e.g. test3.html and test3.js.
Test3.html
<!DOCTYPE html>
<html>
<head>
<title>TweenJS: Simple Tween Demo</title>
<style>
canvas {
border: 1px solid #08bf31;
justify-content: center;
display: flex;
align-items: center;
margin: 0px auto;
margin-bottom: 40px;
}
a {
width: 150px;
height: 45px;
background: red;
text-align: center;
display: flex;
justify-content: center;
align-items: center;
border-radius: 300px;
color: white;
}
#container{
flex-direction: column;
justify-content: center;
display: flex;
align-items: center;
margin: 0px auto;
}
#progress{
margin: 30px;
}
#preview{
margin: 40px;
width: 150px;
height: 45px;
background: deepskyblue;
color: white;
border: none;
border-radius: 300px;
}
</style>
</head>
<body onload="init();">
<div>
<div id="container">
<h1>Simple Tween Demo</h1>
<canvas id="testCanvas" width="500" height="400"></canvas>
<div id="progress"></div>
</div>
</div>
<script src="http://localhost:8081/ffmpegserver/CCapture.js"></script>
<script src="http://localhost:8081/ffmpegserver/ffmpegserver.js"></script>
<script src="https://code.createjs.com/1.0.0/createjs.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/tween.js/17.2.0/Tween.js"></script>
<script src="test3.js"></script>
</body>
</html>
Test3.js
/* eslint-disable eol-last */
/* eslint-disable no-undef */
/* eslint-disable quotes */
var canvas, stage;
function init() {
var framesPerSecond = 60;
var numFrames = framesPerSecond * 5; // a 5 second 60fps video
var frameNum = 0;
var progressElem = document.getElementById("progress");
var progressNode = document.createTextNode("");
progressElem.appendChild(progressNode);
function onProgress(progress) {
progressNode.nodeValue = (progress * 100).toFixed(1) + "%";
}
function showVideoLink(url, size) {
size = size ? (" [size: " + (size / 1024 / 1024).toFixed(1) + "meg]") : " [unknown size]";
var a = document.createElement("a");
a.href = url;
var filename = url;
var slashNdx = filename.lastIndexOf("/");
if (slashNdx >= 0) {
filename = filename.substr(slashNdx + 1);
}
a.download = filename;
a.appendChild(document.createTextNode("Download"));
var container = document.getElementById("container").insertBefore(a, progressElem);
}
var capturer = new CCapture( {
format: 'ffmpegserver',
//workersPath: "3rdparty/",
//format: 'gif',
//verbose: true,
framerate: framesPerSecond,
onProgress: onProgress,
//extension: ".mp4",
//codec: "libx264",
} );
capturer.start();
canvas = document.getElementById("testCanvas");
stage = new createjs.Stage(canvas);
var ball = new createjs.Shape();
ball.graphics.setStrokeStyle(5, 'round', 'round');
// eslint-disable-next-line quotes
ball.graphics.beginStroke('#000000');
ball.graphics.beginFill("#FF0000").drawCircle(0, 0, 50);
ball.graphics.setStrokeStyle(1, 'round', 'round');
ball.graphics.beginStroke('#000000');
ball.graphics.moveTo(0, 0);
ball.graphics.lineTo(0, 50);
ball.graphics.endStroke();
ball.x = 200;
ball.y = -50;
createjs.Tween.get(ball, {loop: -1})
.to({x: ball.x, y: canvas.height - 55, rotation: -360}, 1500, createjs.Ease.bounceOut)
.wait(1000)
.to({x: canvas.width - 55, rotation: 360}, 2500, createjs.Ease.bounceOut)
.wait(1000)
.to({scaleX: 2, scaleY: 2}, 2500, createjs.Ease.quadOut)
.wait(1000)
stage.addChild(ball);
createjs.Ticker.addEventListener("tick", stage);
function render() {
requestAnimationFrame(render);
capturer.capture( canvas );
++frameNum;
if (frameNum < numFrames) {
progressNode.nodeValue = "rendered frame# " + frameNum + " of " + numFrames;
} else if (frameNum === numFrames) {
capturer.stop();
capturer.save(showVideoLink);
}
}
render();
}
Everything works fine; you can test it yourself if you want by cloning the repo.
Right now the animation rendering happens on the client side; I would like the rendering to happen on the backend instead.
What do I need to change to render this animation on the backend with Node.js? Any help or suggestions will be appreciated.
Since you do all your animations on the canvas, you can use node-canvas to do the same in Node.js. (You have to double-check that CreateJS also works in Node.js, though. If not, find another library or write those routines yourself.)
Spawn ffmpeg into its own process accepting input through a pipe (roughly ffmpeg -f rawvideo -pix_fmt rgba -s <width>x<height> -i - ...; the exact invocation will differ according to which server environment you use). After each frame is drawn, extract the pixel data using canvas.getContext('2d').getImageData(0, 0, width, height).data and pipe the result to ffmpeg.
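A minimal sketch of that pipeline, assuming the canvas npm package (node-canvas) and an ffmpeg binary on the PATH; the drawing here is a placeholder standing in for the CreateJS scene:
const { createCanvas } = require("canvas");
const { spawn } = require("child_process");

const width = 500, height = 400, fps = 60, numFrames = fps * 5;
const canvas = createCanvas(width, height);
const ctx = canvas.getContext("2d");

// ffmpeg reads raw RGBA frames from stdin and writes an mp4.
const ffmpeg = spawn("ffmpeg", [
  "-y",
  "-f", "rawvideo",
  "-pix_fmt", "rgba",
  "-s", `${width}x${height}`,
  "-r", String(fps),
  "-i", "-",            // frames come from stdin
  "-pix_fmt", "yuv420p",
  "out.mp4",
]);

for (let frame = 0; frame < numFrames; frame++) {
  // Placeholder drawing; in the real app this is where the tweened scene renders.
  ctx.fillStyle = "white";
  ctx.fillRect(0, 0, width, height);
  ctx.fillStyle = "red";
  ctx.beginPath();
  ctx.arc(200, (frame / numFrames) * height, 50, 0, 2 * Math.PI);
  ctx.fill();

  // Extract the pixel data and pipe it to ffmpeg.
  // (A real implementation should respect backpressure on ffmpeg.stdin.)
  const pixels = ctx.getImageData(0, 0, width, height).data;
  ffmpeg.stdin.write(Buffer.from(pixels.buffer));
}
ffmpeg.stdin.end();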