How to display aside subtitles of an embedded youtube video? - javascript

Could you please help me solve this problem? I'm stuck.
I want to display subtitles of an embedded youtube video out of the video player box to allow the user to copy and paste it somewhere in order to easily learn from it.
I've already prepared the subtitle of that video in VTT format and it's locally saved. Now my concern is to display subtitles alongside that video, I mean each sentence spoken must be simultaneously displayed in the subtitle side.
<div class="container">
<div class="row">
<div class="col-md-6">
<div class="thumbnail">
<div class="embed-responsive embed-responsive-4by3">
<iframe class="embed-responsive-item" src="<%= lugha.video %>"></iframe>
</div>
<h4 class="title" style="font-weight: bold;"><%=lugha.title%></h4>
</div>
</div>
<div class="col-md-6">
<div class="well well-lg">
<!-- Subs here -->
<div>
</div>
</div>
Embedded video link
var data = [
{
title: "War room",
video: "https://www.youtube.com/embed/Hk_m7HUoaUA?controls=0&clip=UgkxDvl5kU8Mri2usSYI5v0W5S4zfPw_oWtG&clipt=EO3BAxifswY"
}
One part of the subtitles from a locally saved vtt file
WEBVTT - Some title
00:01.220 --> 00:03.100
You can't have my marriage.
00:03.680 --> 00:05.510
You can't have my daughter.
00:05.760 --> 00:07.575
And you sure can't have my man.
00:08.195 --> 00:10.425
This house is under new management.
00:10.715 --> 00:13.045
And that means you are out.

Using the <track> element
The <track> element is the native way to provide captions etc. for media.
We can use it to parse WebVTT files. The parsed TextTrackCues can be accessed via HTMLTrackElement.track:
// Create an off-DOM <track> element and point it at a blob URL containing
// the WebVTT text; the browser parses the file for us.
const trackElement = document.createElement("track");
trackElement.src = getVttUrl();
// `track` now contains all cues
// (trackElement.track is the parsed TextTrack object backing the element)
const track = trackElement.track;
// Builds a WebVTT file in memory and returns an object URL for it.
// Returns: a blob: URL suitable as the src of a <track> element.
function getVttUrl() {
// Some external resources are disallowed in StackOverflow snippets.
// Here I'm blobifying the VTT-file's content:
// FIX: WebVTT requires a blank line after the "WEBVTT" header and between
// cue blocks; without them the file fails to parse and no cues are created.
const vtt = `WEBVTT

00:00:00.000 --> 00:00:00.999 line:80%
Hildy!

00:00:01.000 --> 00:00:01.499 line:80%
How are you?

00:00:01.500 --> 00:00:02.999 line:80%
Tell me, is the lord of the universe in?

00:00:03.000 --> 00:00:04.299 line:80%
Yes, he's in - in a bad humor

00:00:04.300 --> 00:00:06.000 line:80%
Somebody must've stolen the crown jewels
`;
// Wrap the text in a Blob typed as text/vtt and expose it via an object URL.
const vttBlob = new Blob([vtt], { type: "text/vtt" });
return URL.createObjectURL(vttBlob);
}
With the cuechange event
We can add the <track> element to a media element, so that it activates the correct cues accordingly.
Setting a TextTrack's mode property to "hidden" hides it, but it can still be used programmatically. We can add a listener for its cuechange event:
const video = document.querySelector("video");
const captions = document.getElementById("captions");
// Attach the <track> to the media element so the browser activates the
// correct cues in sync with playback.
const trackElement = document.createElement("track");
trackElement.src = getVttUrl();
video.append(trackElement);
const track = trackElement.track;
// "hidden": cues are not rendered over the video, but stay usable from script.
track.mode = "hidden";
// Mirror the set of currently active cues into #captions each time it changes.
track.addEventListener("cuechange", () => {
captions.replaceChildren(...Array.from(track.activeCues).map(cue => cue.getCueAsHTML()));
});
// Builds a WebVTT file in memory and returns an object URL for it.
// Returns: a blob: URL suitable as the src of a <track> element.
// FIX: WebVTT requires a blank line after the "WEBVTT" header and between
// cue blocks; without them the file fails to parse and no cues are created.
function getVttUrl() {
const vtt = `WEBVTT

00:00:00.000 --> 00:00:00.999 line:80%
Hildy!

00:00:01.000 --> 00:00:01.499 line:80%
How are you?

00:00:01.500 --> 00:00:02.999 line:80%
Tell me, is the lord of the universe in?

00:00:03.000 --> 00:00:04.299 line:80%
Yes, he's in - in a bad humor

00:00:04.300 --> 00:00:06.000 line:80%
Somebody must've stolen the crown jewels
`;
const vttBlob = new Blob([vtt], { type: "text/vtt" });
return URL.createObjectURL(vttBlob);
}
<!-- <base> makes the relative video src resolve against MDN's sample host. -->
<head><base href="https://interactive-examples.mdn.mozilla.net"></head>
<body>
<video controls width=320 src="/media/cc0-videos/friday.mp4"></video>
<!-- Target element for the externally rendered captions. -->
<p id="captions"></p>
</body>
With the media's timeupdate event
I also tried the video's timeupdate event, since its rate of fire depends on the browser's implementation:
const video = document.querySelector("video");
const captions = document.getElementById("captions");
// Build a <track> from the blobified VTT and attach it to the video so the
// browser keeps track.activeCues in sync with playback.
const trackElement = document.createElement("track");
trackElement.src = getVttUrl();
video.append(trackElement);
const track = trackElement.track;
// "hidden": cues are not drawn over the video but remain accessible here.
track.mode = "hidden";
// timeupdate typically fires every ~15-250ms (browser-dependent), usually
// more often than cuechange, so captions update more promptly.
video.addEventListener("timeupdate", () => {
captions.replaceChildren(...Array.from(track.activeCues).map(cue => cue.getCueAsHTML()));
});
// Builds a WebVTT file in memory and returns an object URL for it.
// Returns: a blob: URL suitable as the src of a <track> element.
// FIX: WebVTT requires a blank line after the "WEBVTT" header and between
// cue blocks; without them the file fails to parse and no cues are created.
function getVttUrl() {
const vtt = `WEBVTT

00:00:00.000 --> 00:00:00.999 line:80%
Hildy!

00:00:01.000 --> 00:00:01.499 line:80%
How are you?

00:00:01.500 --> 00:00:02.999 line:80%
Tell me, is the lord of the universe in?

00:00:03.000 --> 00:00:04.299 line:80%
Yes, he's in - in a bad humor

00:00:04.300 --> 00:00:06.000 line:80%
Somebody must've stolen the crown jewels
`;
const vttBlob = new Blob([vtt], { type: "text/vtt" });
return URL.createObjectURL(vttBlob);
}
<!-- <base> makes the relative video src resolve against MDN's sample host. -->
<head><base href="https://interactive-examples.mdn.mozilla.net"></head>
<body>
<video controls width=320 src="/media/cc0-videos/friday.mp4"></video>
<!-- Target element for the externally rendered captions. -->
<p id="captions"></p>
</body>
Note: Keep the <track> element added to the media, so that activeCues is still kept live!
With requestAnimationFrame()
Technically, using the requestAnimationFrame() function should provide the most up-to-date captions possible, since it fires before every frame:
const video = document.querySelector("video");
const captions = document.getElementById("captions");
const trackElement = document.createElement("track");
trackElement.src = getVttUrl();
video.append(trackElement);
const track = trackElement.track;
track.mode = "hidden";
track.cues; // Accessing `cues` fixes a "not iterable" error (on Chrome)
// Self-rescheduling rAF loop: refresh the captions once per rendered frame.
requestAnimationFrame(function captionsCallback() {
updateCaptions();
requestAnimationFrame(captionsCallback);
});
// Mirror the currently active cues into the #captions element.
function updateCaptions() {
captions.replaceChildren(...Array.from(track.activeCues).map(cue => cue.getCueAsHTML()));
}
// Builds a WebVTT file in memory and returns an object URL for it.
// Returns: a blob: URL suitable as the src of a <track> element.
// FIX: WebVTT requires a blank line after the "WEBVTT" header and between
// cue blocks; without them the file fails to parse and no cues are created.
function getVttUrl() {
const vtt = `WEBVTT

00:00:00.000 --> 00:00:00.999 line:80%
Hildy!

00:00:01.000 --> 00:00:01.499 line:80%
How are you?

00:00:01.500 --> 00:00:02.999 line:80%
Tell me, is the lord of the universe in?

00:00:03.000 --> 00:00:04.299 line:80%
Yes, he's in - in a bad humor

00:00:04.300 --> 00:00:06.000 line:80%
Somebody must've stolen the crown jewels
`;
const vttBlob = new Blob([vtt], { type: "text/vtt" });
return URL.createObjectURL(vttBlob);
}
<!-- <base> makes the relative video src resolve against MDN's sample host. -->
<head><base href="https://interactive-examples.mdn.mozilla.net"></head>
<body>
<video controls width=320 src="/media/cc0-videos/friday.mp4"></video>
<!-- Target element for the externally rendered captions. -->
<p id="captions"></p>
</body>
We can decouple it from TextTrack's activeCues by implementing a custom "activeness evaluation" using HTMLMediaElement.currentTime. This may be "even more" live:
const video = document.querySelector("video");
const captions = document.getElementById("captions");
const trackElement = document.createElement("track");
trackElement.src = getVttUrl();
video.append(trackElement);
const track = trackElement.track;
track.mode = "hidden";
track.cues; // Accessing `cues` fixes a "not iterable" error (on Chrome)
// Self-rescheduling rAF loop: refresh the captions once per rendered frame.
requestAnimationFrame(function captionsCallback() {
updateCaptions();
requestAnimationFrame(captionsCallback);
});
// Instead of relying on track.activeCues, evaluate cue activeness manually
// against the media's currentTime on every frame.
function updateCaptions() {
const activeCues = Array.from(track.cues).filter(cue => {
// Check according to:
// https://developer.mozilla.org/en-US/docs/Web/API/WebVTT_API#cue_timings (see "Non-overlapping cue timing examples")
// A cue is active while startTime <= currentTime < endTime.
return cue.startTime <= video.currentTime && cue.endTime > video.currentTime;
});
captions.replaceChildren(...activeCues.map(cue => cue.getCueAsHTML()));
}
// Builds a WebVTT file in memory and returns an object URL for it.
// Returns: a blob: URL suitable as the src of a <track> element.
// FIX: WebVTT requires a blank line after the "WEBVTT" header and between
// cue blocks; without them the file fails to parse and no cues are created.
function getVttUrl() {
const vtt = `WEBVTT

00:00:00.000 --> 00:00:00.999 line:80%
Hildy!

00:00:01.000 --> 00:00:01.499 line:80%
How are you?

00:00:01.500 --> 00:00:02.999 line:80%
Tell me, is the lord of the universe in?

00:00:03.000 --> 00:00:04.299 line:80%
Yes, he's in - in a bad humor

00:00:04.300 --> 00:00:06.000 line:80%
Somebody must've stolen the crown jewels
`;
const vttBlob = new Blob([vtt], { type: "text/vtt" });
return URL.createObjectURL(vttBlob);
}
<!-- <base> makes the relative video src resolve against MDN's sample host. -->
<head><base href="https://interactive-examples.mdn.mozilla.net"></head>
<body>
<video controls width=320 src="/media/cc0-videos/friday.mp4"></video>
<!-- Target element for the externally rendered captions. -->
<p id="captions"></p>
</body>
Comparing the methods
Here you can try the methods from above yourself:
// One shared <track>/TextTrack feeds four independent caption renderers so
// the update strategies can be compared side by side.
const video = document.querySelector("video");
const trackElement = document.createElement("track");
trackElement.src = getVttUrl();
video.append(trackElement);
const track = trackElement.track;
track.mode = "hidden";
// Strategy 1: update only when the set of active cues changes.
(function captionsCuechange() {
const captions = document.getElementById("cap-cuechange");
track.addEventListener("cuechange", () => {
captions.replaceChildren(...Array.from(track.activeCues).map(cue => cue.getCueAsHTML()));
});
})();
// Strategy 2: update on the media's timeupdate event (fires more frequently).
(function captionsTimeupdate() {
const captions = document.getElementById("cap-timeupdate");
video.addEventListener("timeupdate", () => {
captions.replaceChildren(...Array.from(track.activeCues).map(cue => cue.getCueAsHTML()));
});
})();
// Strategy 3: poll activeCues once per rendered frame.
(function captionsRequestAnimationFrame() {
const captions = document.getElementById("cap-frame");
track.cues; // Accessing `cues` fixes a "not iterable" error (on Chrome)
requestAnimationFrame(function captionsCallback() {
updateCaptions();
requestAnimationFrame(captionsCallback);
});
function updateCaptions() {
const cuesAsHTML = Array.from(track.activeCues).map(cue => cue.getCueAsHTML());
captions.replaceChildren(...cuesAsHTML);
}
})();
// Strategy 4: per-frame polling with a manual activeness check against
// video.currentTime instead of the browser-maintained activeCues list.
(function captionsRequestAnimationFrameAndCurrentTime() {
const captions = document.getElementById("cap-frame-current");
track.cues; // Accessing `cues` fixes a "not iterable" error (on Chrome)
requestAnimationFrame(function captionsCallback() {
updateCaptions();
requestAnimationFrame(captionsCallback);
});
function updateCaptions() {
const activeCues = Array.from(track.cues).filter(cue => {
// A cue is active while startTime <= currentTime < endTime.
return cue.startTime <= video.currentTime && cue.endTime > video.currentTime;
});
const cuesAsHTML = activeCues.map(cue => cue.getCueAsHTML());
captions.replaceChildren(...cuesAsHTML);
}
})();
// Builds a WebVTT file in memory and returns an object URL for it.
// Returns: a blob: URL suitable as the src of a <track> element.
// FIX: WebVTT requires a blank line after the "WEBVTT" header and between
// cue blocks; without them the file fails to parse and no cues are created.
function getVttUrl() {
const vtt = `WEBVTT

00:00:00.000 --> 00:00:00.999 line:80%
Hildy!

00:00:01.000 --> 00:00:01.499 line:80%
How are you?

00:00:01.500 --> 00:00:02.999 line:80%
Tell me, is the lord of the universe in?

00:00:03.000 --> 00:00:04.299 line:80%
Yes, he's in - in a bad humor

00:00:04.300 --> 00:00:06.000 line:80%
Somebody must've stolen the crown jewels
`;
const vttBlob = new Blob([vtt], { type: "text/vtt" });
return URL.createObjectURL(vttBlob);
}
/* Reserve one line-height per caption area so the layout doesn't jump
   when captions appear and disappear. */
.caption {min-height: 1lh}
<!-- <base> makes the relative video src resolve against MDN's sample host. -->
<head><base href="https://interactive-examples.mdn.mozilla.net"></head>
<body>
<video controls width=320 src="/media/cc0-videos/friday.mp4"></video>
<!-- One output element per caption-update strategy, for side-by-side comparison. -->
<div>
<p>On <code>cuechange</code>:</p>
<p id="cap-cuechange" class="caption"></p>
</div>
<div>
<p>On <code>timeupdate</code>:</p>
<p id="cap-timeupdate" class="caption"></p>
</div>
<div>
<p>With <code>requestAnimationFrame()</code>:</p>
<p id="cap-frame" class="caption"></p>
</div>
<div>
<p>With <code>requestAnimationFrame()</code> and <code>currentTime</code>:</p>
<p id="cap-frame-current" class="caption"></p>
</div>
</body>
Also try scrubbing the timeline and see how the captions react!
Note: You may want to try the custom activeness check with the other events as well.

Related

How to redirect webpage if copyright text removed using JavaScript or jQuery [duplicate]

I have an HTML template that was developed with HTML, CSS, JavaScript, and jQuery. I want to add a copyright text to the whole theme, with one catch: if anybody removes the code or text, the theme should automatically redirect to a URL. I have seen this system in Blogger/Blogspot templates. They use a copyright text in the footer of every theme (paid and free). If anybody removes the code or makes it invisible (visibility:hidden or display:none), the theme automatically redirects. They did it with just jQuery and JavaScript.
How I can do it..?
This will check for any changes within the footer. Try running it, and use your browser's editor to either delete or edit the stuff inside <footer> OR you can also try to change the CSS on <div class="copyright">. The setInterval() timer is set to execute every 10 seconds.
// Watches the <footer> for tampering: if its markup or the copyright
// element's computed style changes, redirect the page.
let footer = document.querySelector('footer');
let copyright = document.getElementById('copyright');
let originalFooter = footer.innerHTML;
// Serialize the computed properties that hiding tricks would change.
// BUG FIX: the original used String.toString(getComputedStyle(...)), but
// String.toString ignores its argument and always returns the source of the
// String constructor — so the "before" and "after" snapshots were always
// identical and style changes were never detected.
function styleSnapshot(el) {
  const style = getComputedStyle(el);
  return [style.display, style.visibility, style.opacity].join('|');
}
let originalCopyrightStyle = styleSnapshot(copyright);
// Compare the current footer markup and copyright style against the
// snapshots taken at load time; redirect on any difference.
function checkFooter() {
  let currentFooter = footer.innerHTML;
  let currentCopyrightStyle = styleSnapshot(copyright);
  if ((currentFooter !== originalFooter) ||
      (currentCopyrightStyle !== originalCopyrightStyle)) {
    location.href = `https://www.youtube.com/`;
  }
}
// Poll every 10 seconds.
setInterval(() => {
  checkFooter()
}, 10000);
<!-- The watched region: any change inside <footer> triggers the redirect. -->
<footer>
<div id="copyright">
This is my copyright, do not change it!
</div>
</footer>
W3Schools:
<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" href="/style.css">
<title>Document</title>
</head>
<body>
<!-- The watched region: any change inside <footer> triggers the redirect. -->
<footer>
<div id="copyright">
This is my copyright, do not change it!
</div>
</footer>
</body>
<script>
let footer = document.querySelector('footer');
let copyright = document.getElementById('copyright');
let originalFooter = footer.innerHTML;
// Serialize the computed properties that hiding tricks would change.
// BUG FIX: the original used String.toString(getComputedStyle(...)), but
// String.toString ignores its argument and always returns the source of the
// String constructor — so the "before" and "after" snapshots were always
// identical and style changes were never detected.
function styleSnapshot(el) {
  const style = getComputedStyle(el);
  return [style.display, style.visibility, style.opacity].join('|');
}
let originalCopyrightStyle = styleSnapshot(copyright);
// Compare the current footer markup and copyright style against the
// snapshots taken at load time; redirect on any difference.
function checkFooter() {
  let currentFooter = footer.innerHTML;
  let currentCopyrightStyle = styleSnapshot(copyright);
  if ((currentFooter !== originalFooter) ||
      (currentCopyrightStyle !== originalCopyrightStyle)) {
    location.href = `https://www.youtube.com/`;
  }
}
// Poll every second and log each check.
setInterval(() => {
  checkFooter()
  console.log('check');
}, 1000);
</script>
</html>
Use an IntersectionObserver and check the element's intersectionRatio to determine whether it's been removed/hidden:
// Demo buttons that remove or hide the copyright element.
// NOTE(review): `btn`, `btn1` and `copyright` rely on the browser's implicit
// id-to-global mapping; prefer document.getElementById in production code.
btn.addEventListener('click', () => copyright.remove())
btn1.addEventListener('click', () => copyright.style.display = "none")
// intersectionRatio drops to 0 when the element is removed from the DOM,
// hidden via display:none, or otherwise no longer intersects the viewport.
const observer = new IntersectionObserver((elems, obs) => {
if(elems[0].intersectionRatio == 0) {
console.log('elem hidden: redirect')
}
})
observer.observe(copyright)
<!-- Observed copyright element plus buttons to simulate tampering. -->
<footer>
<span id='copyright'> Copyright 2023 - Theme Title </span> | All Rights Reserved.
</footer>
<button id="btn">Remove copyright</button>
<button id="btn1">Hide copyright</button>

Nextjs/React/JS How compress video file uploaded by user through input (client side preferred)

I have made a basic app where the user can upload a video file through an input.
As you can see in the code i also retrieve the duration and the size of the initial video.
Now the question is, how i can compress the video file in the function "compressvid" so that the size of the video becomes massively smaller (at later stage i want to upload these videos to firebase firestore).
I've read something about ffmpeg but wasn't able to figure out how to use it here.
I prefer it to be client side as the videos a client can upload are at max 30sec long.
If client side is not possible how would it work server side?
import Head from 'next/head'
import styles from '../styles/Home.module.css'
import { useState, useEffect } from 'react'
export default function Home() {
const [videofile, setVideo] = useState("")
const [viddur, setviddur] = useState("")
useEffect(() => {
// only run this if videofile exists
if (videofile != "") {
console.log("compress video now ")
console.log(videofile.type)
// get duration of video by creating a theoretical video component
var video = document.createElement('video');
video.preload = 'metadata';
video.onloadedmetadata = function() {
window.URL.revokeObjectURL(video.src);
// here now can check if video is too long
setviddur(video.duration)
}
video.src = URL.createObjectURL(videofile)
}
}, [videofile]);
const clickedvideo = () => {
console.log("clicked video")
}
const compressvid = () => {
// here need to compress the video so that the size is smaller: preferred client-side; if that's not possible howis it posssible server side or with a cheap api
}
return (
<div className={styles.container}>
<Head>
<title>Video Compressor</title>
<meta name="description" content="Video compressor" />
<link rel="icon" href="/favicon.ico" />
</Head>
<main className={styles.main}>
<h1 className={styles.title}>
Video compressor
</h1>
<p>Size of video before: {videofile.size}</p>
<p>Duration of video: {viddur}</p>
<p>Size of video after: { }</p>
<input className={styles.videoinput} id="myvideo" type="file" accept="video/mp4,video/x-m4v,video/*" onChange={(e) => setVideo(e.target.files[0])}></input>
<div>
{(videofile != "") ? <video autoPlay loop id="video" src={URL.createObjectURL(videofile)} onClick={clickedvideo} width="300px" height="300px" ></video> : null}
</div>
</main>
</div>
)
}
I tried to compress a video uploaded by a user but didn't figure out how to solve it.

How Can I use TawkTo on specific components only in react

I am using the Tawkto script in my index.html file in my react project but I don't want it to be accessible in all components. How do I do this.
This is all I know so far.
<!--Start of Tawk.to Script-->
<!-- <script type="text/javascript">
var Tawk_API = Tawk_API || {},
Tawk_LoadStart = new Date()
;(function () {
var s1 = document.createElement('script'),
s0 = document.getElementsByTagName('script')[0]
s1.async = true
s1.src = 'https://embed.tawk.to/propertyId/tawkId'
s1.charset = 'UTF-8'
s1.setAttribute('crossorigin', '*')
s0.parentNode.insertBefore(s1, s0)
})()
</script> -->
<!--End of Tawk.to Script-->
Could anyone please help me with this?

How to implement custom Tensorflow.js models into webpage?

I would like to create a website that can classify different types of cars. I have managed to get the website working to use the mobile net model, but I would like to use a custom model that I trained in google colab and then converted into javascript. Does anyone know how I could achieve this?
Here is the javascript code:
// Defining Variables
const webcamElement = document.getElementById('webcam');
// Shared model handle used by both the image and the webcam prediction paths.
let net;
var webcamrunning = false; // Flag, indicates if webcam-prediction is running or not
// Buttons: bw toggles webcam prediction, bi triggers image prediction.
var bw = document.getElementById('butwebcam')
var bi = document.getElementById('butimage')
// App that predicts image
// Loads the user-supplied model file (model.json from the file input) and
// classifies the preview image, writing the result into #console_pic.
async function app() {
console.log('Loading mobilenet..');
const uploadJSONInput = document.getElementById('upload-json');
// NOTE(review): loadLayersModel with only the JSON file will miss the
// binary weight shards; per the tf.js docs the weight files must be passed
// alongside model.json — confirm what the file input provides.
const model = await tf.loadLayersModel(tf.io.browserFiles([uploadJSONInput.files[0]]));
// Check if model loaded, if not, load it.
if (net == undefined)
{bi.innerHTML = 'Wait for Initiation...';
// NOTE(review): tf.loadLayersModel() already returns the loaded model;
// LayersModel has no .load() method, so this line throws a TypeError at
// runtime. The intent is presumably `net = model;`.
net = await model.load();
console.log('Sucessfully loaded model');
bi.innerHTML = 'Predict'}
else {console.log('Model already loaded')};
// Make a prediction through the model on our image.
const imgEl = document.getElementById('output');
// NOTE(review): classify() is the mobilenet wrapper's API; a custom
// LayersModel exposes predict() and needs manual preprocessing plus a
// label mapping — confirm which model type is intended here.
const result = await net.classify(imgEl);
document.getElementById('console_pic').innerText =
`Prediction: ${result[0].className}
Probability: ${Math.round(result[0].probability*100)} %
`;
}
// Function that activates (starts webcam app) and deactivates the Webcam-Prediction
// Click handler for the webcam button: starts the webcam prediction loop
// when idle; otherwise flags the running loop to stop and resets the label.
function start_webcam() {
  if (webcamrunning) {
    webcamrunning = false;
    bw.innerHTML = 'Activate Predicting';
  } else {
    app_webcam();
  }
}
// Setup Webcam
async function setupWebcam() {
return new Promise((resolve, reject) => {
const navigatorAny = navigator;
navigator.getUserMedia = navigator.getUserMedia ||
navigatorAny.webkitGetUserMedia || navigatorAny.mozGetUserMedia ||
navigatorAny.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({video: true},
stream => {
webcamElement.srcObject = stream;
webcamElement.addEventListener('loadeddata', () => resolve(), false);
},
error => reject());
} else {
reject();
}
});
}
// Webcam application
async function app_webcam() {
console.log('Loading mobilenet..');
// Check if model loaded, if not, load it.
if (net == undefined)
{bw.innerHTML = 'Wait for Initiation...';
net = await mobilenet.load();
console.log('Sucessfully loaded model');}
else {console.log('Model already loaded')};
await setupWebcam();
webcamrunning =true;
bw.innerHTML = 'Stop Predicting';
while (webcamrunning) {
const result = await net.classify(webcamElement);
document.getElementById('console_vid').innerText =
`Prediction: ${result[0].className}
Probability: ${Math.round(result[0].probability*100)} %
`;
// Give some breathing room by waiting for the next animation frame to
// fire.
await tf.nextFrame();
}
}
;
Here is the html code:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link rel='stylesheet' href='styles.css'/>
<!-- Load the latest version of TensorFlow.js -->
<!-- FIX: the package URLs used "#" instead of "@" for the npm scope
     (https://unpkg.com/#tensorflow/tfjs), which resolves to the unpkg
     homepage with a fragment — the libraries were never loaded. -->
<script src="https://unpkg.com/@tensorflow/tfjs"></script>
<script src="https://unpkg.com/@tensorflow-models/mobilenet"></script>
<title> Image Classifier with MobileNet </title>
</head>
<body>
<!-- FIX: this input sat inside <head>, which is invalid HTML; moved into
     <body>. NOTE(review): the src attribute has no effect on file inputs
     and points at a local path — presumably leftover; confirm and remove. -->
<input type="file" id="upload-json" src="C:\Users\USER\Desktop\ImageClassifier-master\model\model.json"/>
<img style='margin-top: -6px; z-index: 19;' id="header" height ="320">
<h1 style='margin-top: -35px'> What car is that?</h1>
<br>
<hr/>
<br>
<em> <strong> </strong> </em>
<br>
<br>
<hr/>
<br>
<h2> Upload your own Picture</h2>
<!-- Upload Function with File Preview -->
<input type="file" accept=".png, .jpg, .jpeg" height="200"
onchange="document.getElementById('output').src = window.URL.createObjectURL(this.files[0])">
<!-- Predict button, calls predict function in Javascript -->
<button id="butimage" onclick="app()"> Predict! </button>
<!-- Window for Picture Preview -->
<div class = "window">
<span class="helper"></span>
<img class="center" id="output" alt="your image" src = "img/example.jpg" />
</div>
<div class = "result" id="console_pic">Result</div>
<br>
<hr/>
<br>
<br>
<br>
<br>
<script src="index.js"></script>
</body>
</html>

Create Volume Control with JavaScript and HTML 5

I am having trouble understanding an assignment that my lecturer gave me. He supplies us with a program that is supposed to record from the computer microphone and tells us, to build a volume control for it that controls the db for the loudspeaker. It just confuses me, because there is, as far as I can tell, no loudspeaker involved in this program.
<!-- Assignment scaffold: captures the microphone with the (legacy, prefixed)
     Web Audio API and passes samples through a ScriptProcessorNode. The task
     is to add an interactive volume control on the `zeroGain` GainNode. -->
<html>
<head>
<title>Audio Renderer -chrome</title>
<style>
</style>
</head>
<body>
<h1>HTML5 webmic-Renderer </h1>
<h4>Chrome</h4>
<pre id="preLog">Access to micro</pre>
<p>
<input type="button" id="buttonStart" value="Start" onclick="start()" />
<input type="button" id="buttonStop" value="Stop" onclick="stop()" />
</p>
<script>
// NOTE(review): webkitAudioContext is Chrome's old prefixed constructor;
// modern browsers use `new AudioContext()`.
var audioContext = new webkitAudioContext();
var realAudioInput = null;       // MediaStreamSource node for the microphone
var preLog ;                     // <pre> log target, resolved lazily in log()
var zeroGain;                    // GainNode intended for the volume control
var channel = 2;                 // stereo input
var bufferSize =1024;            // samples per ScriptProcessor audio frame
// Append a line to the on-page log, falling back to alert() if missing.
function log(text){
preLog = document.getElementById('preLog');
if (preLog) preLog.textContent += ('\n' + text);
else alert(text);
}
// Start button: request microphone access (legacy prefixed API).
function start() {
log('Get user media..');
if (navigator.webkitGetUserMedia) navigator.webkitGetUserMedia({audio:true}, gotStream, noStream);
else log('getUserMedia() not available from your Web browser!');
}
function noStream() {
log('Access to Micro was denied!');
}
// Success callback: build the audio graph mic -> script processor -> speakers.
function gotStream(stream) {
log('Access to Micro was started');
// Create an AudioNode from the stream.
realAudioInput = audioContext.createMediaStreamSource(stream);
// Create an GainNode .
// This is the node the assignment's volume control should drive: it is
// created with unity gain but never connected into the graph (see the
// commented-out connect below) — wiring it between jsNode and destination
// and binding gain.value to a UI slider is the expected solution.
zeroGain = audioContext.createGain();
zeroGain.gain.value = 1.0;
// create an audio node with 2 input and 1 output channels, and 1024 byte buffer size per audio frame
jsNode = audioContext.createScriptProcessor(bufferSize, channel, channel-1);
jsNode.onaudioprocess = audioProcess;
// Signal Graph
realAudioInput.connect( jsNode );
// zeroGain.connect(??);
jsNode.connect( audioContext.destination );
}
// Stop button: detach the microphone source from the graph.
function stop() {
log('Access to Micro stopped');
realAudioInput.disconnect(0);
}
// this function is called every audio frame
// Copies the left input channel sample-by-sample to the mono output
// (the right channel is read but currently unused).
function audioProcess(event) {
var sampleIn_l = event.inputBuffer.getChannelData(channel-2); // Stereo: 0 = left channel, 1 = right channel
var sampleIn_r = event.inputBuffer.getChannelData(channel-1);
var sampleOut = event.outputBuffer.getChannelData(channel-2);
// loop through every sample and add sample values to out buffer
for(i = 0; i < event.inputBuffer.length; i++) {
var sample_l = sampleIn_l[i] ;
var sample_r = sampleIn_r[i] ;
sampleOut[i] = ( sample_l );
}
}
</script>
</body>
</html>
It says in his assignment: Create for the program Audio Renderer an interactive volume control for the loudspeaker on node-level: zeroGain.gain.value = 1.0;
I just don't understand what he wants from us. I would be so glad if anyone could help :)
Thank you very much for reading!

Categories