Adding Quality Selector to plyr when using HLS Stream - javascript

I am using Plyr as a wrapper around the HTML5 video tag and Hls.js to stream my .m3u8 video.
I went through a lot of issues on Plyr about enabling a quality selector and came around multiple PRs that asked this question but were closed saying the implementation had been merged, until I came around this PR which says it's still open. There was a custom implementation in the comments which reportedly works. I tried that implementation locally to check whether we can add a quality selector, but it seems I am missing something, or the implementation doesn't work.
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <title>HLS Demo</title>
  <link rel="stylesheet" href="https://cdn.plyr.io/3.5.10/plyr.css" />
  <style>
    body {
      max-width: 1024px;
    }
  </style>
</head>
<body>
  <video preload="none" id="player" autoplay controls crossorigin></video>
  <script src="https://cdn.plyr.io/3.5.10/plyr.js"></script>
  <script src="https://cdn.jsdelivr.net/hls.js/latest/hls.js"></script>
  <script>
    (function () {
      var video = document.querySelector('#player');
      var playerOptions = {
        quality: {
          default: '720',
          options: ['720']
        }
      };
      var player;
      player = new Plyr(video, playerOptions);
      if (Hls.isSupported()) {
        var hls = new Hls();
        hls.loadSource('https://content.jwplatform.com/manifests/vM7nH0Kl.m3u8');
        //hls.loadSource('https://test-streams.mux.dev/x36xhzz/x36xhzz.m3u8');
        hls.attachMedia(video);
        hls.on(Hls.Events.MANIFEST_PARSED, function (event, data) {
          // uncomment to see data here
          // console.log('levels', hls.levels); we get data here but not able to see in settings
          playerOptions.quality = {
            default: hls.levels[hls.levels.length - 1].height,
            options: hls.levels.map((level) => level.height),
            forced: true,
            // Manage quality changes
            onChange: (quality) => {
              console.log('changes', quality);
              hls.levels.forEach((level, levelIndex) => {
                if (level.height === quality) {
                  hls.currentLevel = levelIndex;
                }
              });
            }
          };
        });
      }
      // Start HLS load on play event
      player.on('play', () => hls.startLoad());
      // Handle HLS quality changes
      player.on('qualitychange', () => {
        console.log('changed');
        if (player.currentTime !== 0) {
          hls.startLoad();
        }
      });
    })();
  </script>
</body>
</html>
The above snippet works (please run it), but if you uncomment the console.log line in the MANIFEST_PARSED handler you will see that we do get data in levels, and we pass that data to the player options, yet it doesn't come up in the settings menu. How can we add a quality selector to Plyr when using an HLS stream?

I made a lengthy comment about this on github [1].
Working example: https://codepen.io/datlife/pen/dyGoEXo
The main idea to fix this is:
Configure the Plyr options properly to allow the switching to happen.
Let HLS perform the quality switching, not Plyr. Hence, we only need a single source tag in the video tag:
<video>
  <!-- contains all the streams -->
  <source
    type="application/x-mpegURL"
    src="https://bitdash-a.akamaihd.net/content/sintel/hls/playlist.m3u8">
</video>
[1] https://github.com/sampotts/plyr/issues/1741#issuecomment-640293554
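In outline, a minimal sketch of that approach (adapted from the linked comment and codepen, using the stream URL from the question): build the quality list inside the MANIFEST_PARSED handler and only construct Plyr once the levels are known, since Plyr reads the quality options at construction time and mutating playerOptions afterwards has no effect.

var video = document.querySelector('#player');
var defaultOptions = {};

if (Hls.isSupported()) {
  var hls = new Hls();
  hls.loadSource('https://content.jwplatform.com/manifests/vM7nH0Kl.m3u8');

  hls.on(Hls.Events.MANIFEST_PARSED, function (event, data) {
    // Build the quality list from the parsed levels...
    var availableQualities = hls.levels.map(function (level) {
      return level.height;
    });

    defaultOptions.quality = {
      default: availableQualities[0],
      options: availableQualities,
      forced: true,
      onChange: function (newQuality) {
        // ...and let hls.js, not Plyr, do the actual switching.
        hls.levels.forEach(function (level, levelIndex) {
          if (level.height === newQuality) {
            hls.currentLevel = levelIndex;
          }
        });
      }
    };

    // Only now create the player, with the options fully populated.
    new Plyr(video, defaultOptions);
  });

  hls.attachMedia(video);
}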

Related

Javascript MediaSource API and H264 video

I have a problem playing an H264 video using the JavaScript MediaSource Extensions API. I'll describe the scenario in detail below.
I've already successfully achieved this result playing audio and video sources with the vp8, vp9, opus and vorbis codecs, both from range requests (if the server has the capability, using any byte range) and from chunked files, the chunks made with shaka packager.
The problem comes when the source is an H264 video. In my case the codecs are avc1.64001e and mp4a.40.2; the full codec string is video/mp4;codecs="avc1.64001e, mp4a.40.2", but the issue still happens with any other avc1 codec.
What I am trying to do is play a 10 megabyte chunk of the full video, the chunk generated by a byte-range curl request, saving the response locally using -o.
Below is the stream info from shaka packager, passing this file as input:
[0530/161459:INFO:demuxer.cc(88)] Demuxer::Run() on file '10mega.mp4'.
[0530/161459:INFO:demuxer.cc(160)] Initialize Demuxer for file '10mega.mp4'.

File "10mega.mp4":
Found 2 stream(s).
Stream [0] type: Video
  codec_string: avc1.64001e
  time_scale: 17595
  duration: 57805440 (3285.3 seconds)
  is_encrypted: false
  codec: H264
  width: 720
  height: 384
  pixel_aspect_ratio: 1:1
  trick_play_factor: 0
  nalu_length_size: 4

Stream [1] type: Audio
  codec_string: mp4a.40.2
  time_scale: 44100
  duration: 144883809 (3285.3 seconds)
  is_encrypted: false
  codec: AAC
  sample_bits: 16
  num_channels: 2
  sampling_frequency: 44100
  language: und

Packaging completed successfully.
The chunk is playable with external media player applications (like VLC) and, more importantly, it plays without problems when added to the webpage using the <source> tag.
This is the error I can see in the Chrome console:
Uncaught (in promise) DOMException: Failed to load because no supported source was found.
Here below are the HTML and JS code if you want to reproduce (I did all local tests using the built-in PHP 7.2 dev server):
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8"/>
  <title>VideoTest</title>
  <link rel="icon" href="/favicon.ico" />
  <script type="text/javascript" src="/script.js"></script>
  <style>
    video {
      width: 98%;
      height: 300px;
      border: 0px solid #000;
      display: flex;
    }
  </style>
</head>
<body>
  <div id="videoContainer">
    <video controls></video>
  </div>
  <video controls>
    <source src="/media/10mega.mp4" type="video/mp4">
  </video>
</body>
</html>
And here below is the JS code (script.js):
class MediaTest {
  constructor() {
  }

  init(link) {
    this.link = link;
    this.media = new MediaSource();
    this.container = document.getElementsByTagName('video')[0];
    this.container.src = window.URL.createObjectURL(this.media);
    return new Promise(resolve => {
      this.media.addEventListener('sourceopen', (e) => {
        this.media = e.target;
        return resolve(this);
      });
    });
  }

  addSourceBuffer() {
    let codec = 'video/mp4;codecs="avc1.64001e, mp4a.40.2"';
    let sourceBuffer = this.media.addSourceBuffer(codec);
    // These are the same headers sent by the <source> tag;
    // with or without them the issue remains
    let headers = new Headers({
      'Range': 'bytes=0-131072',
      'Accept-Encoding': 'identity;q=1, *;q=0'
    });
    let requestData = {
      headers: headers
    };
    let request = new Request(this.link, requestData);
    return new Promise(resolve => {
      fetch(request).then((response) => {
        if (200 !== response.status) {
          throw new Error('addSourceBuffer error with status ' + response.status);
        }
        return response.arrayBuffer();
      }).then((buffer) => {
        sourceBuffer.appendBuffer(buffer);
        console.log('Buffer appended');
        return resolve(this);
      }).catch(function(e) {
        console.log('addSourceBuffer error');
        console.log(e);
      });
    });
  }

  play() {
    this.container.play();
  }
}

window.addEventListener('load', () => {
  let media = new MediaTest();
  media.init('/media/10mega.mp4').then(() => {
    console.log('init ok');
    return media.addSourceBuffer();
  }).then((obj) => {
    console.log('play');
    media.play();
  });
});
What I want to achieve is to play the file with the MediaSource API, since it plays well using the <source> tag.
I don't want to demux and re-encode it, but use it as is.
Here below is the error dump taken from chrome://media-internals:
render_id: 180 player_id: 11 pipeline_state: kStopped event: WEBMEDIAPLAYER_DESTROYED
To reproduce, I think any H264 video that has both an audio and a video track in it can be used.
This question is strictly related to this other question I found, H264 video works using src attribute. Same video fails using the MediaSource API (Chromium), but it is from 4 years ago, so I decided not to answer there.
Does anybody have some idea about this issue? Is there any way to solve it, or is H264 just not compatible with MSE?
Thanks in advance
It's not the codec, it's the container. MSE requires fragmented MP4 files; standard MP4 is not supported. For standard MP4 you must use <video src="my.mp4">.
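To illustrate the container point: MediaSource.isTypeSupported only vouches for the codec string, not for the packaging, so the check can pass while the append still fails.

// This reports support for the codec string only; it says nothing about
// the container, so it can be true while a non-fragmented MP4 still fails.
console.log(MediaSource.isTypeSupported('video/mp4; codecs="avc1.64001e, mp4a.40.2"'));

And a plain MP4 can usually be repackaged into a fragmented one without re-encoding, for example with ffmpeg -i 10mega.mp4 -c copy -movflags frag_keyframe+empty_moov+default_base_moof 10mega_frag.mp4 (a sketch, assuming stream copy is acceptable); the resulting file can then be fetched and appended exactly as in the question's code.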

OpenCV.JS Uncaught ReferenceError: cv is not defined

I am working on a computer vision project using OpenCV.js and Spring Boot, with Thymeleaf as my HTML5 template engine. I have been following the OpenCV.js tutorial here. I am supposed to get two outputs: one that displays the video input, and a canvas that displays the output where the face tracking takes place.
However, while the video displays and works as expected, the canvas output does not show. When I inspect my code in the Chrome browser, I see an Uncaught ReferenceError which says cv is not defined.
Can somebody tell me if there is anything I am doing wrong in my code?
Below is my code
<!DOCTYPE html>
<html xmlns:th="http://www.thymeleaf.org" xmlns:layout="http://www.ultraq.net.nz/web/thymeleaf/layout" layout:decorate="layout">
<head>
  <script async type="text/javascript" th:src="#{/opencv/opencv.js}"></script>
  <title>Self-Service Portal</title>
</head>
<body>
  <h2>Trying OpenCV Javascript Computer Vision</h2>
  <p id="status">Loading with OpenCV.js...</p>
  <video id="video" autoplay="true" play width="300" height="225"></video> <br/>
  <canvas id="canvasOutput" autoplay="true" width="300" height="225"></canvas>
  <!--div>
    <div class="inputoutput">
      <img id="imageSrc" alt="No Image" />
      <div class="caption">ImageScr<input type="file" id="fileInput" name="file" /></div>
    </div>
    <div class="inputoutput">
      <canvas id="canvasOutput" ></canvas>
      <div class="caption">canvasOutput</div>
    </div>
  </div-->
  <script type="text/javascript">
    navigator.mediaDevices.getUserMedia({
      video: true,
      audio: false
    })
    .then(function(stream) {
      video.srcObject = stream;
      video.play();
    })
    .catch(function(err) {
      console.log("An error occured While accessing media! " + err);
    });

    let video = document.getElementById('video');
    let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
    let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
    let gray = new cv.Mat();
    let cap = new cv.VideoCapture(video);
    let faces = new cv.RectVector();
    let classifier = new cv.CascadeClassifier();

    // load pre-trained classifiers
    classifier.load('haarcascade_frontalface_default.xml');

    const FPS = 30;
    function processVideo() {
      try {
        if (!streaming) {
          // clean and stop.
          src.delete();
          dst.delete();
          gray.delete();
          faces.delete();
          classifier.delete();
          return;
        }
        let begin = Date.now();
        // start processing.
        cap.read(src);
        src.copyTo(dst);
        cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
        // detect faces.
        classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
        // draw faces.
        for (let i = 0; i < faces.size(); ++i) {
          let face = faces.get(i);
          let point1 = new cv.Point(face.x, face.y);
          let point2 = new cv.Point(face.x + face.width, face.y + face.height);
          cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
        }
        cv.imshow('canvasOutput', dst);
        // schedule the next one.
        let delay = 1000 / FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
      }
      catch (err) {
        utils.printError(err);
      }
    };

    // schedule the first one.
    setTimeout(processVideo, 0);
  </script>
  <!--script async src="/opencv/opencv.js" onload="onOpenCvReady;" type="text/javascript"></script-->
</body>
</html>
The cv is not defined error can be fixed by taking cv from the window object, like let cv = window.cv.
Turning off async would not be ideal, because the OpenCV.js library is large and it would hurt the time to initial load. Maybe assign a state variable that changes when the library finishes loading, run a check on this variable, and update the UI accordingly.
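A minimal sketch of that state-variable idea (the names markCvLoaded and startWhenReady are illustrative, not part of any API):

var cvLoaded = false;
// attach to the library's script tag, e.g. onload="markCvLoaded()"
function markCvLoaded() {
  cvLoaded = true;
}
function startWhenReady() {
  if (!cvLoaded) {
    setTimeout(startWhenReady, 100); // poll until opencv.js has arrived
    return;
  }
  // safe to touch cv from here on
}
startWhenReady();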
The code you find in the examples on the OpenCV page usually focuses on the functionality of the library, and sometimes it does not include all the functions necessary for it to work. The examples on the OpenCV page usually rely on a utils.js file that contains these additional helper functions, and this is not obvious.
Most likely your problem will be solved by the answer in this forum.
Additionally, I have created a codesandbox that contains all the functions necessary for the video examples to work. Normally they will work for you if you replace the code under the comment
/* Your example code here */
with your facial recognition code.
The async on
<script async type="text/javascript" th:src="#{/opencv/opencv.js}"></script>
...means it won't hold up parsing and processing of the following markup while waiting for the file to load; details. So your inline script later in the file can run before that file is loaded and its JavaScript is run.
Remove async and move that script tag to the bottom of the document, just before your inline script that uses what it defines.
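If you would rather keep async, the commented-out tag at the bottom of your file is close to a working alternative, but onload needs a call expression (onOpenCvReady(), not onOpenCvReady;). A minimal sketch, assuming the WASM build where cv.onRuntimeInitialized fires once the runtime is usable:

<script async src="/opencv/opencv.js" onload="onOpenCvReady()" type="text/javascript"></script>
<script>
  function onOpenCvReady() {
    // the script file has loaded; wait for the runtime before creating Mats etc.
    cv.onRuntimeInitialized = function() {
      document.getElementById('status').textContent = 'OpenCV.js is ready.';
      // move the cv.Mat / cv.VideoCapture setup and the processVideo() call here
    };
  }
</script>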

Count number of faces using javascript

Is there a way that I can count the number of faces in a live camera feed?
For example, say I have three persons in front of a webcam and two pages, Success.html and error.html. The rule for deciding the redirect would be: if exactly one face is detected, go to Success.html; otherwise, go to error.html.
I'm using tracking.js to detect faces in my web page, and currently my code is as below.
<!doctype html>
<html>
<head>
  <meta charset="utf-8">
  <title>tracking.js - face with camera</title>
  <link rel="stylesheet" href="assets/demo.css">
  <script src="js/tracking-min.js"></script>
  <script src="js/data/face-min.js"></script>
  <script src="js/dat.gui.min.js"></script>
  <script src="assets/stats.min.js"></script>
  <style>
    video, canvas {
      margin-left: 230px;
      margin-top: 120px;
      position: absolute;
    }
  </style>
</head>
<body>
  <div class="demo-title">
    <p>
      tracking.js -
      get user's webcam and detect faces
    </p>
  </div>
  <div class="demo-frame">
    <div class="demo-container">
      <video id="video" width="320" height="240" preload autoplay loop muted></video>
      <canvas id="canvas" width="320" height="240"></canvas>
    </div>
  </div>
  <script>
    window.onload = function() {
      var video = document.getElementById('video');
      var canvas = document.getElementById('canvas');
      var context = canvas.getContext('2d');
      var tracker = new tracking.ObjectTracker('face');
      tracker.setInitialScale(4);
      tracker.setStepSize(2);
      tracker.setEdgesDensity(0.1);
      tracking.track('#video', tracker, {
        camera: true
      });
      tracker.on('track', function(event) {
        context.clearRect(0, 0, canvas.width, canvas.height);
        event.data.forEach(function(rect) {
          console.log('face detected');
        });
      });
    }; // note: this closing brace was missing in the original snippet
  </script>
</body>
</html>
Here in my console I'm able to print "face detected".
Please let me know how I can detect multiple faces in this window.
Thanks
From the documentation:
myTracker.on('track', function(event) {
  if (event.data.length === 0) {
    // No targets were detected in this frame.
  } else {
    event.data.forEach(function(data) {
      // Plots the detected targets here.
    });
  }
});
It seems to be event.data.length that gives you the number of tracked elements.
From the docs at https://trackingjs.com/docs.html#trackers
myTracker.on('track', function(event) {
  if (event.data.length === 0) {
    // No targets were detected in this frame.
  } else {
    event.data.forEach(function(data) {
      // Plots the detected targets here.
    });
  }
});
Use event.data.length to count the number of faces. Applied to your piece of code:
tracker.on('track', function(event) {
  context.clearRect(0, 0, canvas.width, canvas.height);
  // console log the number of elements found
  console.log(event.data.length);
  event.data.forEach(function(rect) {
    console.log('face detected');
  });
});
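To tie this back to the Success.html / error.html requirement from the question, a rough sketch; the track event fires for every frame, so in practice you may want several consistent frames before redirecting:

tracker.on('track', function(event) {
  context.clearRect(0, 0, canvas.width, canvas.height);
  var faceCount = event.data.length;
  if (faceCount === 0) {
    return; // nothing detected in this frame; keep waiting
  }
  window.location.href = (faceCount === 1) ? 'Success.html' : 'error.html';
});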

How to make sure the incoming voice from Google TTS has fully loaded?

Testing at low network speed, for proper use of the Google TTS API from JavaScript: the problem is that sometimes it reads and sometimes it doesn't, though it may read a very short text like 'hi'. How do I make sure the requested audio has loaded?
<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <title></title>
  <script>
    function playit() {
      var audio = new Audio();
      audio.src = 'http://translate.google.com/translate_tts?ie=UTF-8&tl=en&q=Okay i can read but not alwayes';
      //function(){testbeforplaying;}
      audio.play();
    }
  </script>
</head>
<body>
  <input type="button" value="say" onclick="playit()"/>
</body>
</html>
Loading audio (or video, or images) is an asynchronous operation, so you need to tap into the browser's callback mechanism to know when the file is ready for use.
Update
Added type to the mix, and demo:
Fiddle
In case of audio (and video) you can listen to the canplay event:
function playit() {
  var audio = new Audio();
  audio.addEventListener('canplay', function() {
    this.play();
  }, false);
  audio.type = 'audio/mpeg';
  audio.src = '...';
}
You could also use canplaythrough instead of canplay.
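On a slow network it can be sensible to combine canplaythrough with an error handler; here is a minimal sketch, where url is whatever TTS endpoint you use:

function playWhenReady(url) {
  var audio = new Audio();
  audio.addEventListener('canplaythrough', function() {
    // enough data is buffered to play to the end without stalling
    this.play();
  }, false);
  audio.addEventListener('error', function() {
    console.error('Audio failed to load', audio.error);
  }, false);
  audio.src = url;
  audio.load();
}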
Optionally do this:
function playit() {
  var audio = new Audio();
  audio.preload = 'auto';
  audio.autoplay = true;
  audio.type = 'audio/mpeg';
  audio.src = '...';
}
There seems to be an issue with Chrome though - it cancels flat out.

Improve performance in converting video blob to gif

I have a program (Here) that converts a video blob into a GIF using getUserMedia. To take advantage of a library (GIFEncoder) that lets me convert canvas frames into frames of a GIF, I first draw the video to the canvas using ctx.drawImage, and then draw those frames into the GIF.
Is there any more efficient way to do this?
Code:
<!Doctype html>
<html>
<head>
  <style type="text/css">
    body {
    }
  </style>
  <meta http-equiv="content-type" content="text/html; charset=UTF-8">
  <script type="text/javascript" src="LZWEncoder.js"></script>
  <script type="text/javascript" src="NeuQuant.js"></script>
  <script type="text/javascript" src="GIFEncoder.js"></script>
  <script type="text/javascript" src="b64.js"></script>
</head>
<body>
  <title>RecorderGif</title>
  <header>
    <h1>RecordGif</h1>
  </header>
  <article>
    <video id="video" width="320" height="200" style="display:none" autoplay=""></video>
    <section>
      <button id="btnStart">Start video</button>
      <button id="btnStop">Stop video</button>
      <button id="btnSave">Download</button>
    </section>
    <canvas id="canvas" width="320" height="240"></canvas>
  </article>
  <script type="text/javascript">//<![CDATA[
    var encoder = new GIFEncoder();
    encoder.setRepeat(0);
    encoder.setDelay(100);
    encoder.start();
    window.onload = function() {
      // Compatibility
      navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
      var data = [];
      var canvas = document.getElementById("canvas"),
          context = canvas.getContext("2d"),
          video = document.getElementById("video"),
          btnStart = document.getElementById("btnStart"),
          btnStop = document.getElementById("btnStop"),
          btnSave = document.getElementById("btnSave"), // a comma was missing here in the original, making videoObj an implicit global
          videoObj = {
            video: true
          };
      btnStart.addEventListener("click", function() {
        var localMediaStream;
        var addFrame; // hoisted so the stop handler below can clear the interval
        if (navigator.getUserMedia) {
          navigator.getUserMedia(videoObj, function(stream) {
            video.src = (navigator.webkitGetUserMedia) ? window.webkitURL.createObjectURL(stream) : stream;
            localMediaStream = stream;
            addFrame = setInterval(function() {
              data.push(canvas.toDataURL('image/png'));
            }, 100);
          }, function(error) {
            console.error("Video capture error: ", error.code);
          });
          btnStop.addEventListener("click", function() {
            clearInterval(addFrame);
            localMediaStream.stop();
          });
          btnSave.addEventListener("click", function() {
            for (var i = 0; i < data.length; i++) {
              var frame = new Image();
              frame.src = data[i];
              context.drawImage(frame, 0, 0);
              encoder.addFrame(context);
            }
            encoder.finish();
            var binary_gif = encoder.stream().getData(); // notice this is different from the as3gif package!
            var data_url = 'data:image/gif;base64,' + encode64(binary_gif);
            var gif = window.open('about:blank', '', 'width=320,height=240');
            gif.document.location.href = data_url;
          });
          setInterval(function() { context.drawImage(video, 0, 0, 320, 240); }, 100);
        }
      });
    };
  //]]>
  </script>
</body>
</html>
As there is no native support for GIF encoding in browsers, you very much depend on JavaScript solutions such as the one you're using.
The only way to improve the encoding is to go through the source code and see if you can optimize parts of it, such as utilizing typed arrays (if it doesn't already) and conforming to engine specifics, knowing what works well with the engine compilers (i.e. things like using simulated types, full constructors, non-morphing objects etc.).
Another tip is to encode the GIF while the video is not playing (stepping it forward "manually" for each frame using currentTime and the seeked event) and with an off-screen video element. This may leave some more resources for the browser and the engine to run the script faster (depending partly on the system and its support for hardware video decoding - there will still be an overhead updating the video on-screen, which may or may not affect the encoding process).
My 2.5 cents..
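A rough sketch of that manual-stepping idea, assuming a seekable recorded video (it will not work on a live getUserMedia stream) and the GIFEncoder instance from the question:

function encodeBySeeking(video, context, encoder, step, done) {
  video.pause();
  function onSeeked() {
    // grab exactly one frame per seek instead of racing a timer
    context.drawImage(video, 0, 0, 320, 240);
    encoder.addFrame(context);
    if (video.currentTime + step < video.duration) {
      video.currentTime += step; // triggers the next 'seeked' event
    } else {
      video.removeEventListener('seeked', onSeeked);
      encoder.finish();
      done();
    }
  }
  video.addEventListener('seeked', onSeeked);
  video.currentTime = 0.01; // kick off the first seek
}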
