Saving a captured image to the server (JavaScript web application)

I am taking a snapshot of a stream and displaying it on a canvas.
How can I save the captured snapshot to a file on my server?
PS: The code below is part of a JSP page and the project is hosted on a Tomcat server. I don't want to save the image on the user's system, just on the server hosting the project.
<canvas id="snapshot" width=130 height=130 align="right"></canvas>
<video id="player" width="220" height="140" align="right" controls autoplay></video>
<button id="capture">Capture</button>
<script>
var player = document.getElementById('player');
var snapshotCanvas = document.getElementById('snapshot');
var captureButton = document.getElementById('capture');
var videoTracks;

var handleSuccess = function(stream) {
  // Attach the video stream to the video element and autoplay.
  player.srcObject = stream;
  videoTracks = stream.getVideoTracks();
};

captureButton.addEventListener('click', function() {
  var context = snapshotCanvas.getContext('2d');
  // Crop a 320x240 region starting at (150, 150) out of the video frame
  // and scale it into the snapshot canvas.
  context.drawImage(player, 150, 150, 320, 240, 0, 0, snapshotCanvas.width, snapshotCanvas.height);
  var imgdata = snapshotCanvas.toDataURL('image/png');
  var newdata = imgdata.replace(/^data:image\/png/, 'data:application/octet-stream');
  // Stop all video tracks once the snapshot has been taken.
  // (Stopping them at script load time, before getUserMedia resolves, would throw.)
  videoTracks.forEach(function(track) { track.stop(); });
});

navigator.mediaDevices.getUserMedia({video: true}).then(handleSuccess);
</script>
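One way to get the snapshot onto the server (a minimal sketch, not from the original post) is to POST the Base64 payload of the data URL to a server-side endpoint and decode it there. The /upload path below is a hypothetical servlet mapping; on Tomcat it would be backed by a servlet that Base64-decodes the image parameter and writes the bytes to a file.

// Minimal sketch: send the snapshot to the server as a Base64-encoded PNG.
// '/upload' is a hypothetical servlet mapping; adjust it to your project.
function uploadSnapshot(canvas) {
  var dataUrl = canvas.toDataURL('image/png');
  // Strip the "data:image/png;base64," prefix so the server receives
  // only the raw Base64 payload.
  var base64 = dataUrl.replace(/^data:image\/png;base64,/, '');
  return fetch('/upload', {
    method: 'POST',
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
    body: 'image=' + encodeURIComponent(base64)
  });
}

// Usage, e.g. at the end of the capture click handler:
//   uploadSnapshot(snapshotCanvas);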

Related

ffmpeg wasm - how to take client-side created mp4 and upload it to the same server hosting the index/js files being used

OK, so I'm an IT guy and kind of a noob on the dev side of the fence. But I've been able to create this ffmpeg wasm page that takes a canvas and converts it to .webm and .mp4 -- what I WANT to do is take the resulting .mp4 file and upload it to the server where the page/JS files are being served from. Is this possible? I will include my source code, which is fairly simple and straightforward; I just don't know how to manipulate the resulting mp4 file that ffmpeg spits out (I realize it is happening client side) to be able to push it up to the server (maybe with an upload.php type situation?). The solution can be HTML/JavaScript/PHP, whatever, so long as it takes the mp4 output and gets it onto the server. I'd VERY MUCH appreciate a hand here.
Going to try my best to properly insert the HTML and JS. Please bear with me if I've done something wrong; I've never had to ask a question on here, usually just look up existing answers.
const { createFFmpeg } = FFmpeg;
const ffmpeg = createFFmpeg({
  log: true
});

const transcode = async (webcamData) => {
  const message = document.getElementById('message');
  const name = 'record.webm';
  await ffmpeg.load();
  message.innerHTML = 'Start transcoding';
  await ffmpeg.write(name, webcamData);
  await ffmpeg.transcode(name, 'output.mp4');
  message.innerHTML = 'Complete transcoding';
  const data = ffmpeg.read('output.mp4');
  const video = document.getElementById('output-video');
  video.src = URL.createObjectURL(new Blob([data.buffer], { type: 'video/mp4' }));
  dl.href = video.src;
  dl.innerHTML = "download mp4";
}
fn().then(async ({url, blob}) => {
  transcode(new Uint8Array(await blob.arrayBuffer()));
});

function fn() {
  var recordedChunks = [];
  var time = 0;
  var canvas = document.getElementById("canvas");

  return new Promise(function (res, rej) {
    var stream = canvas.captureStream(60);
    var mediaRecorder = new MediaRecorder(stream, {
      mimeType: "video/webm; codecs=vp9"
    });
    mediaRecorder.start(time);
    mediaRecorder.ondataavailable = function (e) {
      recordedChunks.push(e.data); // was `event.data`; use the handler's own argument
      // for demo, removed stop() call to capture more than one frame
    };
    mediaRecorder.onstop = function (event) {
      var blob = new Blob(recordedChunks, {
        "type": "video/webm"
      });
      var url = URL.createObjectURL(blob);
      res({url, blob}); // resolve both blob and url in an object
      myVideo.src = url;
      // removed data url conversion for brevity
    };
    // for demo, draw random lines and then stop recording
    var i = 0,
      tid = setInterval(() => {
        if (i++ > 20) { // draw 20 lines
          clearInterval(tid);
          mediaRecorder.stop();
        }
        let canvas = document.querySelector("canvas");
        let cx = canvas.getContext("2d");
        cx.beginPath();
        cx.strokeStyle = 'green';
        cx.moveTo(Math.random() * 100, Math.random() * 100);
        cx.lineTo(Math.random() * 100, Math.random() * 100);
        cx.stroke();
      }, 200);
  });
}
<html>
<head>
<script src="https://unpkg.com/@ffmpeg/ffmpeg@0.8.1/dist/ffmpeg.min.js" defer></script>
<script src="canvas2mp4.js" defer></script>
</head>
<body>
here is a canvas<br>
<canvas id="canvas" style="height:100px;width:100px"></canvas><br>
here is a recorded video of the canvas in webM format<br>
<video id="myVideo" controls="controls"></video><br>
here is a transcoded mp4 from the webm above CLIENT SIDE using ffmpeg<br>
<video id="output-video" controls="controls"></video><br>
<a id="dl" href="" download="download.mp4"></a>
<div id="message"></div><br><br>
</body>
</html>
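To answer the actual upload question, here is a minimal sketch (not from the original post) of pushing the transcoded mp4 to the hosting server with a multipart POST. 'upload.php' is a hypothetical endpoint name; it would read $_FILES['video'] and move the file somewhere on disk, so adjust the name and path to your setup.

// Minimal sketch: upload the transcoded mp4 to the server hosting the page.
// 'upload.php' is a hypothetical endpoint (an assumption, not from the post).
async function uploadMp4(mp4Blob) {
  const form = new FormData();
  form.append('video', mp4Blob, 'output.mp4');
  const response = await fetch('upload.php', { method: 'POST', body: form });
  if (!response.ok) {
    throw new Error('Upload failed: ' + response.status);
  }
}

// Usage, inside transcode() after ffmpeg.read('output.mp4'):
//   await uploadMp4(new Blob([data.buffer], { type: 'video/mp4' }));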

Video capture of canvas element by MediaStream Recording API is not working

I am trying to record and download a video of a canvas element using the official MediaStream Recording API.
<!DOCTYPE html>
<html>
<body>
<h1>Lets test mediaRecorder</h1>
<canvas id="myCanvas" width="200" height="100" style="border:1px solid #d3d3d3;">
Your browser does not support the HTML canvas tag.
</canvas>
<script>
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");
ctx.font = "30px Arial";
ctx.fillText("Hello World", 10, 50);

const stream = c.captureStream(25);
var recordedChunks = [];
console.log(stream);

var options = { mimeType: "video/webm; codecs=vp9" };
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start();

function handleDataAvailable(event) {
  console.log("data-available");
  if (event.data.size > 0) {
    recordedChunks.push(event.data);
    console.log(recordedChunks);
    download();
  } else {
    // ...
  }
}

function download() {
  var blob = new Blob(recordedChunks, {
    type: "video/webm"
  });
  var url = URL.createObjectURL(blob);
  var a = document.createElement("a");
  document.body.appendChild(a);
  a.style = "display: none";
  a.href = url;
  a.download = "test.webm";
  a.click();
  window.URL.revokeObjectURL(url);
}

// demo: to download after 10 sec
setTimeout(event => {
  console.log("stopping");
  mediaRecorder.stop();
}, 10000);
</script>
</body>
</html>
The code is working and I am able to download test.webm, but I guess it does not have any data, as I am not seeing any content when playing the file in VLC Media Player.
What am I missing to make it work?
You are facing a few bugs here.
The first one is a bit excusable: Chrome doesn't generate seekable webm files. This is because of how media files are built and how the MediaRecorder API works: to add the seeking information, the browser would have to keep hold of the chunk containing the metadata and update it once the recording is done.
I'm not too sure what Firefox does differently here, but VLC prefers its files.
Another Chrome bug, a lot less excusable, is that it doesn't pass a new frame to the MediaRecorder until we draw on the source canvas again.
So since in your case you are not drawing anything after you start the MediaRecorder, you'll get nothing in the output...
To work around that, simply drawing a frame right before stopping the recorder should have been enough, except that there is nothing letting us know exactly when the browser will push that frame to the recorder...
So the only working workaround here is to draw on the canvas continuously while we record it. The good thing is that it doesn't need to paint anything new: we can trick the browser into thinking something new was painted by drawing a transparent rectangle.
A final note: while Chrome does support exporting the canvas with transparency, not all browsers can, and even where it is supported most players use a black background by default. So be sure to draw yourself a background in another color when you record.
All that said, here is a fixed demo:
var c = document.getElementById("myCanvas");
var ctx = c.getContext("2d");

// draw a white background
ctx.fillStyle = "white";
ctx.fillRect(0, 0, c.width, c.height);
ctx.fillStyle = "black";
ctx.font = "30px Arial";
ctx.fillText("Hello World", 10, 50);

const stream = c.captureStream(25);
var recordedChunks = [];
var options = {};
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start();

// Chrome requires we draw on the canvas while recording
mediaRecorder.onstart = animationLoop;

function animationLoop() {
  // draw nothing, but still draw
  ctx.globalAlpha = 0;
  ctx.fillRect(0, 0, 1, 1);
  // while we're recording
  if (mediaRecorder.state !== "inactive") {
    requestAnimationFrame(animationLoop);
  }
}

// wait for the stop event to export the final video
// the dataavailable can fire before
mediaRecorder.onstop = (evt) => download();

function handleDataAvailable(event) {
  recordedChunks.push(event.data);
}

function download() {
  var blob = new Blob(recordedChunks, {
    type: "video/webm"
  });
  var url = URL.createObjectURL(blob);
  // exporting to a video element for that demo
  // the downloaded video will still not work in some programs
  // For this one would need to fix the markers using something like ffmpeg.
  var video = document.getElementById('video');
  video.src = url;
  // hack to make the video seekable in the browser
  // see https://stackoverflow.com/questions/38443084/
  video.onloadedmetadata = (evt) => {
    video.currentTime = 10e6;
    video.addEventListener("seeked", () => video.currentTime = 0, {
      once: true
    });
  };
}

setTimeout(() => {
  console.clear();
  mediaRecorder.stop();
}, 10000);

console.log("please wait while recording (10s)");
<h1>Lets test mediaRecorder</h1>
<canvas id="myCanvas" width="200" height="100" style="border:1px solid #d3d3d3;">
Your browser does not support the HTML canvas tag.
</canvas>
<video controls id="video"></video>
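The answer mentions fixing the markers with something like ffmpeg; as a rough sketch, the recorded webm blob could be fed through ffmpeg.wasm in the browser, reusing the 0.8.x API shown in the earlier question. This assumes that same build is loaded; transcoding to mp4 also produces proper duration metadata, so the result is seekable.

// Rough sketch: make the recording seekable by transcoding it client-side
// with ffmpeg.wasm (assumes the 0.8.x build from the earlier snippet is loaded).
const { createFFmpeg } = FFmpeg;
const ffmpeg = createFFmpeg({ log: true });

async function fixRecording(webmBlob) {
  const input = new Uint8Array(await webmBlob.arrayBuffer());
  await ffmpeg.load();
  await ffmpeg.write('recording.webm', input);
  await ffmpeg.transcode('recording.webm', 'fixed.mp4');
  const output = ffmpeg.read('fixed.mp4');
  // The mp4 carries proper duration metadata, so players can seek in it.
  return new Blob([output.buffer], { type: 'video/mp4' });
}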

HTML5 Canvas captureStream - Problem with hi-res images [duplicate]

I've looked at a sample for streaming from a canvas to a video element, so I can see that the principle works, but I can't get it to play/display a static image in the video.
Here is my code so far, with an image borrowed from Stack Overflow. How can I change my code to display the canvas as a video?
const canvas = document.getElementById('viewport');
const context = canvas.getContext('2d');
const video = document.getElementById('videoPlayBack');

make_base();

function make_base() {
  base_image = new Image();
  base_image.onload = function () {
    context.drawImage(base_image, 0, 0);
  }
  base_image.src = "https://cdn.sstatic.net/Img/ico-binoculars.svg?v=d4dbaac4eec9";
}

const stream = canvas.captureStream(25);
video.srcObject = stream;
<canvas id="viewport"></canvas>
<video id="videoPlayBack" playsinline autoplay muted></video>
You are drawing a cross-origin image, this will thus taint the canvas and mute the CanvasCaptureMediaStreamTrack, resulting in no data being passed.
Keep your canvas untainted if you wish to stream it:
const canvas = document.getElementById('viewport');
const context = canvas.getContext('2d');
const video = document.getElementById('videoPlayBack');

make_base();

function make_base() {
  base_image = new Image();
  base_image.onload = function () {
    context.drawImage(base_image, 0, 0, 400, 300);
  }
  // Request CORS approval so the canvas stays origin-clean.
  base_image.crossOrigin = "anonymous";
  base_image.src = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png";
}

const stream = canvas.captureStream(25);
video.srcObject = stream;
<canvas id="viewport" width="400" height="300"></canvas>
<video id="videoPlayBack" controls playsinline autoplay muted ></video>
You don't get to save from a canvas if you draw "not your images" on it, because of the canvas security model: the moment you draw any image to it that isn't same-origin and doesn't explicitly have a CORS header that okays your use, you no longer have access to the pixels (not through ImageData, not through toDataURL, etc.).
To prevent the canvas from flagging itself as "tainted", see https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image for what your options might be.
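Since a tainted canvas throws a SecurityError as soon as you try to read it back, a quick check makes the failure mode obvious (a small sketch, not from the original answer):

// Quick check: toDataURL() throws a SecurityError once the canvas is tainted.
function isCanvasTainted(canvas) {
  try {
    canvas.toDataURL();
    return false;
  } catch (err) {
    return true; // SecurityError: cross-origin data was drawn without CORS approval
  }
}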

set canvas data to an input

I want to take a picture from the user and send it to the server without AJAX.
I searched and found this code, which takes the photo from a video and draws it on a canvas. Now I want to set that image on a form input, but I don't know how.
This is the HTML code:
<video id="player" controls autoplay></video>
<button id="capture" onclick="$('#loader').hide()">Capture</button>
<canvas id="canvas" width=320 height=240></canvas>
<input id="image-input">
and this is the JavaScript code:
const player = document.getElementById('player');
const canvas = document.getElementById('canvas');
const context = canvas.getContext('2d');
const captureButton = document.getElementById('capture');
const constraints = {
  video: true,
};

captureButton.addEventListener('click', () => {
  // Draw the video frame to the canvas.
  context.drawImage(player, 0, 0, canvas.width, canvas.height);
  player.srcObject.getVideoTracks().forEach(track => track.stop());
  var Pic = document.getElementById("canvas").toDataURL("image/png");
  Pic = Pic.replace(/^data:image\/(png|jpg);base64,/, "");
});

// Attach the video stream to the video element and autoplay.
navigator.mediaDevices.getUserMedia(constraints)
  .then((stream) => {
    player.srcObject = stream;
  });
Now I want to set this image as the value of the input.
How can I do that?
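A minimal sketch of one way to do this (an assumption, not from the original post): convert the canvas to a Blob, wrap it in a File, and assign it to a file input through a DataTransfer object, so a plain (non-AJAX) form submit uploads it as multipart form data. This assumes the input is changed to <input type="file" name="image" id="image-input">.

// Minimal sketch: put the canvas snapshot into a file input so a normal
// form submit uploads it. Assumes the input above is changed to
// <input type="file" name="image" id="image-input">.
canvas.toBlob(function (blob) {
  var file = new File([blob], 'capture.png', { type: 'image/png' });
  var dataTransfer = new DataTransfer();
  dataTransfer.items.add(file);
  document.getElementById('image-input').files = dataTransfer.files;
}, 'image/png');

Alternatively, the Base64 string already computed in Pic could be placed in a hidden input's value and decoded on the server.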

How to enable front camera in webview for android

How do you enable the front camera in a WebView? I have enabled the features in AndroidManifest.xml:
<uses-feature android:name="android.hardware.camera" android:required="true" />
<uses-feature android:name="android.hardware.camera.front" android:required="true" />
The camera is not going to be used for taking photos or recording; I just want to switch on the front camera.
When I go to the website using the phone's browser, the camera works once I allow the prompt. How can this work with a WebView?
The HTML file has canvas and video tags that display the webcam; it doesn't record or take pictures, it just shows you the camera view.
Here is the HTML code:
<canvas id="inCanvas" width="500" height="500" style="display:none"></canvas>
<video id="inputVideo" width="100" height="100" autoplay loop ></video>
It works with the webcam in a regular browser, but not in an Android WebView.
I didn't quite understand, but I think what you want is one of the following two things.
1) Access the camera and just show the video on the screen (not capturing an image):
HTML:
<canvas id='canvas' width='100' height='100'></canvas>
js:
var onFailSoHard = function(e) {
  console.log('failed', e);
};

window.URL = window.URL || window.webkitURL;
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;

// Note: this assumes a <video> element exists in the page alongside the canvas.
var video = document.querySelector('video');
if (navigator.getUserMedia) {
  navigator.getUserMedia({video: true}, function(stream) {
    video.src = window.URL.createObjectURL(stream);
  }, onFailSoHard);
}

// Keep copying the video frames onto the canvas.
var canvas = document.getElementById('canvas');
var ctx = canvas.getContext('2d');
setInterval(function() {
  ctx.drawImage(video, 0, 0);
}, 100);
2) Capture an image from the camera (note that navigator.camera.getPicture below is the Cordova camera plugin API, not a standard browser API):
Here is the doc for that.
navigator.camera.getPicture(onSuccess, onFail, {
  quality: 50,
  destinationType: Camera.DestinationType.DATA_URL
});

function onSuccess(imageData) {
  var image = document.getElementById('myImage');
  image.src = "data:image/jpeg;base64," + imageData;
}

function onFail(message) {
  alert('Failed because: ' + message);
}
I would use something similar to the below as a script to access the phone camera.
<script>
  var errorCallback = function(e) {
    console.log('Rejected!', e);
  };

  // Not showing vendor prefixes.
  navigator.getUserMedia({video: true, audio: true}, function(localMediaStream) {
    var video = document.querySelector('video');
    video.src = window.URL.createObjectURL(localMediaStream);

    // Note: onloadedmetadata doesn't fire in Chrome when using it with getUserMedia.
    // See crbug.com/110938.
    video.onloadedmetadata = function(e) {
      // Ready to go. Do some stuff.
    };
  }, errorCallback);
</script>
I used the following tutorial to help me.
Hope it sets you on the right track :)
