Video to canvas and sending the canvas to Socketio - javascript

I am trying to send the canvas to client, But no luck because not Sure if I am doing the right way... But I when I tried it using the camera on my laptop, the camera work and I can send the local video without a canvas.
What I am trying to do is capture the media video from the html and draw it as a canvas and send the video that capture video canvas to client:
Here is the code that ive tried:
var inputCtx = $( '.input-canvas canvas' )[ 0 ].getContext( '2d' );
var outputCtx = $( '.output-canvas canvas' )[ 0 ].getContext( '2d' );
const video = document.querySelector("video");
video.addEventListener('canplay', () => {
video.srcObject = stream;
drawToCanvas();
socket.emit("broadcaster");
});
function drawToCanvas() {
inputCtx.drawImage( localVideo, 0, 0, width, height );
var pixelData = inputCtx.getImageData( 0, 0, width, height );
outputCtx.putImageData( pixelData, 0, 0 );
requestAnimationFrame( drawToCanvas );
}
socket.on("watcher", id => {
const peerConnection = new RTCPeerConnection(config);
peerConnections[id] = peerConnection;
let stream = outputCtx.streamCapture;
stream.getTracks().forEach(track => peerConnection.addTrack(track, stream));
peerConnection.onicecandidate = event => {
if (event.candidate) {
socket.emit("candidate", id, event.candidate);
}
};
peerConnection
.createOffer()
.then(sdp => peerConnection.setLocalDescription(sdp))
.then(() => {
socket.emit("offer", id, peerConnection.localDescription);
});
});
reciever:
socket.on("broadcaster", () => {
socket.emit("watcher");
});

Related

Three,js- OBJLoader not importing model due to a material error

I am attempting to use threejs to load a model (OBJ file) into the browser, however, there seems to be some issue with the materials. Is there any way I can just set a default material like black so it loads the model? I have tried to set a texture, however, this error originates from inside the OBJLoader library. How can I fix this?
The error is :
An error happened TypeError: Cannot read properties of undefined (reading 'scene')
at Scene.THREE.Mesh.THREE.MeshStandardMaterial.color ((index):168:10)
at Object.onLoad (OBJLoader.js:457:5)
at three.module.js:39650:38
My code is:
<!DOCTYPE html>
<html>
<head>
<title>
three.js WebRTC template
</title>
<meta name='viewport' content='initial-scale=1,maximum-scale=1,user-scalable=no' />
<script src="js/libs/socket.io.min.js"></script>
<script src="js/libs/three.min.js"></script>
<script src="resources/threejs/r105/js/controls/OrbitControls.js"></script>
<script src="js/libs/firstPersonControls.js"></script>
<script src="js/libs/simplepeer.min.js"></script>
<script src="js/environment.js"></script>
<script type="importmap">
{
"imports": {
"three": "https://unpkg.com/three#0.138.0/build/three.module.js",
"OrbitControls": "https://unpkg.com/three#0.138.0/examples/jsm/controls/OrbitControls.js"
}
}
</script>
<script type="module">
import { Group , ObjectLoader } from 'three';
//import MODEL from './park.obj';
import * as THREE from 'https://unpkg.com/three#0.138.0/build/three.module.js';
import {OBJLoader} from 'https://unpkg.com/three#0.138.0/examples/jsm/loaders/OBJLoader.js'
//import obj from "./park.obj"
// import { loadScene } from '../../Loaders/loader';
// instantiate a loader
class Scene {
constructor() {
//THREE scene
this.scene = new THREE.Scene();
const loader = new OBJLoader();
// load a resource
loader.load(
// resource URL
'./park.obj',
// called when resource is loaded
function ( object ) {
console.log(object)
object.traverse( function ( child ) {
if ( child instanceof THREE.Mesh ) {
const texture = new THREE.TextureLoader().load( "assets/texture.png" );
child.material.map = texture;
}
} );
this.scene.add( object );
},
// called when loading is in progresses
function ( xhr ) {
console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
},
// called when loading has errors
function ( error ) {
console.log( 'An error happened', error );
}
);
// Floor
const floor = new THREE.Mesh(
new THREE.PlaneBufferGeometry(20, 20),
new THREE.MeshStandardMaterial({ color: "#a9c388" })
);
floor.position.y = 0;
floor.rotation.x = -Math.PI * 0.5;
this.scene.add(floor);
//Utility
this.width = window.innerWidth;
this.height = window.innerHeight * 0.9;
// lerp value to be used when interpolating positions and rotations
this.lerpValue = 0;
//THREE Camera
this.camera = new THREE.PerspectiveCamera(
50,
this.width / this.height,
0.1,
5000
);
this.camera.position.set(0, 3, 6);
this.scene.add(this.camera);
// create an AudioListener and add it to the camera
this.listener = new THREE.AudioListener();
this.camera.add(this.listener);
//THREE WebGL renderer
this.renderer = new THREE.WebGLRenderer({
antialiasing: true,
});
this.renderer.setClearColor(new THREE.Color("lightblue"));
this.renderer.setSize(this.width, this.height);
// add controls:
this.controls = new FirstPersonControls(this.scene, this.camera, this.renderer);
//Push the canvas to the DOM
let domElement = document.getElementById("canvas-container");
domElement.append(this.renderer.domElement);
//Setup event listeners for events and handle the states
window.addEventListener("resize", (e) => this.onWindowResize(e), false);
// Helpers
this.scene.add(new THREE.GridHelper(500, 500));
this.scene.add(new THREE.AxesHelper(10));
//createEnvironment(this.scene);
// Start the loop
this.frameCount = 0;
this.update();
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Lighting 💡
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Clients 👫
// add a client meshes, a video element and canvas for three.js video texture
addClient(id) {
let videoMaterial = makeVideoMaterial(id);
let otherMat = new THREE.MeshNormalMaterial();
let head = new THREE.Mesh(new THREE.BoxGeometry(1, 1, 1), [otherMat,otherMat,otherMat,otherMat,otherMat,videoMaterial]);
// set position of head before adding to parent object
head.position.set(0, 0, 0);
// https://threejs.org/docs/index.html#api/en/objects/Group
var group = new THREE.Group();
group.add(head);
// add group to scene
this.scene.add(group);
peers[id].group = group;
peers[id].previousPosition = new THREE.Vector3();
peers[id].previousRotation = new THREE.Quaternion();
peers[id].desiredPosition = new THREE.Vector3();
peers[id].desiredRotation = new THREE.Quaternion();
}
removeClient(id) {
this.scene.remove(peers[id].group);
}
// overloaded function can deal with new info or not
updateClientPositions(clientProperties) {
this.lerpValue = 0;
for (let id in clientProperties) {
if (id != mySocket.id) {
peers[id].previousPosition.copy(peers[id].group.position);
peers[id].previousRotation.copy(peers[id].group.quaternion);
peers[id].desiredPosition = new THREE.Vector3().fromArray(
clientProperties[id].position
);
peers[id].desiredRotation = new THREE.Quaternion().fromArray(
clientProperties[id].rotation
);
}
}
}
interpolatePositions() {
this.lerpValue += 0.1; // updates are sent roughly every 1/5 second == 10 frames
for (let id in peers) {
if (peers[id].group) {
peers[id].group.position.lerpVectors(peers[id].previousPosition,peers[id].desiredPosition, this.lerpValue);
peers[id].group.quaternion.slerpQuaternions(peers[id].previousRotation,peers[id].desiredRotation, this.lerpValue);
}
}
}
updateClientVolumes() {
for (let id in peers) {
let audioEl = document.getElementById(id + "_audio");
if (audioEl && peers[id].group) {
let distSquared = this.camera.position.distanceToSquared(
peers[id].group.position
);
if (distSquared > 500) {
audioEl.volume = 0;
} else {
// from lucasio here: https://discourse.threejs.org/t/positionalaudio-setmediastreamsource-with-webrtc-question-not-hearing-any-sound/14301/29
let volume = Math.min(1, 10 / distSquared);
audioEl.volume = volume;
}
}
}
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Interaction 🤾‍♀️
getPlayerPosition() {
// TODO: use quaternion or are euler angles fine here?
return [
[
this.camera.position.x,
this.camera.position.y,
this.camera.position.z,
],
[
this.camera.quaternion._x,
this.camera.quaternion._y,
this.camera.quaternion._z,
this.camera.quaternion._w,
],
];
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Rendering 🎥
update() {
requestAnimationFrame(() => this.update());
this.frameCount++;
//updateEnvironment();
if (this.frameCount % 25 === 0) {
this.updateClientVolumes();
}
this.interpolatePositions();
this.controls.update();
this.render();
}
render() {
this.renderer.render(this.scene, this.camera);
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Event Handlers 🍽
onWindowResize(e) {
this.width = window.innerWidth;
this.height = Math.floor(window.innerHeight * 0.9);
this.camera.aspect = this.width / this.height;
this.camera.updateProjectionMatrix();
this.renderer.setSize(this.width, this.height);
}
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Utilities
function makeVideoMaterial(id) {
let videoElement = document.getElementById(id + "_video");
let videoTexture = new THREE.VideoTexture(videoElement);
let videoMaterial = new THREE.MeshBasicMaterial({
map: videoTexture,
overdraw: true,
side: THREE.DoubleSide,
});
return videoMaterial;
}
/*
*
* This uses code from a THREE.js Multiplayer boilerplate made by Or Fleisher:
* https://github.com/juniorxsound/THREE.Multiplayer
* And a WEBRTC chat app made by Mikołaj Wargowski:
* https://github.com/Miczeq22/simple-chat-app
*
* Aidan Nelson, April 2020
*
*/
// socket.io
let mySocket;
// array of connected clients
let peers = {};
// Variable to store our three.js scene:
let myScene;
// set video width / height / framerate here:
const videoWidth = 80;
const videoHeight = 60;
const videoFrameRate = 15;
// Our local media stream (i.e. webcam and microphone stream)
let localMediaStream = null;
// Constraints for our local audio/video stream
let mediaConstraints = {
audio: true,
video: {
width: videoWidth,
height: videoHeight,
frameRate: videoFrameRate,
},
};
////////////////////////////////////////////////////////////////////////////////
// Start-Up Sequence:
////////////////////////////////////////////////////////////////////////////////
window.onload = async () => {
console.log("Window loaded.");
// first get user media
localMediaStream = await getMedia(mediaConstraints);
createLocalVideoElement();
// then initialize socket connection
initSocketConnection();
// finally create the threejs scene
console.log("Creating three.js scene...");
myScene = new Scene();
// start sending position data to the server
setInterval(function () {
mySocket.emit("move", myScene.getPlayerPosition());
}, 200);
};
////////////////////////////////////////////////////////////////////////////////
// Local media stream setup
////////////////////////////////////////////////////////////////////////////////
// https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
async function getMedia(_mediaConstraints) {
let stream = null;
try {
stream = await navigator.mediaDevices.getUserMedia(_mediaConstraints);
} catch (err) {
console.log("Failed to get user media!");
console.warn(err);
}
return stream;
}
////////////////////////////////////////////////////////////////////////////////
// Socket.io
////////////////////////////////////////////////////////////////////////////////
// establishes socket connection
function initSocketConnection() {
console.log("Initializing socket.io...");
mySocket = io();
mySocket.on("connect", () => {
console.log("My socket ID:", mySocket.id);
});
//On connection server sends the client his ID and a list of all keys
mySocket.on("introduction", (otherClientIds) => {
// for each existing user, add them as a client and add tracks to their peer connection
for (let i = 0; i < otherClientIds.length; i++) {
if (otherClientIds[i] != mySocket.id) {
let theirId = otherClientIds[i];
console.log("Adding client with id " + theirId);
peers[theirId] = {};
let pc = createPeerConnection(theirId, true);
peers[theirId].peerConnection = pc;
createClientMediaElements(theirId);
myScene.addClient(theirId);
}
}
});
// when a new user has entered the server
mySocket.on("newUserConnected", (theirId) => {
if (theirId != mySocket.id && !(theirId in peers)) {
console.log("A new user connected with the ID: " + theirId);
console.log("Adding client with id " + theirId);
peers[theirId] = {};
createClientMediaElements(theirId);
myScene.addClient(theirId);
}
});
mySocket.on("userDisconnected", (clientCount, _id, _ids) => {
// Update the data from the server
if (_id != mySocket.id) {
console.log("A user disconnected with the id: " + _id);
myScene.removeClient(_id);
removeClientVideoElementAndCanvas(_id);
delete peers[_id];
}
});
mySocket.on("signal", (to, from, data) => {
// console.log("Got a signal from the server: ", to, from, data);
// to should be us
if (to != mySocket.id) {
console.log("Socket IDs don't match");
}
// Look for the right simplepeer in our array
let peer = peers[from];
if (peer.peerConnection) {
peer.peerConnection.signal(data);
} else {
console.log("Never found right simplepeer object");
// Let's create it then, we won't be the "initiator"
// let theirSocketId = from;
let peerConnection = createPeerConnection(from, false);
peers[from].peerConnection = peerConnection;
// Tell the new simplepeer that signal
peerConnection.signal(data);
}
});
// Update when one of the users moves in space
mySocket.on("positions", (_clientProps) => {
myScene.updateClientPositions(_clientProps);
});
}
////////////////////////////////////////////////////////////////////////////////
// Clients / WebRTC
////////////////////////////////////////////////////////////////////////////////
// this function sets up a peer connection and corresponding DOM elements for a specific client
function createPeerConnection(theirSocketId, isInitiator = false) {
console.log('Connecting to peer with ID', theirSocketId);
console.log('initiating?', isInitiator);
let peerConnection = new SimplePeer({ initiator: isInitiator })
// simplepeer generates signals which need to be sent across socket
peerConnection.on("signal", (data) => {
// console.log('signal');
mySocket.emit("signal", theirSocketId, mySocket.id, data);
});
// When we have a connection, send our stream
peerConnection.on("connect", () => {
// Let's give them our stream
peerConnection.addStream(localMediaStream);
console.log("Send our stream");
});
// Stream coming in to us
peerConnection.on("stream", (stream) => {
console.log("Incoming Stream");
updateClientMediaElements(theirSocketId, stream);
});
peerConnection.on("close", () => {
console.log("Got close event");
// Should probably remove from the array of simplepeers
});
peerConnection.on("error", (err) => {
console.log(err);
});
return peerConnection;
}
// temporarily pause the outgoing stream
function disableOutgoingStream() {
localMediaStream.getTracks().forEach((track) => {
track.enabled = false;
});
}
// enable the outgoing stream
function enableOutgoingStream() {
localMediaStream.getTracks().forEach((track) => {
track.enabled = true;
});
}
////////////////////////////////////////////////////////////////////////////////
// Three.js
////////////////////////////////////////////////////////////////////////////////
function onPlayerMove() {
// console.log('Sending movement update to server.');
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Utilities 🚂
// created <video> element for local mediastream
function createLocalVideoElement() {
const videoElement = document.createElement("video");
videoElement.id = "local_video";
videoElement.autoplay = true;
videoElement.width = videoWidth;
videoElement.height = videoHeight;
// videoElement.style = "visibility: hidden;";
if (localMediaStream) {
let videoStream = new MediaStream([localMediaStream.getVideoTracks()[0]]);
videoElement.srcObject = videoStream;
}
document.body.appendChild(videoElement);
}
// created <video> element using client ID
function createClientMediaElements(_id) {
console.log("Creating <html> media elements for client with ID: " + _id);
const videoElement = document.createElement("video");
videoElement.id = _id + "_video";
videoElement.autoplay = true;
// videoElement.style = "visibility: hidden;";
document.body.appendChild(videoElement);
// create audio element for client
let audioEl = document.createElement("audio");
audioEl.setAttribute("id", _id + "_audio");
audioEl.controls = "controls";
audioEl.volume = 1;
document.body.appendChild(audioEl);
audioEl.addEventListener("loadeddata", () => {
audioEl.play();
});
}
function updateClientMediaElements(_id, stream) {
let videoStream = new MediaStream([stream.getVideoTracks()[0]]);
let audioStream = new MediaStream([stream.getAudioTracks()[0]]);
const videoElement = document.getElementById(_id + "_video");
videoElement.srcObject = videoStream;
let audioEl = document.getElementById(_id + "_audio");
audioEl.srcObject = audioStream;
}
// remove <video> element and corresponding <canvas> using client ID
function removeClientVideoElementAndCanvas(_id) {
console.log("Removing <video> element for client with id: " + _id);
let videoEl = document.getElementById(_id + "_video");
if (videoEl != null) {
videoEl.remove();
}
}
</script>
<style>
body {
overflow: hidden;
}
#info {
padding: 1em;
border: 1em;
color: rgb(223, 223, 223);
}
</style>
</head>
<body style="background: yellow; margin: 0; padding: 0">
<div id="canvas-container"></div>
</body>
</html>
Any help appreciated.

Can't figure out how to fix this: "Uncaught TypeError (webcam.snap is not a function)" <-- javascript

I have a JavaScript script that I am getting an error for that I can't figure out. I am trying to take a picture of the webcam feed using JavaScript
The error is:
Uncaught TypeError: webcam.snap is not a function
I am using webcam.js to take the snapshot.
Here is my JavaScript code:
<script>
var video = document.createElement("video");
var canvasElement = document.getElementById("canvas");
var canvas = canvasElement.getContext("2d");
var loadingMessage = document.getElementById("loadingMessage");
var outputContainer = document.getElementById("output");
var outputMessage = document.getElementById("outputMessage");
var outputData = document.getElementById("outputData");
const jsQR = require("jsqr");
function drawLine(begin, end, color) {
canvas.beginPath();
canvas.moveTo(begin.x, begin.y);
canvas.lineTo(end.x, end.y);
canvas.lineWidth = 4;
canvas.strokeStyle = color;
canvas.stroke();
}
// Use facingMode: environment to attemt to get the front camera on phones
navigator.mediaDevices.getUserMedia({ video: { facingMode: "environment" } }).then(function(stream) {
video.srcObject = stream;
video.setAttribute("playsinline", true); // required to tell iOS safari we don't want fullscreen
video.play();
requestAnimationFrame(tick);
});
function tick() {
loadingMessage.innerText = "⌛ Loading video..."
if (video.readyState === video.HAVE_ENOUGH_DATA) {
loadingMessage.hidden = true;
canvasElement.hidden = false;
outputContainer.hidden = false;
canvasElement.height = video.videoHeight;
canvasElement.width = video.videoWidth;
canvas.drawImage(video, 0, 0, canvasElement.width, canvasElement.height);
var imageData = canvas.getImageData(0, 0, canvasElement.width, canvasElement.height);
var code = jsQR(imageData.data, imageData.width, imageData.height, {
inversionAttempts: "invertFirst",
});
if (code) {
drawLine(code.location.topLeftCorner, code.location.topRightCorner, "#FF3B58");
drawLine(code.location.topRightCorner, code.location.bottomRightCorner, "#FF3B58");
drawLine(code.location.bottomRightCorner, code.location.bottomLeftCorner, "#FF3B58");
drawLine(code.location.bottomLeftCorner, code.location.topLeftCorner, "#FF3B58");
outputMessage.hidden = true;
outputData.parentElement.hidden = false;
outputData.innerText = code.data;
takeSnapShot();
}
else {
outputMessage.hidden = false;
outputData.parentElement.hidden = true;
}
}
requestAnimationFrame(tick);
}
// TAKE A SNAPSHOT.
takeSnapShot = function () {
webcam.snap(function (data_uri) {
downloadImage('video', data_uri);
});
}
// DOWNLOAD THE IMAGE.
downloadImage = function (name, datauri) {
var a = document.createElement('a');
a.setAttribute('download', name + '.png');
a.setAttribute('href', datauri);
a.click();
}
</script>
This is the first line that causes a problem:
webcam.snap(function (data_uri) {
downloadImage('video', data_uri);
});
This is the second line that causes a problem:
takeSnapShot();
how do I correct this properly?
****** UPDATE ******
The version of webcam.js I am using is WebcamJS v1.0.26. My application is a Django application that launches the HTML file as defined in main.js.
snap: function(user_callback, user_canvas) {
// use global callback and canvas if not defined as parameter
if (!user_callback) user_callback = this.params.user_callback;
if (!user_canvas) user_canvas = this.params.user_canvas;
// take snapshot and return image data uri
var self = this;
var params = this.params;
if (!this.loaded) return this.dispatch('error', new WebcamError("Webcam is not loaded yet"));
// if (!this.live) return this.dispatch('error', new WebcamError("Webcam is not live yet"));
if (!user_callback) return this.dispatch('error', new WebcamError("Please provide a callback function or canvas to snap()"));
// if we have an active preview freeze, use that
if (this.preview_active) {
this.savePreview( user_callback, user_canvas );
return null;
}
// create offscreen canvas element to hold pixels
var canvas = document.createElement('canvas');
canvas.width = this.params.dest_width;
canvas.height = this.params.dest_height;
var context = canvas.getContext('2d');
// flip canvas horizontally if desired
if (this.params.flip_horiz) {
context.translate( params.dest_width, 0 );
context.scale( -1, 1 );
}
// create inline function, called after image load (flash) or immediately (native)
var func = function() {
// render image if needed (flash)
if (this.src && this.width && this.height) {
context.drawImage(this, 0, 0, params.dest_width, params.dest_height);
}
// crop if desired
if (params.crop_width && params.crop_height) {
var crop_canvas = document.createElement('canvas');
crop_canvas.width = params.crop_width;
crop_canvas.height = params.crop_height;
var crop_context = crop_canvas.getContext('2d');
crop_context.drawImage( canvas,
Math.floor( (params.dest_width / 2) - (params.crop_width / 2) ),
Math.floor( (params.dest_height / 2) - (params.crop_height / 2) ),
params.crop_width,
params.crop_height,
0,
0,
params.crop_width,
params.crop_height
);
// swap canvases
context = crop_context;
canvas = crop_canvas;
}
// render to user canvas if desired
if (user_canvas) {
var user_context = user_canvas.getContext('2d');
user_context.drawImage( canvas, 0, 0 );
}
// fire user callback if desired
user_callback(
user_canvas ? null : canvas.toDataURL('image/' + params.image_format, params.jpeg_quality / 100 ),
canvas,
context
);
};
// grab image frame from userMedia or flash movie
if (this.userMedia) {
// native implementation
context.drawImage(this.video, 0, 0, this.params.dest_width, this.params.dest_height);
// fire callback right away
func();
}
else if (this.iOS) {
var div = document.getElementById(this.container.id+'-ios_div');
var img = document.getElementById(this.container.id+'-ios_img');
var input = document.getElementById(this.container.id+'-ios_input');
// function for handle snapshot event (call user_callback and reset the interface)
iFunc = function(event) {
func.call(img);
img.removeEventListener('load', iFunc);
div.style.backgroundImage = 'none';
img.removeAttribute('src');
input.value = null;
};
if (!input.value) {
// No image selected yet, activate input field
img.addEventListener('load', iFunc);
input.style.display = 'block';
input.focus();
input.click();
input.style.display = 'none';
} else {
// Image already selected
iFunc(null);
}
}
else {
// flash fallback
var raw_data = this.getMovie()._snap();
// render to image, fire callback when complete
var img = new Image();
img.onload = func;
img.src = 'data:image/'+this.params.image_format+';base64,' + raw_data;
}
return null;
},
Your implementation doesn't need Webcamjs, because you're using navigator media devices.
You can either use WebcamJS by initializing it at first and attaching it to some canvas, like in the following code
Webcam.set({
width: 320,
height: 240,
image_format: 'jpeg',
jpeg_quality: 90
});
Webcam.attach( '#my_camera' );
Or you can update your takeSnapShot function to the following :
takeSnapShot = function () {
downloadImage('video',canvasElement.toDataURL())
// Webcam.snap(function (data_uri) {
// downloadImage('video', data_uri);
// });
}
Here's a working example based on your code https://codepen.io/majdsalloum/pen/RwVKBbK
It seems like either:
the webcam's code are missing (not imported)
in this case you need to first call the script from the URL and add it with script tag
<script src="WEBCAM_JS_SOURCE">
or
they are imported, but used with typo. From the webcam source code it is defined as:
var Webcam = {
version: '1.0.26',
// globals
...
};
so you should use with a capital one.

Javascript: Saving getUserMedia() images

I want to load the user's webcam to a canvas, and then save an image from that canvas as a blob. Hopefully, I will get 1 image every 1 second.
Unfortunately, I only get 1 image saved 5 times, rather than 5 different images. My code is fairly straightforward, but I suspect the issue is with my takeASnap() function. What is going wrong?
Edit: This is observed on Safari on iPad/iPhone, but not in desktop chrome.
var NUM_IMAGES = 5;
const vid = document.querySelector('video');
navigator.mediaDevices.getUserMedia({ video: true }) // request cam
.then(stream => {
vid.srcObject = stream; // don't use createObjectURL(MediaStream)
return vid.play(); // returns a Promise
})
.then(() => { // enable the button
const btn = document.getElementById('download-button');
btn.disabled = false;
btn.onclick = e => {
imageId = 0;
userId = Math.floor(Math.random() * 10000);
recursiveDelay(kickOff, NUM_IMAGES, 1000)
};
});
function kickOff() {
takeASnap().then(saveBlob); // saveBlob out of scope of this question.
}
function recursiveDelay(functionToCall, executionsNumber, timeoutInMilliseconds) {
if (executionsNumber) { //exit condition
functionToCall(); // external function execution
setTimeout(
() => {
recursiveDelay(functionToCall, executionsNumber - 1, timeoutInMilliseconds); //recursive call
}, timeoutInMilliseconds);
}
}
function takeASnap() {
const canvas = document.createElement('canvas'); // create a canvas
const ctx = canvas.getContext('2d'); // get its context
canvas.width = vid.videoWidth; // set its size to the one of the video
canvas.height = vid.videoHeight;
ctx.drawImage(vid, 0, 0); // the video
return new Promise((res, rej) => {
canvas.toBlob(res, 'image/jpeg'); // request a Blob from the canvas
});
}

Converting image taken from camera to canvas has gray on the side

Im using vanilla javascript to capture images from the camera and into the canvas. Its working fine on Chrome Android but not in iOS Safari. As seen in the screenshot, there is a gray area on the image. Is there something I need to do to not get that rendered?
JavaScript for the camera
navigator.mediaDevices.getUserMedia({
'audio': false,
'video': {
facingMode: 'environment'
}
}).then(stream => {
this.VIDEO.srcObject = stream;
this.VIDEO.addEventListener('loadeddata', async () => {
this.VIDEO.width = this.VIDEO.videoWidth;
this.VIDEO.height = this.VIDEO.videoHeight;
this.CANVAS.width = this.VIDEO.videoWidth;
this.CANVAS.height = this.VIDEO.videoHeight;
resolve();
}, false);
}).catch(error => {
console.error(error);
self.MESSAGE.textContent = error;
});
JavaScript for drawing on the canvas and the preview window
self.CONTEXT.drawImage(self.VIDEO, 0, 0, self.VIDEO.width, self.VIDEO.height);
let imgDataURL = self.CANVAS.toDataURL('image/png');
self.PLANT.src = imgDataURL;
self.POLAROID.style.backgroundImage = `url(${imgDataURL})`;

How to take a screenshot, crop it, and upload it as an image on a web form? [duplicate]

Google's "Report a Bug" or "Feedback Tool" lets you select an area of your browser window to create a screenshot that is submitted with your feedback about a bug.
Screenshot by Jason Small, posted in a duplicate question.
How are they doing this? Google's JavaScript feedback API is loaded from here and their overview of the feedback module will demonstrate the screenshot capability.
JavaScript can read the DOM and render a fairly accurate representation of that using canvas. I have been working on a script which converts HTML into a canvas image. Decided today to make an implementation of it into sending feedbacks like you described.
The script allows you to create feedback forms which include a screenshot, created on the client's browser, along with the form. The screenshot is based on the DOM and as such may not be 100% accurate to the real representation as it does not make an actual screenshot, but builds the screenshot based on the information available on the page.
It does not require any rendering from the server, as the whole image is created on the client's browser. The HTML2Canvas script itself is still in a very experimental state, as it does not parse nearly as much of the CSS3 attributes I would want it to, nor does it have any support to load CORS images even if a proxy was available.
Still quite limited browser compatibility (not because more couldn't be supported, just haven't had time to make it more cross browser supported).
For more information, have a look at the examples here:
http://hertzen.com/experiments/jsfeedback/
edit
The html2canvas script is now available separately here and some examples here.
edit 2
Another confirmation that Google uses a very similar method (in fact, based on the documentation, the only major difference is their async method of traversing/drawing) can be found in this presentation by Elliott Sprehn from the Google Feedback team:
http://www.elliottsprehn.com/preso/fluentconf/
Your web app can now take a 'native' screenshot of the client's entire desktop using getUserMedia():
Have a look at this example:
https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/
The client will have to be using chrome (for now) and will need to enable screen capture support under chrome://flags.
PoC
As Niklas mentioned you can use the html2canvas library to take a screenshot using JS in the browser. I will extend his answer in this point by providing an example of taking a screenshot using this library ("Proof of Concept"):
function report() {
let region = document.querySelector("body"); // whole screen
html2canvas(region, {
onrendered: function(canvas) {
let pngUrl = canvas.toDataURL(); // png in dataURL format
let img = document.querySelector(".screen");
img.src = pngUrl;
// here you can allow user to set bug-region
// and send it with 'pngUrl' to server
},
});
}
.container {
margin-top: 10px;
border: solid 1px black;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<div>Screenshot tester</div>
<button onclick="report()">Take screenshot</button>
<div class="container">
<img width="75%" class="screen">
</div>
In report() function in onrendered after getting image as data URI you can show it to the user and allow him to draw "bug region" by mouse and then send a screenshot and region coordinates to the server.
In this example async/await version was made: with nice makeScreenshot() function.
UPDATE
Simple example which allows you to take screenshot, select region, describe bug and send POST request (here jsfiddle) (the main function is report()).
async function report() {
let screenshot = await makeScreenshot(); // png dataUrl
let img = q(".screen");
img.src = screenshot;
let c = q(".bug-container");
c.classList.remove('hide')
let box = await getBox();
c.classList.add('hide');
send(screenshot,box); // sed post request with bug image, region and description
alert('To see POST requset with image go to: chrome console > network tab');
}
// ----- Helper functions
let q = s => document.querySelector(s); // query selector helper
window.report = report; // bind report be visible in fiddle html
async function makeScreenshot(selector="body")
{
return new Promise((resolve, reject) => {
let node = document.querySelector(selector);
html2canvas(node, { onrendered: (canvas) => {
let pngUrl = canvas.toDataURL();
resolve(pngUrl);
}});
});
}
async function getBox(box) {
return new Promise((resolve, reject) => {
let b = q(".bug");
let r = q(".region");
let scr = q(".screen");
let send = q(".send");
let start=0;
let sx,sy,ex,ey=-1;
r.style.width=0;
r.style.height=0;
let drawBox= () => {
r.style.left = (ex > 0 ? sx : sx+ex ) +'px';
r.style.top = (ey > 0 ? sy : sy+ey) +'px';
r.style.width = Math.abs(ex) +'px';
r.style.height = Math.abs(ey) +'px';
}
//console.log({b,r, scr});
b.addEventListener("click", e=>{
if(start==0) {
sx=e.pageX;
sy=e.pageY;
ex=0;
ey=0;
drawBox();
}
start=(start+1)%3;
});
b.addEventListener("mousemove", e=>{
//console.log(e)
if(start==1) {
ex=e.pageX-sx;
ey=e.pageY-sy
drawBox();
}
});
send.addEventListener("click", e=>{
start=0;
let a=100/75 //zoom out img 75%
resolve({
x:Math.floor(((ex > 0 ? sx : sx+ex )-scr.offsetLeft)*a),
y:Math.floor(((ey > 0 ? sy : sy+ey )-b.offsetTop)*a),
width:Math.floor(Math.abs(ex)*a),
height:Math.floor(Math.abs(ex)*a),
desc: q('.bug-desc').value
});
});
});
}
function send(image,box) {
let formData = new FormData();
let req = new XMLHttpRequest();
formData.append("box", JSON.stringify(box));
formData.append("screenshot", image);
req.open("POST", '/upload/screenshot');
req.send(formData);
}
.bug-container { background: rgb(255,0,0,0.1); margin-top:20px; text-align: center; }
.send { border-radius:5px; padding:10px; background: green; cursor: pointer; }
.region { position: absolute; background: rgba(255,0,0,0.4); }
.example { height: 100px; background: yellow; }
.bug { margin-top: 10px; cursor: crosshair; }
.hide { display: none; }
.screen { pointer-events: none }
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<body>
<div>Screenshot tester</div>
<button onclick="report()">Report bug</button>
<div class="example">Lorem ipsum</div>
<div class="bug-container hide">
<div>Select bug region: click once - move mouse - click again</div>
<div class="bug">
<img width="75%" class="screen" >
<div class="region"></div>
</div>
<div>
<textarea class="bug-desc">Describe bug here...</textarea>
</div>
<div class="send">SEND BUG</div>
</div>
</body>
Get screenshot as Canvas or Jpeg Blob / ArrayBuffer using getDisplayMedia API:
FIX 1: Use the getUserMedia with chromeMediaSource only for Electron.js
FIX 2: Throw error instead return null object
FIX 3: Fix demo to prevent the error: getDisplayMedia must be called from a user gesture handler
// docs: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getDisplayMedia
// see: https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/#20893521368186473
// see: https://github.com/muaz-khan/WebRTC-Experiment/blob/master/Pluginfree-Screen-Sharing/conference.js
function getDisplayMedia(options) {
if (navigator.mediaDevices && navigator.mediaDevices.getDisplayMedia) {
return navigator.mediaDevices.getDisplayMedia(options)
}
if (navigator.getDisplayMedia) {
return navigator.getDisplayMedia(options)
}
if (navigator.webkitGetDisplayMedia) {
return navigator.webkitGetDisplayMedia(options)
}
if (navigator.mozGetDisplayMedia) {
return navigator.mozGetDisplayMedia(options)
}
throw new Error('getDisplayMedia is not defined')
}
function getUserMedia(options) {
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
return navigator.mediaDevices.getUserMedia(options)
}
if (navigator.getUserMedia) {
return navigator.getUserMedia(options)
}
if (navigator.webkitGetUserMedia) {
return navigator.webkitGetUserMedia(options)
}
if (navigator.mozGetUserMedia) {
return navigator.mozGetUserMedia(options)
}
throw new Error('getUserMedia is not defined')
}
async function takeScreenshotStream() {
// see: https://developer.mozilla.org/en-US/docs/Web/API/Window/screen
const width = screen.width * (window.devicePixelRatio || 1)
const height = screen.height * (window.devicePixelRatio || 1)
const errors = []
let stream
try {
stream = await getDisplayMedia({
audio: false,
// see: https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamConstraints/video
video: {
width,
height,
frameRate: 1,
},
})
} catch (ex) {
errors.push(ex)
}
// for electron js
if (navigator.userAgent.indexOf('Electron') >= 0) {
try {
stream = await getUserMedia({
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
// chromeMediaSourceId: source.id,
minWidth : width,
maxWidth : width,
minHeight : height,
maxHeight : height,
},
},
})
} catch (ex) {
errors.push(ex)
}
}
if (errors.length) {
console.debug(...errors)
if (!stream) {
throw errors[errors.length - 1]
}
}
return stream
}
async function takeScreenshotCanvas() {
const stream = await takeScreenshotStream()
// from: https://stackoverflow.com/a/57665309/5221762
const video = document.createElement('video')
const result = await new Promise((resolve, reject) => {
video.onloadedmetadata = () => {
video.play()
video.pause()
// from: https://github.com/kasprownik/electron-screencapture/blob/master/index.js
const canvas = document.createElement('canvas')
canvas.width = video.videoWidth
canvas.height = video.videoHeight
const context = canvas.getContext('2d')
// see: https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement
context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
resolve(canvas)
}
video.srcObject = stream
})
stream.getTracks().forEach(function (track) {
track.stop()
})
if (result == null) {
throw new Error('Cannot take canvas screenshot')
}
return result
}
// from: https://stackoverflow.com/a/46182044/5221762
function getJpegBlob(canvas) {
return new Promise((resolve, reject) => {
// docs: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toBlob
canvas.toBlob(blob => resolve(blob), 'image/jpeg', 0.95)
})
}
async function getJpegBytes(canvas) {
const blob = await getJpegBlob(canvas)
return new Promise((resolve, reject) => {
const fileReader = new FileReader()
fileReader.addEventListener('loadend', function () {
if (this.error) {
reject(this.error)
return
}
resolve(this.result)
})
fileReader.readAsArrayBuffer(blob)
})
}
async function takeScreenshotJpegBlob() {
const canvas = await takeScreenshotCanvas()
return getJpegBlob(canvas)
}
async function takeScreenshotJpegBytes() {
const canvas = await takeScreenshotCanvas()
return getJpegBytes(canvas)
}
function blobToCanvas(blob, maxWidth, maxHeight) {
return new Promise((resolve, reject) => {
const img = new Image()
img.onload = function () {
const canvas = document.createElement('canvas')
const scale = Math.min(
1,
maxWidth ? maxWidth / img.width : 1,
maxHeight ? maxHeight / img.height : 1,
)
canvas.width = img.width * scale
canvas.height = img.height * scale
const ctx = canvas.getContext('2d')
ctx.drawImage(img, 0, 0, img.width, img.height, 0, 0, canvas.width, canvas.height)
resolve(canvas)
}
img.onerror = () => {
reject(new Error('Error load blob to Image'))
}
img.src = URL.createObjectURL(blob)
})
}
DEMO:
document.body.onclick = async () => {
// take the screenshot
var screenshotJpegBlob = await takeScreenshotJpegBlob()
// show preview with max size 300 x 300 px
var previewCanvas = await blobToCanvas(screenshotJpegBlob, 300, 300)
previewCanvas.style.position = 'fixed'
document.body.appendChild(previewCanvas)
// send it to the server
var formdata = new FormData()
formdata.append("screenshot", screenshotJpegBlob)
await fetch('https://your-web-site.com/', {
method: 'POST',
body: formdata,
'Content-Type' : "multipart/form-data",
})
}
// and click on the page
Here is a complete screenshot example that works with chrome in 2021. The end result is a blob ready to be transmitted. Flow is: request media > grab frame > draw to canvas > transfer to blob. If you want to do it more memory efficient explore OffscreenCanvas or possibly ImageBitmapRenderingContext
https://jsfiddle.net/v24hyd3q/1/
// Request media
navigator.mediaDevices.getDisplayMedia().then(stream =>
{
// Grab frame from stream
let track = stream.getVideoTracks()[0];
let capture = new ImageCapture(track);
capture.grabFrame().then(bitmap =>
{
// Stop sharing
track.stop();
// Draw the bitmap to canvas
canvas.width = bitmap.width;
canvas.height = bitmap.height;
canvas.getContext('2d').drawImage(bitmap, 0, 0);
// Grab blob from canvas
canvas.toBlob(blob => {
// Do things with blob here
console.log('output blob:', blob);
});
});
})
.catch(e => console.log(e));
Heres an example using: getDisplayMedia
document.body.innerHTML = '<video style="width: 100%; height: 100%; border: 1px black solid;"/>';
navigator.mediaDevices.getDisplayMedia()
.then( mediaStream => {
const video = document.querySelector('video');
video.srcObject = mediaStream;
video.onloadedmetadata = e => {
video.play();
video.pause();
};
})
.catch( err => console.log(`${err.name}: ${err.message}`));
Also worth checking out is the Screen Capture API docs.
You can try my new JS library: screenshot.js.
It's enable to take real screenshot.
You load the script:
<script src="https://raw.githubusercontent.com/amiad/screenshot.js/master/screenshot.js"></script>
and take screenshot:
new Screenshot({success: img => {
// callback function
myimage = img;
}});
You can read more options in project page.

Categories