How to select rear-facing camera on a progressive web application? - javascript

I have a camera PWA. Taking photos and uploading them works fine, but I want to use the rear-facing camera instead of the front-facing one. How do I do this?
This is the current code I'm using for initializing the camera and taking the photo (in my .js file):
// This will initialize the camera
function initializeMedia() {
    if (!('mediaDevices' in navigator)) {
        navigator.mediaDevices = {};
    }
    if (!('getUserMedia' in navigator.mediaDevices)) {
        navigator.mediaDevices.getUserMedia = function(constraints) {
            var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
            if (!getUserMedia) {
                return Promise.reject(new Error('getUserMedia is not implemented!'));
            }
            return new Promise(function(resolve, reject) {
                getUserMedia.call(navigator, constraints, resolve, reject);
            });
        }
    }
    navigator.mediaDevices.getUserMedia({
        video: true
    })
    .then(function(stream) {
        videoPlayer.srcObject = stream;
        videoPlayer.style.display = 'block';
    })
    .catch(function(err) {
        imagePicker.style.display = 'block';
    });
}
// capture image
captureButton.addEventListener('click', function(event) {
    canvasElement.style.display = 'block';
    videoPlayer.style.display = 'none';
    captureButton.style.display = 'none';
    var context = canvasElement.getContext('2d');
    context.drawImage(videoPlayer, 0, 0, canvasElement.width, videoPlayer.videoHeight / (videoPlayer.videoWidth / canvasElement.width));
    videoPlayer.srcObject.getVideoTracks().forEach(function(track) {
        track.stop();
    });
    picture = dataURItoBlob(canvasElement.toDataURL());
});
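The dataURItoBlob() helper used above isn't shown in the question. A typical implementation looks roughly like this; this is a sketch of the usual pattern, not necessarily the asker's exact helper:
// hypothetical helper: convert a canvas.toDataURL() string into a Blob
function dataURItoBlob(dataURI) {
    var parts = dataURI.split(',');
    var mime = parts[0].split(':')[1].split(';')[0]; // e.g. 'image/png'
    var byteString = atob(parts[1]); // decode the base64 payload
    var buffer = new Uint8Array(byteString.length);
    for (var i = 0; i < byteString.length; i++) {
        buffer[i] = byteString.charCodeAt(i);
    }
    return new Blob([buffer], { type: mime });
}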

You can set video.facingMode to either 'user' for the front camera, or 'environment' for the back camera in the constraints object that you pass to navigator.mediaDevices.getUserMedia().
Example from MDN:
var constraints = { video: { facingMode: "environment" } };
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
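Applied to the initializeMedia() function in the question, only the constraints object changes. A minimal sketch, assuming the same videoPlayer/imagePicker elements: the plain string form is a soft preference (the browser falls back to another camera if no rear camera exists), while { exact: 'environment' } makes the call reject instead, typically with an OverconstrainedError:
navigator.mediaDevices.getUserMedia({
    video: { facingMode: 'environment' } // prefer the rear-facing camera
})
.then(function(stream) {
    videoPlayer.srcObject = stream;
    videoPlayer.style.display = 'block';
})
.catch(function(err) {
    // with { facingMode: { exact: 'environment' } } you would land here
    // on devices that have no rear camera
    imagePicker.style.display = 'block';
});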

Related

Three.js - OBJLoader not importing model due to a material error

I am attempting to use three.js to load a model (an OBJ file) in the browser; however, there seems to be an issue with the materials. Is there any way I can just set a default material, like black, so the model loads? I have tried setting a texture, but the error originates from inside the OBJLoader library. How can I fix this?
The error is:
An error happened TypeError: Cannot read properties of undefined (reading 'scene')
at Scene.THREE.Mesh.THREE.MeshStandardMaterial.color ((index):168:10)
at Object.onLoad (OBJLoader.js:457:5)
at three.module.js:39650:38
My code is:
<!DOCTYPE html>
<html>
<head>
    <title>
        three.js WebRTC template
    </title>
    <meta name='viewport' content='initial-scale=1,maximum-scale=1,user-scalable=no' />
    <script src="js/libs/socket.io.min.js"></script>
    <script src="js/libs/three.min.js"></script>
    <script src="resources/threejs/r105/js/controls/OrbitControls.js"></script>
    <script src="js/libs/firstPersonControls.js"></script>
    <script src="js/libs/simplepeer.min.js"></script>
    <script src="js/environment.js"></script>
    <script type="importmap">
    {
        "imports": {
            "three": "https://unpkg.com/three@0.138.0/build/three.module.js",
            "OrbitControls": "https://unpkg.com/three@0.138.0/examples/jsm/controls/OrbitControls.js"
        }
    }
    </script>
<script type="module">
import { Group , ObjectLoader } from 'three';
//import MODEL from './park.obj';
import * as THREE from 'https://unpkg.com/three#0.138.0/build/three.module.js';
import {OBJLoader} from 'https://unpkg.com/three#0.138.0/examples/jsm/loaders/OBJLoader.js'
//import obj from "./park.obj"
// import { loadScene } from '../../Loaders/loader';
// instantiate a loader
class Scene {
constructor() {
//THREE scene
this.scene = new THREE.Scene();
const loader = new OBJLoader();
// load a resource
loader.load(
// resource URL
'./park.obj',
// called when resource is loaded
function ( object ) {
console.log(object)
object.traverse( function ( child ) {
if ( child instanceof THREE.Mesh ) {
const texture = new THREE.TextureLoader().load( "assets/texture.png" );
child.material.map = texture;
}
} );
this.scene.add( object );
},
// called when loading is in progresses
function ( xhr ) {
console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
},
// called when loading has errors
function ( error ) {
console.log( 'An error happened', error );
}
);
                // Floor
                const floor = new THREE.Mesh(
                    new THREE.PlaneBufferGeometry(20, 20),
                    new THREE.MeshStandardMaterial({ color: "#a9c388" })
                );
                floor.position.y = 0;
                floor.rotation.x = -Math.PI * 0.5;
                this.scene.add(floor);
                //Utility
                this.width = window.innerWidth;
                this.height = window.innerHeight * 0.9;
                // lerp value to be used when interpolating positions and rotations
                this.lerpValue = 0;
                //THREE Camera
                this.camera = new THREE.PerspectiveCamera(
                    50,
                    this.width / this.height,
                    0.1,
                    5000
                );
                this.camera.position.set(0, 3, 6);
                this.scene.add(this.camera);
                // create an AudioListener and add it to the camera
                this.listener = new THREE.AudioListener();
                this.camera.add(this.listener);
                //THREE WebGL renderer
                this.renderer = new THREE.WebGLRenderer({
                    antialiasing: true,
                });
                this.renderer.setClearColor(new THREE.Color("lightblue"));
                this.renderer.setSize(this.width, this.height);
                // add controls:
                this.controls = new FirstPersonControls(this.scene, this.camera, this.renderer);
                //Push the canvas to the DOM
                let domElement = document.getElementById("canvas-container");
                domElement.append(this.renderer.domElement);
                //Setup event listeners for events and handle the states
                window.addEventListener("resize", (e) => this.onWindowResize(e), false);
                // Helpers
                this.scene.add(new THREE.GridHelper(500, 500));
                this.scene.add(new THREE.AxesHelper(10));
                //createEnvironment(this.scene);
                // Start the loop
                this.frameCount = 0;
                this.update();
            }
            //////////////////////////////////////////////////////////////////////
            //////////////////////////////////////////////////////////////////////
            // Lighting 💡
            //////////////////////////////////////////////////////////////////////
            //////////////////////////////////////////////////////////////////////
            // Clients 👫
            // add a client meshes, a video element and canvas for three.js video texture
            addClient(id) {
                let videoMaterial = makeVideoMaterial(id);
                let otherMat = new THREE.MeshNormalMaterial();
                let head = new THREE.Mesh(new THREE.BoxGeometry(1, 1, 1), [otherMat, otherMat, otherMat, otherMat, otherMat, videoMaterial]);
                // set position of head before adding to parent object
                head.position.set(0, 0, 0);
                // https://threejs.org/docs/index.html#api/en/objects/Group
                var group = new THREE.Group();
                group.add(head);
                // add group to scene
                this.scene.add(group);
                peers[id].group = group;
                peers[id].previousPosition = new THREE.Vector3();
                peers[id].previousRotation = new THREE.Quaternion();
                peers[id].desiredPosition = new THREE.Vector3();
                peers[id].desiredRotation = new THREE.Quaternion();
            }
            removeClient(id) {
                this.scene.remove(peers[id].group);
            }
            // overloaded function can deal with new info or not
            updateClientPositions(clientProperties) {
                this.lerpValue = 0;
                for (let id in clientProperties) {
                    if (id != mySocket.id) {
                        peers[id].previousPosition.copy(peers[id].group.position);
                        peers[id].previousRotation.copy(peers[id].group.quaternion);
                        peers[id].desiredPosition = new THREE.Vector3().fromArray(
                            clientProperties[id].position
                        );
                        peers[id].desiredRotation = new THREE.Quaternion().fromArray(
                            clientProperties[id].rotation
                        );
                    }
                }
            }
            interpolatePositions() {
                this.lerpValue += 0.1; // updates are sent roughly every 1/5 second == 10 frames
                for (let id in peers) {
                    if (peers[id].group) {
                        peers[id].group.position.lerpVectors(peers[id].previousPosition, peers[id].desiredPosition, this.lerpValue);
                        peers[id].group.quaternion.slerpQuaternions(peers[id].previousRotation, peers[id].desiredRotation, this.lerpValue);
                    }
                }
            }
            updateClientVolumes() {
                for (let id in peers) {
                    let audioEl = document.getElementById(id + "_audio");
                    if (audioEl && peers[id].group) {
                        let distSquared = this.camera.position.distanceToSquared(
                            peers[id].group.position
                        );
                        if (distSquared > 500) {
                            audioEl.volume = 0;
                        } else {
                            // from lucasio here: https://discourse.threejs.org/t/positionalaudio-setmediastreamsource-with-webrtc-question-not-hearing-any-sound/14301/29
                            let volume = Math.min(1, 10 / distSquared);
                            audioEl.volume = volume;
                        }
                    }
                }
            }
            //////////////////////////////////////////////////////////////////////
            //////////////////////////////////////////////////////////////////////
            // Interaction 🤾‍♀️
            getPlayerPosition() {
                // TODO: use quaternion or are euler angles fine here?
                return [
                    [
                        this.camera.position.x,
                        this.camera.position.y,
                        this.camera.position.z,
                    ],
                    [
                        this.camera.quaternion._x,
                        this.camera.quaternion._y,
                        this.camera.quaternion._z,
                        this.camera.quaternion._w,
                    ],
                ];
            }
            //////////////////////////////////////////////////////////////////////
            //////////////////////////////////////////////////////////////////////
            // Rendering 🎥
            update() {
                requestAnimationFrame(() => this.update());
                this.frameCount++;
                //updateEnvironment();
                if (this.frameCount % 25 === 0) {
                    this.updateClientVolumes();
                }
                this.interpolatePositions();
                this.controls.update();
                this.render();
            }
            render() {
                this.renderer.render(this.scene, this.camera);
            }
            //////////////////////////////////////////////////////////////////////
            //////////////////////////////////////////////////////////////////////
            // Event Handlers 🍽
            onWindowResize(e) {
                this.width = window.innerWidth;
                this.height = Math.floor(window.innerHeight * 0.9);
                this.camera.aspect = this.width / this.height;
                this.camera.updateProjectionMatrix();
                this.renderer.setSize(this.width, this.height);
            }
        }
        //////////////////////////////////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////
        // Utilities
        function makeVideoMaterial(id) {
            let videoElement = document.getElementById(id + "_video");
            let videoTexture = new THREE.VideoTexture(videoElement);
            let videoMaterial = new THREE.MeshBasicMaterial({
                map: videoTexture,
                overdraw: true,
                side: THREE.DoubleSide,
            });
            return videoMaterial;
        }
        /*
         *
         * This uses code from a THREE.js Multiplayer boilerplate made by Or Fleisher:
         * https://github.com/juniorxsound/THREE.Multiplayer
         * And a WEBRTC chat app made by Mikołaj Wargowski:
         * https://github.com/Miczeq22/simple-chat-app
         *
         * Aidan Nelson, April 2020
         *
         */
        // socket.io
        let mySocket;
        // array of connected clients
        let peers = {};
        // Variable to store our three.js scene:
        let myScene;
        // set video width / height / framerate here:
        const videoWidth = 80;
        const videoHeight = 60;
        const videoFrameRate = 15;
        // Our local media stream (i.e. webcam and microphone stream)
        let localMediaStream = null;
        // Constraints for our local audio/video stream
        let mediaConstraints = {
            audio: true,
            video: {
                width: videoWidth,
                height: videoHeight,
                frameRate: videoFrameRate,
            },
        };
        ////////////////////////////////////////////////////////////////////////////////
        // Start-Up Sequence:
        ////////////////////////////////////////////////////////////////////////////////
        window.onload = async () => {
            console.log("Window loaded.");
            // first get user media
            localMediaStream = await getMedia(mediaConstraints);
            createLocalVideoElement();
            // then initialize socket connection
            initSocketConnection();
            // finally create the threejs scene
            console.log("Creating three.js scene...");
            myScene = new Scene();
            // start sending position data to the server
            setInterval(function () {
                mySocket.emit("move", myScene.getPlayerPosition());
            }, 200);
        };
        ////////////////////////////////////////////////////////////////////////////////
        // Local media stream setup
        ////////////////////////////////////////////////////////////////////////////////
        // https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
        async function getMedia(_mediaConstraints) {
            let stream = null;
            try {
                stream = await navigator.mediaDevices.getUserMedia(_mediaConstraints);
            } catch (err) {
                console.log("Failed to get user media!");
                console.warn(err);
            }
            return stream;
        }
        ////////////////////////////////////////////////////////////////////////////////
        // Socket.io
        ////////////////////////////////////////////////////////////////////////////////
        // establishes socket connection
        function initSocketConnection() {
            console.log("Initializing socket.io...");
            mySocket = io();
            mySocket.on("connect", () => {
                console.log("My socket ID:", mySocket.id);
            });
            //On connection server sends the client his ID and a list of all keys
            mySocket.on("introduction", (otherClientIds) => {
                // for each existing user, add them as a client and add tracks to their peer connection
                for (let i = 0; i < otherClientIds.length; i++) {
                    if (otherClientIds[i] != mySocket.id) {
                        let theirId = otherClientIds[i];
                        console.log("Adding client with id " + theirId);
                        peers[theirId] = {};
                        let pc = createPeerConnection(theirId, true);
                        peers[theirId].peerConnection = pc;
                        createClientMediaElements(theirId);
                        myScene.addClient(theirId);
                    }
                }
            });
            // when a new user has entered the server
            mySocket.on("newUserConnected", (theirId) => {
                if (theirId != mySocket.id && !(theirId in peers)) {
                    console.log("A new user connected with the ID: " + theirId);
                    console.log("Adding client with id " + theirId);
                    peers[theirId] = {};
                    createClientMediaElements(theirId);
                    myScene.addClient(theirId);
                }
            });
            mySocket.on("userDisconnected", (clientCount, _id, _ids) => {
                // Update the data from the server
                if (_id != mySocket.id) {
                    console.log("A user disconnected with the id: " + _id);
                    myScene.removeClient(_id);
                    removeClientVideoElementAndCanvas(_id);
                    delete peers[_id];
                }
            });
            mySocket.on("signal", (to, from, data) => {
                // console.log("Got a signal from the server: ", to, from, data);
                // to should be us
                if (to != mySocket.id) {
                    console.log("Socket IDs don't match");
                }
                // Look for the right simplepeer in our array
                let peer = peers[from];
                if (peer.peerConnection) {
                    peer.peerConnection.signal(data);
                } else {
                    console.log("Never found right simplepeer object");
                    // Let's create it then, we won't be the "initiator"
                    // let theirSocketId = from;
                    let peerConnection = createPeerConnection(from, false);
                    peers[from].peerConnection = peerConnection;
                    // Tell the new simplepeer that signal
                    peerConnection.signal(data);
                }
            });
            // Update when one of the users moves in space
            mySocket.on("positions", (_clientProps) => {
                myScene.updateClientPositions(_clientProps);
            });
        }
        ////////////////////////////////////////////////////////////////////////////////
        // Clients / WebRTC
        ////////////////////////////////////////////////////////////////////////////////
        // this function sets up a peer connection and corresponding DOM elements for a specific client
        function createPeerConnection(theirSocketId, isInitiator = false) {
            console.log('Connecting to peer with ID', theirSocketId);
            console.log('initiating?', isInitiator);
            let peerConnection = new SimplePeer({ initiator: isInitiator })
            // simplepeer generates signals which need to be sent across socket
            peerConnection.on("signal", (data) => {
                // console.log('signal');
                mySocket.emit("signal", theirSocketId, mySocket.id, data);
            });
            // When we have a connection, send our stream
            peerConnection.on("connect", () => {
                // Let's give them our stream
                peerConnection.addStream(localMediaStream);
                console.log("Send our stream");
            });
            // Stream coming in to us
            peerConnection.on("stream", (stream) => {
                console.log("Incoming Stream");
                updateClientMediaElements(theirSocketId, stream);
            });
            peerConnection.on("close", () => {
                console.log("Got close event");
                // Should probably remove from the array of simplepeers
            });
            peerConnection.on("error", (err) => {
                console.log(err);
            });
            return peerConnection;
        }
        // temporarily pause the outgoing stream
        function disableOutgoingStream() {
            localMediaStream.getTracks().forEach((track) => {
                track.enabled = false;
            });
        }
        // enable the outgoing stream
        function enableOutgoingStream() {
            localMediaStream.getTracks().forEach((track) => {
                track.enabled = true;
            });
        }
        ////////////////////////////////////////////////////////////////////////////////
        // Three.js
        ////////////////////////////////////////////////////////////////////////////////
        function onPlayerMove() {
            // console.log('Sending movement update to server.');
        }
        //////////////////////////////////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////
        // Utilities 🚂
        // created <video> element for local mediastream
        function createLocalVideoElement() {
            const videoElement = document.createElement("video");
            videoElement.id = "local_video";
            videoElement.autoplay = true;
            videoElement.width = videoWidth;
            videoElement.height = videoHeight;
            // videoElement.style = "visibility: hidden;";
            if (localMediaStream) {
                let videoStream = new MediaStream([localMediaStream.getVideoTracks()[0]]);
                videoElement.srcObject = videoStream;
            }
            document.body.appendChild(videoElement);
        }
        // created <video> element using client ID
        function createClientMediaElements(_id) {
            console.log("Creating <html> media elements for client with ID: " + _id);
            const videoElement = document.createElement("video");
            videoElement.id = _id + "_video";
            videoElement.autoplay = true;
            // videoElement.style = "visibility: hidden;";
            document.body.appendChild(videoElement);
            // create audio element for client
            let audioEl = document.createElement("audio");
            audioEl.setAttribute("id", _id + "_audio");
            audioEl.controls = "controls";
            audioEl.volume = 1;
            document.body.appendChild(audioEl);
            audioEl.addEventListener("loadeddata", () => {
                audioEl.play();
            });
        }
        function updateClientMediaElements(_id, stream) {
            let videoStream = new MediaStream([stream.getVideoTracks()[0]]);
            let audioStream = new MediaStream([stream.getAudioTracks()[0]]);
            const videoElement = document.getElementById(_id + "_video");
            videoElement.srcObject = videoStream;
            let audioEl = document.getElementById(_id + "_audio");
            audioEl.srcObject = audioStream;
        }
        // remove <video> element and corresponding <canvas> using client ID
        function removeClientVideoElementAndCanvas(_id) {
            console.log("Removing <video> element for client with id: " + _id);
            let videoEl = document.getElementById(_id + "_video");
            if (videoEl != null) {
                videoEl.remove();
            }
        }
    </script>
    <style>
        body {
            overflow: hidden;
        }
        #info {
            padding: 1em;
            border: 1em;
            color: rgb(223, 223, 223);
        }
    </style>
</head>
<body style="background: yellow; margin: 0; padding: 0">
    <div id="canvas-container"></div>
</body>
</html>
Any help appreciated.
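One likely culprit, judging from the stack trace: this.scene is read inside the plain function ( object ) callback passed to loader.load(). In a module script that callback runs in strict mode with this === undefined, so this.scene throws exactly "Cannot read properties of undefined (reading 'scene')". A minimal sketch of a fix, using arrow functions to keep this bound to the Scene instance and assigning a fallback material (the black MeshStandardMaterial is an assumption based on the "default material like black" asked about):
loader.load(
    './park.obj',
    ( object ) => { // arrow function: `this` stays the Scene instance
        object.traverse( ( child ) => {
            if ( child instanceof THREE.Mesh ) {
                // assumed default: a plain black material
                child.material = new THREE.MeshStandardMaterial( { color: 0x000000 } );
            }
        } );
        this.scene.add( object );
    },
    ( xhr ) => console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' ),
    ( error ) => console.log( 'An error happened', error )
);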

How to access both webcams mediastream in chrome browser simultaneously? [duplicate]

I have two cameras:
- the web camera
- the laptop camera
I want to stream both cameras on a website. I already have some reference: here is some code that works on JSFiddle.
<video id="video" width="640" height="480" autoplay></video>
<button id="snap" class="sexyButton">Snap Photo</button>
<canvas id="canvas" width="640" height="480"></canvas>
<script>
// Put event listeners into place
window.addEventListener("DOMContentLoaded", function() {
// Grab elements, create settings, etc.
var canvas = document.getElementById("canvas"),
context = canvas.getContext("2d"),
video = document.getElementById("video"),
videoObj = { "video": true },
errBack = function(error) {
console.log("Video capture error: ", error.code);
};
// Put video listeners into place
if(navigator.getUserMedia) { // Standard
navigator.getUserMedia(videoObj, function(stream) {
video.src = stream;
video.play();
}, errBack);
} else if(navigator.webkitGetUserMedia) { // WebKit-prefixed
navigator.webkitGetUserMedia(videoObj, function(stream){
video.src = window.webkitURL.createObjectURL(stream);
video.play();
}, errBack);
} else if(navigator.mozGetUserMedia) { // WebKit-prefixed
navigator.mozGetUserMedia(videoObj, function(stream){
video.src = window.URL.createObjectURL(stream);
video.play();
}, errBack);
}
// Trigger photo take
document.getElementById("snap").addEventListener("click", function() {
context.drawImage(video, 0, 0, 640, 480);
});
}, false);
</script>
That example can only connect to and select one camera. I want to select and view both of my cameras at the same time. Any suggestions or solutions? A JSFiddle would also be appreciated.
You can create two different streams, one for each camera, and show them simultaneously in two <video> tags.
The list of available devices is available using navigator.mediaDevices.enumerateDevices(). After filtering the resulting list for only videoinputs, you have access to the deviceIds without needing permission from the user.
With getUserMedia you can then request a stream from the camera with id camera1Id using
navigator.mediaDevices.getUserMedia({
    video: {
        deviceId: { exact: camera1Id }
    }
});
The resulting stream can be fed into a <video> (referenced here by vid) by calling vid.srcObject = stream.
I have done this for two streams from two webcams simultaneously.
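A minimal sketch of that approach (the video1/video2 element IDs are assumptions):
// enumerate the cameras, then open one stream per camera and attach
// each stream to its own <video> element
async function showTwoCameras() {
    const devices = await navigator.mediaDevices.enumerateDevices();
    const cameras = devices.filter((d) => d.kind === 'videoinput');
    if (cameras.length < 2) {
        throw new Error('Fewer than two cameras found');
    }
    for (const [i, camera] of cameras.slice(0, 2).entries()) {
        const stream = await navigator.mediaDevices.getUserMedia({
            video: { deviceId: { exact: camera.deviceId } }
        });
        const vid = document.getElementById('video' + (i + 1)); // assumed IDs
        vid.srcObject = stream;
        await vid.play();
    }
}
Note that the second getUserMedia() call will reject (typically with a NotReadableError) if the OS or another application has an exclusive hold on that camera.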
You cannot access two cameras simultaneously. The API would suggest otherwise, but something underlying seems to prevent it from working as expected. You can verify this by opening https://simpl.info/getusermedia/sources/ or http://googlechrome.github.io/webrtc/samples/web/content/getusermedia-source/ in two completely separate windows: despite being able to select two streams, only one is active at a time. If you pick the same one in both windows, it shows in both places.
The only workaround I found was to flip-flop between the two streams and draw the video to a canvas. Doing this I was able to capture at around 1 fps. Unfortunately the camera resets between frames, and on one of my cameras I had to add a delay to let the auto white balance kick in and get a decent image.
function webcam() {
    if (!navigator.getUserMedia) {
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
    }
    if (!navigator.getUserMedia) {
        return alert('getUserMedia not supported in this browser.');
    }
    var canvas = document.getElementById('canvas');
    var context = canvas.getContext('2d');
    var audioSource;
    var cw = Math.floor(canvas.clientWidth / 2);
    var ch = Math.floor(canvas.clientHeight / 2);
    //canvas.width = cw;
    //canvas.height = ch;
    // off-DOM video player
    var video = document.createElement("video");
    video.autoplay = "autoplay";
    video.addEventListener('playing', function() {
        // delay for settling...
        setTimeout(draw, 1000, this, context, (currentSource * canvas.clientWidth / 2), cw, ch);
    }, false);
    function captureVideo() {
        console.log("Capturing " + currentSource, videosources[currentSource]);
        var mediaOptions = {
            audio: {
                optional: [{ sourceId: audioSource }]
            },
            video: {
                optional: [
                    { sourceId: videosources[currentSource].id }
                ]
            }
        };
        navigator.getUserMedia(mediaOptions, success, errorCallback);
    }
    var currentSource = 0;
    var videosources = [];
    var lastStream;
    function errorCallback(error) {
        console.log("navigator.getUserMedia error: ", error);
    }
    function success(stream) {
        console.log("the stream" + currentSource, stream);
        video.src = window.URL.createObjectURL(stream);
        video.play();
        lastStream = stream;
    }
    function next() {
        if (lastStream) {
            lastStream.stop();
        }
        video.src = "";
        if (currentSource < videosources.length - 1) {
            currentSource += 1;
        } else {
            currentSource = 0;
        }
        captureVideo();
    }
    function draw(v, c, l, w, h) {
        if (v.paused || v.ended) return false;
        console.log("drawing", l);
        c.drawImage(v, l, 0, w, h);
        setTimeout(next, 500);
    }
    MediaStreamTrack.getSources(function (sourceInfos) {
        for (var i = 0; i != sourceInfos.length; ++i) {
            var sourceInfo = sourceInfos[i];
            if (sourceInfo.kind === 'audio') {
                console.log(sourceInfo.id, sourceInfo.label || 'microphone');
                audioSource = sourceInfo.id;
            } else if (sourceInfo.kind === 'video') {
                console.log(sourceInfo.id, sourceInfo.facing, sourceInfo.label || 'camera');
                videosources.push(sourceInfo);
            } else {
                console.log('Some other kind of source: ', sourceInfo);
            }
        }
        console.log("sources", videosources);
        next();
    });
}

ZXing.NET decode PDF417 Barcode from HTML5 Video

I am trying to use jQuery/JavaScript with ZXing.NET to decode a PDF417 barcode from a video source.
Here is my HTML:
<video id="video" width="800" height="800"></video>
<canvas id="canvas" width="800" height="800"></canvas>
And the jQuery/JavaScript for the camera, plus the code that calls a .NET method to decode the barcode:
var video = document.getElementById('video');
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    const hdConstraints = {
        video: { width: { min: 1280 }, height: { min: 720 } }
    };
    navigator.mediaDevices.getUserMedia({ video: true }).then(function (stream) {
        video.srcObject = stream;
        video.play();
    });
}
$("#video").on("playing", function () {
    setInterval(function () { scanBarcode() }, 500);
});
function scanBarcode() {
    var video = document.getElementById('video');
    var canvas = document.getElementById('canvas');
    var canvas_context = canvas.getContext('2d');
    canvas_context.drawImage(video, 0, 0, 640, 480);
    var image = document.getElementById("canvas").toDataURL("image/png");
    image = image.replace('data:image/png;base64,', '');
    $.post("Home/OnScan", { imageData: image }, function (data, status) {
        console.log(data);
    });
}
As you can see, I am grabbing the image from the canvas and passing it to my .NET method.
And here is the .NET method that decodes the PDF417 barcode:
public JsonResult OnScan(string imageData)
{
    BitmapImage bitmapImage = new BitmapImage();
    byte[] byteBuffer = Convert.FromBase64String(imageData);
    Bitmap bmp;
    using (var ms = new MemoryStream(byteBuffer))
    {
        bmp = new Bitmap(ms);
    }
    BarcodeReader reader = new BarcodeReader();
    DecodingOptions options = new DecodingOptions
    {
        TryHarder = true,
        PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.PDF_417 }
    };
    reader.Options = options;
    var result = reader.Decode(bmp);
    return Json(result.Text, JsonRequestBehavior.AllowGet);
}
Now this still does not work, but I remembered that when I first did this in Xamarin.Forms it also did not work until I added the CameraResolutionSelector option:
var options = new MobileBarcodeScanningOptions
{
    TryHarder = true,
    CameraResolutionSelector = HandleCameraResolutionSelectorDelegate,
    PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.PDF_417 },
};
Here is the HandleCameraResolutionSelectorDelegate method:
public CameraResolution HandleCameraResolutionSelectorDelegate(List<CameraResolution> availableResolutions)
{
    //Don't know if this will ever be null or empty
    if (availableResolutions == null || availableResolutions.Count < 1)
        return new CameraResolution() { Width = 800, Height = 600 };
    //Debugging revealed that the last element in the list
    //expresses the highest resolution. This could probably be more thorough.
    return availableResolutions[availableResolutions.Count - 1];
}
So I am starting to think it is the resolution of the camera that is causing my barcode not to scan. On another note, when I change BarcodeFormat to QR_CODE and scan a QR code it works, but not with a PDF417 barcode. What am I doing wrong?
I have seen instances like the one in this issue where, even with an apparently good image reconstruction, ZXing can't decode as expected, and I'm not competent to figure out why.
Try setting PureBarcode = true; that should resolve the issue:
DecodingOptions options = new DecodingOptions
{
    TryHarder = true,
    PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.PDF_417 },
    PureBarcode = true,
    AutoRotate = true,
    TryInverted = true,
    CameraResolutionSelector = HandleCameraResolutionSelectorDelegate
};
CameraResolution HandleCameraResolutionSelectorDelegate(List<CameraResolution> availableResolutions)
{
    if (availableResolutions == null || availableResolutions.Count < 1)
        return new CameraResolution() { Width = 800, Height = 600 };
    return availableResolutions[availableResolutions.Count - 1];
}

Javascript: Saving getUserMedia() images

I want to draw the user's webcam feed onto a canvas, and then save an image from that canvas as a blob, ideally capturing one image every second.
Unfortunately, I get the same image saved 5 times, rather than 5 different images. My code is fairly straightforward, but I suspect the issue is with my takeASnap() function. What is going wrong?
Edit: This is observed on Safari on iPad/iPhone, but not in desktop chrome.
var NUM_IMAGES = 5;
const vid = document.querySelector('video');
navigator.mediaDevices.getUserMedia({ video: true }) // request cam
    .then(stream => {
        vid.srcObject = stream; // don't use createObjectURL(MediaStream)
        return vid.play(); // returns a Promise
    })
    .then(() => { // enable the button
        const btn = document.getElementById('download-button');
        btn.disabled = false;
        btn.onclick = e => {
            imageId = 0;
            userId = Math.floor(Math.random() * 10000);
            recursiveDelay(kickOff, NUM_IMAGES, 1000)
        };
    });
function kickOff() {
    takeASnap().then(saveBlob); // saveBlob out of scope of this question.
}
function recursiveDelay(functionToCall, executionsNumber, timeoutInMilliseconds) {
    if (executionsNumber) { // exit condition
        functionToCall(); // external function execution
        setTimeout(
            () => {
                recursiveDelay(functionToCall, executionsNumber - 1, timeoutInMilliseconds); // recursive call
            }, timeoutInMilliseconds);
    }
}
function takeASnap() {
    const canvas = document.createElement('canvas'); // create a canvas
    const ctx = canvas.getContext('2d'); // get its context
    canvas.width = vid.videoWidth; // set its size to the one of the video
    canvas.height = vid.videoHeight;
    ctx.drawImage(vid, 0, 0); // draw the video
    return new Promise((res, rej) => {
        canvas.toBlob(res, 'image/jpeg'); // request a Blob from the canvas
    });
}

Converting image taken from camera to canvas has gray on the side

I'm using vanilla JavaScript to capture images from the camera onto a canvas. It's working fine in Chrome on Android, but not in Safari on iOS. As seen in the screenshot, there is a gray area on the image. Is there something I need to do to keep that from being rendered?
JavaScript for the camera
navigator.mediaDevices.getUserMedia({
    'audio': false,
    'video': {
        facingMode: 'environment'
    }
}).then(stream => {
    this.VIDEO.srcObject = stream;
    this.VIDEO.addEventListener('loadeddata', async () => {
        this.VIDEO.width = this.VIDEO.videoWidth;
        this.VIDEO.height = this.VIDEO.videoHeight;
        this.CANVAS.width = this.VIDEO.videoWidth;
        this.CANVAS.height = this.VIDEO.videoHeight;
        resolve();
    }, false);
}).catch(error => {
    console.error(error);
    self.MESSAGE.textContent = error;
});
JavaScript for drawing on the canvas and the preview window
self.CONTEXT.drawImage(self.VIDEO, 0, 0, self.VIDEO.width, self.VIDEO.height);
let imgDataURL = self.CANVAS.toDataURL('image/png');
self.PLANT.src = imgDataURL;
self.POLAROID.style.backgroundImage = `url(${imgDataURL})`;
