I need the ability to take a picture with a desktop camera or a mobile phone camera from the localhost:3000/user-camera route component of my web application. Please don't suggest any native solutions, because I'm not working on a mobile app.
I have tried the react-camera and react-webcam packages, but nothing works.
https://www.npmjs.com/package/react-webcam
import React from 'react'
import Webcam from 'react-webcam'
const videoConstraints = {
width: 1280,
height: 720,
facingMode: "user"
};
const WebcamCapture = () => {
const webcamRef = React.useRef(null);
const capture = React.useCallback(
() => {
const imageSrc = webcamRef.current.getScreenshot();
},
[webcamRef]
);
return (
<>
<Webcam
audio={false}
height={720}
ref={webcamRef}
screenshotFormat="image/jpeg"
width={1280}
videoConstraints={videoConstraints}
/>
<button onClick={capture}>Capture photo</button>
</>
);
};
So, is there a way to do this with plain JavaScript, maybe using navigator, or is there an npm package that works with React? Does anyone have experience with this?
Thanks
Try this simple module I created on the fly just to test this interesting feature:
const camera = function () {
  let width = 0;
  let height = 0;

  // Create the <video> and <canvas> elements and append them to the page.
  const createObjects = function () {
    const video = document.createElement('video');
    video.id = 'video';
    video.width = width;
    video.height = height;
    video.autoplay = true;
    document.body.appendChild(video);

    const canvas = document.createElement('canvas');
    canvas.id = 'canvas';
    canvas.width = width;
    canvas.height = height;
    document.body.appendChild(canvas);
  };

  return {
    video: null,
    context: null,
    canvas: null,

    // Ask for camera access and start streaming into the <video> element.
    startCamera: function (w = 680, h = 480) {
      if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        width = w;
        height = h;
        createObjects();
        this.video = document.getElementById('video');
        this.canvas = document.getElementById('canvas');
        this.context = this.canvas.getContext('2d');
        (function (video) {
          navigator.mediaDevices.getUserMedia({ video: true }).then(function (stream) {
            video.srcObject = stream;
            video.play();
          });
        })(this.video);
      }
    },

    // Draw the current video frame onto the canvas.
    takeSnapshot: function () {
      this.context.drawImage(this.video, 0, 0, width, height);
    }
  };
}();

export default camera;
To use this module, first import it as a regular ES6 module:
import camera from './camera.js'
Then call:
camera.startCamera();
camera.takeSnapshot();
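For completeness, here is a minimal sketch (my own addition, not part of the original answer) of how the module might be wired into a React component; reading the snapshot back out via toDataURL on the module's canvas is an assumption about how you would consume it:
import { useEffect, useState } from 'react';
import camera from './camera.js';

// Minimal sketch: start the camera on mount and expose a snapshot button.
// Note that camera.startCamera() appends the <video> and <canvas> directly to
// document.body, outside of React's tree.
const CameraDemo = () => {
  const [snapshot, setSnapshot] = useState(null);

  useEffect(() => {
    camera.startCamera(640, 480);
  }, []);

  const handleSnapshot = () => {
    camera.takeSnapshot();
    // camera.canvas is the <canvas> the module draws into; toDataURL turns the frame into a base64 image
    setSnapshot(camera.canvas.toDataURL('image/png'));
  };

  return (
    <>
      <button onClick={handleSnapshot}>Take snapshot</button>
      {snapshot && <img src={snapshot} alt="snapshot" />}
    </>
  );
};

export default CameraDemo;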
OK. I managed to resolve my issue by using navigator and getting the media device from there. For anyone else trying to build something like this: be aware that Chrome doesn't allow camera access unless your web app is served over a secure HTTPS connection. In React (Create React App), for testing, start the dev server with set HTTPS=true&&npm start so the app runs over HTTPS; the browser will then show the camera prompt and you can render the video anywhere in your component's JSX.
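The answer above doesn't include the final code; a minimal sketch of the navigator.mediaDevices approach inside a React component might look like this (the component, ref, and handler names are my own):
import { useEffect, useRef } from "react";

// Minimal sketch: stream the user's camera into a <video> and grab frames via a hidden <canvas>.
const UserCamera = () => {
  const videoRef = useRef(null);
  const canvasRef = useRef(null);

  useEffect(() => {
    let stream;
    navigator.mediaDevices
      .getUserMedia({ video: { facingMode: "user" }, audio: false })
      .then((s) => {
        stream = s;
        videoRef.current.srcObject = s;
      })
      .catch((err) => console.error("Camera access denied:", err));
    // stop the camera when the component unmounts
    return () => {
      if (stream) stream.getTracks().forEach((t) => t.stop());
    };
  }, []);

  const takePicture = () => {
    const video = videoRef.current;
    const canvas = canvasRef.current;
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    canvas.getContext("2d").drawImage(video, 0, 0);
    const dataUrl = canvas.toDataURL("image/jpeg"); // base64 image, ready to display or upload
    console.log(dataUrl.slice(0, 50));
  };

  return (
    <>
      <video ref={videoRef} autoPlay playsInline muted />
      <canvas ref={canvasRef} style={{ display: "none" }} />
      <button onClick={takePicture}>Take picture</button>
    </>
  );
};

export default UserCamera;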
import { useCallback, useRef, useState } from "react";
import Webcam from "react-webcam";
const videoConstraints = {
width: 640,
height: 480,
facingMode: "user",
};
const WebcamCapture = () => {
const webcamRef = useRef<any>(null);
const [imgSrc, setImgSrc] = useState<any>(null);
const capture = useCallback(() => {
const imageSrc = webcamRef.current.getScreenshot();
setImgSrc(imageSrc);
}, [webcamRef, setImgSrc]);
return (
<>
<Webcam
audio={false}
ref={webcamRef}
screenshotFormat="image/jpeg"
videoConstraints={videoConstraints}
minScreenshotWidth={180}
minScreenshotHeight={180}
/>
<button onClick={capture}>Capture Photo</button>
{imgSrc && <img src={imgSrc} alt="img" />}
</>
);
};
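If the captured image needs to be uploaded rather than only displayed, the base64 data URL returned by getScreenshot() can be turned into a Blob first. A small sketch of one way to do it (the upload URL and field name are placeholders):
// imageSrc is the data URL from webcamRef.current.getScreenshot()
const uploadScreenshot = async (imageSrc) => {
  // fetch() accepts data URLs, which gives an easy data-URL-to-Blob conversion
  const blob = await (await fetch(imageSrc)).blob();
  const formData = new FormData();
  formData.append("photo", blob, "capture.jpg");
  await fetch("/api/upload", { method: "POST", body: formData });
};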
I'm just starting to learn React and I've been having issues with the react-image-crop package. Their documentation is not newbie-friendly, and I've only barely got it working at this point. My issue now is that the resulting cropped image is totally different from the user's selection. My guess is that it is caused by the scaling of the original image when you select the cropping area; I have limited the displayed size because some people might upload a large image. If you have any experience with this package, please let me know what I could do to fix this issue, thank you.
import "react-image-crop/dist/ReactCrop.css";
import React, { useState, useRef } from "react";
import ReactCrop from "react-image-crop";
export default function ImageUploader(props) {
const [imgSrc, setImgSrc] = useState();
const [crop, setCrop] = useState();
const [originalImg, setOrgImg] = useState(null);
const imgRef = useRef(null);
const handleImage = async (event) => {
setImgSrc(URL.createObjectURL(event.target.files[0]));
};
const getCroppedImg = async (image, pixelCrop) => {
try {
const canvas = document.createElement("canvas");
console.log(crop);
canvas.width = pixelCrop.width;
canvas.height = pixelCrop.height;
const ctx = canvas.getContext("2d");
// Here is where I think the problem is:
ctx.drawImage(
image,
pixelCrop.x,
pixelCrop.y,
pixelCrop.width,
pixelCrop.height,
0,
0,
pixelCrop.width,
pixelCrop.height
);
const base64Image = await canvas.toDataURL("image/jpeg", 1);
props.setCurrentImages(pushImage(props.images, base64Image));
console.log(base64Image);
console.log(props.images);
} catch (e) {
console.log(e);
}
};
function pushImage(array, newImage) {
if (array.length === 0) return [newImage];
return [...array, newImage];
}
function handleCropButton() {
getCroppedImg(imgRef.current, crop);
props.setUploadImg(false);
}
return (
<div style={{ height: "600px" }}>
<div>
<input type="file" onChange={handleImage} accept="image/*" />
<button onClick={handleCropButton}>Crop</button>
</div>
<ReactCrop
crop={crop}
aspect={1}
onChange={(c) => setCrop(c)}
onComplete={(crop) => setCrop(crop)}
>
<img
src={imgSrc}
alt=""
style={{ height: "600px" }}
onLoad={() => {
setOrgImg({
height: imgRef.current.clientHeight,
width: imgRef.current.clientWidth,
});
}}
ref={imgRef}
/>
</ReactCrop>
</div>
);
}
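No answer to this question appears in the thread. For reference, if the mismatch really is caused by the image being displayed at a different size than its natural size, a likely fix is to scale the crop values from rendered pixels to natural pixels before drawing; a rough sketch (my own, not tested against the sandbox above):
// Sketch only: convert crop coordinates from rendered pixels to natural-image pixels.
const getCroppedImgScaled = async (image, pixelCrop) => {
  const scaleX = image.naturalWidth / image.width;   // image.width is the rendered width
  const scaleY = image.naturalHeight / image.height; // image.height is the rendered height

  const canvas = document.createElement("canvas");
  canvas.width = pixelCrop.width * scaleX;
  canvas.height = pixelCrop.height * scaleY;
  const ctx = canvas.getContext("2d");

  ctx.drawImage(
    image,
    pixelCrop.x * scaleX,
    pixelCrop.y * scaleY,
    pixelCrop.width * scaleX,
    pixelCrop.height * scaleY,
    0,
    0,
    canvas.width,
    canvas.height
  );

  return canvas.toDataURL("image/jpeg", 1);
};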
I am using react-webcam to capture images and videos in my React app. I have implemented the Screenshot (via Ref) example:
const videoConstraints = {
width: 1280,
height: 720,
facingMode: "user"
};
const WebcamCapture = () => {
const webcamRef = React.useRef(null);
const capture = React.useCallback(
() => {
const imageSrc = webcamRef.current.getScreenshot();
},
[webcamRef]
);
return (
<>
<Webcam
audio={false}
height={720}
ref={webcamRef}
screenshotFormat="image/jpeg"
width={1280}
videoConstraints={videoConstraints}
/>
<button onClick={capture}>Capture photo</button>
</>
);
};
This works nicely; however, it seems to take a few seconds for the stream to start and the video to begin showing. I want to disable the button while this is happening. I have found there is a flag on the webcamRef that states whether it is still loading:
useEffect(() => {
if (webcamRef.current) {
const camStarted = webcamRef.current.state.hasUserMedia;
debugger;
}
}, [webcamRef]);
In the above useEffect, while the video is initialising, hasUserMedia is false; once it has loaded, it changes to true. This sounds like exactly what I need, but because it lives in a ref, the useEffect is not re-run when the value changes.
Is there any kind of neat trick I can use to detect when this value in the ref changes, or anything else that might give me the functionality I am after?
If you just want to disable the button until the camera has started streaming, you can keep a loading flag in state and clear it from the onUserMedia prop of Webcam.
const videoConstraints = {
width: 1280,
height: 720,
facingMode: "user"
};
const WebcamCapture = () => {
const [loadingCam, setLoadingCam] = React.useState(true);
const webcamRef = React.useRef(null);
const capture = React.useCallback(
() => {
const imageSrc = webcamRef.current.getScreenshot();
},
[webcamRef]
);
const handleUserMedia = () => {
setTimeout(() => {
// the timeout is optional; keep it if the preview still needs a moment after the stream starts
setLoadingCam(false);
}, 1000);
};
return (
<>
<Webcam
audio={false}
height={720}
ref={webcamRef}
screenshotFormat="image/jpeg"
width={1280}
videoConstraints={videoConstraints}
onUserMedia={handleUserMedia}
/>
<button disabled={loadingCam} onClick={capture}>Capture photo</button>
</>
);
};
I am new to React and I am trying to develop a web application with video-recording capabilities. This is my code:
App.js:
import ScreenRecording from './Recorder'
function App() {
return (
<div className="App">
<header className="App-header">
<ScreenRecording />
</header>
</div>
);
}
export default App;
Recorder.js:
import './Recorder.css'
import React from 'react';
import ReactDom from 'react-dom';
import axios from "axios";
const ScreenRecording = () => {
var strName = null;
var strEmail = null;
const video = document.getElementById('video');
async function captureMediaDevices(mediaConstraints = {
video: {
width: 1280,
height: 720
},
audio: {
echoCancellation: true,
noiseSuppression: true,
sampleRate: 44100
}
}) {
const stream = await navigator.mediaDevices.getUserMedia(mediaConstraints);
video.src = null;
video.srcObject = stream;
video.muted = true;
return stream;
}
let recorder = null;
var strFile = null;
var webcamblob = null;
function stopRecording() {
recorder.stream.getTracks().forEach(track => track.stop());
}
async function recordVideo() {
const stream = await captureMediaDevices();
video.src = null;
video.srcObject = stream;
video.muted = true;
video.autoplay = true;
recorder = new MediaRecorder(stream);
let chunks = [];
recorder.ondataavailable = event => {
if (event.data.size > 0) {
chunks.push(event.data);
}
}
recorder.onstop = () => {
const blob = new Blob(chunks, {
type: 'video/webm'
})
chunks = [];
webcamblob = blob;
const blobUrl = URL.createObjectURL(blob);
strFile = blobUrl;
}
recorder.start(200);
}
const previewRecording = () => {
video.srcObject = null;
video.src = strFile;
video.muted = false;
video.autoplay = true;
}
const uploadRecording = () => {
strName = document.getElementById("name").value;
strEmail = document.getElementById("email").value;
const formData = new FormData();
// Update the formData object
formData.append("file2upload", webcamblob);
formData.append("email", strEmail);
formData.append("name", strName);
// Request made to the backend api
// Send formData object
axios.post("https://xyz.php", formData);
cleardata();
alert("Upload success!");
};
const cleardata = () => {
URL.revokeObjectURL(strFile);
webcamblob = null;
}
return(
<center>
<div>
<button onClick={recordVideo}>Record video</button>
<button onClick={stopRecording}>Stop recording</button>
<button onClick={previewRecording}>Replay</button>
<button onClick={uploadRecording}>Upload and close</button>
</div>
</center>
)
}
function Video(){
return (<div className="Display">
<center>
<video id='video' className="Display-video" width="800" height="600" autoPlay muted></video>
</center>
</div>)
}
ReactDom.render(
<React.StrictMode>
<Video />
</React.StrictMode>,
document.getElementById('vid')
);
export default ScreenRecording;
The program was working as expected until recently. Currently it is not working: when I try to replay the recorded video using the "Replay" button, the browser console reports the error
net::ERR_REQUEST_RANGE_NOT_SATISFIABLE
and when the blob size is read, it is zero. Can someone please help me fix the issue?
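No answer to this question appears in the thread. As a point of reference only (not a diagnosis of the error above), here is a minimal sketch of recording and replaying with MediaRecorder in which the recorder, the chunks, and the blob URL are kept in React refs so they survive re-renders; all names are my own:
import { useRef } from "react";

// Minimal sketch: record the webcam, then replay the recording in the same <video>.
const MiniRecorder = () => {
  const videoRef = useRef(null);
  const recorderRef = useRef(null);
  const chunksRef = useRef([]);
  const blobUrlRef = useRef(null);

  const startRecording = async () => {
    const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
    videoRef.current.srcObject = stream;
    videoRef.current.muted = true;

    chunksRef.current = [];
    const recorder = new MediaRecorder(stream);
    recorder.ondataavailable = (e) => {
      if (e.data.size > 0) chunksRef.current.push(e.data);
    };
    recorder.onstop = () => {
      const blob = new Blob(chunksRef.current, { type: "video/webm" });
      blobUrlRef.current = URL.createObjectURL(blob);
      stream.getTracks().forEach((t) => t.stop());
    };
    recorder.start(200);
    recorderRef.current = recorder;
  };

  const stopRecording = () => recorderRef.current && recorderRef.current.stop();

  const replay = () => {
    videoRef.current.srcObject = null;
    videoRef.current.src = blobUrlRef.current;
    videoRef.current.muted = false;
    videoRef.current.play();
  };

  return (
    <>
      <video ref={videoRef} width="800" height="600" autoPlay />
      <button onClick={startRecording}>Record</button>
      <button onClick={stopRecording}>Stop</button>
      <button onClick={replay}>Replay</button>
    </>
  );
};

export default MiniRecorder;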
I've implemented face-api.js in my React project, and it detects a single face from a picture with detectSingleFace.
Now I want to move one step further: I want face-api to auto-crop the face after detection, so I can store it on a server, in state, or in local storage. Is there any way to do so?
Here is a screenshot example of what I want to achieve: one side is the picture, the other side is the auto-cropped face (which I want to implement).
Here is my live code link in codesandbox
Below is my code module for face-api
PhotoFaceDetection.js
import React, { useState, useEffect, useRef } from "react";
import * as faceapi from "face-api.js";
import Img from "./assets/mFace.jpg";
import "./styles.css";
const PhotoFaceDetection = () => {
const [initializing, setInitializing] = useState(false);
const [image, setImage] = useState(Img);
const canvasRef = useRef();
const imageRef = useRef();
// I want to store cropped image in this state
const [pic, setPic] = useState();
useEffect(() => {
const loadModels = async () => {
setInitializing(true);
Promise.all([
// models getting from public/model directory
faceapi.nets.tinyFaceDetector.load("/models"),
faceapi.nets.faceLandmark68Net.load("/models"),
faceapi.nets.faceRecognitionNet.load("/models"),
faceapi.nets.faceExpressionNet.load("/models")
])
.then(() => console.log("success", "/models"))
.then(handleImageClick)
.catch((e) => console.error(e));
};
loadModels();
}, []);
const handleImageClick = async () => {
if (initializing) {
setInitializing(false);
}
canvasRef.current.innerHTML = faceapi.createCanvasFromMedia(
imageRef.current
);
const displaySize = {
width: 500,
height: 350
};
faceapi.matchDimensions(canvasRef.current, displaySize);
const detections = await faceapi.detectSingleFace(
imageRef.current,
new faceapi.TinyFaceDetectorOptions()
);
const resizeDetections = faceapi.resizeResults(detections, displaySize);
canvasRef.current
.getContext("2d")
.clearRect(0, 0, displaySize.width, displaySize.height);
faceapi.draw.drawDetections(canvasRef.current, resizeDetections);
console.log(
`Width ${detections.box._width} and Height ${detections.box._height}`
);
setPic(detections);
console.log(detections);
};
return (
<div className="App">
<span>{initializing ? "Initializing" : "Ready"}</span>
<div className="display-flex justify-content-center">
<img ref={imageRef} src={image} alt="face" crossOrigin="anonymous" />
<canvas ref={canvasRef} className="position-absolute" />
</div>
</div>
);
};
export default PhotoFaceDetection;
After doing a lot of R&D I figured it out. For future readers who may face the same issue, here is the guide.
I created another function that takes the original image reference and the bounding box dimensions, i.e. width and height. In it I used the faceapi extractFaces method to extract the face, and then, with the help of the toDataURL method, converted it to a base64 string that can be rendered into any image src or stored anywhere.
This is the function I was describing above:
async function extractFaceFromBox(imageRef, box) {
  // the region to extract is the bounding box returned by the detector
  const regionsToExtract = [
    new faceapi.Rect(box.x, box.y, box.width, box.height)
  ];
  // extractFaces returns an array of canvases, one per region
  let faceImages = await faceapi.extractFaces(imageRef, regionsToExtract);
  if (faceImages.length === 0) {
    console.log("No face found");
  } else {
    faceImages.forEach((cnv) => {
      // convert the cropped face canvas to a base64 data URL and keep it in state
      setPic(cnv.toDataURL());
    });
    console.log("face found");
  }
}
Then I call the above function inside my main function, where I use the faceapi tiny face detector model:
extractFaceFromBox(imageRef.current, detections.box);
You can also visit the live code here to check the complete implementation.
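Since pic ends up holding a base64 data URL, it can be rendered directly as an image source, for example:
{pic && <img src={pic} alt="cropped face" />}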
I am trying to crop an image in ReactJS without using any library. First the user will upload an image, then the user will crop it using raw JS, and the cropped result will replace the uploaded image. How can I do this without using any library? I want to build a crop component something like this in ReactJS: reference. For me the HTML canvas approach does not seem to be working; how can I do it using an HTML canvas? This is the sandbox link: sandbox
This is the code I wrote for showing the image:
import React, { Component } from "react";
class ReactCrop extends Component {
constructor(props) {
super(props);
this.state = {
file: null,
};
this.handleChange = this.handleChange.bind(this)
}
handleChange(event) {
this.setState({
file: URL.createObjectURL(event.target.files[0])
})
console.log("ok",this.state.file)
}
saveCroped() {
const width = 500;
const height = 300;
const fileName = e.target.files[0].name;
const reader = new FileReader();
reader.readAsDataURL(e.target.files[0]);
reader.onload = event => {
const img = new Image();
img.src = event.target.result;
img.onload = () => {
const elem = document.createElement('canvas');
elem.width = width;
elem.height = height;
const ctx = elem.getContext('2d');
// img.width and img.height will contain the original dimensions
ctx.drawImage(img, 0, 0, width, height);
ctx.canvas.toBlob((blob) => {
const file = new File([blob], fileName, {
type: 'image/jpeg',
lastModified: Date.now()
});
}, 'image/jpeg', 1);
},
reader.onerror = error => console.log(error);
};
}
componentDidMount() {
console.log('componentDidMount colling ...');
}
render() {
return (
<div className="app">
<div style={{width:'450px', margin:'0 auto'}}>
<label htmlFor="file" style={{paddingRight:'80px',marginLeft:'-10px'}}>
<input id="files" type="file" style={{border:'0px',paddingBottom:'12px'}} key={this.state.inputKey}
onChange={this.handleChange} type="file"/>
</label>
{this.state.file === null?
<p></p>
: <img style={{width:'362px',height:'365px',paddingTop:'10px',marginRight:'85px'}} onClick={this.rotate} src={this.state.file}/>
}
</div>
<button type="button" onClick={() => {this.saveCroped()}}>crop</button>
</div>
);
}
}
export default ReactCrop;
Interesting question. Here is a working example (it won't download in the online sandbox, but it works fine when hosted independently, e.g. on localhost; see the console messages): sandbox
import React, { useRef, useEffect, useState } from "react";
const Crop = function(){
const canvas = useRef();
const file = useRef();
const preview = useRef();
const [imageParams, setImageParams] = useState();
const [context, setContext] = useState();
// Initialize
useEffect(function(){
file.current.onchange = onNewImage;
setContext( canvas.current.getContext('2d') );
},[])
// Draw image on imageParams Change
useEffect(function(){
if(!imageParams) return;
context.drawImage( preview.current, 0, 0, imageParams.width*(700/imageParams.height), 700 )
}, [imageParams])
// Get width and height, replace the preview, set params
function onNewImage(){
const newImgUrl = URL.createObjectURL( file.current.files[0] );
const newImage = document.createElement('img');
newImage.setAttribute('src', newImgUrl);
newImage.onload = function(){
const {width, height} = newImage;
const type = file.current.files[0].type
newImage.setAttribute('width', '50px');
preview.current.parentElement.append(newImage);
preview.current.remove();
preview.current = newImage;
setImageParams({ width, height, type });
}
}
// Save image on click
function handleSave(){
canvas.current.toBlob(function(blob){
const anchor = document.createElement('a');
anchor.innerHTML = 'download';
anchor.download = "my_file."+imageParams.type.replace(/^.{1,}\//,'');
anchor.href = (window.webkitURL || window.URL).createObjectURL(blob);
anchor.dataset.downloadurl = [imageParams.type, anchor.download, anchor.href].join(':');
anchor.click();
preview.current.parentElement.append(anchor);
}, imageParams.type);
}
return <>
<div className="input">
<input
ref={file}
type="file"
accept="image/png, image/jpeg" />
<div
style={{width:"100px", display: 'inline-block'}}>
<img
ref={preview}
/>
</div>
<button
onClick={handleSave} >save image</button>
</div>
<div class="canvas">
<canvas
ref={canvas}
width='700px'
height='700px' />
</div>
</>;
}
export default Crop;
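Note that this example scales the whole image onto the canvas rather than cropping a user-selected region. To crop an actual region without a library, the nine-argument form of drawImage can be used instead of the call above; a rough sketch, where sx, sy, sw and sh are assumed to be the selected rectangle in the source image's own pixels:
// draw only the (sx, sy, sw, sh) rectangle of the source image onto a sw x sh canvas
canvas.current.width = sw;
canvas.current.height = sh;
context.drawImage(preview.current, sx, sy, sw, sh, 0, 0, sw, sh);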