I am trying to convert the following python code using opencv for image processing into javascript using opencvjs, but seem to be missing something as the output is not quite the same.
Would be great if someone could help me out here as the docs for opencvjs are few and far between.
python code:
img = cv.imread(inPath)
frame_HSV = cv.cvtColor(img, cv.COLOR_BGR2HSV)
frame_mask = cv.inRange(frame_HSV, (30, 50, 0), (80, 255, 255))
frame_mask = cv.bitwise_not(frame_mask)
frame_result = cv.bitwise_and(img, img, mask = frame_mask)
cv.imwrite(outPath, frame_result)
My javascript code:
// Read the input canvas. NOTE: opencv.js imread returns RGBA pixel data,
// unlike Python's cv.imread which returns BGR.
const src = cv.imread('canvasInput');
const dst = new cv.Mat();
const hsv = new cv.Mat();
const hsvMask = new cv.Mat();
const hsvMaskInv = new cv.Mat();
// BUG (the subject of this question): since src is RGBA here,
// COLOR_BGR2HSV treats the red channel as blue and vice versa.
// COLOR_RGB2HSV is the correct conversion code in opencv.js.
cv.cvtColor(src, hsv, cv.COLOR_BGR2HSV, 0);
// inRange in opencv.js takes full Mats as bounds (no scalar-tuple overload),
// so build constant Mats matching hsv's size and type.
const low = new cv.Mat(hsv.rows, hsv.cols, hsv.type(), [30, 50, 0, 0]);
const high = new cv.Mat(hsv.rows, hsv.cols, hsv.type(), [80, 255, 255, 255]);
// Mask of pixels inside the HSV range, then invert it so the selected
// range is removed rather than kept.
cv.inRange(hsv, low, high, hsvMask);
cv.bitwise_not(hsvMask, hsvMaskInv);
// Copy src to dst only where the inverted mask is non-zero.
cv.bitwise_and(src, src, dst, hsvMaskInv);
cv.imshow('canvasOutput', dst);
// opencv.js Mats are Emscripten heap objects: free them manually.
src.delete();
dst.delete();
low.delete();
high.delete();
hsv.delete();
hsvMask.delete();
hsvMaskInv.delete();
The original image:
What python outputs:
What my javascript outputs:
TL;DR
Try replacing COLOR_BGR2HSV with cv.COLOR_RGB2HSV.
Implementation
Comparing opencv-python 4.5.3.56 with opencv.js 3.4.0, the image being read had the red and blue channels swapped (Python's imread returns BGR, while opencv.js imread returns RGBA).
A direct translation of your python code would look like this:
// img = cv.imread(inPath)
const source = cv.imread(imgElement);

// frame_HSV = cv.cvtColor(img, cv.COLOR_BGR2HSV)
// opencv.js reads RGBA, so COLOR_RGB2HSV is the equivalent of
// Python's COLOR_BGR2HSV on a BGR image.
const hsvFrame = new cv.Mat();
cv.cvtColor(source, hsvFrame, cv.COLOR_RGB2HSV, 0);

// frame_mask = cv.inRange(frame_HSV, (30, 50, 0), (80, 255, 255))
// Bounds must be full Mats in opencv.js; build them from constant scalars.
const lowerBound = new cv.Mat(hsvFrame.rows, hsvFrame.cols, hsvFrame.type(), [30, 50, 0, 0]);
const upperBound = new cv.Mat(hsvFrame.rows, hsvFrame.cols, hsvFrame.type(), [80, 255, 255, 255]);
const rangeMask = new cv.Mat();
cv.inRange(hsvFrame, lowerBound, upperBound, rangeMask);

// frame_mask = cv.bitwise_not(frame_mask)
cv.bitwise_not(rangeMask, rangeMask);

// frame_result = cv.bitwise_and(img, img, mask = frame_mask)
const maskedResult = new cv.Mat();
cv.bitwise_and(source, source, maskedResult, rangeMask);

// cv.imwrite(outPath, frame_result)
cv.imshow('canvasOutput', maskedResult);

// Release every Mat explicitly (manual memory management in opencv.js).
source.delete(); hsvFrame.delete(); rangeMask.delete();
lowerBound.delete(); upperBound.delete(); maskedResult.delete();
Debug Method
You could try logging the images as matrices, so the swapped channels would be easily spotted, but I resorted to furas' suggestion above: display the results after every modification. Here are the results of your Python code and your JavaScript code, respectively:
Related
Is it possible to achieve the same Python operation in Javascript using TensorflowJs?
from tensorflow.keras.preprocessing.image import ImageDataGenerator
test_datagen = ImageDataGenerator(rescale=1./255) # NOTE: Re-scaling operation as part of the pre-processing step
I am trying to run a custom model in the browser, but it requires this preprocessing step before I can feed it to tensorflowjs. It requires I rescale the image by a factor of 1/255.
Any idea how I could achieve this?
I can't find anything with tensorflowjs, so decided to try with opencvjs, but I am not too sure this has the same effect:
// NOTE(review): this function confuses "rescale" (divide pixel VALUES by
// 255) with "resize" (change image DIMENSIONS); cv.resize cannot rescale
// pixel values — use convertTo with a scale factor for that.
function rescaleImg(img, canvasId) {
const src = cv.imread(img);
let dst = new cv.Mat();
// This computes a target *size* of rows/255/100 x cols/255/100 pixels —
// a tiny fraction of a pixel for any normal image, so dsize is
// effectively (0, 0).
let dsize = new cv.Size(
parseFloat((src.rows * (1 / 255)) / 100),
parseFloat((src.cols * (1 / 255)) / 100)
);
// fx/fy of 1/255 shrink the image geometrically; they do not touch
// the pixel intensity range.
cv.resize(src, dst, dsize, 1 / 255, 1 / 255, cv.INTER_AREA);
cv.imshow(canvasId, dst);
src.delete();
dst.delete();
}
I then pass the image to tensorflowjs like:
// Target input shape expected by the custom model.
const shapeX = 150;
const shapeY = 150;
// Draw the (supposedly rescaled) image onto the canvas identified by `id`.
rescaleImg(image, id);
const canvas = document.getElementById(id);
// Build the input tensor from the canvas pixels, resize to the model's
// expected spatial dims, and add a batch dimension.
// NOTE(review): fromPixels yields integer pixel values 0-255; the 1/255
// value rescaling could be done here with .div(255) instead of in OpenCV.
tensor = tf.browser
.fromPixels(canvas)
.resizeNearestNeighbor([shapeX, shapeY])
.expandDims(0)
.toFloat();
}
const prediction = await model.predict(tensor).data();
"rescale" and "resize" are two different operations.
"rescale" modifies the pixel value, while "resize" modify the image size (yeah, also pixel value because of interpolation, but it's just a side effect).
To "rescale" the image in OpenCV you use convertTo with the optional scaling factor.
Also, when you rescale, you need to be sure to use the correct underlying data type to hold the new values.
Something like this should work:
// Load the image, then rescale its pixel VALUES (not its size) by 1/255.
const source = cv.imread(img);
const scaled = new cv.Mat();
// convertTo computes scaled = source * (1/255); CV_32F is needed so the
// resulting fractional values in [0, 1] are representable.
source.convertTo(scaled, cv.CV_32F, 1 / 255);
cv.imshow(canvasId, scaled);
So far I have settled on the following, using opencvJs:
// Draw `img` onto the canvas `canvasId` at its original size.
// NOTE(review): despite its name, this does NOT rescale pixel values by
// 1/255 — when dsize is non-zero, cv.resize ignores fx/fy entirely, so the
// call below is a same-size copy. To divide pixel values by 255, use
// src.convertTo(dst, cv.CV_32F, 1 / 255) instead.
function rescaleImg(img, canvasId) {
  try {
    const src = cv.imread(img);
    let dst = new cv.Mat();
    // FIX: cv.Size takes (width, height) = (cols, rows). The original
    // passed (rows, cols), which distorts any non-square image.
    let dsize = new cv.Size(src.cols, src.rows);
    cv.resize(src, dst, dsize, 1 / 255, 1 / 255, cv.INTER_AREA);
    cv.imshow(canvasId, dst);
    src.delete();
    dst.delete();
  } catch (e) {
    console.log("Error running resize ", e);
    throw e;
  }
}
I have this function that simply crops the background from a picture of a coin and mostly works.
but for some reason "cv.fitEllipse" gives me an uncaught exception with this image:
bad image
but works fine with this image:
good image
I'm at a loss. any ideas? The size of the image that doesn't work is larger but that is the only thing I can figure out.
any ideas?
// Crop the background around a coin: threshold, clean up with morphology,
// find the largest contour, fit an ellipse to it, and mask everything
// outside the ellipse. Displays the result back on the 'imageChangeup'
// canvas.
Ellipse_img = function(el) {
  let src = cv.imread('imageChangeup');
  let gray = new cv.Mat();
  cv.cvtColor(src, gray, cv.COLOR_BGR2GRAY);
  let dst = new cv.Mat();
  // FIX: removed the Python-ism "[1]" — cv.threshold writes into dst.
  cv.threshold(gray, dst, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU);
  // apply morphology open (remove specks) and close (fill holes)
  let morph = new cv.Mat();
  // FIX: `kernel` was an implicit global and both kernels leaked.
  let kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(5, 5));
  cv.morphologyEx(dst, morph, cv.MORPH_OPEN, kernel);
  kernel.delete();
  kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(21, 21));
  cv.morphologyEx(morph, morph, cv.MORPH_CLOSE, kernel);
  kernel.delete();
  // find all the contours
  let contours = new cv.MatVector();
  let hierarchy = new cv.Mat();
  cv.findContours(morph, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
  // find the largest contour by area
  let area_max = 0;
  let cnt_max = null;
  for (let i = 0; i < contours.size(); i++) {
    let cnt = contours.get(i);
    let area = cv.contourArea(cnt, false);
    if (area >= area_max) {
      area_max = area;
      cnt_max = cnt;
    }
  }
  // FIX: cv.fitEllipse throws when the contour has fewer than 5 points —
  // the failing image produced a 4-point contour (Int32Array(8) = 4 x,y
  // pairs). Guard instead of crashing; the thresholded image is shown
  // unmasked in that case.
  if (cnt_max !== null && cnt_max.rows >= 5) {
    let rotatedRect = cv.fitEllipse(cnt_max);
    let ellipseColor = new cv.Scalar(255, 255, 255, 255);
    cv.ellipse1(src, rotatedRect, ellipseColor, 3, cv.LINE_8);
    // FIX: the mask must start all-ZERO. Mat.ones made every pixel
    // non-zero, so bitwise_and's mask passed the whole image and nothing
    // was actually cropped.
    let mask = cv.Mat.zeros(src.size(), cv.CV_8UC3);
    cv.ellipse1(mask, rotatedRect, ellipseColor, -1, cv.LINE_8);
    cv.cvtColor(mask, mask, cv.COLOR_BGR2GRAY);
    cv.bitwise_and(src, src, dst, mask);
    mask.delete();
  }
  cv.imshow('imageChangeup', dst);
  src.delete();
  dst.delete();
  gray.delete();
  morph.delete();
  contours.delete();
  hierarchy.delete();
};
here is high level of cnt_max for good and bad -- must not be finding the circle but why?
cnt_max: data32S: Int32Array(2426)
cnt_max: data32S: Int32Array(8)
I'm far from knowledgeable enough to know why... but.. this had to do with the white around the object in the 'good' picture and the THRESH used.
This worked if I changed thresholds based on background
cv.threshold(gray, dst, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1]; //for light backgrounds
cv.threshold(gray, dst, 0, 255, cv.THRESH_OTSU)[1]; //for dark backgrounds
so I changed to adaptiveThreshold and it seems to work for all scenarios
cv.adaptiveThreshold(gray, dst, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV,11,2)
I'm trying to use facial recognition via OpenCV.js, however when I call on the detectMultiScale() method of the CascadeClassifier object I receive the error:
Uncaught 6446128 - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.
The problem is I'm leveraging a hosted version of opencv.js directly from opencv.org - it's not a build version because I'm unable to build it myself, and therefore cannot follow the error's instructions.
I've followed an example from their GitHub here and adapted the code to suit my needs, as follows:
<html>
<head>
<!-- Hosted opencv.js build and its helper utilities (Utils class). -->
<script src="https://docs.opencv.org/master/opencv.js"></script>
<script src="https://docs.opencv.org/master/utils.js"></script>
</head>
<body>
<img id="test" src="image/with/face.jpg" alt=""/>
<canvas id="output"></canvas>
<script>
let face_cascade = new cv.CascadeClassifier();
// NOTE(review): CascadeClassifier.load() cannot fetch a URL — it reads
// from Emscripten's in-memory filesystem. The XML must first be written
// there (e.g. with utils.js createFileFromUrl), so this load silently
// fails, and detectMultiScale later throws on the unloaded classifier.
face_cascade.load("https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades_cuda/haarcascade_frontalface_default.xml");
function face_detector() {
let imgEl = document.getElementById("test");
let img = cv.imread(imgEl);
cv.imshow("output", img);
let src = cv.imread("output");
let gray = new cv.Mat();
let msize = new cv.Size(0,0);
// Cascades operate on grayscale; opencv.js imread yields RGBA.
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
// Throws here because the cascade was never actually loaded (see above).
face_cascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize); //Error occurs here
}
face_detector();
</script>
</body>
</html>
Anyone with experience with OpenCV.js and facial recognition that could help?
Following this thread:
The xml files are "pre-built" before loading them with the load function. To achieve this it's used the function createFileFromUrl from utils.js. After that we can finally load our classifier from file.
// Create the classifier and the helper object provided by utils.js.
const faceClassifier = new cv.CascadeClassifier();
const helper = new Utils('errorMessage');
// Path of the cascade XML inside Emscripten's virtual filesystem.
const cascadePath = 'haarcascade_frontalface_default.xml';
// Download the XML into the virtual filesystem ("pre-build" it); only
// then can load() succeed, so it runs inside the completion callback.
helper.createFileFromUrl(cascadePath, cascadePath, () => {
  faceClassifier.load(cascadePath);
});
Face Detection Other Example
TRY IT :
// Detect faces on the input canvas and draw a red rectangle around each.
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
// Cascades run on grayscale; opencv.js imread yields RGBA.
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
// load pre-trained classifier (must already exist in Emscripten's FS)
faceCascade.load('haarcascade_frontalface_default.xml');
// detect faces — try changing scaleFactor (1.05) and minNeighbors (0)
faceCascade.detectMultiScale(gray, faces, 1.05, 0);
// FIX: removed the unused `eyes` RectVector, the unused `msize`, and the
// roiGray/roiSrc regions that were created and immediately deleted.
for (let i = 0; i < faces.size(); ++i) {
  const face = faces.get(i);
  let point1 = new cv.Point(face.x, face.y);
  let point2 = new cv.Point(face.x + face.width, face.y + face.height);
  cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', src);
src.delete(); gray.delete(); faceCascade.delete(); faces.delete();
Try to change faceCascade.detectMultiScale parameters like given examples below:
faceCascade.detectMultiScale(gray, faces,1.05,0);
faceCascade.detectMultiScale(gray, faces,1.05,1);
faceCascade.detectMultiScale(gray, faces,2,0);
faceCascade.detectMultiScale(gray, faces,2,1);
faceCascade.detectMultiScale(gray, faces,3,0);
faceCascade.detectMultiScale(gray, faces,3,1);
faceCascade.detectMultiScale(gray, faces,4,0);
faceCascade.detectMultiScale(gray, faces,4,1);
The solution is
// Fetch the cascade XML into Emscripten's in-memory filesystem first;
// CascadeClassifier.load() cannot read from a URL directly, so all work
// happens in createFileFromUrl's completion callback.
let faceCascadeFile = 'haarcascade_frontalface_default.xml';
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
console.log('cascade ready to load.');
let src = cv.imread('imageInit');
let gray = new cv.Mat();
// Cascades operate on grayscale; opencv.js imread yields RGBA.
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
// Safe now: the file exists in the virtual filesystem.
faceCascade.load(faceCascadeFile);
// Size(0,0) means no min/max size constraint on detections.
let msize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
});
}
For full code and explanation use this link Face Detection with Javascript and OpenCV
or
Human Eye Detection using Javascript and OpenCV
I can create a mask in OPENCV C++ using cv::Mat::zeros and Rect.But i cannot find these features on OPENCV.js.How can i create a mask on OPENCV.js?
// 8x8 single-channel (CV_8U) mask, initialized to all zeros.
cv::Mat mask = cv::Mat::zeros(8, 8, CV_8U); // all 0
// Set the 4x4 sub-region whose top-left corner is (2,2) to 1;
// the Rect-indexed view shares storage with `mask`.
mask(Rect(2,2,4,4)) = 1;
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
// Rect(x, y, width, height) selects the region of interest.
let rect = new cv.Rect(100, 100, 200, 200);
// NOTE(review): roi() EXTRACTS a sub-image view; it does not build a
// zeros mask like the C++ snippet above. To replicate that, create
// cv.Mat.zeros and use setTo on a roi of the mask instead.
dst = src.roi(rect);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
Taken from here, specifically the Image ROI section
I am stumped. I can't get EaselJS' AlphaMaskFilter to work. I don't think I'm doing anything wrong but it doesn't show what I'm expecting. It should look like you're pointing a flashlight at a google billboard. http://jsfiddle.net/mLn8e/
var stage = new createjs.Stage("c");
// Radial-gradient circle: opaque white center fading to transparent —
// this cached canvas becomes the alpha mask ("flashlight" spot).
var mask = new createjs.Shape();
mask.graphics.beginRadialGradientFill(["rgba(255, 255, 255, 1)","rgba(255, 255, 255, 0)"], [0, 1], 0, 0, 0, 0, 0, 20).drawCircle(0, 0, 20);
// Cache so mask.cacheCanvas exists for AlphaMaskFilter below.
mask.cache(-20, -20, 40, 40);
mask.x = 100;
mask.y = 100;
var bg = new createjs.Shape();
bg.graphics.beginFill("#000000").rect(0, 0, 400, 400);
// Follow the mouse with the flashlight spot.
stage.addEventListener("stagemousemove", function(e) {
mask.x = e.stageX;
mask.y = e.stageY;
stage.update();
});
stage.addChild(bg, mask);
stage.update();
var img = new Image();
img.src = "https://www.google.nl/intl/en_ALL/images/logos/images_logo_lg.gif";
img.onload = function (e) {
var bmp = new createjs.Bitmap(e.target);
bmp.x = 0;
bmp.y = 0;
// NOTE(review): per the answer below, filters require the filter classes
// to be included separately AND the target to be cached: EaselJS only
// applies bmp.filters when bmp.cache(...) is called after assigning them
// (and updateCache() on each change) — missing here, so nothing shows.
var amf = new createjs.AlphaMaskFilter(mask.cacheCanvas);
bmp.filters = [amf];
stage.addChild(bmp);
stage.update();
};
The two most important lines are de var amf = ... line and the bmp.filters = [amf]; line, those two break the program.
Thanks in advance!
Filters (like createjs.AlphaMaskFilter) are not included in the main package of CreateJS and EaselJS. You have to include them separately. This information can be found in the docs: http://www.createjs.com/Docs/EaselJS/classes/Filter.html - I know, not very prominent, I had the same issue :)
And the 2nd thing: in the fiddle your var is called amff, but in the line below you try to use amf (lines 28 + 29 of the fiddle).
*edit: And what I also just noticed, you don't cache the bmp - in order for a filter to take effect, you have to cache it after applying the filter initially and then use updateCache() in the stagemousemove-listener. Here is an example for a similar use of what you are trying to do: https://github.com/CreateJS/EaselJS/blob/master/examples/AlphaMaskReveal.html