LookAt Matrix (Normalising Parallel Projection) - javascript

I am currently learning 3D computer graphics, specifically normalising a parallel projection into the canonical view volume (commonly known as the LookAt matrix). I am trying to implement it in pure JavaScript with the parameters below.
// Viewing parameters for the parallel-projection normalising transform.
var VRP = new Vertex(0,0,0);   // View Reference Point: origin of the view plane
var VPN = new Vertex(0,0,1);   // View Plane Normal: direction of the n-axis
var VUP = new Vertex(0,1,0);   // View UP vector: fixes the orientation of the v-axis
var PRP = new Vertex(8,8,100); // Projection Reference Point (the "eye"), in VRC coordinates
var Window = [-1,17,-1,17];    // View-plane window [umin, umax, vmin, vmax]
var F = 1, B = -1;             // Front and back clipping-plane distances along n
Now, here is my attempt. I first converted it to the canonical view volume.
NOTE: You can skip these steps and go directly to the code, and help me fix it so the cube moves towards the camera (the screen) instead of away from it.
1. Translate VRP to origin
// Step 1: T(-VRP) — translate the View Reference Point to the world origin.
// Row-major 4x4 matrix; the translation sits in the last column.
var TVRP = [];
TVRP[0] = [1, 0, 0, -VRP.x];
TVRP[1] = [0, 1, 0, -VRP.y];
TVRP[2] = [0, 0, 1, -VRP.z];
TVRP[3] = [0, 0, 0, 1];
2. Rotate VRC such that n-axis,u-axis and v-axis align with z-axis, x-axis, and y-axis in order
// Returns the unit view-plane normal: n = VPN / |VPN|.
// (calculateUnitVector/normalizeVector are defined elsewhere in the fiddle;
// presumably one computes |VPN| and the other divides by it — confirm there.)
function normalizeViewPlane(VPN) {
var unitVector = calculateUnitVector(VPN); //VPN/|VPN|
return normalizeVector(VPN,unitVector);
}
// Returns the unit v-axis: VUP with its component along n removed
// (one Gram–Schmidt step), then normalised.
function normalizeViewUp(VUP, n) {
var dtProd = dotProduct(n,VUP);
// nVUP = (n . VUP) * n : the projection of VUP onto n
var nVUP = new Vertex(n.x*dtProd, n.y*dtProd, n.z*dtProd);
// Subtract the projection so the result is perpendicular to n.
// (Reassigning the VUP parameter only rebinds the local; the caller's
// vector is not mutated.)
VUP = new Vertex(VUP.x-nVUP.x, VUP.y-nVUP.y, VUP.z-nVUP.z);
var unitVector = calculateUnitVector(VUP); //VUP/|VUP|
return normalizeVector(VUP,unitVector);
}
// Returns the normalised cross product of its two arguments.
// NOTE(review): the parameters are named (n, u) but the call site below
// passes (v, n), so what is computed is u = v x n — consider renaming the
// parameters to avoid confusion.
function normalizeUVN(n,u) {
var V = crossProduct(n,u);
var unitVector = calculateUnitVector(V); //V/|V|
return normalizeVector(V,unitVector);
}
// Build the orthonormal VRC basis: n (view normal), v (up), u = v x n.
var n = normalizeViewPlane(VPN);
var v = normalizeViewUp(VUP, n);
var u = normalizeUVN(v, n);
// Step 2: rotation R whose rows are u, v, n — maps the u/v/n axes onto
// the world x/y/z axes respectively.
var RVRC = [];
RVRC[0] = [u.x, u.y, u.z, 0];
RVRC[1] = [v.x, v.y, v.z, 0];
RVRC[2] = [n.x, n.y, n.z, 0];
RVRC[3] = [0, 0, 0, 1];
// Compose so far: res = R . T(-VRP)  (4x4 matrix multiplication)
var res = multiplyMatrix4x4(RVRC, TVRP);
3. Shear DOP becomes parallel to z-axis
// Direction Of Projection: DOP = CW - PRP, where CW is the centre of the
// view-plane window (z = 0 on the view plane) and PRP is the eye point.
function shearDOP(PRP, uMaxMin, vMaxMin) {
var CW = new Vertex(uMaxMin,vMaxMin,0);
// Defensive copy of PRP (not strictly needed — PRP is never mutated here).
var mPRP = new Vertex(PRP.x,PRP.y,PRP.z);
return new Vertex(CW.x - mPRP.x, CW.y - mPRP.y, CW.z - mPRP.z);
}
// Centre of the view-plane window in u and v.
var uMaxMin = (Window[1]+Window[0])/2;
var vMaxMin = (Window[3]+Window[2])/2;
var DOP = shearDOP(PRP,uMaxMin,vMaxMin);
// Step 3: shear factors that make the DOP parallel to the z-axis:
// SHx = -DOPx/DOPz, SHy = -DOPy/DOPz.
var HX = (DOP.x/DOP.z)*-1;
var HY = (DOP.y/DOP.z)*-1;
var Hpar = [];
Hpar[0] = [1,0,HX,0];
Hpar[1] = [0,1,HY,0];
Hpar[2] = [0,0,1,0];
Hpar[3] = [0,0,0,1];
// res was R.T(-VRP); after this, res = Hpar . R . T(-VRP)
res = multiplyMatrix4x4(Hpar,res);
4. Translate to front center of the view volume origin
// Step 4: translate the front centre of the view volume to the origin —
// window centre to (0, 0) in u/v and the front plane F to z = 0.
var Tpar = [];
Tpar[0] = [1,0,0,-uMaxMin];
Tpar[1] = [0,1,0,-vMaxMin];
Tpar[2] = [0,0,1,-F];
Tpar[3] = [0,0,0,1];
// res = Tpar . Hpar . R . T(-VRP)
res = multiplyMatrix4x4(Tpar,res);
5. Scale such that view volume becomes bounded by plane
// Step 5: scale so the view volume becomes the canonical unit volume:
// x and y are scaled to span 2 units across the window.
var uMaxMin2 = 2/(Window[1]-Window[0]);
var vMaxMin2 = 2/(Window[3]-Window[2]);
var Spar = [];
Spar[0] = [uMaxMin2, 0, 0, 0];
Spar[1] = [0, vMaxMin2, 0, 0];
// NOTE(review): z is scaled by 1/(F-B). Check this (and the sign of n /
// the F,B values) against your reference — a sign mix-up here is a common
// cause of objects moving away instead of towards the camera.
Spar[2] = [0, 0, 1 / (F - B), 0];
Spar[3] = [0, 0, 0, 1];
// res = Spar . Tpar . Hpar . R . T(-VRP)
res = multiplyMatrix4x4(Spar, res);
After converting to the canonical view volume, I multiply the cube vertices by this final transformation matrix.
// Apply the composed transform res = Spar.Tpar.Hpar.R.T(-VRP) to one
// homogeneous vertex p (a 4-element column vector).
p = multiplyMatrix1x4(res,p);
// Copy the transformed coordinates back into the cube vertex M.
// NOTE(review): if any step ever produces w != 1, divide by p[3] before
// assigning — for a purely parallel pipeline w should stay 1; confirm.
M.x = p[0];
M.y = p[1];
M.z = p[2];
Thus, my cube moves away from the camera, as illustrated in the image below.
However, I expected the cube to move closer to the camera instead of away, as explained in the image below (the object is a house).
Am I missing a step or misunderstanding the algorithm for converting to the canonical view volume? Which function or variable should I modify to make the cube behave like the house above?
JSFiddle: https://jsfiddle.net/Marfin/hL2bmvz5/20/
Reference: https://telin.ugent.be/~sanja/ComputerGraphics/L06_Viewing_Part2_6pp.pdf

in general, if your cam is looking at the box and you want the cam to move towards the box, get the vector between cam and box and move the cam towards this direction:
cam += (box-cam)

Related

THREE JS - Rendering on canvas inside of loop not updating canvas

I've got the below code, which takes in a number of scans. I know this to always be 2880. The canvas should split an entire 360° into 2880 sectors. The loop in the code below will always run from 0 to 2880, and in each loop, a bunch of (maybe several hundred) 2px coloured points are rendered in that sector, emanating from the centre of the canvas outward. The loop moves fast, before I upgraded the THREE package, this loop could render in c. 15 seconds.
The picture draws correctly, but what confuses me is the fact that the call to THREE's render message happens inside of the loop, yet the picture draws nothing until the last iteration of the loop is complete and then all 2880 sectors appear at once, which isn't the effect I'm going for.
Can anyone advise what I might be missing? It's a 2-D non-interactable image.
Stuff I've tried:-
setTimeout(null, 1000) after the .render() method to make it wait before executing the next iteration of the loop
Considered making it a recursive function with the next iteration of the loop inside of the above setTimeout
Reversing the THREE upgrade as an absolute last resort.
Stuff I've considered:-
Is the loop running too fast for the frame rate, or not giving the screen enough time to update?
Limitation of THREEJs?
// Draws one radar sweep: for each of `scans` azimuth scans, converts the
// reflectivity gates to x/y points, colours them by intensity, and adds a
// THREE.Points cloud to the scene.
//
// NOTE: rendering inside a synchronous loop never yields control back to
// the browser, so nothing is painted until the whole loop finishes. For
// progressive drawing, schedule each scan via requestAnimationFrame (or
// setTimeout) instead of looping synchronously.
const drawReflectivityMap = (scans, reflectivityData, azimuthData, scene, renderer, totalScans, currentScan, cameraPosition, camera, reflectivityColours) => {
currentCamera = camera;
currentRenderer = renderer;
// Loop-invariant objects: build the colour scale and the shared points
// material once, not once per scan.
var reflectivityColourScale = d3.scaleQuantize().domain([-32.0, 94.5]).range(reflectivityColours);
var pointsMaterial = new THREE.PointsMaterial({
size: 2,
vertexColors: true,
sizeAttenuation: false
});
for (let i = 0; i < scans; i++) {
console.log('Drawing Reflectivity ' + i);
var reflectivity = reflectivityData[i];
var azimuth = utils.radians(azimuthData[i]);
var sinMultiplier = Math.sin(azimuth);
var cosMultiplier = Math.cos(azimuth);
// Range of each gate along the beam: firstGate + gateIndex * gateSize.
var initialRange = mikeUtilities.addRange(mikeUtilities.multiplyRange(mikeUtilities.createRange(0, reflectivity.GateCount, 1), reflectivity.GateSize), reflectivity.FirstGate);
// x/y co-ordinates for each gate in this arc.
var x = utils.multiplyRange(initialRange, sinMultiplier);
var y = utils.multiplyRange(initialRange, cosMultiplier);
// BUG FIX: the original allocated a fresh BufferGeometry for every
// accepted point inside the forEach, throwing all but the last away.
// One geometry per scan is enough.
var geometry = new THREE.BufferGeometry();
var pointsGraph = [];
var coloursGraph = [];
x.forEach(function (value, j) {
// Skip "no data" gates (sentinel values are -33 and below).
if (reflectivity.MomentDataValues[j] > -33) {
var dataPointColour = new THREE.Color(reflectivityColourScale(reflectivity.MomentDataValues[j]));
pointsGraph.push(x[j], y[j], 0);
coloursGraph.push(dataPointColour.r, dataPointColour.g, dataPointColour.b);
}
});
geometry.setAttribute('position', new THREE.BufferAttribute(new Float32Array(pointsGraph), 3));
geometry.setAttribute('color', new THREE.BufferAttribute(new Float32Array(coloursGraph), 3));
scene.add(new THREE.Points(geometry, pointsMaterial));
renderScene(scene, cameraPosition, renderer);
}
}
// Renders one frame using the module-level camera/renderer captured in
// drawReflectivityMap.
function renderScene(scene, cameraPosition,renderer) {
currentCamera.position.z = cameraPosition;
currentRenderer.render(scene, currentCamera);
// BUG FIX: the original called requestAnimationFrame(renderScene), which
// re-invokes this function with the rAF *timestamp* as `scene` (so
// render(timestamp, camera) fails) and starts a new perpetual rAF chain
// on every call. Schedule a proper zero-argument callback that re-renders
// the captured scene instead.
requestAnimationFrame(function () {
currentRenderer.render(scene, currentCamera);
});
}
// Stagger the work: schedule iteration i to run i*100 ms from now so the
// browser gets a chance to paint between scans.
// FIX: the original was missing the comma between the callback and the
// delay argument (`} i * 100)`), which is a syntax error.
for (let i = 0; i < scans; i++) {
setTimeout(() => {
// Your code goes here
}, i * 100);
}

Orient plane based on vectors

I want to draw a plane that has an array of points lying on it (including the origin). The three.js library draws the plane at the origin, facing the xy plane. Right now, I am having trouble moving it from the origin to a position such that it contains the points.
So far, I have managed to find a way to orient some planes that lie on the y-axis:
// Orient the plane: derive its normal from the points, find the angle to
// the world XY plane, then rotate and recentre the plane mesh.
var directionalVectors = __getDirectionVectors(points);
var normal = __getNormalOfPlane(directionalVectors);
// Angle between the point-cloud plane and the XY plane (normal (0,0,1)).
var angleXY = __getAngleBetweenPlanes( normal, new THREE.Vector3(0, 0, 1) );
// NOTE(review): a single rotateY only handles planes whose normal lies in
// the XZ plane — a general orientation needs a full rotation axis
// (e.g. cross(normal, z)) rather than one Euler angle. Verify.
plane.rotateY(- angleXY );
plane.translateY( planeDimensions.width /2.0);
plane.translateX( planeDimensions.height /2.0);
This is how I calculate the direction vectors:
// Builds the list of edge vectors between consecutive points:
// result[i] = points[i] - points[i + 1].
var __getDirectionVectors = function (points) {
    var edges = [];
    for (var idx = 0; idx + 1 < points.length; idx++) {
        var head = points[idx];
        var tail = points[idx + 1];
        edges.push(new THREE.Vector3().subVectors(head, tail));
    }
    return edges;
};
This is how I calculate the normal:
// Normal of the plane spanned by the first two direction vectors.
// Returns undefined when fewer than two vectors are supplied, so callers
// must be prepared for a missing result.
var __getNormalOfPlane = function (vectors) {
    var normal;
    if (vectors.length >= 2) {
        normal = new THREE.Vector3().crossVectors(vectors[0], vectors[1]);
    }
    return normal;
};
This is how I calculate the angle between the plane and the XY plane:
//http://www.netcomuk.co.uk/~jenolive/vect14.html
//http://www.netcomuk.co.uk/~jenolive/vect14.html
// Angle between two planes, computed from their normals via
// cos(theta) = (n1 . n2) / (|n1| |n2|).
var __getAngleBetweenPlanes = function( normalOne, normalTwo){
    var cosine = normalOne.dot(normalTwo) / (normalOne.length() * normalTwo.length());
    // BUG FIX: floating-point error can push the ratio marginally outside
    // [-1, 1] (e.g. for parallel normals), making Math.acos return NaN.
    // Clamp before taking the arccosine.
    cosine = Math.min(1, Math.max(-1, cosine));
    return Math.acos(cosine);
}
Is there any way I could orient the plane properly for all types of planes?

Generate the Dominant Colors for an RGB image with XMLHttpRequest

A Note For Readers: This is a long question, but it needs a background to understand the question asked.
The color quantization technique is commonly used to get the dominant colors of an image.
One of the well-known libraries that do color quantization is Leptonica through the Modified Median Cut Quantization (MMCQ) and octree quantization (OQ)
Github's Color-thief by #lokesh is a very simple implementation in JavaScript of the MMCQ algorithm:
var colorThief = new ColorThief();
colorThief.getColor(sourceImage);
Technically, the image on a <img/> HTML element is backed on a <canvas/> element:
// Wraps an <img> in an offscreen-ish canvas so its pixels can be read
// through the 2D context (images alone expose no pixel API).
var CanvasImage = function (image) {
this.canvas = document.createElement('canvas');
this.context = this.canvas.getContext('2d');
// NOTE(review): appending to document.body makes the canvas visible and
// leaks one canvas per image — presumably only needed for debugging.
document.body.appendChild(this.canvas);
// Match the canvas size to the image before drawing.
this.width = this.canvas.width = image.width;
this.height = this.canvas.height = image.height;
this.context.drawImage(image, 0, 0, this.width, this.height);
};
And that is the problem with TVML, as we will see later on.
Another implementation I recently came to know was linked on this article Using imagemagick, awk and kmeans to find dominant colors in images that links to Using python to generate awesome linux desktop themes.
The author posted an article about Using python and k-means to find the dominant colors in images that was used there (sorry for all those links, but I'm following back my History...).
The author was super productive, and added a JavaScript version too that I'm posting here: Using JavaScript and k-means to find the dominant colors in images
In this case, we are generating the dominant colors of an image, not using the MMCQ (or OQ) algorithm, but K-Means.
The problem is that the image must be drawn onto a &lt;canvas&gt; as well:
<canvas id="canvas" style="display: none;" width="200" height="200"></canvas>
and then
// Loads the image referenced by img_elem, extracts its three dominant
// colours via process_image, and paints them into the #c1..#c3 swatches.
function analyze(img_elem) {
var ctx = document.getElementById('canvas').getContext('2d')
, img = new Image();
// Colour extraction must wait until the image has actually loaded.
img.onload = function() {
var results = document.getElementById('results');
results.innerHTML = 'Waiting...';
var colors = process_image(img, ctx)
, p1 = document.getElementById('c1')
, p2 = document.getElementById('c2')
, p3 = document.getElementById('c3');
p1.style.backgroundColor = colors[0];
p2.style.backgroundColor = colors[1];
p3.style.backgroundColor = colors[2];
results.innerHTML = 'Done';
}
// Setting src after onload is attached avoids missing a cached-image load.
img.src = img_elem.src;
}
This is because the Canvas has a getContext() method, that expose 2D image drawing APIs - see An introduction to the Canvas 2D API
This context ctx is passed to the image processing function
// Draws the image at a fixed 200x200, samples every pixel's RGB triple,
// clusters the samples into 3 dominant colours with k-means, and returns
// them as hex strings.
function process_image(img, ctx) {
var points = [];
ctx.drawImage(img, 0, 0, 200, 200);
// BUG FIX: `data` was assigned without var/let, creating an implicit
// global (and a ReferenceError in strict mode).
var data = ctx.getImageData(0, 0, 200, 200).data;
// RGBA is interleaved 4 bytes per pixel; alpha is ignored.
for (var i = 0, l = data.length; i < l; i += 4) {
var r = data[i]
, g = data[i+1]
, b = data[i+2];
points.push([r, g, b]);
}
// Cluster into 3 colours and convert each cluster's centroid to hex.
var results = kmeans(points, 3, 1)
, hex = [];
for (var i = 0; i < results.length; i++) {
hex.push(rgbToHex(results[i][0]));
}
return hex;
}
So you can draw an image on the Canvas through the Context and get image data:
ctx.drawImage(img, 0, 0, 200, 200);
data = ctx.getImageData(0, 0, 200, 200).data;
Another nice solution is in CoffeeScript, ColorTunes, but this is using a &lt;canvas&gt; as well:
// Quantizes the (sx, sy, w, h) region of a canvas into nc dominant colours
// using MMCQ (modified median cut). nc defaults to 8.
ColorTunes.getColorMap = function (canvas, sx, sy, w, h, nc) {
var index, indexBase, pdata, pixels, x, y;
if (nc == null) {
nc = 8;
}
// getImageData(sx, sy, w, h) returns a w-by-h region whose data is
// indexed relative to (sx, sy), not in absolute canvas coordinates.
pdata = canvas.getContext("2d").getImageData(sx, sy, w, h).data;
pixels = [];
// BUG FIX: the original iterated y from sy..sy+h and x from sx..sx+w and
// used those *absolute* coordinates to index pdata (indexBase = y*w*4,
// index += x*4), which reads the wrong bytes whenever sx or sy != 0.
// Index region-relative instead.
for (y = 0; y < h; y += 1) {
indexBase = y * w * 4;
for (x = 0; x < w; x += 1) {
index = indexBase + (x * 4);
pixels.push([pdata[index], pdata[index + 1], pdata[index + 2]]);
}
}
return (new MMCQ).quantize(pixels, nc);
};
But, wait, we have no <canvas/> element in TVML!
Of course, there are native solutions like Objective-C ColorCube, DominantColor - this is using K-means
and the very nice and reusable ColorArt by #AaronBrethorst from CocoaControls.
Despite the fact that this could be used in a TVML application through a native to JavaScriptCore bridge - see How to bridge TVML/JavaScriptCore to UIKit/Objective-C (Swift)?
my aim is to make this work completely in TVJS and TVML.
The simplest MMCQ JavaScript implementation does not need a Canvas: see Basic Javascript port of the MMCQ (modified median cut quantization) by Nick Rabinowitz, but needs the RGB array of the image:
var cmap = MMCQ.quantize(pixelArray, colorCount);
that is taken from the HTML <canvas/> and that is the reason for it!
// Builds a colour palette for sourceImage: samples its opaque, non-white
// pixels and clusters them into colorCount colours with MMCQ.
function createPalette(sourceImage, colorCount) {
// Create custom CanvasImage object
var image = new CanvasImage(sourceImage),
imageData = image.getImageData(),
pixels = imageData.data,
pixelCount = image.getPixelCount();
// Store the RGB values in an array format suitable for quantize function
var pixelArray = [];
// pixelCount is in pixels; each pixel occupies 4 bytes (RGBA) in `pixels`.
for (var i = 0, offset, r, g, b, a; i < pixelCount; i++) {
offset = i * 4;
r = pixels[offset + 0];
g = pixels[offset + 1];
b = pixels[offset + 2];
a = pixels[offset + 3];
// If pixel is mostly opaque and not white
if (a >= 125) {
if (!(r > 250 && g > 250 && b > 250)) {
pixelArray.push([r, g, b]);
}
}
}
// Send array to quantize function which clusters values
// using median cut algorithm
var cmap = MMCQ.quantize(pixelArray, colorCount);
var palette = cmap.palette();
// Clean up
image.removeCanvas();
return palette;
}
[QUESTION]
How to generate the dominant colors of a RGB image without using the HTML5 <canvas/>, but in pure JavaScript from an image's ByteArray fetched with XMLHttpRequest?
[UPDATE]
I have posted this question to Color-Thief github repo, adapting the RGB array calculations to the latest codebase.
The solution I have tried was this
// Canvas-free palette extraction attempt: fetches the image bytes with XHR
// and feeds them to MMCQ. `done` is called with the palette (or null).
//
// NOTE(review): this downloads the *encoded* image file (JPEG/PNG bytes),
// not decoded RGBA pixels — quantizing those bytes cannot yield meaningful
// colours. The buffer must first be decoded (e.g. with jpgjs/pngjs), which
// is exactly why the original "does not give back the right RGB colors".
ColorThief.prototype.getPaletteNoCanvas = function(sourceImageURL, colorCount, quality, done) {
var xhr = new XMLHttpRequest();
xhr.open('GET', sourceImageURL, true);
xhr.responseType = 'arraybuffer';
xhr.onload = function(e) {
if (this.status == 200) {
if (typeof colorCount === 'undefined') {
colorCount = 10;
}
if (typeof quality === 'undefined' || quality < 1) {
quality = 10;
}
var pixels = new Uint8Array(this.response);
// BUG FIX: pixelCount must count *pixels* (w*h), not bytes (w*h*4):
// the loop below multiplies the pixel index by 4 itself, so the
// original read far past the end of the buffer. Also removed the
// dead biStr/String.fromCharCode loop, whose result was never used.
var pixelCount = 152 * 152; // TODO: derive from the real image size
// Store the RGB values in an array format suitable for quantize function
var pixelArray = [];
for (var i = 0, offset, r, g, b, a; i < pixelCount; i = i + quality) {
offset = i * 4;
r = pixels[offset + 0];
g = pixels[offset + 1];
b = pixels[offset + 2];
a = pixels[offset + 3];
// If pixel is mostly opaque and not white
if (a >= 125) {
if (!(r > 250 && g > 250 && b > 250)) {
pixelArray.push([r, g, b]);
}
}
}
// Send array to quantize function which clusters values
// using median cut algorithm
var cmap = MMCQ.quantize(pixelArray, colorCount);
var palette = cmap? cmap.palette() : null;
done.apply(this,[ palette ])
} // 200
};
xhr.send();
}
but it does not gives back the right RGB colors array.
[UPDATE]
Thanks to all the suggestions I got it working. Now a full example is available on Github,
The canvas element is being used as a convenient way to decode the image into an RGBA array. You can also use pure JavaScript libraries to do the image decoding.
jpgjs is a JPEG decoder and pngjs is a PNG decoder. It looks like the JPEG decoder will work with TVJS as is. The PNG decoder, however, looks like it's made to work in a Node or web browser environment, so you might have to tweak that one a bit.

Gradient fill empty in for loop

I'm drawing buttons on createjs canvas that have gradient fill and stroke. The number of buttons are drawn inside a for loop. Each section, as you will see in the fiddle, is drawn separately via function. but only the first function run draws the correct fill. The subsequent calls only draws the gradient stroke Jsfiddle
// Draws one gradient-filled rounded button (plus label) per entry in db.
// Each entry is an underscore-delimited record: label_attrib_val_sid_tick_cptn_imageType.
for (i = 0; i < db.length; i++) {
var btn = db[i];
var sdb = btn.split("_");
var blabel = sdb[0];
var battrib = sdb[1];
var bval = sdb[2];
var sid = sdb[3];
var tick = sdb[4];
var cptn = sdb[5];
var imageType = sdb[6];
var buttonSize = 90 + 10; // button width plus spacing
var bttn = new c.Shape();
// NOTE(review): the gradient is defined in shape-local coordinates
// (y 0..50) while the rect is drawn at (x, y). If x/y grow beyond the
// gradient span, later buttons fall outside the fill's ramp — presumably
// why only the first button appears correctly filled. Verify by drawing
// the rect at (0,0) and positioning via bttn.x/bttn.y instead.
bttn.graphics.beginLinearGradientFill([grad1, grad2], [.2, 1], 0, 0,0,50 ).setStrokeStyle(3).beginLinearGradientStroke([grad2, grad1], [.2, 1], 0, 0,0,50 ).drawRoundRect(x, y, 85, 35,5);
var label = new c.Text(blabel);
label.font = font;
label.color = '#000';
label.x = x+8;
label.y = y+6;
m1.addChild(bttn, label);
// Advance the x cursor for the next button.
x+= buttonSize;
}s.update();
It seems to be working to me. Is it perhaps that you forgot to offset your buttons, so you're only seeing the first one? bttn.y = i*40
https://jsfiddle.net/gskinner/wqu4nzdq/12/

Javascript canvas: how to efficiently compute distance of two canvases

I want to compute the distance between two figures drawn in two canvases, actually i'm doing the following, iterating through the data of the canvases (canvases have the same size):
// Sum of absolute differences of the red channel between the two canvases
// (only R is inspected because the images are black-and-white).
var computeDifference = function () {
    var data1 = bufferCtx.getImageData(0, 0, w, h).data;
    var data2 = targetCtx.getImageData(0, 0, w, h).data;
    var total = 0;
    // RGBA is interleaved: stepping by 4 visits only the red bytes.
    for (var i = 0; i < data1.length; i += 4) {
        total += Math.abs(data1[i] - data2[i]);
    }
    return total;
};
this is not very efficient.
Is there a better method? I read about composite operations, but I'm not sure if that could help in this case.
I've purposely considered only the R channel because for now I'm operating with black and white images, but I'm probably going to consider the other channels later.
You can use the new difference blending method on a single canvas, draw both images in with mode set before the last draw, then extract the bitmap data to get the total sum.
You would use the same property, globalCompositeOperation, to set blending mode with.
This way you are letting the browser do the initial work calculating the difference on each component leaving you only to sum them up. You are also saving one canvas, one call to getImageData() which is relative expensive on an hardware accelerated system:
ctx.drawImage(image1, x, y);
ctx.globalCompositeOperation = "difference"; // use composite to set blending...
ctx.drawImage(image2, x, y);
// extract data, and sum -
Note: IE11 does not support the new blending modes. For IE you would need to do the difference calculations manually as initially.
You can feature detect this by providing the fast method when supported, manual when not:
ctx.globalCompositeOperation = "difference";
if (ctx.globalCompositeOperation === "difference") {
// fast
}
else {
// manual
}
Live performance test
Test1 will do manual difference calclation, test2 will use browser difference blending mode. On my setup FireFox wins with more than a 4x factor (slightly less difference in Chrome).
// Benchmark fixture: two canvases, two test images, and timing slots.
// test1 starts once both images have loaded.
var canvas1 = document.createElement("canvas"),
canvas2 = document.createElement("canvas"),
ctx1 = canvas1.getContext("2d"),
ctx2 = canvas2.getContext("2d"),
img1 = new Image, img2 = new Image,
count = 2,
startTime1, startTime2, endTime1, endTime2, sum1, sum2;
performance = performance || Date; // "polyfill" the performance object
img1.crossOrigin = img2.crossOrigin = ""; // we need to extract pixels
img1.onload = img2.onload = loader;
img1.src = "http://i.imgur.com/TJiD5GM.jpg";
img2.src = "http://i.imgur.com/s9ksOb1.jpg";
function loader() {if(!--count) test1()} // handle async load
// Manual method: read both images' pixel data and sum |a - b| per channel
// in JavaScript. Stores the total in sum1 and chains into test2.
function test1(){
startTime1 = performance.now();
ctx1.drawImage(img1, 0, 0);
ctx2.drawImage(img2, 0, 0);
var data1 = ctx1.getImageData(0, 0, 500, 500).data,
data2 = ctx2.getImageData(0, 0, 500, 500).data,
i = 0, len = data1.length, sum = 0;
// we do all channels except alpha channel (not used in difference calcs.)
while(i < len) {
sum += Math.abs(data2[i] - data1[i++]) +
Math.abs(data2[i] - data1[i++]) +
Math.abs(data2[i] - data1[i++]);
i++
}
sum1 = sum;
endTime1 = performance.now();
test2();
}
// Blending-mode method: let the browser compute the per-pixel difference
// via globalCompositeOperation = "difference", then only sum the result.
// Stores the total in sum2 and reports via result().
function test2(){
startTime2 = performance.now();
ctx1.drawImage(img1, 0, 0);
ctx1.globalCompositeOperation = "difference";
// Feature-detect: browsers that lack the mode leave the property unchanged.
if (ctx1.globalCompositeOperation !== "difference")
alert("Sorry, use Firefox or Chrome");
ctx1.drawImage(img2, 0, 0);
var data = ctx1.getImageData(0, 0, 500, 500).data,
i = 0, len = data.length, sum = 0;
// we do all channels except alpha channel
while(i < len) {
sum += data[i++];
sum += data[i++];
sum += data[i++];
i++;
}
sum2 = sum;
endTime2 = performance.now();
result();
}
// Formats the two timings, their speed-up factor, and both checksums into
// the page's <output> element.
function result() {
var time1 = endTime1 - startTime1,
time2 = endTime2 - startTime2,
factor = time1 / time2,
res = "Manual method: " + time1.toFixed(3) + "ms<br>";
res += "Blending mode: " + time2.toFixed(3) + "ms<br>";
res += "Factor: " + factor.toFixed(2) + "x<br>";
res += "Sum 1 = " + sum1;
res += "<br>Sum 2 = " + sum2;
document.querySelector("output").innerHTML = res;
}
<output>Loading images and calculating...</output>

Categories