Okay, so I have been all over the net trying to find a way to correctly render using normals and directional light (originally found in one of the learningwebgl.com tutorials). In the learningwebgl tutorials the normals are all set up in an array. In my program, I need to be able to load Wavefront OBJ files and then generate normals, and I am wondering whether the problem is likely to be in my normal generation code or possibly in my shaders. The code is a little confusing (as the vertex/normal/index data each live in a single flat array), but here is my normal generation code:
for (var i = 0; i < d["vertices"].length; i++) d["normals"][i] = 0;
for (var i = 0; i < d["indices"].length / 3; i++) {
    var a = [d["vertices"][d["indices"][(i*3)]], d["vertices"][d["indices"][(i*3)]+1], d["vertices"][d["indices"][(i*3)]+2]];
    var b = [d["vertices"][d["indices"][(i*3)+1]], d["vertices"][d["indices"][(i*3)+1]+1], d["vertices"][d["indices"][(i*3)+1]+2]];
    var c = [d["vertices"][d["indices"][(i*3)+2]], d["vertices"][d["indices"][(i*3)+2]+1], d["vertices"][d["indices"][(i*3)+2]+2]];
    var e = vec3.cross(vec3.subtract(b, a), vec3.subtract(c, a));
    d["normals"][d["indices"][(i*3)]] += -e[0];
    d["normals"][d["indices"][(i*3)]+1] += -e[1];
    d["normals"][d["indices"][(i*3)]+2] += -e[2];
    d["normals"][d["indices"][(i*3)+1]] += -e[0];
    d["normals"][d["indices"][(i*3)+1]+1] += -e[1];
    d["normals"][d["indices"][(i*3)+1]+2] += -e[2];
    d["normals"][d["indices"][(i*3)+2]] += -e[0];
    d["normals"][d["indices"][(i*3)+2]+1] += -e[1];
    d["normals"][d["indices"][(i*3)+2]+2] += -e[2];
}
for (var i = 0; i < d["normals"].length / 3; i++) {
    var old = vec3.normalize([d["normals"][(i*3)], d["normals"][(i*3)+1], d["normals"][(i*3)+2]]);
    d["normals"][(i*3)] = old[0];
    d["normals"][(i*3)+1] = old[1];
    d["normals"][(i*3)+2] = old[2];
}
The important part of the (vertex) shader:
// where uNMatrix = the inverse of the model-view matrix
vec3 transformedNormal = uNMatrix * aVertexNormal;
// uLightingDirection is a vec3 - the light direction
float directionalLightWeighting = max(dot(transformedNormal, uLightingDirection), 0.0);
// uDirectionalColor is a vec3 - the light color
vLightWeighting = uAmbientColor + uDirectionalColor * directionalLightWeighting;
I have tried many normal-generation algorithms to no avail. I have also found that if I don't normalize the normals at the very end, the colors/shades do in fact change, but the shading is obviously incorrect.
For an example of what it currently looks like (with the bottom loop commented out), follow this link, select teddy from the dropdown, click load, then click "(re)generate normals"; you can then rotate around the teddy by dragging the mouse:
http://webdesignscript.net/assignment/graphics_a3/
For a look at the shaders, they are here:
http://webdesignscript.net/assignment/graphics_a3/scripts/shaders.js
I have been stuck on this for many hours and am starting to wonder if it might be something shader-related. I am still new to graphics programming and would greatly appreciate any help :)
*the matrix library used is glMatrix
Cheers, Josh
I can't load your demo (allocation size overflow on model_engine.js line 107), but I threw your shader code into my engine (shameless plug: check out Jax, it's really cool!) and it worked fine.
Then I took a close look at your JS code, and... well, I believe it's pretty much entirely wrong. It looks like the first thing you do is take the normal of each face -- a good start, but I don't understand why you are negating the value of e. You should also normalize e at this point, because right now it's just an arbitrary-length vector. Don't know if that really matters, though.
The next thing you're doing is taking the normal of the sum of all es for a given vertex. Not quite right: you need to normalize the average of all es, rather than the sum.
In the end, here's what I came up with. It works great in my own engine, and it seems to run considerably faster than the original version to boot. (Disclaimer: there may still be some optimizations to be made. I wrote it for clarity, not speed.)
var i, j, normals = {};

// calculate face normals. Note that each vertex will have a number of faces
// adjacent to it, so we accumulate their normals into an array. We'll take
// the average of them all when done.
var tmp1 = vec3.create(), tmp2 = vec3.create();
var a, b, c;

function pushNormal(point, normal) {
    normals[point] = normals[point] || [];
    normals[point].push(normal);
}

for (i = 0; i < d["indices"].length; i += 3) {
    // get points a, b, c
    var aIndex = d["indices"][i], bIndex = d["indices"][i+1], cIndex = d["indices"][i+2];
    var aOffsetX = aIndex * 3, aOffsetY = aIndex * 3 + 1, aOffsetZ = aIndex * 3 + 2;
    var bOffsetX = bIndex * 3, bOffsetY = bIndex * 3 + 1, bOffsetZ = bIndex * 3 + 2;
    var cOffsetX = cIndex * 3, cOffsetY = cIndex * 3 + 1, cOffsetZ = cIndex * 3 + 2;
    a = [d["vertices"][aOffsetX], d["vertices"][aOffsetY], d["vertices"][aOffsetZ]];
    b = [d["vertices"][bOffsetX], d["vertices"][bOffsetY], d["vertices"][bOffsetZ]];
    c = [d["vertices"][cOffsetX], d["vertices"][cOffsetY], d["vertices"][cOffsetZ]];

    // calculate face normal
    vec3.subtract(b, a, tmp1);
    vec3.subtract(c, a, tmp2);
    var e = vec3.normalize(vec3.cross(tmp1, tmp2, vec3.create()));

    // accumulate the face normal for each of a, b, c (the point itself is
    // used as the hash key, so vertices sharing a position share normals)
    pushNormal(a, vec3.create(e));
    pushNormal(b, vec3.create(e));
    pushNormal(c, vec3.create(e));
}

// now calculate the normalized average for each vertex, and store the result
for (i = 0; i < d["vertices"].length; i += 3) {
    a = [d["vertices"][i], d["vertices"][i+1], d["vertices"][i+2]];
    if (normals[a]) {
        var avg = vec3.create();
        for (j = 0; j < normals[a].length; j++) {
            vec3.add(normals[a][j], avg, avg);
        }
        vec3.scale(avg, 1 / normals[a].length);
        vec3.normalize(avg);
        d["normals"][i] = avg[0];
        d["normals"][i+1] = avg[1];
        d["normals"][i+2] = avg[2];
    }
}

// sanity check
if (d["normals"].length != d["vertices"].length)
    alert("ERROR " + d["normals"].length + " != " + d["vertices"].length);
Hope this helps!
I am attempting to use GPU.js to accelerate a dynamic programming algorithm.
Here is my current code:
let pixels = new Uint32Array(5 * 5);
for (let i = 0; i < pixels.length; i++) {
    pixels[i] = i;
}

function kFunction() {
    let width = this.output.x;
    let row = this.constants.row;
    let col = this.thread.x;

    let prevRow = row - 1;
    let base = (row * width) + col;
    let prevBase = (prevRow * width) + col;

    let nw = this.constants.pixels[prevBase - 1];
    let n = this.constants.pixels[prevBase];
    let ne = this.constants.pixels[prevBase + 1];
    return this.constants.pixels[base] + Math.min(Math.min(nw, n), ne);
}

var gpuKernel = gpu.createKernel(kFunction)
    .setConstants({ pixels: pixels, row: 1 })
    .setOutput([5, 5]);

console.log(gpuKernel());
This works, except I would like to have it run on each row, instead of just row 1.
The issue is that in order to run on the next row, the previous row has to be computed first (for rows n > 1 the nw, n, and ne values should be computed based on the previous row's value instead of pixels)
I could easily fix this by putting createKernel in a loop and running it on every row, but I believe that constantly reading the result back from the GPU and sending it up again is slow. I have heard that textures might be able to solve this, by maintaining some sort of state, but I cannot find any relevant information on them.
Is what I'm asking to do possible? To have a single GPU function call to compute the entire cumulative sum table without passing data back and forth for each row computed?
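One hedged sketch of the texture idea (assuming gpu.js's setPipeline(true) option, which makes a kernel return a GPU texture that can be fed straight into the next call; the seed kernel below is a made-up helper, and edge handling at col - 1 / col + 1 is ignored, as in the original):
// per-row kernel: takes the previous row's results as an argument
const rowKernel = gpu.createKernel(function (prev, pixels, row) {
    const col = this.thread.x;
    const nw = prev[col - 1];
    const n = prev[col];
    const ne = prev[col + 1];
    return pixels[row * this.output.x + col] + Math.min(Math.min(nw, n), ne);
})
    .setPipeline(true) // result stays on the GPU as a texture
    .setOutput([5]);

// hypothetical seed kernel: copies row 0 of pixels into a texture
const seedKernel = gpu.createKernel(function (pixels) {
    return pixels[this.thread.x];
})
    .setPipeline(true)
    .setOutput([5]);

let prev = seedKernel(pixels);
for (let row = 1; row < 5; row++) {
    prev = rowKernel(prev, pixels, row); // texture in, texture out
}
console.log(prev.toArray()); // single readback at the end
Each row is still a separate kernel invocation, but the data never leaves the GPU between calls, which is usually the expensive part.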
What I'm basically trying to do is map an array of data points into a WebGL vertex buffer (Float32Array) in real time (I'm working on animated parametric surfaces). I've assumed that representing data points with Float32Arrays (either one Float32Array per component: [xx...x, yy...y], or interleaved: xyxy...xy) should be faster than storing them in an array of points: [[x, y], [x, y], ... [x, y]], since the latter is effectively a nested structure. However, to my surprise, the typed-array version is about 15% slower in all the major browsers (not counting array creation time). Here's a little test I've set up:
var points = 250000, iters = 100;

function map_2a(x, y) { return Math.sin(x) + y; }

var output = new Float32Array(3 * points);

// generate data
var data = [];
for (var i = 0; i < points; i++)
    data[i] = [Math.random(), Math.random()];

// run
console.time('native');
(function() {
    for (var iter = 0; iter < iters; iter++)
        for (var i = 0, to = 0; i < points; i++, to += 3) {
            output[to] = data[i][0];
            output[to + 1] = data[i][1];
            output[to + 2] = map_2a(data[i][0], data[i][1]);
        }
}());
console.timeEnd('native');

// generate data
var data = [new Float32Array(points), new Float32Array(points)];
for (var i = 0; i < points; i++) {
    data[0][i] = Math.random();
    data[1][i] = Math.random();
}

// run
console.time('typed');
(function() {
    for (var iter = 0; iter < iters; iter++)
        for (var i = 0, to = 0; i < points; i++, to += 3) {
            output[to] = data[0][i];
            output[to + 1] = data[1][i];
            output[to + 2] = map_2a(data[0][i], data[1][i]);
        }
}());
console.timeEnd('typed');
Is there anything I'm doing wrong?
I think your problem is that you are not comparing the same code. In the first example, you have one large array filled with very small arrays. In the second example, you have two very large arrays, and both of them need to be indexed. The profile is different.
If I structure the first example to be more like the second (two large generic arrays), then the Float32Array implementation far outperforms the generic array implementation.
Here is a jsPerf profile to show it.
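Roughly, the restructured comparison looks like this (my reconstruction of the idea, not the exact jsPerf code): the "native" case also uses two large arrays, so both versions index identically and differ only in array type.
// two large plain arrays instead of 250,000 two-element ones
var data = [new Array(points), new Array(points)];
for (var i = 0; i < points; i++) {
    data[0][i] = Math.random();
    data[1][i] = Math.random();
}
// the inner loop is then identical to the "typed" version
for (var iter = 0; iter < iters; iter++)
    for (var i = 0, to = 0; i < points; i++, to += 3) {
        output[to] = data[0][i];
        output[to + 1] = data[1][i];
        output[to + 2] = map_2a(data[0][i], data[1][i]);
    }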
In V8, a value can be an SMI (a 31/32-bit integer), a double, or a pointer. So my guess is that when you read floats out of a Float32Array, each value has to be converted to a double, whereas a plain array already stores doubles.
I am trying to implement the A* algorithm for my pathfinding robot in JavaScript. The only problem is that I do not understand what it means to find all adjacent squares. I am using the Manhattan distance formula, as I cannot let my bot move diagonally. Here is my code (so far):
var open = new Array();
var closed = new Array();
var heuristicValue = new Array();
start = [9, 18]; // do not take this literally
goal = [1, 0];   // again, don't
open.push(start);
while (open.length != 0) {
    for (var x = 0; x < open.length; x++) {
        heuristicValue[x] = computeHeuristicV(maplayout, start[0], start[1], open[x][0], open[x][1], goal[0], goal[1]);
    }
    minimum = Math.min(100000, heuristicValue[0]);
    for (var x = 1; x < open.length; x++) {
        minimum = Math.min(minimum, heuristicValue[x]);
    }
    for (var x = 0; x < open.length; x++) {
        if (minimum == heuristicValue[x]) {
            current = [open[x][0], open[x][1]];
        }
    }
    closed.push(current);
    // INCOMPLETE
}
The computeHeuristicV function computes the heuristic value in the code above.
"All adjacent squares" means every possible next hop on the path.
A* is a great algorithm to master and use. The two key elements are finding neighbors and the heuristic. A heuristic is used to estimate the distance between your current location and the end. Also, the statement "find all adjacent squares" is referring to a neighbors function. For example, you might have the following:
var heuristic = function(state) {
    var endLocation = MyGame.getEndLocation();
    return Math.abs(state.x - endLocation.x) + Math.abs(state.y - endLocation.y);
}

var neighbors = function(state) {
    var neighborStates = [];
    MyGame.setPlayer({
        x: state.x,
        y: state.y
    });
    neighborStates.push(MyGame.moveUp.getState());
    neighborStates.push(MyGame.moveRight.getState());
    neighborStates.push(MyGame.moveDown.getState());
    neighborStates.push(MyGame.moveLeft.getState());
    return neighborStates;
}
So, getting the "adjacent squares" is just asking you for the neighboring states or options. Personal plug: I just authored a simple a-star algorithm here: https://github.com/tssweeney/async-astar. Reading the description might help you to better understand the problem.
I want to design a function that can generate a 'map' of sorts.
For example:
Location A is created; it is located at some position X.
Location B is created; it is located at some position Y, and we know the distance between X and Y.
Location C is created; we know the distance from C to B. How do we calculate C to A?
Using a triangle method, I suppose I could assign a random angle and calculate the third side, but what would I do when I randomly add locations D, E, and F? Would I be calculating multiple triangles that get exponentially worse with every addition?
Say you want to generate a list of locations L[1..n]: just pick the next location at random and scan over L to check that its distance to every location already chosen is over a threshold; otherwise, pick again. Then push it onto your list L. The total running time for generating an n-element list is O(n^2); when n < 1000, this is fast enough. The following method is guaranteed to terminate, and is designed for a relatively small ready-to-pick list, say up to 1,000,000 elements.
function generateList(orgList, numberToOutput) {
    if (orgList.length < numberToOutput)
        return false;

    var orgListClone = orgList.slice(0);
    var L = [];
    while (L.length < numberToOutput && orgListClone.length > 0) {
        var n = parseInt(Math.random() * orgListClone.length);
        // Assume we pick the n-th element in the list.
        var ok = true;
        for (var j = 0; j < L.length; j++)
            if (distance(orgListClone[n], L[j]) < kThreshold) {
                // n is not an option; swap orgListClone[n] with the last element and pop it off.
                orgListClone[n] = orgListClone[orgListClone.length - 1];
                orgListClone.pop();
                ok = false;
                break;
            }
        if (ok) {
            // All tests passed
            L.push(orgListClone[n]);
            orgListClone[n] = orgListClone[orgListClone.length - 1];
            orgListClone.pop();
        }
    }
    if (L.length == numberToOutput)
        return L;
    // Failed to find a list
    return null;
}
Another solution is to calculate the distances between each pair of locations ahead of time, and make a list of "too close" locations for each location.
Then, after each pick, just merge that location's too-close list into the current set, which takes O(n), and pick another location that is not in the set. This method only works when the ready-to-pick list is large enough that the probability (1 - |too close set| / |ready-to-pick list|) of choosing a location outside the set stays large. It takes up to O(nm) in total, where m is the average size of a too-close list.
function generateList(orgList, numberToOutput) {
    if (orgList.length < numberToOutput)
        return false;

    var tooCloseSet = {};
    var L = [];
    var lastLengthOfL = 0;
    var repickCount = 0;
    while (L.length < numberToOutput) {
        if (L.length == lastLengthOfL) {
            if (++repickCount > 10)
                return false;
        } else {
            lastLengthOfL = L.length;
            repickCount = 0;
        }
        var n = parseInt(Math.random() * orgList.length);
        if (n in tooCloseSet)
            continue;
        L.push(orgList[n]);
        // mergeSet and tooCloseList are assumed to be defined elsewhere
        mergeSet(tooCloseSet, orgList[n].tooCloseList);
    }
    return L;
}
You could try something like this. I haven't tested it, so it's just conceptual at this point.
You could just generate an array of randomly placed points, and each point could hold its own map of distances to the other points, calculated using basic trigonometry:
function Point(x, y) {
    return {
        x: x,
        y: y,
        addRelative: function(pt) {
            this.relativePoints[pt] = Math.abs(Math.sqrt(Math.pow((this.x - pt.x), 2) + Math.pow((this.y - pt.y), 2)));
        },
        relativePoints: {}
    };
}

var randPoints = []; // Let's assume this has a collection of random Point objects
for (var i = 0; i < randPoints.length; i++) {
    for (var j = 0; j < randPoints.length; j++) {
        randPoints[i].addRelative(randPoints[j]);
    }
}

randPoints[0].relativePoints[randPoints[1]]; // Dist from first to second point.
Yes, it gets geometrically more complicated with each point you add.
The problem is that even if you know the lengths of all three sides of a triangle, you still don't know the orientation. To illustrate with your example: you're defining ABC by specifying the distances dAB and dBC (which gives you dAC). But you actually have two possible triangles, ABC and its mirror image ABC'. That means if you add a fourth point, D, by specifying its distance to one of the points on ABC (e.g. dCD), you've added a second triangle, which can also have one of two orientations, making for a total of four possible solutions. As you can see, orientation doesn't matter for determining the distance between two points on the same triangle, but for determining distances between points on different triangles, it does.
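To make that concrete, here is a small hypothetical sketch: fix A at the origin and B on the x-axis; the three pairwise distances then pin C down only up to a reflection across that axis.
// returns both mirror-image candidates for C, given the three pairwise
// distances (the helper and the coordinate convention are made up)
function possibleC(dAB, dAC, dBC) {
    // intersect the circles |C - A| = dAC and |C - B| = dBC,
    // with A = (0, 0) and B = (dAB, 0)
    var x = (dAB * dAB + dAC * dAC - dBC * dBC) / (2 * dAB);
    var y = Math.sqrt(Math.max(0, dAC * dAC - x * x));
    return [[x, y], [x, -y]]; // C and its reflection C'
}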
I desperately need a JavaScript equivalent to the polyval and polyfit functions that exist in MATLAB. Essentially, those functions do a curve fit based on two equally sized arrays and a specified polynomial degree. I need to do some calculations that involve curve fitting in JavaScript and can't for the life of me find an equivalent function.
This is MATLAB's explanation of polyfit:
"P = POLYFIT(X,Y,N) finds the coefficients of a polynomial P(X) of degree N that fits the data Y best in a least-squares sense. P is a row vector of length N+1 containing the polynomial coefficients in descending powers, P(1)*X^N + P(2)*X^(N-1) + ... + P(N)*X + P(N+1)."
This is MATLAB's explanation of polyval:
"POLYVAL Evaluate polynomial. Y = POLYVAL(P,X) returns the value of a polynomial P evaluated at X. P is a vector of length N+1 whose elements are the coefficients of the polynomial in descending powers. Y = P(1)*X^N + P(2)*X^(N-1) + ... + P(N)*X + P(N+1)"
Any help would be super.
Regards,
numericjs may help you get started.
POLYFIT performs a least-squares polynomial fit, which comes down to solving a system of linear equations. I did a quick search, but I couldn't find a basic linear algebra JavaScript library that solves such systems... The easiest route would be to implement the Gaussian elimination algorithm yourself.
POLYVAL simply evaluates the polynomial at the points X by substituting the coefficients into the equation.
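For the POLYVAL half, a minimal hand-rolled sketch (using MATLAB's descending-powers convention and Horner's method) could look like:
// p = [p1, p2, ..., pN+1] in descending powers, as in MATLAB
function polyval(p, x) {
    var y = 0;
    for (var i = 0; i < p.length; i++)
        y = y * x + p[i]; // Horner's method
    return y;
}

polyval([1, 0, -2], 3); // x^2 - 2 evaluated at x = 3 gives 7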
Perhaps this code might help someone:
function _prepare(_mat) {
    // pad with a dummy row and column so the matrix is 1-indexed
    _mat = [[]].concat(_mat);
    for (var i = 0; i < _mat.length; ++i)
        _mat[i] = [0].concat(_mat[i]);
    return _mat;
}

function linear(_mat) {
    _mat = _prepare(_mat);
    return _solve(_mat);
}

function _solve(_mat) {
    var c = [], d = [];
    var n = _mat.length - 1;
    for (var i = 0; i <= n + 1; i++) {
        d[i] = [];
        c[i] = 0;
        for (var j = 0; j <= n + 1; ++j)
            d[i][j] = 0;
    }
    // forward elimination: calculate the determinants of the system
    for (var m = 2; m <= n; ++m) {
        for (i = m; i <= n; ++i)
            for (j = m - 1; j <= n + 1; ++j)
                d[i][j] = [_mat[i][j] * _mat[m - 1][m - 1], _mat[i][m - 1]];
        for (i = m; i <= n; ++i)
            for (j = m - 1; j <= n + 1; ++j) {
                _mat[i][j] = d[i][j][0] - d[i][j][1] * _mat[m - 1][j];
                if (Math.abs(_mat[i][j]) < 1e-25) _mat[i][j] = 0; // flush rounding noise to zero
            }
    }
    // back substitution to recover the coefficients
    for (i = n; i >= 1; --i) {
        c[i - 1] = _mat[i][n + 1];
        if (i != n)
            for (j = n; j >= i + 1; --j)
                c[i - 1] = c[i - 1] - _mat[i][j] * c[j - 1];
        if (_mat[i][i] != 0)
            c[i - 1] = c[i - 1] / _mat[i][i];
        else
            c[i - 1] = 0;
        if (Math.abs(c[i - 1]) < 1e-25)
            c[i - 1] = 0;
    }
    c.length = n;
    return c;
}
function fitpoly(e, b) {
    var a = [];
    var n = 1 + b;
    e = [[0, 0]].concat(e); // 1-index the sample points
    var ns = e.length - 1;
    for (var i = 0; i <= n + 1; i++) {
        a[i] = [];
        for (var j = 0; j <= n + 1; ++j)
            a[i][j] = 0;
    }
    // accumulate the power sums of x that fill the normal-equation matrix
    for (var m = 1; m <= n; m++)
        for (i = 1; i <= m; i++) {
            j = m - i + 1;
            for (var ii = 1; ii <= ns; ii++)
                a[i][j] = a[i][j] + Math.pow(e[ii][0], m - 1);
        }
    // right-hand side: sums of y * x^(i-1)
    for (i = 1; i <= n; ++i)
        for (ii = 1; ii <= ns; ++ii)
            a[i][n + 1] = a[i][n + 1] + e[ii][1] * Math.pow(e[ii][0], i - 1);
    for (m = n + 2; m <= 2 * n; ++m)
        for (i = m - n; i <= n; ++i) {
            j = m - i;
            for (ii = 1; ii <= ns; ++ii)
                a[i][j] = a[i][j] + Math.pow(e[ii][0], m - 2); // coefficients of the system
        }
    a.length = a.length - 1;
    return _solve(a);
}
// and then
var poly_degree = 6;
var points = [[2, 2], [2, 4], [4, 6], [6, 4], [8, 2]];
// coefficients of the fitted polynomial
console.log(fitpoly(points, poly_degree));

// or solve a linear system, here with six variables
var solution = linear([[1, 2, 3, -2, -3, -26, 52], [3, 2, 5, -2, 4, 30, -60], [6, 1, -4, -1, 5, 94, -188], [-1, 2, 4, 3, 4, 30, -60], [-1, 4, 2, -1, 2, 26, -52], [3, -3, 11, -7, -2, -1, -95]]);
console.log(solution);
Give this gist a try; it uses numeric.js:
function polyfit(xArray, yArray, order) {
    if (xArray.length <= order) console.warn("Warning: Polyfit may be poorly conditioned.");
    let xMatrix = [];
    let yMatrix = numeric.transpose([yArray]);
    // build the Vandermonde matrix, one row per sample
    for (let i = 0; i < xArray.length; i++) {
        let temp = [];
        for (let j = 0; j <= order; j++) {
            temp.push(Math.pow(xArray[i], j));
        }
        xMatrix.push(temp);
    }
    // solve the normal equations: coefficients = (X^T X)^-1 X^T y
    let xMatrixT = numeric.transpose(xMatrix);
    let dot1 = numeric.dot(xMatrixT, xMatrix);
    let dot2 = numeric.dot(xMatrixT, yMatrix);
    let dotInv = numeric.inv(dot1);
    // note: the coefficients come back in ascending powers (constant term
    // first), the reverse of MATLAB's descending convention
    let coefficients = numeric.dot(dotInv, dot2);
    return coefficients;
}
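For example (hypothetical data, assuming numeric.js is loaded):
// fit a quadratic to points lying near y = x^2
let xs = [0, 1, 2, 3, 4];
let ys = [0.1, 0.9, 4.2, 8.8, 16.1];
let coeffs = polyfit(xs, ys, 2);
// coeffs is a column vector [[c0], [c1], [c2]] in ascending powers,
// i.e. y ≈ c0 + c1*x + c2*x^2
console.log(coeffs);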