Javascript equivalent functions to Matlab Functions: Polyfit/Polyval?

I desperately need JavaScript equivalents of the polyval and polyfit functions that exist in MATLAB. Essentially, those functions fit a curve to two equally sized arrays for a polynomial of a specified degree. I need to do some calculations that involve curve fitting in JavaScript and can't for the life of me find an equivalent function.
This is MATLAB's explanation of polyfit:
"P = POLYFIT(X,Y,N) finds the coefficients of a polynomial P(X) of degree N that fits the data Y best in a least-squares sense. P is a row vector of length N+1 containing the polynomial coefficients in descending powers, P(1)*X^N + P(2)*X^(N-1) + ... + P(N)*X + P(N+1)."
This is MATLAB's explanation of polyval:
"POLYVAL Evaluate polynomial.
Y = POLYVAL(P,X) returns the value of a polynomial P evaluated at X. P is a vector of length N+1 whose elements are the coefficients of the polynomial in descending powers.
Y = P(1)*X^N + P(2)*X^(N-1) + ... + P(N)*X + P(N+1)"
Any help would be super.
Regards,

numericjs may help you get started.

POLYFIT performs a least-squares polynomial fit, which comes down to solving a system of linear equations. I did a quick search, but I couldn't find a basic linear-algebra JavaScript library that solves such systems... The easiest approach would be to implement the Gaussian elimination algorithm yourself.
POLYVAL simply evaluates the polynomial at the points X by substituting the coefficients into the equation.
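If you want to avoid a dependency, here is a minimal sketch of both pieces along those lines: a small Gaussian-elimination solver, a polyfit built on the normal equations, and a matching polyval. The names gaussSolve, polyfitNE and polyvalAsc are just illustrative, and note that, unlike MATLAB, the coefficients here come back in ascending powers.

// Solve the linear system M x = v by Gaussian elimination with partial pivoting.
function gaussSolve(M, v) {
    var n = v.length;
    // augment each row of M with the matching entry of v so row operations touch both
    var A = M.map(function (row, i) { return row.concat(v[i]); });
    for (var col = 0; col < n; col++) {
        // partial pivoting: swap in the row with the largest entry in this column
        var pivot = col;
        for (var r = col + 1; r < n; r++)
            if (Math.abs(A[r][col]) > Math.abs(A[pivot][col])) pivot = r;
        var tmp = A[col]; A[col] = A[pivot]; A[pivot] = tmp;
        // eliminate the entries below the pivot
        for (var r = col + 1; r < n; r++) {
            var f = A[r][col] / A[col][col];
            for (var c = col; c <= n; c++) A[r][c] -= f * A[col][c];
        }
    }
    // back-substitution
    var x = new Array(n).fill(0);
    for (var r = n - 1; r >= 0; r--) {
        var s = A[r][n];
        for (var c = r + 1; c < n; c++) s -= A[r][c] * x[c];
        x[r] = s / A[r][r];
    }
    return x;
}

// Least-squares fit of a degree-`degree` polynomial via the normal equations;
// returns coefficients in ASCENDING powers (MATLAB's polyfit returns descending).
function polyfitNE(xs, ys, degree) {
    var n = degree + 1, M = [], v = [];
    for (var row = 0; row < n; row++) {
        M.push([]);
        for (var col = 0; col < n; col++) {
            var s = 0;
            for (var k = 0; k < xs.length; k++) s += Math.pow(xs[k], row + col);
            M[row].push(s);
        }
        var b = 0;
        for (var k = 0; k < xs.length; k++) b += ys[k] * Math.pow(xs[k], row);
        v.push(b);
    }
    return gaussSolve(M, v);
}

// Evaluate a polynomial with coefficients in ascending powers.
function polyvalAsc(coeffs, x) {
    return coeffs.reduce(function (acc, c, i) { return acc + c * Math.pow(x, i); }, 0);
}

// quick check with y = 1 + 2x sampled exactly:
polyfitNE([0, 1, 2, 3], [1, 3, 5, 7], 1); // ≈ [1, 2]
polyvalAsc([1, 2], 10);                   // 21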

Perhaps this code might help someone:
function _prepare(_mat) {
    _mat = [[]].concat(_mat)
    for (var i = 0; i < _mat.length; ++i)
        _mat[i] = [0].concat(_mat[i])
    return _mat
}

function linear(_mat) {
    _mat = _prepare(_mat)
    return _solve(_mat)
}

function _solve(_mat) {
    var c = new Array(), d = new Array()
    var n = _mat.length - 1
    for (var i = 0; i <= n + 1; i++) {
        d[i] = new Array();
        c[i] = 0
        for (var j = 0; j <= n + 1; ++j)
            d[i][j] = 0
    }
    // calculate all the determinants of the system
    for (var m = 2; m <= n; ++m) {
        for (var i = m; i <= n; ++i)
            for (var j = m - 1; j <= n + 1; ++j)
                d[i][j] = [_mat[i][j] * _mat[m - 1][m - 1], _mat[i][m - 1]]
        for (var i = m; i <= n; ++i)
            for (var j = m - 1; j <= n + 1; ++j) {
                _mat[i][j] = d[i][j][0] - d[i][j][1] * _mat[m - 1][j]
                if (Math.abs(_mat[i][j]) < 1e-25) _mat[i][j] = 0 // damp floating-point noise
            }
    }
    // now the coefficients of the equation (back-substitution)
    for (var i = n; i >= 1; --i) {
        c[i - 1] = _mat[i][n + 1]
        if (i != n)
            for (var j = n; j >= i + 1; --j)
                c[i - 1] = c[i - 1] - _mat[i][j] * c[j - 1]
        if (_mat[i][i] != 0)
            c[i - 1] = c[i - 1] / _mat[i][i]
        else
            c[i - 1] = 0
        if (Math.abs(c[i - 1]) < 1e-25)
            c[i - 1] = 0
    }
    c.length = n
    return c
}

function fitpoly(e, b) {
    var a = new Array()
    var n = 1 + b, e = [[0, 0]].concat(e), ns = e.length - 1
    for (var i = 0; i <= n + 1; i++) {
        a[i] = new Array();
        for (var j = 0; j <= n + 1; ++j)
            a[i][j] = 0
    }
    for (var m = 1; m <= n; m++)
        for (var i = 1; i <= m; i++) {
            var j = m - i + 1;
            for (var ii = 1; ii <= ns; ii++)
                a[i][j] = a[i][j] + Math.pow(e[ii][0], m - 1)
        }
    for (var i = 1; i <= n; ++i)
        for (var ii = 1; ii <= ns; ++ii)
            a[i][n + 1] = a[i][n + 1] + e[ii][1] * Math.pow(e[ii][0], i - 1)
    for (var m = n + 2; m <= 2 * n; ++m)
        for (var i = m - n; i <= n; ++i) {
            var j = m - i
            for (var ii = 1; ii <= ns; ++ii)
                a[i][j] = a[i][j] + Math.pow(e[ii][0], m - 2) // coefficients of the system
        }
    a.length = a.length - 1
    return _solve(a)
}

// and then
var poly_degree = 6
var points = [[2, 2], [2, 4], [4, 6], [6, 4], [8, 2]]
// coefficients of the polynomial
console.log(fitpoly(points, poly_degree))
// or solve a linear system. Here with six variables
var solution = linear([[1, 2, 3, -2, -3, -26, 52], [3, 2, 5, -2, 4, 30, -60], [6, 1, -4, -1, 5, 94, -188], [-1, 2, 4, 3, 4, 30, -60], [-1, 4, 2, -1, 2, 26, -52], [3, -3, 11, -7, -2, -1, -95]])
console.log(solution)

Give this gist a try, it uses numeric.js:
function polyfit(xArray, yArray, order) {
    if (xArray.length <= order) console.warn("Warning: Polyfit may be poorly conditioned.")
    let xMatrix = []
    let yMatrix = numeric.transpose([yArray])
    for (let i = 0; i < xArray.length; i++) {
        let temp = []
        for (let j = 0; j <= order; j++) {
            temp.push(Math.pow(xArray[i], j))
        }
        xMatrix.push(temp)
    }
    let xMatrixT = numeric.transpose(xMatrix)
    let dot1 = numeric.dot(xMatrixT, xMatrix)
    let dot2 = numeric.dot(xMatrixT, yMatrix)
    let dotInv = numeric.inv(dot1)
    let coefficients = numeric.dot(dotInv, dot2)
    return coefficients
}
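Assuming numeric.js is loaded as numeric, a quick sanity check (note that this returns the coefficients in ascending powers, the reverse of MATLAB's convention, and each one is wrapped in a one-element row because yMatrix is a column):

let coeffs = polyfit([0, 1, 2, 3], [1, 3, 7, 13], 2) // fits y = 1 + x + x^2
// coeffs ≈ [[1], [1], [1]]; flatten with coeffs.map(c => c[0]) if you want a plain array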

Related

Create N dimensional grid of points in JavaScript

I know there are similar questions out there (the closest one I found was JavaScript; n-dimensional array creation), but most of them are in Python, and even the one I found did not work when I tried to implement it in my code.
So I want to create a function createGrid(L,n) that takes as parameters two arrays of the same size, L and n. Here L[i] specifies the size of the grid in dimension i and n[i] specifies the number of points in that dimension (so the spacing between points is L[i]/(n[i] - 1)). For example, for two dimensions, let's say I call "let grid = createGrid([10,10],[2,2])"; then the function should return an (n+1)-dimensional array like this:
[[[0,0],[0,10]], [[10,0],[10,10]]].
So if I want to access a point in the grid I can simply type, for example, grid[1][0], which will return the point [10,0].
At the moment I am hardcoding this for 3 dimensions like this:
let create3DSquareGrid = function(L, n){
    //L should be an array [Lx, Ly, Lz], if not they are all the same
    if(!Array.isArray(L)){
        L = [L,L,L];
    }
    //n should be an array [nx, ny, nz], if not they are all the same
    if(!Array.isArray(n)){
        n = [n,n,n];
    }
    //calculate the dl of each dimension
    var dl = L.map((val,i)=> Math.round(val/(n[i]-1)));
    //create the grid
    let grid = []
    for(let i=0; i<n[0]; i++){
        let x = i*dl[0];
        let gridJ = [];
        for(let j=0; j<n[1]; j++){
            let y = j*dl[1];
            let gridK = [];
            for(let k=0; k<n[2]; k++){
                let z = k*dl[2];
                gridK.push([x,y,z]);
            }
            gridJ.push(gridK)
        }
        grid.push(gridJ);
    }
    return grid;
}
But I want to extend this to any number of dimensions. I tried to recurse as shown in the question I linked at the beginning, but it simply did not work; I tweaked it a little and things only got worse, and from that point I just got more and more confused.
If you can, please help! And thanks a lot!
You can use a loop. It is a great way to solve this problem.
function createGrid(L, n) {
    var ans = L
    for (var i = 1; i < L.length; i++) {
        var tmp = []
        for (var el of ans) {
            var innerTmp = []
            for (var j = 0; j < L.length; j++) {
                innerTmp.push([el, L[j]])
            }
            tmp.push(innerTmp)
        }
        ans = tmp
    }
    return ans
}
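For comparison, here is a recursive sketch that follows the L[i]/(n[i] - 1) spacing rule described in the question; createGridRecursive is just an illustrative name.

// Build an N-dimensional grid of points. L[i] is the extent of dimension i,
// n[i] the number of points along it, so the spacing is L[i]/(n[i] - 1).
function createGridRecursive(L, n, point) {
    point = point || [];
    var dim = point.length;
    if (dim === L.length) return point.slice();        // a complete point
    var step = n[dim] > 1 ? L[dim] / (n[dim] - 1) : 0; // spacing in this dimension
    var level = [];
    for (var i = 0; i < n[dim]; i++) {
        level.push(createGridRecursive(L, n, point.concat(i * step)));
    }
    return level;
}

// createGridRecursive([10, 10], [2, 2])
// -> [[[0, 0], [0, 10]], [[10, 0], [10, 10]]], so grid[1][0] is [10, 0]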

Math.js is slow to multiply 2 big matrices

I'm trying to speed up matrix multiplication in pure JavaScript. Multiplication becomes very slow above a few hundred rows, taking over a minute at around a thousand rows; the execution times are shown below.
How would you solve this? We are working on a split + parallelization solution in Node.js, so I'm looking for the best options to optimize it in pure JavaScript. My solution has to adapt the number of parallel flows to the number of CPU threads available (which is unknown at design time).
Some data:
const math = require("mathjs");
// a1 is a 1000x1000 float matrix
// b1 is a 1000x400
math.multiply(a1, b1)
// runs in 19.6 seconds on a CPU 4.2Ghz
// a2 is 1600x1200
// b2 is 1200x800
math.multiply(a2, b2)
// runs in 78 seconds
Array lookup optimization
Arrays are associative lookup tables in JavaScript - they are inefficient by nature. An optimization of this kind of array access
var array = some_2D_Array;
var nRows = array.length;
var nCols = array[0].length;

for (var r = 0; r < nRows; ++r) {
    for (var c = 0; c < nCols; ++c) {
        // do something with array[r][c];
    }
}
is to replace it with
var array = some_2D_Array;
var nRows = array.length;
var nCols = array[0].length;

for (var r = 0; r < nRows; ++r) {
    var aRow = array[r]; // look up the row once
    for (var c = 0; c < nCols; ++c) {
        // do something with aRow[c];
    }
}
which avoids searching the array object for the row array within each iteration of the inner loop. Performance gain will depend on the JS engine and the number of inner iterations.
Typed Array Usage
Another alternative is to use a one-dimensional typed array and compute the index yourself, avoiding an associative lookup per dimension. Here's some test code I ran in Node to see what difference it might make:
function Mat(rows, cols) {
    var length = rows * cols,
        buffer = new Float64Array(length);

    function getRow(r) {
        var start = r * cols,
            inc = 1;
        return { length: cols, start, inc, buffer };
    }
    function getCol(c) {
        var start = c,
            inc = cols;
        return { length: rows, start, inc, buffer };
    }
    function setRC(r, c, to) {
        buffer[r * cols + c] = to;
    }
    this.rows = rows;
    this.cols = cols;
    this.buffer = buffer;
    this.getRow = getRow;
    this.getCol = getCol;
    this.setRC = setRC;
}

Mat.dotProduct = function(vecA, vecB) {
    var acc = 0,
        length = vecA.length,
        a = vecA.start, aInc = vecA.inc, aBuf = vecA.buffer,
        b = vecB.start, bInc = vecB.inc, bBuf = vecB.buffer;

    if (length != vecB.length) {
        throw "dot product vectors of different length";
    }
    while (length--) {
        acc += aBuf[a] * bBuf[b];
        a += aInc;
        b += bInc;
    }
    return acc;
}

Mat.mul = function(A, B, C) {
    if (A.cols != B.rows) {
        throw "A cols != B.rows";
    }
    if (!C) {
        C = new Mat(A.rows, B.cols);
    }
    for (var r = 0; r < C.rows; ++r) {
        var Arow = A.getRow(r);
        for (var c = 0; c < C.cols; ++c) {
            C.setRC(r, c, this.dotProduct(Arow, B.getCol(c)));
        }
    }
    return C;
}

function test() {
    // A.cols == B.rows
    let side = 128;
    let A = new Mat(side, side);
    let B = new Mat(side, side);
    A.buffer.fill(1)
    B.buffer.fill(1)
    console.log("starting test");
    let t0 = Date.now();
    Mat.mul(A, B);
    let t1 = Date.now();
    console.log("time: " + ((t1 - t0) / 1000).toFixed(2) + " seconds");
}

test()
Results for multiplying two square matrices (1.1 GHz Celeron):
// 128 x 128 = 0.05 seconds
// 256 x 256 = 0.14 seconds
// 512 x 512 = 7 seconds
// 768 x 768 = 25 seconds
// 1024 x 1024 = 58 seconds
The differences in CPU speed suggest this approach could be significantly faster but ... the code is experimental, the system had no other load and timings were for array multiplication alone - they exclude time taken to decode and populate the array with data. Any serious gain would need to be proven in practice.
As a sanity check, I reasoned that when multiplying two square matrices, doubling the side dimension should make the operation take about 8 times as long: there are four times as many result elements to calculate, and twice as many elements in the vectors used for each dot product. The comparative times for the 512 x 512 and 1024 x 1024 multiplications fit this expectation.
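On the split + parallelization idea mentioned in the question, here is a minimal sketch (illustrative only; parallelMultiply and mulRows are made-up names, and the matrices are plain arrays of rows) of dividing the rows of A across one worker per available CPU, using only Node's built-in worker_threads and os modules:

const { Worker, isMainThread, parentPort, workerData } = require("worker_threads");
const os = require("os");

// Multiply the row slice A[rowStart..rowEnd) by B (plain arrays of rows).
function mulRows(A, B, rowStart, rowEnd) {
    const inner = B.length, p = B[0].length, out = [];
    for (let r = rowStart; r < rowEnd; r++) {
        const row = new Array(p).fill(0), aRow = A[r];
        for (let k = 0; k < inner; k++) {
            const aVal = aRow[k], bRow = B[k];
            for (let c = 0; c < p; c++) row[c] += aVal * bRow[c];
        }
        out.push(row);
    }
    return out;
}

if (isMainThread) {
    function parallelMultiply(A, B) {
        const threads = Math.min(os.cpus().length, A.length); // adapt to the machine at run time
        const chunk = Math.ceil(A.length / threads);
        const jobs = [];
        for (let t = 0; t < threads; t++) {
            const rowStart = t * chunk;
            const rowEnd = Math.min(A.length, rowStart + chunk);
            if (rowStart >= rowEnd) break;
            jobs.push(new Promise((resolve, reject) => {
                const w = new Worker(__filename, { workerData: { A, B, rowStart, rowEnd } });
                w.once("message", resolve);
                w.once("error", reject);
            }));
        }
        // reassemble the result in row order
        return Promise.all(jobs).then(parts => [].concat(...parts));
    }

    // tiny smoke test: [[1,2],[3,4]] x [[5,6],[7,8]] = [[19,22],[43,50]]
    parallelMultiply([[1, 2], [3, 4]], [[5, 6], [7, 8]]).then(C => console.log(C));
} else {
    const { A, B, rowStart, rowEnd } = workerData;
    parentPort.postMessage(mulRows(A, B, rowStart, rowEnd));
}

Be aware that workerData is structured-cloned, so each worker gets its own copy of A and B; for very large matrices a SharedArrayBuffer-based layout would avoid that copy, but that is beyond this sketch.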

Knapsack variant in JavaScript

I have tried to implement this knapsack problem solution algorithm in JavaScript, but the solution s_opt I get has a total weight greater than L_max.
What am I doing wrong?
I suspect it could be something related to Closures in recursion.
/*
GENERAL:
Assume we have a knapsack and we want to bring as much stuff as possible.
Of each thing we have several variants to choose from. Each of these variants has a
different value and takes a different amount of space.
DEFINITIONS:
L_max = integer, size of the knapsack for the entire problem having N items
l = matrix, having the elements l[i-1][j-1] representing the space taken
by variant j of item i (-1 since indexing the matrices has index starting on zero, i.e. item i is stored at position i-1)
p = matrix, having the elements p[i-1][j-1] representing the value given by
by variant j of item i
n = total number of items (used in a sub-problem)
N = total number of items (used in the full problem, N >= n)
s_opt = vector having the optimal combination of variant selections s_i, i.e. s_opt = arg max p_sum
*/
function knapsack(L_max, l, p) {
    // constructing (initializing) - they are private members
    var self = this; // in order for private functions to be able to read variables
    this.N = l.length;
    var DCached = []; // this is only used by a private function so it doesn't need to be made public using this.*
    this.s_opt = [];
    this.p_mean = null;
    this.L_max = L_max;

    // define public optimization function for the entire problem
    // when this is completed the user can read
    //   s_opt to get the solution and
    //   p_mean to know the quality of the solution
    this.optimize = function() {
        self.p_mean = D(self.N, self.L_max) / Math.max(1, self.N);
    }

    // define private sub-problem optimization function
    var D = function(n, r) {
        if (r < 0)
            return -Infinity;
        if (n == 0)
            return 0;
        if (DCached[n-1] != null) {
            if (DCached[n-1][r-1] != null) {
                return DCached[n-1][r-1];
            }
        }
        var p_max = -Infinity;
        var p_sum;
        var J = l[n-1].length;
        for (var j = 0; j < J; j++) {
            p_sum = p[n-1][j] + D(n-1, r - l[n-1][j]);
            if (p_sum > p_max) {
                p_max = p_sum;
                self.s_opt[n-1] = j;
            }
        }
        DCached[n-1] = [];
        DCached[n-1][r-1] = p_max;
        return p_max;
    }
}
The client using this knapsack solver does the following:
var knapsackSolution = new knapsack(5,l,p);
knapsackSolution.optimize();
// now the client can access knapsackSolution.s_opt containing the solution.
I found a solution. When solving a sub-problem D(n,r), the code in the question returned the optimized value but did not manage the array s_opt properly. In the modified solution below I fixed this: instead of returning only the optimized knapsack value, D now also returns the array of chosen variants (i.e. the arg max). The cache is modified accordingly to store both parts of the solution (the max value and the arg max).
The code below also contains an additional feature: the user can pass a value maxComputingComplexity that limits the computational size of the problem in a heuristic manner.
/*
GENERAL:
Assume we have a knapsack and we want to bring as much stuff as possible.
Of each thing we have several variants to choose from. Each of these variants has a
different value and takes a different amount of space.
The quantity of each variant is one.
DEFINITIONS:
L_max = integer, size of the knapsack, e.g. max number of letters, for the entire problem having N items
l = matrix, having the elements l[i-1][j-1] representing the space taken
by variant j of item i (-1 since indexing the matrices has index starting on zero, i.e. item i is stored at position i-1)
p = matrix, having the elements p[i-1][j-1] representing the value given by
by variant j of item i
maxComputingComplexity = value limiting the product L_max*self.N*M_max in order to make the optimization
complete in limited amount of time. It has a serious implication, since it may cut the list of alternatives
so that only the first alternatives are used in the computation, meaning that the input should be well
ordered
n = total number of items (used in a sub-problem)
N = total number of items (used in the full problem, N >= n)
M_i = number of variants of item i
s_i = which variant is chosen to pack of item i
s = vector of elements s_i representing a possible solution
r = maximum total space in the knapsack, i.e. sum(l[i][s_i]) <= r
p_sum = sum of the values of the selected variants, i.e. sum(p[i][s_i]
s_opt = vector having the optimal combination of variant selections s_i, i.e. s_opt = arg max p_sum
In order to solve this, let us see p_sum as a function
D(n,r) = p_sum (just seeing it as a function of the sub-problem n combined with the maximum total space r)
RESULT:
*/
function knapsack(L_max, l, p, maxComputingComplexity) {
    // constructing (initializing) - they are private members
    var self = this; // in order for private functions to be able to read variables
    this.N = l.length;
    var DCached = []; // this is only used by a private function so it doesn't need to be made public using this.*
    //this.s_opt = [];
    //this.p_mean = null;
    this.L_max = L_max;
    this.maxComputingComplexity = maxComputingComplexity;
    //console.log("knapsack: Creating knapsack. N=" + N + ". L_max=" + L_max + ".");

    // object to store the solution (both big problem and sub-problems)
    function result(p_max, s_opt) {
        this.p_max = p_max; // max value
        this.s_opt = s_opt; // arg max value
    }

    // define public optimization function for the entire problem
    // when this is completed the user can read
    //   s_opt to get the solution and
    //   p_mean to know the quality of the solution
    // computing complexity O(L_max*self.N*M_max),
    // think O=L_max*N*M_max => M_max=O/L_max/N => 3=x/140/20 => x=3*140*20 => x=8400
    this.optimize = function() {
        var M_max = Math.max(maxComputingComplexity / (L_max*self.N), 2); // totally useless if not at least two
        console.log("optimize: Setting M_max =" + M_max);
        return D(self.N, self.L_max, M_max);
        //self.p_mean = mainResult.D / Math.max(1,self.N);
        // console.log...
    }

    // Define private sub-problem optimization function.
    // The function reads two "global" variables, p and l,
    // and as arguments it takes
    //   n delimiting which sub-set of items may be included (from p and l)
    //   r setting the max space that this sub-set of items may take
    // Based on these arguments the function optimizes D
    // and returns
    //   D     the max value that can be obtained by combining the things
    //   s_opt the selection (array of length n) of things optimizing D
    var D = function(n, r, M_max) {
        // Start by checking whether the value is already cached...
        if (DCached[n-1] != null) {
            if (DCached[n-1][r-1] != null) {
                //console.log("knapsack.D: n=" + n + " r=" + r + " returning from cache.");
                return DCached[n-1][r-1];
            }
        }
        var D_result = new result(-Infinity, []); // here we will manage the result
        //D_result.s_opt[n-1] = 0; // just put something there to start with
        if (r < 0) {
            //D_result.p_max = -Infinity;
            return D_result;
        }
        if (n == 0) {
            D_result.p_max = 0;
            return D_result;
        }
        var p_sum;
        //self.s_opt[n] = 0; not needed
        var J = Math.min(l[n-1].length, M_max);
        var D_minusOneResult; // storing the result when optimizing all previous items given a max length
        for (var j = 0; j < J; j++) {
            D_minusOneResult = D(n-1, r - l[n-1][j], M_max)
            p_sum = p[n-1][j] + D_minusOneResult.p_max;
            if (p_sum > D_result.p_max) {
                D_result.p_max = p_sum;
                D_result.s_opt = D_minusOneResult.s_opt;
                D_result.s_opt[n-1] = j;
            }
        }
        DCached[n-1] = [];
        DCached[n-1][r-1] = D_result;
        //console.log("knapsack.D: n=" + n + " r=" + r + " p_max= "+ p_max);
        return D_result;
    }
}
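A minimal usage sketch of the modified version (the l and p matrices below are made up purely for illustration):

// two items, each with two variants: l[i][j] = space taken, p[i][j] = value
var l = [[2, 3],
         [2, 4]];
var p = [[3, 4],
         [4, 7]];

var solver = new knapsack(5, l, p, 8400);
var best = solver.optimize(); // optimize() now returns a result object
console.log(best.p_max);      // best total value that fits in space 5
console.log(best.s_opt);      // chosen variant index per item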

Javascript Typed array vs simple array: performance

What I'm basically trying to do is to map an array of data points into a WebGL vertex buffer (Float32Array) in realtime (working on animated parametric surfaces). I've assumed that representing data points with Float32Arrays (either one Float32Array per component: [xx...x, yy...y] or interleave them: xyxy...xy) should be faster than storing them in an array of points: [[x, y], [x, y],.. [x, y]] since that'd actually be a nested hash and all. However, to my surprise, that leads to a slowdown of about 15% in all the major browsers (not counting array creation time). Here's a little test I've set up:
var points = 250000, iters = 100;

function map_2a(x, y) { return Math.sin(x) + y; }

var output = new Float32Array(3 * points);

// generate data
var data = [];
for (var i = 0; i < points; i++)
    data[i] = [Math.random(), Math.random()];

// run
console.time('native');
(function() {
    for (var iter = 0; iter < iters; iter++)
        for (var i = 0, to = 0; i < points; i++, to += 3) {
            output[to] = data[i][0];
            output[to + 1] = data[i][1];
            output[to + 2] = map_2a(data[i][0], data[i][1]);
        }
}());
console.timeEnd('native');

// generate data
var data = [new Float32Array(points), new Float32Array(points)];
for (var i = 0; i < points; i++) {
    data[0][i] = Math.random();
    data[1][i] = Math.random();
}

// run
console.time('typed');
(function() {
    for (var iter = 0; iter < iters; iter++)
        for (var i = 0, to = 0; i < points; i++, to += 3) {
            output[to] = data[0][i];
            output[to + 1] = data[1][i];
            output[to + 2] = map_2a(data[0][i], data[1][i]);
        }
}());
console.timeEnd('typed');
Is there anything I'm doing wrong?
I think your problem is that you are not comparing the same code. In the first example, you have one large array filled with very small arrays. In the second example, you have two very large arrays, and both of them need to be indexed. The profile is different.
If I structure the first example to be more like the second (two large generic arrays), then the Float32Array implementation far outperforms the generic array implementation.
Here is a jsPerf profile to show it.
In V8, values can be SMIs (int31/int32), doubles, or pointers. So I guess that when you read floats from a Float32Array they have to be converted to doubles, whereas with regular arrays the values are already stored as doubles.

Implementing A-Star algorithm. Help please?

I am trying to implement an A* algorithm for my pathfinding robot in JavaScript. The only problem is that I do not understand what it means to find all adjacent squares. I am using the Manhattan distance formula, as I cannot let my bot move diagonally. Here is my code (so far):
var open = new Array();
var closed = new Array();

start = [9, 18]; // do not take this literally
goal = [1, 0];   // again, don't

open.push(start);

while (open.length != 0) {
    for (var x = 0; x < open.length; x++) {
        heuristicValue[x] = computeHeuristicV(maplayout, start[0], start[1], open[x][0], open[x][1], goal[0], goal[1]);
    }
    minimum = Math.min(100000, heuristicValue[0]);
    for (var x = 1; x < open.length; x++) {
        minimum = Math.min(minimum, heuristicValue[x]);
    }
    for (var x = 0; x < open.length; x++) {
        if (minimum == heuristicValue[x]) {
            current = [open[x][0], open[x][1]];
        }
    }
    closed.push(current);
    //INCOMPLETE
}
The computeHeuristicV function computes the heuristic value in the code above.
"All adjacent squares" means every possible next hop on the path.
A* is a great algorithm to master and use. The two key elements are finding neighbors and the heuristic. A heuristic is used to estimate the distance between your current location, and the end. Also, the statement "find all adjacent squares" is referencing a neighbors function. For example, you might have the following:
var heuristic = function(state) {
    var endLocation = MyGame.getEndLocation();
    return Math.abs(state.x - endLocation.x) + Math.abs(state.y - endLocation.y)
}

var neighbors = function(state) {
    var neighborStates = [];
    MyGame.setPlayer({
        x: state.x,
        y: state.y
    });
    neighborStates.push(MyGame.moveUp.getState());
    neighborStates.push(MyGame.moveRight.getState());
    neighborStates.push(MyGame.moveDown.getState());
    neighborStates.push(MyGame.moveLeft.getState());
    return neighborStates;
}
So, getting the "adjacent squares" is just asking you for the neighboring states or options. Personal plug: I just authored a simple a-star algorithm here: https://github.com/tssweeney/async-astar. Reading the description might help you to better understand the problem.
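In grid terms (the question uses [row, col] coordinates), a neighbors function could look like the sketch below; walkable is a hypothetical helper standing in for whatever bounds/obstacle check your map needs:

// Return the walkable squares directly above, below, left and right of [r, c].
function adjacentSquares(maplayout, r, c) {
    var candidates = [[r - 1, c], [r + 1, c], [r, c - 1], [r, c + 1]];
    return candidates.filter(function (sq) {
        return walkable(maplayout, sq[0], sq[1]); // hypothetical bounds/obstacle check
    });
}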
