Cumulative distribution function in Javascript - javascript

I am searching for a way to calculate the cumulative distribution function in JavaScript. Are there classes which have implemented this? Do you have an idea how to get this to work? It does not need to be 100% accurate, but I need a good idea of the value.
http://en.wikipedia.org/wiki/Cumulative_distribution_function

I was able to write my own function with the help of Is there an easily available implementation of erf() for Python? and the knowledge from Wikipedia.
The calculation is not 100% correct, as it is just an approximation.
function normalcdf(mean, sigma, to) {
  var z = (to - mean) / Math.sqrt(2 * sigma * sigma);
  var t = 1 / (1 + 0.3275911 * Math.abs(z));
  var a1 = 0.254829592;
  var a2 = -0.284496736;
  var a3 = 1.421413741;
  var a4 = -1.453152027;
  var a5 = 1.061405429;
  var erf = 1 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * Math.exp(-z * z);
  var sign = 1;
  if (z < 0) {
    sign = -1;
  }
  return (1 / 2) * (1 + sign * erf);
}
normalcdf(30, 25, 1.4241); //-> 0.12651187738346226
//wolframalpha.com 0.12651200000000000

The math.js library provides an erf function. Based on a definition found at Wolfram Alpha, the cdfNormal function can be implemented like this in JavaScript:
const mathjs = require('mathjs')

function cdfNormal (x, mean, standardDeviation) {
  return (1 - mathjs.erf((mean - x) / (Math.sqrt(2) * standardDeviation))) / 2
}
In the node.js console:
> console.log(cdfNormal(5, 30, 25))
> 0.15865525393145707 // Equal to Wolfram Alpha's result at: https://sandbox.open.wolframcloud.com/app/objects/4935c1cb-c245-4d8d-9668-4d353ad714ec#sidebar=compute

This formula will give the correct normal CDF, unlike the currently accepted answer:
function ncdf(x, mean, std) {
  var x = (x - mean) / std
  var t = 1 / (1 + .2315419 * Math.abs(x))
  var d = .3989423 * Math.exp(-x * x / 2)
  var prob = d * t * (.3193815 + t * (-.3565638 + t * (1.781478 + t * (-1.821256 + t * 1.330274))))
  if (x > 0) prob = 1 - prob
  return prob
}
This answer comes from math.ucla.edu
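For a quick sanity check against the value quoted in the first answer (note the different argument order; the last digits differ slightly since both are approximations):
ncdf(1.4241, 30, 25); // ≈ 0.1265, matching normalcdf(30, 25, 1.4241) above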

Due to some needs in the past, I put together an implementation of distribution functions in JavaScript. My library is available on GitHub: https://github.com/chen0040/js-stats
It provides JavaScript implementations of the CDF and inverse CDF for the normal distribution, Student's t distribution, the F distribution and the chi-square distribution.
To use the js lib for obtaining CDF and inverse CDF:
jsstats = require('js-stats');
//====================NORMAL DISTRIBUTION====================//
var mu = 0.0; // mean
var sd = 1.0; // standard deviation
var normal_distribution = new jsstats.NormalDistribution(mu, sd);
var X = 10.0; // point estimate value
var p = normal_distribution.cumulativeProbability(X); // cumulative probability
var p = 0.7; // cumulative probability
var X = normal_distribution.invCumulativeProbability(p); // point estimate value
//====================T DISTRIBUTION====================//
var df = 10; // degrees of freedom for t-distribution
var t_distribution = new jsstats.TDistribution(df);
var t_df = 10.0; // point estimate or test statistic
var p = t_distribution.cumulativeProbability(t_df); // cumulative probability
var p = 0.7;
var t_df = t_distribution.invCumulativeProbability(p); // point estimate or test statistic
//====================F DISTRIBUTION====================//
var df1 = 10; // degrees of freedom for f-distribution
var df2 = 20; // degrees of freedom for f-distribution
var f_distribution = new jsstats.FDistribution(df1, df2);
var F = 10.0; // point estimate or test statistic
var p = f_distribution.cumulativeProbability(F); // cumulative probability
//====================Chi Square DISTRIBUTION====================//
var df = 10; // degrees of freedom for cs-distribution
var cs_distribution = new jsstats.ChiSquareDistribution(df);
var X = 10.0; // point estimate or test statistic
var p = cs_distribution.cumulativeProbability(X); // cumulative probability

This is a brute-force implementation, but accurate to more digits of precision. The approximation above is accurate to within 10^-7. My implementation runs slower (about 700 ns) but is accurate to within 10^-14: normal(25, 30, 1.4241) === 0.00022322110257305683, vs. Wolfram's 0.000223221102572082.
It takes the power series of the standard normal pdf, i.e. the bell-curve, and then integrates the series.
I originally wrote this in C, so I concede some of the optimizations might seem silly in Javascript.
function normal(x, mu, sigma) {
  return stdNormal((x - mu) / sigma);
}

function stdNormal(z) {
  var j, k, kMax, m, values, total, subtotal, item, z2, z4, a, b;

  // Power series is not stable at these extreme tail scenarios
  if (z < -6) { return 0; }
  if (z > 6) { return 1; }

  m = 1;        // m(k) == (2**k)/factorial(k)
  b = z;        // b(k) == z ** (2*k + 1)
  z2 = z * z;   // cache of z squared
  z4 = z2 * z2; // cache of z to the 4th
  values = [];

  // Compute the power series in groups of two terms.
  // This reduces floating point errors because the series
  // alternates between positive and negative.
  for (k = 0; k < 100; k += 2) {
    a = 2 * k + 1;
    item = b / (a * m);
    item *= (1 - (a * z2) / ((a + 1) * (a + 2)));
    values.push(item);
    m *= (4 * (k + 1) * (k + 2));
    b *= z4;
  }

  // Add the smallest terms to the total first so that
  // we minimize the floating point errors.
  total = 0;
  for (k = 49; k >= 0; k--) {
    total += values[k];
  }

  // Multiply total by 1/sqrt(2*PI),
  // then add 0.5 so that stdNormal(0) === 0.5
  return 0.5 + 0.3989422804014327 * total;
}
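Usage, for comparison with the figures quoted above:
normal(25, 30, 1.4241); // ≈ 0.00022322110257305683 (Wolfram: 0.000223221102572082)
stdNormal(0);           // 0.5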

You can also take a look here: it is a scientific calculator implemented in JavaScript that includes erf, and its author claims no copyright on the implementation.

Related

Recreating Blended Bisection and False Position algorithm from reference

I'm trying to recreate a blended bisection algorithm (Algorithm 3) from the website below (the link takes you to the exact section of the algorithm I'm referencing):
https://www.mdpi.com/2227-7390/7/11/1118/htm#sec3-mathematics-07-01118
I'm not quite sure whether what I've typed out so far is correct, and I'm stuck on line 29 of the algorithm from the website: I'm not sure what it means, especially the intersection symbol.
Code so far
/* Math function to test on */
function fn(x) {
  //return x * x - x - 2; /* root for this is x = 2 */
  return x * x * x - 2; /* root for this is x = 2^(1/3) */
}

function blendedMethod(a, b, eps, maxIterations, fn) {
  let k = 0,
      r, fa, fb, ba, bb, eps_a;
  do {
    let m = (a + b) * .5;
    let eps_m = Math.abs(fn(m));
    let fn_a = fn(a),
        fn_r;
    let s = a - ((fn_a * (b - a)) / (fn(b) - fn_a));
    let eps_s = Math.abs(fn(s));
    if (eps_m < eps_s) {
      r = m;
      fn_r = fn(r);
      eps_a = eps_m;
      if (fn_a * fn_r < 0) {
        ba = a;
        bb = r;
      } else {
        ba = r;
        bb = b;
      }
    } else {
      r = s;
      fn_r = fn(r)
      eps_a = eps_s;
      if (fn_a * fn_r < 0) {
        fa = a;
        fb = r;
      } else {
        fa = r;
        fb = b;
      }
      /* line 29 here! */
      /* [a, b] = [ba, bb] ∩ [fa, fb] */
      /* either fa,fb or ba,bb haven't yet been defined */
      /* so this will fail with NaN */
      a = Math.max(ba, fa);
      b = Math.min(bb, fb);
    }
    r = r;
    eps_a = Math.abs(fn_r)
    k = k + 1;
  } while (Math.abs(fn(r)) > eps || k < maxIterations)
  /* think this is the root to return */
  return r;
}
console.log(blendedMethod(1, 4, 0.00001, 1000, fn));
EDIT: Fixed some errors. The only remaining problem is that this algorithm defines either fa,fb or ba,bb inside the conditional statements without defining the other two. So by the time it comes to the calculations below, it fails with NaN and messes up the next iterations.
a = Math.max(ba,fa);
b = Math.min(bb,fb);
You are right in that this intersection makes no sense. In every step only one sub-interval is defined. As all intervals are successively nested subsets, the stale, old values of the interval that was not set in the current iteration are still a superset of the new interval. The new interval could be set directly in each branch, or the method-selection branch could be separated entirely from the interval-selection branch.
The implementation is not very economical, as 6 or more function values are computed per iteration where only 2 evaluations are needed. The idea is that the dominating factor in the time complexity is the function evaluation, so a valid metric for any root-finding algorithm is the number of function evaluations. To that end, always keep point and function value together as a pair: generate them as a pair, assign them as a pair.
let fn_a = f(a), fn_b = f(b);
do {
  let m = (a + b) * .5;
  let fn_m = f(m);
  let s = a - (fn_a * (b - a)) / (fn_b - fn_a);
  let fn_s = f(s);
  let c, fn_c;
  // method selection
  if (Math.abs(fn_m) < Math.abs(fn_s)) {
    c = m; fn_c = fn_m;
  } else {
    c = s; fn_c = fn_s;
  }
  // sub-interval selection
  if (fn_a * fn_c > 0) {
    a = c; fn_a = fn_c;
  } else {
    b = c; fn_b = fn_c;
  }
} while (Math.abs(b - a) > eps);
It is also not clear in what way the blended method avoids or alleviates the shortcomings of the underlying algorithms. To avoid the stalling of the regula falsi method (where one endpoint stays fixed over many secant steps), it would be better to introduce a stall counter and apply a bisection step after 1 or 2 stalled steps, or to simply alternate the false-position and bisection steps. Both variants ensure the reduction of the bracketing interval.
Known effective modifications of the regula falsi method are, on one hand, variations like the Illinois variant, which add a weight factor to the function values and thus shift the root approximation towards the repeated, stalled interval bound. On the other hand, there are more general algorithms that combine the ideas of the bracketing interval and inverse interpolation, such as the Dekker and Brent methods.
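As an illustration of the Illinois idea mentioned above, here is a minimal sketch (my own, not taken from the question or the cited paper): whenever the same endpoint is kept twice in a row, its stored function value is halved, which pulls the next secant step back towards that side.
function illinois(f, a, b, eps, maxIterations = 100) {
  let fa = f(a), fb = f(b);
  let side = 0; // which endpoint was kept in the previous step
  for (let k = 0; k < maxIterations; k++) {
    const c = a - fa * (b - a) / (fb - fa); // false-position (secant) step
    const fc = f(c);
    if (Math.abs(fc) < eps) return c;
    if (fa * fc < 0) {            // root lies in [a, c]
      b = c; fb = fc;
      if (side === -1) fa /= 2;   // a was kept again: damp its weight
      side = -1;
    } else {                      // root lies in [c, b]
      a = c; fa = fc;
      if (side === 1) fb /= 2;    // b was kept again: damp its weight
      side = 1;
    }
  }
  return a - fa * (b - a) / (fb - fa);
}
// e.g. illinois(x => x*x*x - 2, 1, 4, 1e-10) ≈ 1.2599210498948732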

How to get a random value from truncated normal distribution with mean and std in JavaScript?

What is a JS alternative to the same Python implementation?
import matplotlib.pyplot as plt
from scipy.stats import truncnorm
import numpy as np
mean = 1
std = 2
clip_a = -4
clip_b = 3
a, b = (clip_a - mean) / std, (clip_b - mean) / std
x_range = np.linspace(-3 * std, 3 * std, 1000)
plt.plot(x_range, truncnorm.pdf(x_range, a, b, loc = mean, scale = std));
I'd like to get a random value according to the distribution (in JS the same code with size=1):
dist = truncnorm.rvs(a, b, loc = mean, scale = std, size=1000000)
plt.hist(dist);
Here is a JS function that implements a truncated, skew-normal pseudo random number generator (PRNG). It is based on this blog post by Tom Liao and has been extended to consider lower and upper bounds (truncation).
Essentially, the function is called recursively until a variate within the desired bounds is found.
You can pass your own random number generator using the rng property, though Math.random will be used by default. Also, as you didn't ask for a skew-normal distribution, you can just ignore the skew property as it defaults to 0. This will leave you with a truncated normal PRNG, just as you asked for.
function randomTruncSkewNormal({
  rng = Math.random,
  range = [-Infinity, Infinity],
  mean,
  stdDev,
  skew = 0
}) {
  // Box-Muller transform
  function randomNormals(rng) {
    let u1 = 0,
        u2 = 0;
    // Convert [0,1) to (0,1)
    while (u1 === 0) u1 = rng();
    while (u2 === 0) u2 = rng();
    const R = Math.sqrt(-2.0 * Math.log(u1));
    const Θ = 2.0 * Math.PI * u2;
    return [R * Math.cos(Θ), R * Math.sin(Θ)];
  }

  // Skew-normal transform
  // If a variate is either below or above the desired range,
  // we recursively call the randomSkewNormal function until
  // a value within the desired range is drawn
  function randomSkewNormal(rng, mean, stdDev, skew = 0) {
    const [u0, v] = randomNormals(rng);
    if (skew === 0) {
      const value = mean + stdDev * u0;
      if (value < range[0] || value > range[1])
        return randomSkewNormal(rng, mean, stdDev, skew);
      return value;
    }
    const sig = skew / Math.sqrt(1 + skew * skew);
    const u1 = sig * u0 + Math.sqrt(1 - sig * sig) * v;
    const z = u0 >= 0 ? u1 : -u1;
    const value = mean + stdDev * z;
    if (value < range[0] || value > range[1])
      return randomSkewNormal(rng, mean, stdDev, skew);
    return value;
  }

  return randomSkewNormal(rng, mean, stdDev, skew);
}
Calling this function in the following manner
const data = [];
for (let i = 0; i < 50000; i++) {
  data.push({
    x: i,
    y: randomTruncSkewNormal({
      range: [-4, 3],
      mean: 1,
      stdDev: 2
    })
  });
}
and plotting the data using your charting library of choice should give you the desired output.
I also made a small Observable notebook interactively demonstrating the function which you might want to check out as well.

Writing your own exponential power function with decimals

So, I want to write a function in code using some sort of algorithm to calculate any number to any power, including decimals. I use JavaScript and it already has an inbuilt pow function:
Math.pow(2, 0.413) // 2^0.413 = 1.331451613236371, took under 1 second.
Now I want to write my own like this:
function pow(x, y) {
// Algorithm
}
This is a function that calculates the square root of any number (x^0.5), and it's very accurate with only 10 loops:
function sqrt(x, p) { // p = precision (accuracy)
  var a = 1;
  var b = x;
  while (p--) {
    a = (a + b) / 2
    b = x / a
  }
  return a
}
Is there any simple formula to calculate any exponential?
If there isn't a simple one, is there a hard one?
If the solution is slow, how does JavaScript's pow manage to compute an estimate in under a second?
Here's a nice algorithm for positive integer powers. It starts by dealing with some simple cases and then uses a loop testing the binary bits of the exponent. For example, to find 3^11: 11 in binary is 1011, so the stages in the loop are
bitMask = 1011, evenPower = 3, result = 3
bitMask = 101, evenPower = 3*3 = 9, result = 3*9 = 27
bitMask = 10, evenPower = 9*9 = 81, result = 27
bitMask = 1, evenPower = 81*81 = 6561, result = 27*6561 = 177147
That is, evenPower squares on each pass through the loop, and the result gets multiplied by evenPower if the bottom bit is 1. The code has been lifted from Patricia Shanahan's method (http://mindprod.com/jgloss/power.html), which in turn has its roots in Knuth and can be traced back to 200 BC in India.
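For reference, here is a compact JavaScript sketch of the same square-and-multiply idea (my own illustration; the Java routine from the linked page follows below):
function ipow(x, n) { // n a non-negative integer
  let result = 1, evenPower = x, bitMask = n;
  while (bitMask !== 0) {
    if (bitMask & 1) result *= evenPower; // bottom bit set: multiply into result
    evenPower *= evenPower;               // square for the next bit
    bitMask >>>= 1;
  }
  return result;
}
// ipow(3, 11) === 177147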
/**
 * A fast routine for computing integer powers.
 * Code adapted from {@link efficient power} by Patricia Shanahan pats@acm.org
 * Almost identical to the method Knuth gives on page 462 of The Art of Computer Programming Volume 2 Seminumerical Algorithms.
 * @param l number to be taken to a power.
 * @param n power to take x to. 0 <= n <= Integer.MAX_VALUE
 *          Negative numbers will be treated as unsigned positives.
 * @return x to the power n
 */
public static final double power(double l, int n)
{
    assert n >= 0;
    double x = l;
    switch (n) {
    case 0: x = 1.0; break;
    case 1: break;
    case 2: x *= x; break;
    case 3: x *= x * x; break;
    case 4: x *= x; x *= x; break;
    case 5: { double y = x * x; x *= y * y; } break;
    case 6: { double y = x * x; x = y * y * y; } break;
    case 7: { double y = x * x; x *= y * y * y; } break;
    case 8: x *= x; x *= x; x *= x; break;
    default:
    {
        int bitMask = n;
        double evenPower = x;
        double result;
        if ((bitMask & 1) != 0)
            result = x;
        else
            result = 1;
        bitMask >>>= 1;
        while (bitMask != 0) {
            evenPower *= evenPower;
            if ((bitMask & 1) != 0)
                result *= evenPower;
            bitMask >>>= 1;
        } // end while
        x = result;
    }
    }
    return x;
}
For a real exponent you will basically need ways of finding exp and log. You can use Taylor series, which are the simplest to get, but there are much better methods. We have
exp(x) = 1 + x + x^2/2 + x^3/6 + x^4/24 + x^5/120 + x^6/6! + ...
ln(1+x) = x - x^2 /2 + x^3 /3 - x^4 / 4 + x^5 / 5 - x^6/6 + ... |x|<1
To find x^y, note ln(x^y) = y*ln(x). Now we need to get the argument into the right range so we can use our power series. Let x = m * 2^ex, with the mantissa and exponent chosen so that 1/sqrt(2) <= m < sqrt(2); then ln(m*2^ex) = ln(m) + ex*ln(2). Let h = m-1 and find ln(1+h).
Using Java and floats, as there is an easy way to get at the internals of the IEEE representation (I've used float as there are fewer bits to cope with):
int b = Float.floatToIntBits(x);
int sign = (b & 0x80000000) == 0 ? 1 : -1;
int mattissa = b & 0x007fffff;
int ex = ((b & 0x7f800000) >> 23 ) - 127;
In JavaScript it might be easiest to use Number.toExponential and parse the results.
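Alternatively, a minimal JavaScript sketch using typed arrays (my own illustration, assuming IEEE-754 doubles and ignoring zero, subnormals, NaN and Infinity; frexp2 is a hypothetical helper name):
function frexp2(x) {
  // Split x into mantissa m and exponent ex with x = m * 2^ex and 1 <= m < 2
  const view = new DataView(new ArrayBuffer(8));
  view.setFloat64(0, x);                                  // big-endian by default
  const ex = ((view.getUint32(0) >>> 20) & 0x7ff) - 1023; // unbiased exponent
  const m = x / Math.pow(2, ex);                          // mantissa in [1, 2)
  return [m, ex];
}
// frexp2(10) -> [1.25, 3], since 10 === 1.25 * 2^3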
Next construct a number z in the desired range 1/sqrt(2) < z < sqrt(2)
int bits = mattissa | 0x3f800000;
float z = Float.intBitsToFloat(bits);
if (z > root2) {
    z = z / 2;
    ++ex;
}
Use this function to find the log of 1+x using a Taylor series:
static float ln1px(float x) {
    float x_2 = x * x; // powers of x
    float x_3 = x_2 * x;
    float x_4 = x_3 * x;
    float x_5 = x_4 * x;
    float x_6 = x_5 * x;
    float res = x - x_2 / 2 + x_3 / 3 - x_4 / 4 + x_5 / 5 - x_6 / 6;
    return res;
}
This seems to be good to three significant figures, often much better when x is close to 0.
The log of our number x can then be found
float w = z - 1;
float ln_z = ln1px(w);
float ln_x = ln_z + ln2 * ex;
System.out.println("ln "+ln_x+"\t"+Math.log(x));
Now to the actual power. If we write y = n + a where n is an integer and a is fractional, then x^y = x^(n+a) = x^n * x^a. Use the first algorithm in this answer to find x^n. Writing x = m * 2^ex, we have ln((m*2^ex)^a) = a*ln(m) + a*ex*ln(2) and
x^a = exp(ln((m*2^ex)^a)) = exp(a * ln(m)) * exp(a * ln(2))^ex
The two exponential terms have fairly small arguments, so the Taylor series should be good.
We need one function for the Taylor series of the exponential function:
static float exp(float x) {
    float x_2 = x * x; // powers of x
    float x_3 = x_2 * x;
    float x_4 = x_3 * x;
    float x_5 = x_4 * x;
    float x_6 = x_5 * x;
    float res = 1 + x + x_2 / 2 + x_3 / 6 + x_4 / 24 + x_5 / 120 + x_6 / 720;
    return res;
}
Finally we can put the pieces together:
// Get integer and fractional parts of y
int n = (int) Math.floor(y);
float a = y-n;
float x_n = power(x,n); // x^n
float a_ln_m = a * ln_z; // ln(m^a) = a ln(m)
float a_ln_2 = a * ln2; // ln(2^a) = a ln(2)
float m_a = exp(a_ln_m); // m^a = exp(a ln(m))
float _2_a = exp(a_ln_2); // 2^a = exp(a ln(2))
float _2_a_ex = power(_2_a,ex); // (2^ex)^a = 2^(a*ex) = (2^a)^ex
float x_a = m_a * _2_a_ex; // x^a = m^a * 2^(a*ex)
float x_y = x_n * x_a; // x^y = x^n * x^a
System.out.println("x^y "+x_y+"\t"+Math.pow(x,y));
That should be the complete program; you need some smarts to cope with negative arguments, etc.
Note this is not particularly accurate, as I've only used a few terms of the Taylor series. Other SO questions have more detailed answers: How can I write a power function myself?
Those are some really nice examples, here is a simpler one too.
function exponential(a, b) {
  var c = 1;
  for (var i = 1; i <= b; i++) {
    c = c * a;
  }
  return c;
}
now call the function:
exponential(2,4);
Edit: It only works on integer exponents, but it's simple and quick.
I checked this post, but it worked only for whole numbers (1, 2, 3... not 0.1, 0.3...): Recursive power function: Why does this work if there's no initial return value?
Then I got this from here: Algorithm for pow(float, float)
function power(x, n) {
  if (n === 0) return 1;
  if (n === -1) return 1 / x;
  if (n === 1) return x;
  return Math.exp(n * Math.log(x))
}
console.log(power(2,3.5));
I added some basic checks (n === 0, ...) to speed things up in those cases.
Flexo sums it up:
The general algorithm tends to be computing the float power as the combination of the integer power and the remaining root. The integer power is fairly straightforward; the root can be computed using either the Newton-Raphson method or a Taylor series. IIRC Numerical Recipes in C has some text on this. There are other (potentially better) methods for doing this too, but this would make a reasonable starting point for what is a surprisingly complex problem to implement. Note also that some implementations use lookup tables and a number of tricks to reduce the computation required.
http://mathworld.wolfram.com/NewtonsMethod.html
http://mathworld.wolfram.com/TaylorSeries.html
https://en.wikipedia.org/wiki/Logarithm#Power_series
https://rads.stackoverflow.com/amzn/click/0521431085
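As a tiny illustration of the "integer power plus remaining root" idea from the quote above (my own sketch, not from the quoted answer): approximate the fractional part of the exponent by a rational p/q and extract the q-th root with a Newton-Raphson iteration.
function nthRoot(x, q, iterations = 40) { // x > 0, q a small positive integer
  let r = x > 1 ? x : 1; // crude but safe starting guess
  for (let i = 0; i < iterations; i++) {
    r = ((q - 1) * r + x / Math.pow(r, q - 1)) / q; // Newton step for r^q = x
  }
  return r;
}
// e.g. 2^(3/4) ≈ nthRoot(2, 4) ** 3 ≈ 1.6818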

d3.js How to simplify a complex path - using a custom algorithm

I've got a very basic example here (http://jsfiddle.net/jEfsh/57/) that creates a complex path with lots of points. I've read up on an algorithm that may look over the points and create a simpler set of coordinates. Does anyone have any experience with this? Any examples of how to loop through the path data and pass it through the algorithm, to find the smallest set of points that creates a more rudimentary version of the shape?
http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
var points = "M241,59L237,60L233,60L228,61L224,61L218,63L213,63L209,65L204,66L199,67L196,68L193,69L189,70L187,72L184,72L182,74L179,75L177,76L175,78L173,79L170,81L168,83L165,85L163,87L161,89L159,92L157,95L157,97L155,102L153,105L152,110L151,113L151,117L151,123L150,137L148,180L148,185L148,189L148,193L148,197L148,202L148,206L149,212L151,218L152,222L154,229L154,232L155,235L157,239L158,241L160,245L162,247L163,249L165,251L167,254L169,256L172,258L175,260L178,261L183,265L188,268L193,270L206,273L213,275L220,275L225,275L232,276L238,277L243,277L249,277L253,277L259,277L266,277L271,277L277,277L281,277L284,277L288,277L293,277L297,276L302,274L305,272L308,271L311,268L313,267L315,264L318,261L322,257L324,254L326,249L329,244L331,241L332,239L334,234L338,230L339,226L341,222L343,218L345,213L347,211L348,207L349,201L351,196L352,192L353,187L353,183L353,180L353,178L354,176L354,173L354,170L354,168L354,167L354,166L354,164L354,162L354,161L354,159L354,158L354,155L354,152L354,149L352,143L349,137L347,133L343,125L340,119 M241,59L340,119";
d3.select("#g-1").append("path").attr("d", points);
//simplify the path
function DouglasPeucker(){
}
/*
//http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
function DouglasPeucker(PointList[], epsilon)
// Find the point with the maximum distance
dmax = 0
index = 0
end = length(PointList)
for i = 2 to ( end - 1) {
d = shortestDistanceToSegment(PointList[i], Line(PointList[1], PointList[end]))
if ( d > dmax ) {
index = i
dmax = d
}
}
// If max distance is greater than epsilon, recursively simplify
if ( dmax > epsilon ) {
// Recursive call
recResults1[] = DouglasPeucker(PointList[1...index], epsilon)
recResults2[] = DouglasPeucker(PointList[index...end], epsilon)
// Build the result list
ResultList[] = {recResults1[1...end-1] recResults2[1...end]}
} else {
ResultList[] = {PointList[1], PointList[end]}
}
// Return the result
return ResultList[]
end
*/
It's not clear what your problem is exactly. Do you have problems turning the SVG data string into a list of points? You can use this:
function path_from_svg(svg) {
  var pts = svg.split(/[ML]/);
  var path = [];
  console.log(pts.length);
  for (var i = 1; i < pts.length; i++) {
    path.push(pts[i].split(","));
  }
  return path;
}
It is a very simple approach: It splits the string on all move (M) and line (L) commands and treats them as lines. It then splits all substrings on the comma. The first "substring" is ignored, because it is the empty string before the first M. If there is a way to do this better in d3 I haven't found it.
The reverse operation is easier:
function svg_to_path(path) {
  return "M" + path.join("L");
}
This is equivalent to svg.line.interpolate("linear").
You can then implement the Douglas-Peucker algorithm on this path data recursively:
function path_simplify_r(path, first, last, eps) {
  if (first >= last - 1) return [path[first]];
  var px = path[first][0];
  var py = path[first][1];
  var dx = path[last][0] - px;
  var dy = path[last][1] - py;
  var nn = Math.sqrt(dx * dx + dy * dy);
  var nx = -dy / nn;
  var ny = dx / nn;
  var ii = first;
  var max = -1;
  for (var i = first + 1; i < last; i++) {
    var p = path[i];
    var qx = p[0] - px;
    var qy = p[1] - py;
    var d = Math.abs(qx * nx + qy * ny);
    if (d > max) {
      max = d;
      ii = i;
    }
  }
  if (max < eps) return [path[first]];
  var p1 = path_simplify_r(path, first, ii, eps);
  var p2 = path_simplify_r(path, ii, last, eps);
  return p1.concat(p2);
}

function path_simplify(path, eps) {
  var p = path_simplify_r(path, 0, path.length - 1, eps);
  return p.concat([path[path.length - 1]]);
}
The distance to the line is not calculated in a separate function but directly, using the formula for the distance of a point to a 2D line based on the normal {nx, ny} of the line vector {dx, dy} between the first and last points. The normal is normalised, nx*nx + ny*ny == 1.
When creating the paths, only the first point is added, the last point path[last] is implied and must be added in path_simplify, which is a front end to the recursive function path_simplify_r. This approach was chosen so that concatenating the left and right subpaths does not create a duplicate point in the middle. (This could equally well and maybe cleaner be done by joining p1 and p2.slice(1).)
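Putting the pieces together looks roughly like this (a usage sketch; eps = 5 is just an arbitrary tolerance in the same units as the path coordinates, and points is the path string from the question):
var simplified = path_simplify(path_from_svg(points), 5);
d3.select("#g-1").append("path").attr("d", svg_to_path(simplified));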
Here's everything put together in a fiddle: http://jsfiddle.net/Cbk9J/3/
Lots of good references in the comments to this question -- alas they are comments and not suggested answers which can be truly voted on.
http://bost.ocks.org/mike/simplify/
shows interactive use of this kind of thing which references Douglas-Peucker but also Visvalingam.

Neural Network --- Back propagation algorithm error (In javascript!)

I have read quite a few backprop algorithms and I have no idea why mine is doing what it is doing.
Some notes before reading
A network with no hidden layer can "learn" all linear equations given a training set. Using 5 / 2 training it will even learn to within a 0.01% average error (fairly low).
When a hidden layer is used, the network will only output 2 values: one if both inputs are positive, one if both inputs are negative.
The activation function of inputs -> hiddens -> (up to) outputs is f(x) = 1 / (1 + e^-x)
The activation function of outputs is linear (f(x) = x)
Error calculations
I believe this is where my possible error is!
Outputs: E(k) = (target(k) - O(k).output) * f'(O(k).output) = (target - actual) * 1 (the linear activation function has derivative 1)
Inputs and hiddens: E(j) = sum(w(kj) * E(k)) * f'(N(j).output) = sum(w(kj) * E(k)) * N(j).output * (1 - N(j).output)
The full source code can be found here
http://www.github.com/primeagen/neural-js
The source! It's in JavaScript!
Remember: the feedforward pass and output error seem to be correct, since a network with no hidden layer can learn any linear function and extrapolate well beyond its training set with a 0.01% error. So I believe that part is correct.
Back prop error calculation
// Output layer
for (var i = 0; i < this._outputs.length; i++) {
  var o = this._outputs[i];
  o.error = targetOutputs[i] - o.output;
  this._mse += o.error * o.error;
}

// Go through hidden layers
for (var cIdx = this._layers.length - 2; cIdx > 0; cIdx--) {
  var curr = this._layers[cIdx];
  var next = this._layers[cIdx + 1];

  // Go through hidden neurons
  for (var hN = 0, hLen = curr.length; hN < hLen; hN++) {
    var h = curr[hN];
    var sum = 0;
    for (var nN = 0, nLen = next.length; nN < nLen; nN++) {
      var n = next[nN];
      sum += n.w[hN] * n.error;
    }
    h.error = sum * h.dActivationFn(h.output);
  }
}
The activation function and its derivative
/**
 * The logistic function is 1 / (1 + e^(-x))
 * @param x
 */
function logisticFunction(x) {
  return 1 / (1 + Math.pow(Math.E, -x));
}

/**
 * The derivative of the logistic function
 * @param {Number} x
 * @returns {Number}
 */
function dLogisticFunction(x) {
  return x * (1 - x);
}

Neuron.dActivation = dLogisticFunction
My network just converges onto an answer (it's random), and no matter the input (when positive) the value will not change when trained with 100+ data points...
Any idea?
