I want to make an "overflow" scale that will reset value and continue count from zero if it overflows limit
For example, I want to draw a chart but change the original 0 point place. The first column represents a normal scale, and in the second column I shifted the scale
5 2
4 1
3 0
2 5
1 4
0 3
I think I could alter the input data and convert the first half of the values to negative so the domain would look like [-2, 3]. But I would prefer not to alter the input data values and just have a parameter in the scale that can do this for me.
Update:
I've tried using more than two domain/range values, but it didn't give me the results I want, so I wrote a custom interpolation function.
I doubt I'm the only one who needs something like this, and d3.js probably provides something similar. If you know of such a function in the standard d3.js distribution, please point me to it.
const svg = d3.select("svg").attr('height', 200);
myinterpolate = function(a, b) {
var a = a;
var b = b;
var shift = 100;
var max_value = Math.max(a, b)
var min_value = Math.min(a, b)
return function(t) {
let result = a * (1 - t) + b * t + shift;
if (result < min_value) {
result = result + max_value
} else if (result > max_value) {
result = result % max_value
}
return result
}
;
}
const myScale = d3.scaleLinear().domain([0, 10]).range([200, 0]).interpolate(myinterpolate);
const axis = d3.axisLeft(myScale).tickValues(d3.range(10)).tickFormat(d=>~~d)(svg.append("g").attr("transform", "translate(50,0)"))
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/5.7.0/d3.min.js"></script>
<svg><svg>
You said "I would prefer not to alter input data values and just have a parameter in scale that can do this for me", but that's a very specific and highly unusual use case, so there is none.
What you could easily do is creating a piecewise scale with more than 2 domain/range values:
const svg = d3.select("svg");
const myScale = d3.scaleLinear()
.domain([0, 3, 4, 6])
.range([75, 6, 144, 98]);
const axis = d3.axisLeft(myScale).tickValues(d3.range(7)).tickFormat(d => ~~d)(svg.append("g").attr("transform", "translate(50,0)"))
path {
stroke: none;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/5.7.0/d3.min.js"></script>
<svg></svg>
This simply answers your question as asked. Mind you, it raises several other questions: where are you drawing the values between 3 and 4? What is the meaning of the area between 6 and 0?
A custom interpolation function worked for me.
My code snippet uses a hardcoded value for the shift, but it can be rewritten to take the shift value as a parameter (see the sketch after the snippet).
const svg = d3.select("svg").attr('height', 200);
myinterpolate = function(a, b) {
var a = a;
var b = b;
var shift = 100;
var max_value = Math.max(a, b)
var min_value = Math.min(a, b)
return function(t) {
let result = a * (1 - t) + b * t + shift;
if (result < min_value) {
result = result + max_value
} else if (result > max_value) {
result = result % max_value
}
return result
}
;
}
const myScale = d3.scaleLinear().domain([0, 10]).range([200, 0]).interpolate(myinterpolate);
const axis = d3.axisLeft(myScale).tickValues(d3.range(10)).tickFormat(d=>~~d)(svg.append("g").attr("transform", "translate(50,0)"))
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/5.7.0/d3.min.js"></script>
<svg><svg>
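For reference, here is a minimal sketch of how the same interpolator could take the shift as a parameter instead of hardcoding it; makeShiftInterpolator is a hypothetical factory name, not something provided by d3:
// Hypothetical factory: returns a d3 interpolator factory with a configurable shift.
function makeShiftInterpolator(shift) {
  return function(a, b) {
    const max_value = Math.max(a, b);
    const min_value = Math.min(a, b);
    return function(t) {
      let result = a * (1 - t) + b * t + shift;
      if (result < min_value) {
        result = result + max_value;
      } else if (result > max_value) {
        result = result % max_value;
      }
      return result;
    };
  };
}
// Usage: same scale as above, but the shift is no longer hardcoded.
const shiftedScale = d3.scaleLinear().domain([0, 10]).range([200, 0]).interpolate(makeShiftInterpolator(100));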
Goal
I am at the final stage of scripting a Luhn algorithm.
Problem
Let's say I have a final sum of 73.
How can I round it up to the next multiple of 10, so that the final value is 80?
And lastly, how can I get the value that was added to reach it? E.g. 7 is the answer here.
Current code
function validateCred(array) {
// Array to return the result of the algorithm
const algorithmValue = [];
// Create a [2, 1, 2] Pattern
const pattern = array.map((x, y) => {
return 2 - (y % 2);
});
// From given array, multiply each element by it's pattern
const multiplyByPattern = array.map((n, i) => {
return n * pattern[i];
});
// From the new array, split the numbers with length of 2 e.g. 12 and add them together e.g. 1 + 2 = 3
multiplyByPattern.forEach(el => {
// Check for lenght of 2
if(el.toString().length == 2) {
// Split the number
const splitNum = el.toString().split('');
// Add the 2 numbers together
const addSplitNum = splitNum.map(Number).reduce(add, 0);
// Function to add number together
function add(accumalator, a) {
return accumalator + a;
}
algorithmValue.push(addSplitNum);
}
// Check for lenght of 1
else if(el.toString().length == 1){
algorithmValue.push(el);
}
});
// Sum up the algorithmValue together
const additionOfAlgorithmValue = algorithmValue.reduce((a, b) => {
return a + b;
});
// Mod the final value by 10
if((additionOfAlgorithmValue % 10) == 0) {
return true;
}
else{
return false;
}
}
// Output is False
console.log(validateCred([2,7,6,9,1,4,8,3,0,4,0,5,9,9,8]));
Summary of the code above
The output should be true. This is because I have given an array of 15 digits, whereas it should have 16. I know the 16th value is 7, because the total for the given array is 73, and rounding it up to the next multiple of 10 gives 80, meaning the check digit is 7.
Question
How can I get the check digit when the given array has fewer than 16 digits?
You could do something like this:
let x = [73, 81, 92, 101, 423];
let y = x.map((v) => {
  let remainder = v % 10;
  let nextRounded = v + (10 - remainder);
  /* or you could use
  let nextRounded = (parseInt(v / 10) + 1) * 10;
  */
  let amountToNextRounded = 10 - remainder;
  return [nextRounded, amountToNextRounded];
});
console.log(y);
EDIT
As noted by @pilchard (https://stackoverflow.com/users/13762301/pilchard), you can find nextRounded in this simpler way:
let nextRounded = v + (10 - remainder);
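Note that v + (10 - remainder) jumps to the next multiple of 10 even when v is already a multiple of 10; if you want such values to stay put, a small variant (a sketch, not part of the answer above) could be:
// Round up to the next multiple of 10, leaving exact multiples unchanged.
function roundUpToTen(v) {
  const remainder = v % 10;
  return remainder === 0 ? v : v + (10 - remainder);
}
console.log(roundUpToTen(73)); // 80
console.log(roundUpToTen(80)); // 80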
I think what you need is this:
var oldNum = 73;
var newNum = Math.ceil((oldNum + 1) / 10) * 10;
Then check the difference using this:
Math.abs(newNum - oldNum);
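To tie this back to the Luhn question itself: once you have the sum over the 15 known digits, the check digit is whatever brings that sum up to the next multiple of 10. A minimal sketch (checkDigitFromSum is a hypothetical helper name, not from either answer):
// For a Luhn sum of 73, the check digit is 7, since 73 + 7 = 80.
// The outer modulo handles the case where the sum is already a multiple of 10.
function checkDigitFromSum(sum) {
  return (10 - (sum % 10)) % 10;
}
console.log(checkDigitFromSum(73)); // 7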
What is a JS alternative to the following Python implementation?
import matplotlib.pyplot as plt
from scipy.stats import truncnorm
import numpy as np
mean = 1
std = 2
clip_a = -4
clip_b = 3
a, b = (clip_a - mean) / std, (clip_b - mean) / std
x_range = np.linspace(-3 * std, 3 * std, 1000)
plt.plot(x_range, truncnorm.pdf(x_range, a, b, loc = mean, scale = std));
I'd like to get a random value drawn from this distribution (i.e. the JS equivalent of the code below with size=1):
dist = truncnorm.rvs(a, b, loc = mean, scale = std, size=1000000)
plt.hist(dist);
Here is a JS function that implements a truncated, skew-normal pseudo random number generator (PRNG). It is based on this blog post by Tom Liao and has been extended to consider lower and upper bounds (truncation).
Essentially, the function is called recursively until a variate within the desired bounds is found.
You can pass your own random number generator using the rng property, though Math.random will be used by default. Also, as you didn't ask for a skew-normal distribution, you can just ignore the skew property as it defaults to 0. This will leave you with a truncated normal PRNG, just as you asked for.
function randomTruncSkewNormal({
  rng = Math.random,
  range = [-Infinity, Infinity],
  mean,
  stdDev,
  skew = 0
}) {
  // Box-Muller transform
  function randomNormals(rng) {
    let u1 = 0,
      u2 = 0;
    // Convert [0,1) to (0,1)
    while (u1 === 0) u1 = rng();
    while (u2 === 0) u2 = rng();
    const R = Math.sqrt(-2.0 * Math.log(u1));
    const Θ = 2.0 * Math.PI * u2;
    return [R * Math.cos(Θ), R * Math.sin(Θ)];
  }
  // Skew-normal transform
  // If a variate is either below or above the desired range,
  // we recursively call the randomSkewNormal function until
  // a value within the desired range is drawn
  function randomSkewNormal(rng, mean, stdDev, skew = 0) {
    const [u0, v] = randomNormals(rng);
    if (skew === 0) {
      const value = mean + stdDev * u0;
      if (value < range[0] || value > range[1])
        return randomSkewNormal(rng, mean, stdDev, skew);
      return value;
    }
    const sig = skew / Math.sqrt(1 + skew * skew);
    const u1 = sig * u0 + Math.sqrt(1 - sig * sig) * v;
    const z = u0 >= 0 ? u1 : -u1;
    const value = mean + stdDev * z;
    if (value < range[0] || value > range[1])
      return randomSkewNormal(rng, mean, stdDev, skew);
    return value;
  }
  return randomSkewNormal(rng, mean, stdDev, skew);
}
Calling this function in the following manner
const data = [];
for (let i = 0; i < 50000; i++) {
  data.push({
    x: i,
    y: randomTruncSkewNormal({
      range: [-4, 3],
      mean: 1,
      stdDev: 2
    })
  });
}
and plotting the data using your charting library of choice should give you the desired output.
I also made a small Observable notebook interactively demonstrating the function which you might want to check out as well.
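If you want a quick sanity check without a charting library (the analogue of plt.hist in the Python snippet), a rough console histogram could look like this; the 14 bins and the scaling divisor of 200 are arbitrary choices:
// Draw 50,000 samples and bin them into a crude text histogram.
const samples = [];
for (let i = 0; i < 50000; i++) {
  samples.push(randomTruncSkewNormal({ range: [-4, 3], mean: 1, stdDev: 2 }));
}
const bins = 14, lo = -4, hi = 3;
const counts = new Array(bins).fill(0);
for (const s of samples) {
  counts[Math.min(bins - 1, Math.floor(((s - lo) / (hi - lo)) * bins))]++;
}
counts.forEach((c, i) => {
  const from = (lo + (i * (hi - lo)) / bins).toFixed(1);
  console.log(from + "\t" + "#".repeat(Math.round(c / 200)));
});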
I want to calculate the length of the subsegments of a line having a length of totalLineLength, where the subsegments are proportional to the log of the events given.
A visual representation: |---|-----|---------| a line composed of subsegments.
I want the sum of the calculated subsegments to equal totalLineLength, so that Success is printed. log(x+1) must be used (because it always gives positive results for positive arguments, and it scales a value of 1 as well).
Using linear scaling, I'm doing this, and it works.
var totalLineLength = 20;
var eventTotal;
var calcSubSegment = function(x) {
  return totalLineLength * x / eventTotal;
}
var events = [0.1, 1, 22];
eventTotal = events.reduce((a, b) => a + b, 0);
var subSegments = events.map(calcSubSegment);
var segmentLength = subSegments.reduce((a, b) => a + b);
if (segmentLength != totalLineLength) {
  console.log("Error:", segmentLength);
} else {
  console.log("Success");
}
Using log(x+1) (calcSubSegment is updated), it doesn't work.
var totalLineLength = 20;
var eventTotal;
var calcSubSegment = function(x) {
  return totalLineLength * Math.log(x + 1) / Math.log(eventTotal + 1);
}
var events = [0.1, 1, 22];
eventTotal = events.reduce((a, b) => a + b, 0);
var subSegments = events.map(calcSubSegment);
var segmentLength = subSegments.reduce((a, b) => a + b, 0);
if (segmentLength != totalLineLength) {
  console.log("Error:", segmentLength);
} else {
  console.log("Success");
}
What's wrong with my code? I suspect the eventTotal calculation, but I'm not sure how to proceed.
How should I use logarithmic scaling? I'd also like a mathematical explanation.
If I understand your task correctly, the simplest solution is to first map all events using your log(x+1) function and then just use the linear split you've already implemented. Something like this:
function linearSplit(events, totalLineLength) {
  var eventTotal;
  var calcSubSegment = function(x) {
    return totalLineLength * x / eventTotal;
  }
  eventTotal = events.reduce((a, b) => a + b, 0);
  var subSegments = events.map(calcSubSegment);
  return subSegments;
}
function logSplit(events, totalLineLength) {
  var logEvents = events.map(function(x) { return Math.log(x + 1); });
  return linearSplit(logEvents, totalLineLength);
}
function testSplit(events, totalLineLength, fnSplit, splitName) {
  var subSegments = fnSplit(events, totalLineLength);
  var segmentLength = subSegments.reduce((a, b) => a + b, 0);
  console.log(splitName + ":\n" + events + " =>\n" + subSegments);
  if (Math.abs(segmentLength - totalLineLength) > 0.001) {
    console.log(splitName + " Error:", segmentLength);
  } else {
    console.log(splitName + " Success");
  }
}
var totalLineLength = 20;
var events = [0.1, 1, 22];
testSplit(events, totalLineLength, linearSplit, "linearSplit");
testSplit(events, totalLineLength, logSplit, "logSplit");
The idea is that the only splitting that both has parts proportional to some coefficients and makes the sum of all parts equal to the whole is the linear splitting. So if you want to scale the coefficients in any way, you should scale them before passing them to the linear splitting logic. Otherwise the sum of the parts will not equal the whole (with any non-linear scaling), as happens in your code.
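To spell out the mathematical explanation the question asked for (my notation, not part of the original answer): with the linear split each subsegment is $s_i = L \cdot \frac{c_i}{\sum_j c_j}$, so $\sum_i s_i = L \cdot \frac{\sum_i c_i}{\sum_j c_j} = L$ regardless of the coefficients. With the log version in the question, $s_i = L \cdot \frac{\log(x_i + 1)}{\log(\sum_j x_j + 1)}$, and in general $\sum_i \log(x_i + 1) \neq \log(\sum_j x_j + 1)$, so the subsegments do not add up to $L$. Scaling first, $c_i = \log(x_i + 1)$, and then splitting linearly restores the equality.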
P.S. It is not a very good idea to compare floating point values with ==. You should allow some tolerance for calculation/rounding errors.
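For example, a tiny comparison helper along these lines (the 1e-9 tolerance is an arbitrary choice):
// Compare two floating point values with an absolute tolerance instead of ==.
function approxEqual(a, b, eps = 1e-9) {
  return Math.abs(a - b) < eps;
}
console.log(approxEqual(0.1 + 0.2, 0.3)); // true, whereas 0.1 + 0.2 == 0.3 is false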
An important rule when dealing with logarithms: log(a) + log(b) = log(a*b). Hence, for the calculation of the total, we need to find the product of the individual numbers instead of their sum.
What you are doing is showing that log(a)+log(b) != log(a+b), so try multiplying the numbers instead of adding them.
Use an error margin as described in SergGr's answer, because of the way floating point operations work.
var totalLineLength = 20;
var eventTotal;
var calcSubSegment = function(x) {
  return totalLineLength * Math.log(x + 1) / Math.log(eventTotal);
}
var events = [0.1, 1, 22];
// eventTotal is the product of all (x + 1), so Math.log(eventTotal) equals the sum of all log(x + 1)
eventTotal = events.reduce((a, b) => a * (b + 1), 1);
var subSegments = events.map(calcSubSegment);
var segmentLength = subSegments.reduce((a, b) => a + b, 0);
if (segmentLength != totalLineLength) {
  console.log("Error:", segmentLength);
} else {
  console.log("Success", subSegments);
}
Suppose I have the following factors:
(1+3x)(1+x)(1+2x)
Expanded to a polynomial, it looks like:
1 + 6x + 11x^2 + 6x^3
The coefficients of this polynomial would be
c0 = 1
c1 = 6
c2 = 11
c3 = 6
I'm trying to figure out how to calculate these rapidly (for any set of factors). The ideal output would be an array of the coefficients, like
var coeff = [c0,c1,c2,c3];
What I'm trying to do is find a way to quickly go from the factors to the array of coefficients. Any suggestions on how to handle this rapidly in JavaScript? And for the sake of clarity, I'm trying to figure out how to do this for any set of n factors, not just this particular scenario.
You could treat each factor as a vector of coefficients and combine them pairwise (a discrete convolution) to get the result.
function multiply(a1, a2) {
  var result = [];
  a1.forEach(function (a, i) {
    a2.forEach(function (b, j) {
      result[i + j] = (result[i + j] || 0) + a * b;
    });
  });
  return result;
}
var data = [[1, 3], [1, 1], [1, 2]], // (1+3x)(1+x)(1+2x)
  result = data.reduce(multiply);
console.log(result); // [1, 6, 11, 6] = 1x^0 + 6x^1 + 11x^2 + 6x^3
Well, I found a method that does what you want from start to finish, without any preprocessing of the original factors, although I had to use a math library. There is at least one library that does what you want: Nerdamer.
As you can see from the code below, the coefficients are correctly calculated from the factors you gave.
var factors = '(1+3x)(1+x)(1+2x)';
console.log('original factors: ' + factors);
var y = nerdamer('expand(' + factors + ')');
var polynomialform = y.toString();
console.log('polynomial form: ' + polynomialform);
var coef = polynomialform.split('+').map(v => v.trim()).map(v => v.split('x')[0]).map(v => v.replace(/^\*+|\*+$/g, ''));
console.log('coefficients: ' + coef);
<script src="http://nerdamer.com/js/nerdamer.core.js"></script>
Notice that the coef variable is an array.
Obviously, given the way I obtained the coefficients, the operation may fail for other factor forms. The string handling has to be adapted for minus signs and other edge cases. You could create some kind of loop that collects failed calculations in an array so you can check the edge cases and adapt the code to your full dataset. I can improve the answer if you provide a larger test dataset.
Hope it helps.
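As a rough sketch of how the string handling could be made a little more robust, assuming nerdamer's expanded output looks like 6*x^3+11*x^2+6*x+1 (parseCoefficients is a hypothetical helper, not part of nerdamer):
// Parse a polynomial string into an array of coefficients indexed by power.
// Handles + and - signs; assumes terms look like "11*x^2", "-6*x", "x" or "1".
function parseCoefficients(polyStr, variable) {
  variable = variable || 'x';
  var coeffs = [];
  var terms = polyStr.replace(/\s+/g, '').match(/[+-]?[^+-]+/g) || [];
  terms.forEach(function (term) {
    var parts = term.split(variable);
    var coefPart = parts[0].replace(/\*$/, '');
    if (coefPart === '' || coefPart === '+') coefPart = '1';
    if (coefPart === '-') coefPart = '-1';
    var power = 0;
    if (term.indexOf(variable) !== -1) {
      power = parts[1] && parts[1].charAt(0) === '^' ? parseInt(parts[1].slice(1), 10) : 1;
    }
    coeffs[power] = (coeffs[power] || 0) + parseFloat(coefPart);
  });
  return coeffs;
}
console.log(parseCoefficients('6*x^3+11*x^2+6*x+1')); // [1, 6, 11, 6]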
Here's my take, based on the fact that when you multiply (1+a*x) by (1+b_1*x+b_2*x^2+...+b_n*x^n), in the resulting polynomial (of degree n+1) the first term's coefficient will be 1 and the last term's coefficient will be a*b_n.
I think it is a bit simpler than the accepted answer, but still quadratic in time. To make this more efficient, you will need more advanced techniques.
function multOne(a, b) {
  var n = b.length;
  var r = [1]; // The first term is always 1
  r[n] = a * b[n - 1]; // The last term is always a*b_(n-1)
  for (var i = 1; i < n; i++)
    r[i] = b[i] + a * b[i - 1];
  return r;
}
function solve(c) {
  var result = [1, c[0]]; // use result as an accumulator
  for (var j = 1; j < c.length; j++)
    result = multOne(c[j], result);
  return result;
}
console.log(solve([3, 1, 2])); // You don't need to pass 1s either. Just pass the varying coefficients
I needed something similar (to calculate permutations via exponential generating functions), so I wrote a version that works when some terms are missing, by using objects as the base instead. I also needed it not to calculate anything over a certain power, so that's an option as well.
/**
 * Calculates the result of multiplying many polynomials together.
 * @example
 * polynomials = [{0: 1, 1: 10, 2: 1}, {0: 0, 1: 5, 2: 0, 3: 0.5}]
 * limit = 4;
 * polynomialMultiplication(polynomials, limit);
 * @param {Array.<Object.<number,number>>} polynomials an array of polynomials,
 * each expressed as an object mapping powers to term coefficients
 * @param {number} limit the maximum power to calculate
 * @returns the resultant polynomial from multiplying all polynomials together
 */
function polynomialMultiplication(polynomials, limit) {
  const length = limit ?? polynomials.reduce((sum, poly) => sum += Math.max(...Object.keys(poly)), 0);
  // make an object to hold the result in,
  // prepopulated with zeros
  const template = { ...Array.from({ length }).fill(0) };
  return polynomials.reduce((memo, poly, polyIndex) => {
    const tempCopy = { ...template };
    if (polyIndex === 0) return { ...poly };
    for (let termIndex = 0; termIndex < length && termIndex <= Math.max(...Object.keys(poly)); termIndex++) {
      for (let memoIndex = 0; (memoIndex + termIndex) <= length && memoIndex <= Math.max(...Object.keys(memo)); memoIndex++) {
        const addition = (memo[memoIndex] ?? 0) * (poly[termIndex] ?? 0);
        const copyIndex = memoIndex + termIndex;
        tempCopy[copyIndex] = (tempCopy[copyIndex] ?? 0) + addition;
      }
    }
    return tempCopy;
  }, template);
}
console.log('(1+3x)(1+x)(1+2x)');
const polynomials = [
  { 0: 1, 1: 3 },
  { 0: 1, 1: 1 },
  { 0: 1, 1: 2 }
];
console.log(polynomialMultiplication(polynomials));
const morePolynomials = [
  { 0: 1, 1: 0, 2: 5 },
  { 0: 0, 1: 6 },
  { 0: 1, 1: 2, 2: 0 }
];
console.log('(1+5x^2)(6x)(1+2x) up to x^2');
console.log(polynomialMultiplication(morePolynomials, 2));
I am searching for a way to calculate the cumulative distribution function in JavaScript. Are there classes which have implemented this? Do you have an idea of how to get this to work? It does not need to be 100% accurate, but I need a good idea of the value.
http://en.wikipedia.org/wiki/Cumulative_distribution_function
I was able to write my own function with the help of "Is there an easily available implementation of erf() for Python?" and the knowledge from Wikipedia.
The calculation is not 100% correct, as it is just an approximation.
function normalcdf(mean, sigma, to) {
  var z = (to - mean) / Math.sqrt(2 * sigma * sigma);
  var t = 1 / (1 + 0.3275911 * Math.abs(z));
  var a1 = 0.254829592;
  var a2 = -0.284496736;
  var a3 = 1.421413741;
  var a4 = -1.453152027;
  var a5 = 1.061405429;
  var erf = 1 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * Math.exp(-z * z);
  var sign = 1;
  if (z < 0) {
    sign = -1;
  }
  return (1 / 2) * (1 + sign * erf);
}
normalcdf(30, 25, 1.4241); //-> 0.12651187738346226
//wolframalpha.com 0.12651200000000000
The math.js library provides an erf function. Based on a definition found at Wolfram Alpha, the cdfNormal function can be implemented like this in JavaScript:
const mathjs = require('mathjs')
function cdfNormal (x, mean, standardDeviation) {
return (1 - mathjs.erf((mean - x ) / (Math.sqrt(2) * standardDeviation))) / 2
}
In the node.js console:
> console.log(cdfNormal(5, 30, 25))
> 0.15865525393145707 // Equal to Wolfram Alpha's result at: https://sandbox.open.wolframcloud.com/app/objects/4935c1cb-c245-4d8d-9668-4d353ad714ec#sidebar=compute
This formula gives the correct normal CDF, unlike the currently accepted answer:
function ncdf(x, mean, std) {
  var x = (x - mean) / std;
  var t = 1 / (1 + .2315419 * Math.abs(x));
  var d = .3989423 * Math.exp(-x * x / 2);
  var prob = d * t * (.3193815 + t * (-.3565638 + t * (1.781478 + t * (-1.821256 + t * 1.330274))));
  if (x > 0) prob = 1 - prob;
  return prob;
}
This answer comes from math.ucla.edu
Due to some needs in the past, I put together an implementation of distribution functions in JavaScript. My library is available on GitHub: https://github.com/chen0040/js-stats
It provides JavaScript implementations of the CDF and inverse CDF for the normal distribution, Student's t-distribution, the F-distribution and the chi-square distribution.
To use the library to obtain the CDF and inverse CDF:
jsstats = require('js-stats');
//====================NORMAL DISTRIBUTION====================//
var mu = 0.0; // mean
var sd = 1.0; // standard deviation
var normal_distribution = new jsstats.NormalDistribution(mu, sd);
var X = 10.0; // point estimate value
var p = normal_distribution.cumulativeProbability(X); // cumulative probability
var p = 0.7; // cumulative probability
var X = normal_distribution.invCumulativeProbability(p); // point estimate value
//====================T DISTRIBUTION====================//
var df = 10; // degrees of freedom for t-distribution
var t_distribution = new jsstats.TDistribution(df);
var t_df = 10.0; // point estimate or test statistic
var p = t_distribution.cumulativeProbability(t_df); // cumulative probability
var p = 0.7;
var t_df = t_distribution.invCumulativeProbability(p); // point estimate or test statistic
//====================F DISTRIBUTION====================//
var df1 = 10; // degrees of freedom for f-distribution
var df2 = 20; // degrees of freedom for f-distribution
var f_distribution = new jsstats.FDistribution(df1, df2);
var F = 10.0; // point estimate or test statistic
var p = f_distribution.cumulativeProbability(F); // cumulative probability
//====================Chi Square DISTRIBUTION====================//
var df = 10; // degrees of freedom for cs-distribution
var cs_distribution = new jsstats.ChiSquareDistribution(df);
var X = 10.0; // point estimate or test statistic
var p = cs_distribution.cumulativeProbability(X); // cumulative probability
This is a brute-force implementation, but accurate to more digits of precision. The approximation above is accurate to within 10^-7; my implementation runs slower (about 700 nanoseconds) but is accurate to within 10^-14. normal(25, 30, 1.4241) === 0.00022322110257305683, vs Wolfram's 0.000223221102572082.
It takes the power series of the standard normal pdf, i.e. the bell-curve, and then integrates the series.
I originally wrote this in C, so I concede some of the optimizations might seem silly in Javascript.
function normal(x, mu, sigma) {
  return stdNormal((x - mu) / sigma);
}
function stdNormal(z) {
  var j, k, kMax, m, values, total, subtotal, item, z2, z4, a, b;
  // Power series is not stable at these extreme tail scenarios
  if (z < -6) { return 0; }
  if (z > 6) { return 1; }
  m = 1;        // m(k) == (2**k)/factorial(k)
  b = z;        // b(k) == z ** (2*k + 1)
  z2 = z * z;   // cache of z squared
  z4 = z2 * z2; // cache of z to the 4th
  values = [];
  // Compute the power series in groups of two terms.
  // This reduces floating point errors because the series
  // alternates between positive and negative.
  for (k = 0; k < 100; k += 2) {
    a = 2 * k + 1;
    item = b / (a * m);
    item *= (1 - (a * z2) / ((a + 1) * (a + 2)));
    values.push(item);
    m *= (4 * (k + 1) * (k + 2));
    b *= z4;
  }
  // Add the smallest terms to the total first so that
  // we minimize the floating point errors.
  total = 0;
  for (k = 49; k >= 0; k--) {
    total += values[k];
  }
  // Multiply total by 1/sqrt(2*PI),
  // then add 0.5 so that stdNormal(0) === 0.5
  return 0.5 + 0.3989422804014327 * total;
}
You can also take a look here: it's a scientific calculator implemented in JavaScript; it includes erf, and its author claims no copyright on the implementation.
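Whichever erf implementation you end up using (from that calculator, math.js, or elsewhere), the normal CDF follows from the standard identity CDF(x) = (1/2) * (1 + erf((x - mean) / (sigma * sqrt(2)))). A minimal sketch, with the erf function passed in rather than assumed to come from any particular library:
// Normal CDF built on top of any erf(z) implementation you have available.
function normalCdf(x, mean, std, erf) {
  return 0.5 * (1 + erf((x - mean) / (std * Math.SQRT2)));
}
// e.g. normalCdf(1.4241, 30, 25, erf) should give roughly 0.1265, matching the snippets above.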