Let's think about the following situation.
A Go routine creates a byte array in which it packs the Uint64 number 5577006791947779410 as 8 bytes, big-endian: [77, 101, 130, 33, 7, 252, 253, 82].
In JavaScript code I receive these bytes as a Uint8Array. We know that JavaScript doesn't currently support Uint64 as a safe numeric type and cannot perform bitwise operations on integers larger than 32 bits, so things like buf[0] << 56 will never work.
So what is the process of decoding these bytes directly to the numeric string "5577006791947779410"?
P.S. I know there are plenty of libraries for working with big integers in JavaScript, but generally they are huge and provide lots of mathematical operations, which I don't need here. I am looking for a simple modern straightforward solution for just decoding BE-packed Uint64 and Int64 bytes to numeric string. Do you have anything in mind?
EDIT: For converting (U)int64 I would now definitely recommend #LS_DEV's solution. I would use my solution only when having an unknown or larger amount of bytes.
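Worth noting: on modern runtimes (ES2020+), BigInt and DataView decode this directly, with no manual digit arithmetic. A minimal sketch, assuming the bytes arrive as an 8-byte Uint8Array:
function uint64ToString(bytes, isSigned = false) {
  // DataView reads the 8 bytes as a BigInt; false selects big-endian.
  const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);
  const value = isSigned ? view.getBigInt64(0, false) : view.getBigUint64(0, false);
  return value.toString();
}
console.log(uint64ToString(new Uint8Array([77, 101, 130, 33, 7, 252, 253, 82])));
// '5577006791947779410'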
I started with https://stackoverflow.com/a/21668344/3872370 and modified it:
function Int64ToString(bytes, isSigned) {
  const isNegative = isSigned && bytes.length > 0 && bytes[0] >= 0x80;
  const digits = [];
  bytes.forEach((byte, j) => {
    if (isNegative)
      byte = 0x100 - (j == bytes.length - 1 ? 0 : 1) - byte;
    for (let i = 0; byte > 0 || i < digits.length; i++) {
      byte += (digits[i] || 0) * 0x100;
      digits[i] = byte % 10;
      byte = (byte - digits[i]) / 10;
    }
  });
  return (isNegative ? '-' : '') + digits.reverse().join('');
}
const tests = [
  {
    inp: [77, 101, 130, 33, 7, 252, 253, 82],
    signed: false,
    expectation: '5577006791947779410'
  },
  {
    inp: [255, 255, 255, 255, 255, 255, 255, 255],
    signed: true,
    expectation: '-1'
  },
];
tests.forEach(test => {
  const result = Int64ToString(test.inp, test.signed);
  console.log(`${result} ${result !== test.expectation ? '!' : ''}=== ${test.expectation}`);
});
At first the sign gets calculated by checking whether the topmost bit is set (bytes[0] >= 0x80). For negative numbers the bits have to be negated (255 - byte) and 1 has to be added to the number (hence 256 instead of 255 for the last byte).
The basic idea of the forEach loop is to split each byte into its decimal digits (byte % 10) and to carry the remainder ((byte - digits[i]) / 10, equivalently Math.floor(byte / 10)) over to the next digit. Before a new byte is merged in, the digits accumulated so far have to be shifted up by one byte (byte += digits[i] * 0x100, equivalently digits[i] << 8).
That code is optimized for brevity, simplicity and flexibility. If you are working with strings instead of bytes or numbers and don't want to use any libraries, conversion performance probably doesn't matter. Otherwise the function could be optimized: up to four bytes could be processed at once (one only has to replace the 0x100 and 0x80 constants), and with only two byte groups remaining in the case of an (U)Int64 the forEach loop could be unrolled. Grouping the decimal digits probably won't increase performance, since the resulting strings would have to be padded with zeros, introducing the need to strip leading zeros from the end result.
Another approach: divide the problem into two uint32 values to keep the calculations manageable.
Consider the lower and the higher uint32 (l and h). The full number can be written as h * 0x100000000 + l. Considering the decimal representation, one can likewise split it into the lower 9 digits and the remaining higher digits (ld and hd): ld = (h * 0x100000000 + l) % 1000000000 and hd = (h * 0x100000000 + l) / 1000000000 (integer division). Using basic properties of the arithmetic operators, those operations can be broken into safe "half"-64-bit operations, and the string composed at the end.
function int64_to_str(a, signed) {
  const negative = signed && a[0] >= 128;
  const H = 0x100000000, D = 1000000000;
  let h = a[3] + a[2] * 0x100 + a[1] * 0x10000 + a[0] * 0x1000000;
  let l = a[7] + a[6] * 0x100 + a[5] * 0x10000 + a[4] * 0x1000000;
  if (negative) {
    h = H - 1 - h;
    l = H - l;
  }
  const hd = Math.floor(h * H / D + l / D);
  const ld = (((h % D) * (H % D)) % D + l) % D;
  const ldStr = ld + '';
  return (negative ? '-' : '') +
    (hd != 0 ? hd + '0'.repeat(9 - ldStr.length) : '') + ldStr;
}
let result = int64_to_str([77, 101, 130, 33, 7, 252, 253, 82], false);
let expectation = '5577006791947779410';
console.log(result + ' ' + (result === expectation ? '===' : '!==') + ' ' + expectation);
result = int64_to_str([255, 255, 255, 255, 255, 255, 255, 255], true);
expectation = '-1';
console.log(result + ' ' + (result === expectation ? '===' : '!==') + ' ' + expectation);
As detailed in the comments, that algorithm works even though (h % D) * (H % D) can get larger than Number.MAX_SAFE_INTEGER: H % D = 294967296 = 2^11 * 144027, so the product is a multiple of 2^11 below 2^59 and is therefore still represented exactly by a double's 53-bit mantissa; the bits that would have been lost are all zero.
Here is my solution. The general strategy is this:
If the number is negative, negate it using two's complement and add the negative sign back in at the end
Represent arbitrary-size numbers as LE (little-endian) arrays of digits from 0 to 9
For each byte in the Uint8Array (from most to least significant), multiply the running total by 256 and add to it the value of the new byte
To multiply a number by 256, double it 8 times (since 2 ** 8 == 256)
To add two numbers, use the elementary school algorithm:
Start with least significant digit
Add corresponding digits of the two numbers
Resulting digit is the sum mod 10; carry is 1 if the sum is 10 or more, otherwise 0
Continue adding corresponding digits with the carry until we add the most significant digits and carry is 0
A few notes about shorthand:
n1[i] || 0 gets the ith digit of n1. If i is past the end of n1, we treat the digit as a 0 (imagine numbers represented with infinitely many 0s in front of them). Same with n2.
added > 9 produces a boolean, which is automatically converted to a number (1 if added >= 10, 0 otherwise)
i < n1.length || i < n2.length || carry checks whether there are more digits in either of the addends or the carry is still nonzero
String(b).split('').map(Number).reverse() converts, e.g. 100 to '100', then ['1', '0', '0'], then [1, 0, 0], then [0, 0, 1] so it is represented in LE base-10
result.reverse().join('') converts, e.g. [0, 0, 1] to [1, 0, 0], then '100'
Code:
function add(n1, n2) {
  const sum = []
  let carry = 0
  for (let i = 0; i < n1.length || i < n2.length || carry; i++) {
    const added = (n1[i] || 0) + (n2[i] || 0) + carry
    sum[i] = added % 10
    carry = added > 9 //floor(added / 10)
  }
  return sum
}

function times256(n1) {
  for (let i = 8; i; i--) n1 = add(n1, n1) //double 8 times
  return n1
}

function toString(buffer) { //buffer should be a Uint8Array, so ~b below wraps to 255 - b
  const isNegative = buffer[0] & 128 //check if high bit is set
  if (isNegative) { //convert to positive, using 2's complement
    buffer = buffer.map(b => ~b) //invert all bits
    let i = buffer.length - 1
    while (buffer[i] === 255) { //add 1 to the number, carrying if necessary
      buffer[i] = 0
      i--
    }
    buffer[i]++
  }
  const result = buffer.reduce((sum, b) =>
    add(
      times256(sum), //multiply sum by 256
      String(b).split('').map(Number).reverse() //then add b
    ),
    []
  )
  const stringResult = result.reverse().join('')
  if (isNegative) return '-' + stringResult
  else return stringResult
}
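A quick usage check with the examples from the question (the input should be a Uint8Array; the negation step relies on its 8-bit wrap-around):
console.log(toString(new Uint8Array([77, 101, 130, 33, 7, 252, 253, 82])))
// '5577006791947779410'
console.log(toString(new Uint8Array([255, 255, 255, 255, 255, 255, 255, 255])))
// '-1'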
This does the UInt64 version - I can't imagine that adapting it to the signed Int64 case is that difficult:
<!DOCTYPE html>
<html>
<body>
  <span id='out1'></span>
  <br>
  <span id='out2'></span>
  <br>
  <span id='out3'></span>
</body>
<script>
  fnl = '';
  be = [77, 101, 130, 33, 7, 252, 253, 82];

  function paddedBinary(n) { //pad to a full 8 bits
    pad = '';
    sv = 128;
    while (sv > n && sv > 1) { pad += '0'; sv /= 2; } //sv > 1 guards against n == 0
    return pad + n.toString(2);
  }

  for (let i = 0; i < 8; i++)
    fnl += paddedBinary(be[i]);
  out1.textContent = fnl;

  dec = new Array(64);
  for (let i = 0; i < 64; i++) dec[i] = new Array(21).fill(0);

  function make2s() { //each power of 2 as an array of decimal digits
    dec[0][0] = 1;
    for (let i = 1; i < 64; i++) {
      for (let j = 0; j < 21; j++)
        dec[i][j] = 2 * dec[i - 1][j];
      for (let j = 0; j < 21; j++)
        if (dec[i][j] > 9) {
          dec[i][j] -= 10;
          dec[i][j + 1]++;
        }
    }
  }

  function int64add(v1, v2) {
    var res = new Array(21).fill(0);
    for (let i = 0; i < 21; i++)
      res[i] = v1[i] + v2[i];
    for (let i = 0; i < 21; i++)
      if (res[i] > 9) {
        res[i] -= 10;
        res[i + 1]++;
      }
    return res;
  }

  make2s();
  for (let i = 0; i < 64; i++)
    out2.textContent += dec[i] + ' :: ';

  cv = new Array(21).fill(0);
  for (let i = 0; i < fnl.length; i++)
    if (fnl[i] == '1') cv = int64add(cv, dec[63 - i]);
  out3.textContent = cv;
</script>
</html>
The paddedBinary() function returns a 'full' 8-bit binary number, so we can create 'fnl' as a 64-bit string of the big-endian value.
As JavaScript doesn't do full 64-bit arithmetic, I create the dec[] array to store each power of 2 as individual digits, by doubling each previous digit and smoothing the tens out.
Then all that is left is to add the powers of 2 for the bits that are set, which uses a similar method to smooth out the tens.
(and the answer is given in reverse!)
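To read it in the usual order, the digit array can be reversed, joined, and stripped of leading zeros; a small addition, not part of the snippet above:
const str = cv.slice().reverse().join('').replace(/^0+(?=\d)/, '');
console.log(str); // '5577006791947779410'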
I need to calculate how many palindromic numbers there are in the segment [1, 10 ** n] (n < 100).
function f(n) {
  let res = 10 ** (parseInt(n / 2) + n % 2);
  return res - res / 10;
}

function countPalindromes(n) {
  let count = 0;
  for (let i = 1; i <= n; i++) {
    count += f(i);
  }
  return count;
}

for (let i = 1; i < 100; i++) {
  console.log(i, f(i), countPalindromes(i));
}
Problems:
For large n, JS returns the result in exponential form, and that result cannot be converted to a plain digit string.
Adding such large numbers with column (digit-by-digit) addition does not work either.
So, how can I count the number of palindromes in the segment [1, 10 ** n] (n < 100)?
Let's start with a smaller problem first. How many n digit palindromic numbers exist?
If n is even, let the number be of the form _ _ _ _ (midpoint) _ _ _ _. Now, in the first digit, you can't fill 0, so you have 9 options (1-9). For the second digit to the n/2th digit, you can fill any of the digits in 0-9. Since we're dealing with palindromes, second half of the number will have the same digits as the first half. So, using basic counting, total number of n digit palindromes when n is even is 9*[10*10*...10 (n/2)-1 times] = 9 * 10^((n/2)-1)
When n is odd, the analysis will be similar, but with two differences:
You have 10 choices (0-9) for the midpoint.
There are (n-1)/2 digits on either side of the midpoint, where the first digit can't be 0.
Again, using basic counting, the number of palindromes if n is odd = 9 (for the first digit) * 10^((n-1)/2 - 1) (for the rest of the first half) * 10 (for the midpoint) = 9 * 10^((n-1)/2).
Both cases combine to: the number of n-digit palindromes = 9 * 10^(ceil(n/2) - 1), and the formula also gives the correct count (9) for n = 1, since 0 is excluded from the segment. After that, you just have to sum the formula over digit counts from 1 up to n to get the total count of palindromic numbers you need.
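A minimal sketch of that summation using BigInt (ES2020+), so the totals stay exact and never fall into exponential form:
// Exact count of palindromes in [1, 10 ** n], returned as a decimal string.
// Uses the formula above: 9 * 10^(ceil(d/2) - 1) palindromes with d digits.
function countPalindromesBig(n) {
  let count = 0n;
  for (let d = 1; d <= n; d++) {
    count += 9n * 10n ** BigInt(Math.ceil(d / 2) - 1);
  }
  return count.toString();
}
console.log(countPalindromesBig(3)); // '108' (9 + 9 + 90)
console.log(countPalindromesBig(99)); // exact total as a string, no exponential form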
I'm not 100% sure, so correct me if I'm wrong. It works, check the code at the bottom.
There should be two kinds of palindromes: ones with odd number of digits and ones with even number of digits. You can go from one palindrome to another (of the same kind) by appending and prepending the same digit to the first one. So...
Single-digit palindromes: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9.
For every single-digit palindrome, we can construct 10 3-digit palindromes:
000, 101, 202, ..., 010, 111, 212, ..., 898, 999.
Similarly, for every 3-digit palindrome, we can construct 10 5-digit palindromes.
The range up to 10 ** n contains palindromes of up to n digits. Let's count the odd-length ones (leading zeros allowed for now):
10 ** 1 single-digit, 10 ** 2 3-digit, 10 ** 3 5-digit, ..., 10 ** ((n + 1) / 2) n-digit.
The same can be done for double-digit palindromes (00, 11, 22, 33, 44, 55, 66, 77, 88, 99), to iteratively obtain palindromes with an even number of digits.
Then we need to drop the 'illegal' numbers - numbers starting with the digit 0.
It works. Yay!
// Counting without creating the numbers
function getSmartCount(n) {
  let smartPalindromeCount = 0;
  let startingWithZeroCount = 0;
  for (let digits = 1; digits <= n; digits += 2) { // odd numbers of digits
    const power = (digits + 1) / 2;
    smartPalindromeCount += 10 ** power;
    startingWithZeroCount += 10 ** (power - 1); // the zero-x-zero numbers
  }
  for (let digits = 2; digits <= n; digits += 2) { // even numbers of digits
    const power = digits / 2;
    smartPalindromeCount += 10 ** power;
    startingWithZeroCount += 10 ** (power - 1); // the zero-x-zero numbers
  }
  return smartPalindromeCount - startingWithZeroCount;
}
// Counting by creating the numbers
function getCountIteratively(n) {
  const singleDigitPalindromes = '0123456789'.split('');
  const doubleDigitPalindromes = singleDigitPalindromes.map(x => x + x);
  const palindromes = [...singleDigitPalindromes, ...doubleDigitPalindromes];
  const nestedPalindromes = [
    singleDigitPalindromes,
    doubleDigitPalindromes,
  ];
  for (let digits = 3; digits <= n; digits++) {
    const index = digits - 1;
    const extraPalindromes = [];
    const previousPalindromes = nestedPalindromes[index - 2];
    for (const previousPalindrome of previousPalindromes) {
      for (const character of singleDigitPalindromes) {
        const newPalindrome = character + previousPalindrome + character;
        extraPalindromes.push(newPalindrome);
        palindromes.push(newPalindrome);
      }
    }
    nestedPalindromes.push(extraPalindromes);
  }
  const legalPalindromes = palindromes.filter(palindrome => palindrome[0] !== '0');
  // console.log(legalPalindromes); // uncomment to see the numbers
  return legalPalindromes.length;
}

// Counting by checking every single number
function getCountByBruteForce(n) {
  const bruteForcePalindromes = [];
  for (let i = 1; i < 10 ** n; i++) {
    const leftToRight = i.toString();
    const rightToLeft = leftToRight.split('').reverse().join('');
    if (leftToRight === rightToLeft) {
      bruteForcePalindromes.push(i);
    }
  }
  // console.log(bruteForcePalindromes); // uncomment to see the numbers
  return bruteForcePalindromes.length;
}

for (let n = 2; n <= 5; n++) {
  console.log({
    n,
    smart: getSmartCount(n),
    byCreatingTheNumbers: getCountIteratively(n),
    bruteForce: getCountByBruteForce(n),
  });
}
Say I have a bitarray like this
101110101010101010101111111011001100100100001011001111101000101001
The two operations I want to do are:
Read a contiguous subset of the bits into a single bitarray (or integer in the case of JavaScript)
Read a noncontiguous subset of bits into a single integer in JavaScript.
So for (1), let's say I want this:
1011[101010101010101011111]11011001100100100001011001111101000101001
== 101010101010101011111
= 1398111 in decimal
Bits 4-25 or so.
For (2), I would like to select a non-contiguous subset of bits and combine them, as efficiently as possible, into a final value.
1011[101]0101010101010[11]1111[1]011001100100100001011001111101000101001
== 101 ++ 11 ++ 1
= 101111
= 47 in decimal
Bits 4-6, 21-22, and 27 or so.
What is the right/optimal way of doing this?
The format is still a bit vague, but here's a way to do something like this. I'm making some assumptions that make the problem easier, namely:
At most 32 bits are extracted at once (so it fits in a Number without weird hacks)
Bits are in an Uint32Array (or compatible storage, as long as it has 32 bits per element)
The least significant bit of the 0th entry of the array is number 0 overall
The bit string represented this way is ... + tobits(array[1]) + tobits(array[0]) for example [ 0, 256 ] represents 00000000000000000000000100000000_00000000000000000000000000000000 (the underscore indicates the boundary between the pieces). Maybe that's the wrong way around, it can be changed but this way is simple.
The ith bit is in the i >> 5-th (aka i / 32 but with integer division) word, at offset i & 31 (aka i % 32) within that word. That's what makes this order simple to work with.
By the first assumption, at most 2 entries/words in the array are spanned by the range, so there are only two cases:
The bottom of the range is in one word and the top is in the next.
The range is wholly contained in a single word. Touching a second word should be avoided, as it might be beyond the end of the array. Also even if the second word can be touched, it wouldn't be as easy to deal with the excess bits because shift counts are taken modulo 32 so high << 32 wouldn't do the trick.
In code (not tested)
function extractRange(bits, begin, end) {
  // extract bits [begin .. end],
  // begin==end extracts a single bit
  var beginIdx = begin >> 5;
  var endIdx = end >> 5;
  var beginOfs = begin & 31;
  var endOfs = end & 31;
  var len = end - begin + 1;
  var msk = -1 >>> (32 - len);
  console.assert(len > 0 && len <= 32);
  if (beginIdx == endIdx) {
    // begin and end are in the same word
    // discard the bits before the begin of the range and mask off the high bits
    return ((bits[endIdx] >>> beginOfs) & msk) >>> 0;
  }
  else {
    var low = bits[beginIdx];
    var high = bits[endIdx];
    // the high word contains the high bits of the result, in its lowest bits
    // the low word contains the low bits of the result, in its highest bits:
    // xxxxhhhh_llllxxxx
    // high word must be shifted left by the number of bits in the low word
    var bitsInLow = 32 - beginOfs;
    return (((low >>> beginOfs) | (high << bitsInLow)) & msk) >>> 0;
  }
}
Examples:
[0xdeadbeef, 0xcafebabe] means that the string is really 0xcafebabedeadbeef (in bits)
extractRange([0xdeadbeef, 0xcafebabe], 0, 31).toString(16) =
deadbeef
extractRange([0xdeadbeef, 0xcafebabe], 4, 35).toString(16) =
edeadbee
extractRange([0xdeadbeef, 0xcafebabe], 8, 39).toString(16) =
bedeadbe
extractRange([0xdeadbeef, 0xcafebabe], 60, 63).toString(16) =
c
extractRange([0xdeadbeef, 0xcafebabe], 30, 33).toString(16) =
b // ...ed... in binary 11101101, taking the middle 4 bits, 1011 = b
For non-contiguous ranges, you could extract every individual range and then append them. I don't think there is a nicer way in general.
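For instance, a small helper along these lines (a sketch, not part of the answer above; it assumes extractRange as defined there and a combined width of at most 32 bits) can stitch the pieces together, with earlier ranges ending up in the more significant bits:
function extractRanges(bits, ranges) {
  var result = 0;
  for (var i = 0; i < ranges.length; i++) {
    var begin = ranges[i][0], end = ranges[i][1];
    var len = end - begin + 1;
    // multiply instead of << so a running total near 32 bits stays exact
    result = result * Math.pow(2, len) + extractRange(bits, begin, end);
  }
  return result >>> 0;
}
// bits 32-35 (low nibble of 0xcafebabe) = e, then bits 0-3 (low nibble of 0xdeadbeef) = f:
console.log(extractRanges([0xdeadbeef, 0xcafebabe], [[32, 35], [0, 3]]).toString(16)); // 'ef'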
A streamlined version that uses generators up to the final step, thus avoiding loading the whole input into memory.
// for a single number
function* gUintToBits(input, dim) {
  let i = 0;
  while (i < dim) {
    yield (input >> (dim - 1 - i++)) & 1; // most significant bit first
    // or like this, if bits should instead come least significant first:
    // yield (input >> i++) & 1;
  }
}

// for an array of numbers
function* gUintArrayToBits(input, dim) {
  for (let item of input) {
    yield* gUintToBits(item, dim);
  }
}

// apply the intervals mask directly to the generator
function* genWithIntervalsApplied(iterOfBits, intervals) {
  // fast, if the number of intervals is not too big
  const isInsideIntervals = (n, itrvs) => {
    for (let intv of itrvs) {
      if (n >= intv[0] && n < intv[1]) return true;
    }
    return false;
  };
  let index = 0;
  for (let item of iterOfBits) {
    if (isInsideIntervals(index++, intervals)) {
      yield item;
    }
  }
}

// finally, consume the generator
function extractIntervalsFromUint8Array(input, intervals) {
  let result = '';
  for (let x of genWithIntervalsApplied(gUintArrayToBits(input, 8), intervals)) {
    result += `${x}`;
  }
  return result;
}

const result = extractIntervalsFromUint8Array(
  [1, 3, 9, 127],
  [[8, 16], [24, 32]],
);
const dec = parseInt(result, 2);
console.log(result);
console.log(dec);
output:
// 0000001101111111
// 895
https://leetcode.com/problems/numbers-with-repeated-digits/
Given a positive integer N, return the number of positive integers less than or equal to N that have at least 1 repeated digit.
Example 1:
Input: 20
Output: 1
Explanation: The only positive number (<= 20) with at least 1 repeated digit is 11.
Example 2:
Input: 100
Output: 10
Explanation: The positive numbers (<= 100) with at least 1 repeated digit are 11, 22, 33, 44, 55, 66, 77, 88, 99, and 100.
Example 3:
Input: 1000
Output: 262
Note:
1 <= N <= 10^9
https://leetcode.com/problems/numbers-with-repeated-digits/discuss/256725/JavaPython-Count-the-Number-Without-Repeated-Digit
I found that this solution has many likes, but it's coded in Java/Python. Can anyone help code it in JavaScript using similar logic?
Really appreciated...
Python solution (I have no clue about the set() part or how to do it in JavaScript):
def numDupDigitsAtMostN(self, N):
    # Python 2 style; in Python 3 use: L = list(map(int, str(N + 1)))
    L = map(int, str(N + 1))
    res, n = 0, len(L)

    def A(m, n):
        return 1 if n == 0 else A(m, n - 1) * (m - n + 1)

    for i in range(1, n): res += 9 * A(9, i - 1)
    s = set()
    for i, x in enumerate(L):
        for y in range(0 if i else 1, x):
            if y not in s:
                res += A(9 - i, n - i - 1)
        if x in s: break
        s.add(x)
    return N - res
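A sketch of a fairly literal JavaScript translation (a Set replaces Python's set(); the digits of N + 1 come from String(N + 1)):
function numDupDigitsAtMostN(N) {
  const L = String(N + 1).split('').map(Number);
  const n = L.length;
  // A(m, k): permutations of k items chosen from m; A(m, 0) = 1
  const A = (m, k) => (k === 0 ? 1 : A(m, k - 1) * (m - k + 1));
  let res = 0; // counts numbers WITHOUT repeated digits
  // numbers with fewer digits than N + 1
  for (let i = 1; i < n; i++) res += 9 * A(9, i - 1);
  // numbers with the same digit count, prefix by prefix
  const s = new Set();
  for (let i = 0; i < n; i++) {
    const x = L[i];
    for (let y = i === 0 ? 1 : 0; y < x; y++) {
      if (!s.has(y)) res += A(9 - i, n - i - 1);
    }
    if (s.has(x)) break;
    s.add(x);
  }
  return N - res;
}
console.log(numDupDigitsAtMostN(20)); // 1
console.log(numDupDigitsAtMostN(100)); // 10
console.log(numDupDigitsAtMostN(1000)); // 262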
While I can't translate the Python, I think what you need to do is break the integer into an array of chars, so 100 would become ["1","0","0"], and compare from the first element (array[0]) to the last to see if any of the chars are the same.
Here is a quick function I wrote; it will probably run slower than the Python, but it should do the job:
var result = [];

function x(n) {
  for (var i = 0; i <= n; i++) {
    var nStr = i.toString();
    var nStrArr = nStr.split('');
    if (nStrArr.length > 1) {
      for (var j = 0; j < nStrArr.length; j++) {
        var idx = nStrArr.findIndex(x => x === nStrArr[j]);
        if (idx >= 0 && idx !== j) {
          result.push(parseInt(i));
          break;
        }
      }
    }
  }
  console.log(result); // should list all the numbers with at least 1 repeated digit
  console.log(result.length); // length of the array
}

x(1000);
x(1000);
So I'm very familiar with the good old
Math.floor(Math.random() * (max - min + 1)) + min;
and this works very nicely with small numbers; however, when numbers get larger this quickly becomes biased and only returns numbers one order of magnitude below the maximum (for example, a random number between 0 and 1e100 will almost always, in every one of the several billion samples I generated with a for loop, come out as [x]e99). And yes, I waited the long time for the program to generate that many numbers, twice. By this point, it is safe to assume that the output is always [x]e99 for all practical purposes.
So next I tried this
Math.floor(Math.pow(max - min + 1, Math.random())) + min;
and while that works perfectly for huge ranges, it breaks for small ones. So my question is: how can I do both - generate both small and large random numbers without any bias (or with bias so minimal it isn't noticeable)?
Note: I'm using Decimal.js to handle numbers in the range -1e2043 < x < 1e2043 but since it is the same algorithm I displayed the vanilla JavaScript forms above to prevent confusion. I can take a vanilla answer and convert it to Decimal.js without any trouble so feel free to answer with either.
Note #2: I want to even out the odds of getting large numbers. For example 1e33 should have the same odds as 1e90 in my 0-1e100 example. But at the same time I need to support smaller numbers and ranges.
Your problem is precision, which is the reason you use Decimal.js in the first place. Like every other Number in JS, the result of Math.random() carries only 53 bits of precision (some browsers even used to generate only the upper 32 bits of randomness). But your value 1e100 would need 333 bits of precision. So the lower 280 bits (~75 decimal places out of 100) are discarded by your formula.
But Decimal.js provides a random() method. Why don't you use that one?
function random(min, max) {
  var delta = new Decimal(max).sub(min);
  return Decimal.random(+delta.log(10)).mul(delta).add(min);
}
Another "problem" why you get so many values with e+99 is probability. For the range 0 .. 1e100 the probabilities to get some exponent are
e+99 => 90%,
e+98 => 9%,
e+97 => 0.9%,
e+96 => 0.09%,
e+95 => 0.009%,
e+94 => 0.0009%,
e+93 => 0.00009%,
e+92 => 0.000009%,
e+91 => 0.0000009%,
e+90 => 0.00000009%,
and so on
So if you generate ten billion numbers, statistically you'll get a single value up to 1e+90. Those are the odds.
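A quick empirical check of those odds (a sketch; precision caveats aside, the exponent distribution is easy to observe):
const counts = {};
for (let i = 0; i < 1e6; i++) {
  const exp = Math.floor(Math.log10(Math.random() * 1e100)); // almost always 99
  counts[exp] = (counts[exp] || 0) + 1;
}
console.log(counts); // ~90% at 99, ~9% at 98, ~0.9% at 97, ...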
I want to even out those odds for large numbers. 1e33 should have the same odds as 1e90 for example
OK, then let's generate a 10^random in the range min ... max.
function random2(min, max) {
  var a = +Decimal.log10(min),
      b = +Decimal.log10(max);
  // trying to deal with zero-values
  if (a === -Infinity && b === -Infinity) return 0; // a random value between 0 and 0 ;)
  if (a === -Infinity) a = Math.min(0, b - 53);
  if (b === -Infinity) b = Math.min(0, a - 53);
  return Decimal.pow(10, Decimal.random(Math.abs(b - a)).mul(b - a).add(a));
}
Now the exponents are pretty much uniformly distributed, but the values are a bit skewed, because 10^1 to 10^1.5 (10 .. 33) has the same probability as 10^1.5 to 10^2 (34 .. 100).
The issue with Math.random() * Math.pow(10, Math.floor(Math.random() * 100)) at smaller numbers is that Math.random() ranges over [0, 1), meaning that when calculating the exponent separately one needs to make sure the prefix ranges over [1, 10). Otherwise you want to calculate a number in [1eX, 1eX+1) but have e.g. 0.1 as prefix and end up in 1eX-1. Here is an example; maxExp is not 100 but 10 for readability of the output, but it is easily adjustable.
let maxExp = 10;

function differentDistributionRandom() {
  let exp = Math.floor(Math.random() * (maxExp + 1)) - 1;
  if (exp < 0) return Math.random();
  else return (Math.random() * 9 + 1) * Math.pow(10, exp);
}

let counts = new Array(maxExp + 1).fill(0).map(e => []);
for (let i = 0; i < (maxExp + 1) * 1000; i++) {
  let x = differentDistributionRandom();
  counts[Math.max(0, Math.floor(Math.log10(x)) + 1)].push(x);
}
counts.forEach((e, i) => {
  console.log(`E: ${i - 1 < 0 ? "<0" : i - 1}, amount: ${e.length}, example: ${Number.isNaN(e[0]) ? "none" : e[0]}`);
});
You might see the category <0 here, which is hopefully what you wanted. The cutoff point is arbitrary: here [0, 1) has the same probability as [1, 10) and as [10, 100) and so on, but [0.01, 0.1) is again less likely than [0.1, 1).
If you didn't insist on base 10, you could reinterpret the pseudorandom bits from two Math.random calls as a Float64, which would give a similar distribution in base 2:
function exponentDistribution() {
  let bits = [Math.random(), Math.random()];
  let buffer = new ArrayBuffer(24);
  let view = new DataView(buffer);
  view.setFloat64(8, bits[0]);
  view.setFloat64(16, bits[1]);
  // alternatively all at once with setInt32
  for (let i = 0; i < 4; i++) {
    view.setInt8(i, view.getInt8(12 + i));
    view.setInt8(i + 4, view.getInt8(20 + i));
  }
  return Math.abs(view.getFloat64(0));
}

let counts = new Array(11).fill(0).map(e => []);
for (let i = 0; i < (1 << 11) * 100; i++) {
  let x = exponentDistribution();
  let exp = Math.floor(Math.log2(x));
  if (exp >= -5 && exp <= 5) {
    counts[exp + 5].push(x);
  }
}
counts.forEach((e, i) => {
  console.log(`E: ${i - 5}, amount: ${e.length}, example: ${Number.isNaN(e[0]) ? "none" : e[0]}`);
});
This one is obviously bounded by the precision limits of Float64; there are some uneven parts of the distribution due to details of IEEE 754, e.g. denormals/subnormals, and I did not take care of special values like Infinity. It is rather to be seen as a fun extra, a reminder of the distribution of float values. Note that the loop runs (1 << 11) * 100 times: 1 << 11 (2048) is roughly the exponent range of Float64 (11 bits, [-1022, 1023]), which is why each bucket in the example gets approximately 100 hits.
You can create the number in increments less than Number.MAX_SAFE_INTEGER and then concatenate the generated numbers into a single string:
const r = () => Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
let N = "";
for (let i = 0; i < 10; i++) N += r();
document.body.appendChild(document.createTextNode(N));
console.log(/e/.test(N));
It's important to note that I'm not looking for a rounding function. I am looking for a function that returns the number of decimal places in an arbitrary number's simplified decimal representation. That is, we have the following:
decimalPlaces(5555.0); //=> 0
decimalPlaces(5555); //=> 0
decimalPlaces(555.5); //=> 1
decimalPlaces(555.50); //=> 1
decimalPlaces(0.0000005); //=> 7
decimalPlaces(5e-7); //=> 7
decimalPlaces(0.00000055); //=> 8
decimalPlaces(5.5e-7); //=> 8
My first instinct was to use the string representations: split on '.', then on 'e-', and do the math, like so (the example is verbose):
function decimalPlaces(number) {
  var parts = number.toString().split('.', 2),
      integerPart = parts[0],
      decimalPart = parts[1],
      exponentPart;
  if (integerPart.charAt(0) === '-') {
    integerPart = integerPart.substring(1);
  }
  if (decimalPart !== undefined) {
    parts = decimalPart.split('e-', 2);
    decimalPart = parts[0];
  }
  else {
    parts = integerPart.split('e-', 2);
    integerPart = parts[0];
  }
  exponentPart = parts[1];
  if (exponentPart !== undefined) {
    return integerPart.length +
      (decimalPart !== undefined ? decimalPart.length : 0) - 1 +
      parseInt(exponentPart);
  }
  else {
    return decimalPart !== undefined ? decimalPart.length : 0;
  }
}
For my examples above, this function works. However, I'm not satisfied until I've tested every possible value, so I busted out Number.MIN_VALUE.
Number.MIN_VALUE; //=> 5e-324
decimalPlaces(Number.MIN_VALUE); //=> 324
Number.MIN_VALUE * 100; //=> 4.94e-322
decimalPlaces(Number.MIN_VALUE * 100); //=> 324
This looked reasonable at first, but then on a double take I realized that 5e-324 * 100 should be 5e-322! And then it hit me: I'm dealing with the effects of quantization of very small numbers. Not only are numbers being quantized before storage; additionally, some numbers stored in binary have unreasonably long decimal representations, so their decimal representations are being truncated. This is unfortunate for me, because it means that I can't get at their true decimal precision using their string representations.
So I come to you, StackOverflow community. Does anyone among you know a reliable way to get at a number's true post-decimal-point precision?
The purpose of this function, should anyone ask, is for use in another function that converts a float into a simplified fraction (that is, it returns the relatively coprime integer numerator and nonzero natural denominator). The only missing piece in this outer function is a reliable way to determine the number of decimal places in the float so I can multiply it by the appropriate power of 10. Hopefully I'm overthinking it.
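For context, here is a sketch of that outer function, assuming a working decimalPlaces (the name toFraction and the gcd helper are illustrative, not from the question):
function toFraction(x) {
  const places = decimalPlaces(x); // any of the implementations below
  let denominator = Math.pow(10, places);
  let numerator = Math.round(x * denominator);
  const gcd = (a, b) => b ? gcd(b, a % b) : a; // greatest common divisor
  const g = gcd(Math.abs(numerator), denominator);
  return [numerator / g, denominator / g];
}
// toFraction(0.25) -> [1, 4]; toFraction(-0.5) -> [-1, 2]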
Historical note: the comment thread below may refer to first and second implementations. I swapped the order in September 2017 since leading with a buggy implementation caused confusion.
If you want something that maps "0.1e-100" to 101, then you can try something like
function decimalPlaces(n) {
  // Make sure it is a number and use the builtin number -> string.
  var s = "" + (+n);
  // Pull out the fraction and the exponent.
  var match = /(?:\.(\d+))?(?:[eE]([+\-]?\d+))?$/.exec(s);
  // NaN or Infinity or integer.
  // We arbitrarily decide that Infinity is integral.
  if (!match) { return 0; }
  // Count the number of digits in the fraction and subtract the
  // exponent to simulate moving the decimal point left by exponent places.
  // 1.234e+2 has 1 fraction digit and '234'.length - 2 == 1
  // 1.234e-2 has 5 fraction digits and '234'.length - -2 == 5
  return Math.max(
      0, // lower limit
      (match[1] == '0' ? 0 : (match[1] || '').length) // fraction length
      - (match[2] || 0)); // exponent
}
According to the spec, any solution based on the builtin number->string conversion can only be accurate to 21 places beyond the exponent.
9.8.1 ToString Applied to the Number Type
Otherwise, let n, k, and s be integers such that k ≥ 1, 10^(k−1) ≤ s < 10^k, the Number value for s × 10^(n−k) is m, and k is as small as possible. Note that k is the number of digits in the decimal representation of s, that s is not divisible by 10, and that the least significant digit of s is not necessarily uniquely determined by these criteria.
If k ≤ n ≤ 21, return the String consisting of the k digits of the decimal representation of s (in order, with no leading zeroes), followed by n−k occurrences of the character ‘0’.
If 0 < n ≤ 21, return the String consisting of the most significant n digits of the decimal representation of s, followed by a decimal point ‘.’, followed by the remaining k−n digits of the decimal representation of s.
If −6 < n ≤ 0, return the String consisting of the character ‘0’, followed by a decimal point ‘.’, followed by −n occurrences of the character ‘0’, followed by the k digits of the decimal representation of s.
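The practical consequence is easy to demonstrate; the switch to exponent form happens past 21 digits:
console.log((1e20).toString()); // '100000000000000000000' (n = 21, still plain digits)
console.log((1e21).toString()); // '1e+21' (n = 22, exponent form)
console.log(Number.MIN_VALUE.toString()); // '5e-324'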
Historical note: The implementation below is problematic. I leave it here as context for the comment thread.
Based on the definition of Number.prototype.toFixed, it seems like the following should work but due to the IEEE-754 representation of double values, certain numbers will produce false results. For example, decimalPlaces(0.123) will return 20.
function decimalPlaces(number) {
  // toFixed produces a fixed representation accurate to 20 decimal places
  // without an exponent.
  // The ^-?\d*\. strips off any sign, integer portion, and decimal point
  // leaving only the decimal fraction.
  // The 0+$ strips off any trailing zeroes.
  return ((+number).toFixed(20)).replace(/^-?\d*\.?|0+$/g, '').length;
}
// The OP's examples:
console.log(decimalPlaces(5555.0)); // 0
console.log(decimalPlaces(5555)); // 0
console.log(decimalPlaces(555.5)); // 1
console.log(decimalPlaces(555.50)); // 1
console.log(decimalPlaces(0.0000005)); // 7
console.log(decimalPlaces(5e-7)); // 7
console.log(decimalPlaces(0.00000055)); // 8
console.log(decimalPlaces(5e-8)); // 8
console.log(decimalPlaces(0.123)); // 20 (!)
Well, I use a solution based on the fact that if you multiply a floating-point number by the right power of 10, you get an integer.
For instance, if you multiply 3.14 by 10^2, you get 314 (an integer). The exponent then represents the number of decimals the floating-point number has.
So I thought that if I gradually multiply the floating-point number by increasing powers of 10, I eventually arrive at the solution.
let decimalPlaces = function () {
  function isInt(n) {
    return typeof n === 'number' &&
      parseFloat(n) == parseInt(n, 10) && !isNaN(n);
  }
  return function (n) {
    const a = Math.abs(n);
    let c = a, count = 1;
    while (!isInt(c) && isFinite(c)) {
      c = a * Math.pow(10, count++);
    }
    return count - 1;
  };
}();

for (const x of [
  0.0028, 0.0029, 0.0408,
  0, 1.0, 1.00, 0.123, 1e-3,
  3.14, 2.e-3, 2.e-14, -3.14e-21,
  5555.0, 5555, 555.5, 555.50, 0.0000005, 5e-7, 0.00000055, 5e-8,
  0.000006, 0.0000007,
  0.123, 0.121, 0.1215
]) console.log(x, '->', decimalPlaces(x));
2017 Update
Here's a simplified version based on Edwin's answer. It has a test suite and returns the correct number of decimals for corner cases including NaN, Infinity, exponent notations, and numbers with problematic representations of their successive fractions, such as 0.0029 or 0.0408. This covers the vast majority of financial applications, where 0.0408 having 4 decimals (not 6) is more important than 3.14e-21 having 23.
function decimalPlaces(n) {
  function hasFraction(n) {
    return Math.abs(Math.round(n) - n) > 1e-10;
  }
  let count = 0;
  // multiply by increasing powers of 10 until the fractional part is ~ 0
  while (hasFraction(n * (10 ** count)) && isFinite(10 ** count))
    count++;
  return count;
}

for (const x of [
  0.0028, 0.0029, 0.0408, 0.1584, 4.3573, // corner cases against Edwin's answer
  11.6894,
  0, 1.0, 1.00, 0.123, 1e-3, -1e2, -1e-2, -0.1,
  NaN, 1E500, Infinity, Math.PI, 1/3,
  3.14, 2.e-3, 2.e-14,
  1e-9, // 9
  1e-10, // should be 10, but is below the precision limit
  -3.14e-13, // 15
  3.e-13, // 13
  3.e-14, // should be 14, but is below the precision limit
  123.12345678901234567890, // 14, the precision limit
  5555.0, 5555, 555.5, 555.50, 0.0000005, 5e-7, 0.00000055, 5e-8,
  0.000006, 0.0000007,
  0.123, 0.121, 0.1215
]) console.log(x, '->', decimalPlaces(x));
The tradeoff is that the method is limited to maximum 10 guaranteed decimals. It may return more decimals correctly, but don't rely on that. Numbers smaller than 1e-10 may be considered zero, and the function will return 0. That particular value was chosen to solve correctly the 11.6894 corner case, for which the simple method of multiplying by powers of 10 fails (it returns 5 instead of 4).
However, this is the 5th corner case I've discovered, after 0.0029, 0.0408, 0.1584 and 4.3573. After each, I had to reduce the precision by one decimal. I don't know if there are other numbers with less than 10 decimals for which this function may return an incorrect number of decimals. To be on the safe side, look for an arbitrary precision library.
Note that converting to string and splitting by . is only a solution for up to 7 decimals. String(0.0000007) === "7e-7". Or maybe even less? Floating point representation isn't intuitive.
Simple "One-Liner":
If what you're doing requires more than 16 digit precision, then this is not for you.
This 'one-liner' will work fine for the other 99.99999999999999% of the time. (Yes, even that number.)😜
function numDec(n){return n%1==0?0:(""+n).length-(""+n).lastIndexOf(".")-1}
Demo in the snippet:
function numDec(n){return n%1==0?0:(""+n).length-(""+n).lastIndexOf(".")-1}
setInterval(function(){
n=Math.random()*10000000000;
document.body.innerHTML=n+' ← '+numDec(n)+' decimal places';
},777);
body{font-size:123.4567890%; font-family:'fira code';}
More info:
MDN: String.prototype.lastIndexOf()
MDN: String.prototype.length
This works for small numbers, down to about e-17:
function decimalPlaces(n) {
  var a;
  return (a = (n.toString().charAt(0) == '-' ? n - 1 : n + 1).toString()
      .replace(/^-?[0-9]+\.?([0-9]+)$/, '$1').length) >= 1 ? a : 0;
}
This works for me:
const decimalPlaces = value.substring(value.indexOf('.') + 1).length;
This method expects value to be a standard decimal number already converted to a string.
Not only are numbers being quantized before storage; additionally, some numbers stored in binary have unreasonably long decimal representations, so their decimal representations are being truncated.
JavaScript represents numbers using IEEE-754 double-precision (64 bit) format. As I understand it this gives you 53 bits precision, or fifteen to sixteen decimal digits.
So for any number with more digits you just get an approximation. There are some libraries around to handle large numbers with more precision, including those mentioned in this thread.
2021 Update
An optimized version of Mike Samuel's answer, handling scientific and non-scientific representations.
// Helper function to extract the number of decimals, assuming the
// input is a number (either as a number or a stringified number).
// Note: if a stringified number has an exponent, it will always be
// '<x>e+123' or '<x>e-123' or '<x.dd...d>e+123' or '<x.dd...d>e-123'.
// No need to check for '<x>e123', '<x>E+123', '<x>E-123' etc.
const _numDecimals = v => {
  const [i, p, d, e, n] = v.toString().split(/(\.|e[\-+])/g);
  const f = e === 'e-';
  return ((p === '.' && (!e || f) && d.length) + (f && parseInt(n)))
    || (p === 'e-' && parseInt(d))
    || 0;
}
// But if you want to be extra safe...you can replace _numDecimals
// with this:
const _numSafeDecimals = v => {
  let [i, p, d, e, n] = v.toString().split(/(\.|[eE][\-+])/g);
  e = (e || '').toLowerCase(); // guard: e is undefined when there is no exponent
  const f = e === 'e-';
  return ((p === '.' && (!e || f) && d.length) + (f && parseInt(n)))
    || (p.toLowerCase() === 'e-' && parseInt(d))
    || 0;
}
// Augmenting Number proto.
Number.prototype.numDecimals = function () {
  return (this % 1 !== 0 && _numDecimals(this)) || 0;
}
// Independent function.
const numDecimals = num => (
  (!isNaN(num) && num % 1 !== 0 && _numDecimals(num)) || 0
);
// Tests:
const test = n => (
  console.log('Number of decimals of', n, '=', n.numDecimals())
);
test(1.234e+2); // --> 1
test(0.123); // ---> 3
test(123.123); // ---> 3
test(0.000123); // ---> 6
test(1e-20); // --> 20
test(1.2e-20); // --> 21
test(1.23E-20); // --> 22
test(1.23456789E-20); // --> 28
test(10); // --> 0
test(1.2e20); // --> 0
test(1.2e+20); // --> 0
test(1.2E100); // --> 0
test(Infinity); // --> 0
test(-1.234e+2); // --> 1
test(-0.123); // ---> 3
test(-123.123); // ---> 3
test(-0.000123); // ---> 6
test(-1e-20); // --> 20
test(-1.2e-20); // --> 21
test(-1.23E-20); // --> 22
test(-1.23456789E-20); // --> 28
test(-10); // --> 0
test(-1.2e20); // --> 0
test(-1.2e+20); // --> 0
test(-1.2E100); // --> 0
test(-Infinity); // --> 0
I use this...
45555.54545456?.toString().split(".")[1]?.length
Null-check the value before converting it to a string, then convert it to a string, split it on the decimal point, take the decimal part (with another null check), and read its length. If the number has decimals, you get their count; otherwise you get undefined.
//console.log("should give error:", null.toString().split(".")[1]?.length);
console.log("should give undefined:", null?.toString().split(".")[1]?.length);
//console.log("should give error:", 45555.toString().split(".")[1]?.length);
console.log("should give undefined:", 45555?.toString().split(".")[1]?.length);
//console.log("should give error:", 45555?.toString().split(".")[1].length);
console.log("should give amount of decimals:", 45555.54545456?.toString().split(".")[1]?.length);
console.log("should return without decimals when undefined:", 45555.54545456.toFixed(undefined));
An optimized version of nick's answer.
The function requires that n is a string. It counts the decimals even if they are all 0, e.g. 1.00 -> 2 decimals.
const $DecimalSeparator = "." // assumption: the original uses a configurable separator

function getDecimalPlaces(n) {
  var i = n.indexOf($DecimalSeparator)
  return i > 0 ? n.length - i - 1 : 0
}
console.log(getDecimalPlaces("5555.0")); // 1
console.log(getDecimalPlaces("5555")); // 0
console.log(getDecimalPlaces("555.5")); // 1
console.log(getDecimalPlaces("555.50")); // 2
console.log(getDecimalPlaces("0.0000005")); // 7
console.log(getDecimalPlaces("0.00000055")); // 8
console.log(getDecimalPlaces("0.00005500")); // 8
If you have very small values, use the below code:
Number.prototype.countDecimals = function () {
  if (Math.floor(this.valueOf()) === this.valueOf()) return 0;
  var str = this.toString();
  if (str.indexOf(".") !== -1 && str.indexOf("-") !== -1) {
    return parseInt(str.split("-")[1]) + str.split("-")[0].split(".")[1].length - 1;
  } else if (str.indexOf(".") !== -1) {
    return str.split(".")[1].length || 0;
  }
  return str.split("-")[1] || 0;
}
var num = 10
console.log(num.countDecimals()) //0
num = 1.23
console.log(num.countDecimals()) //2
num = 1.454689451
console.log(num.countDecimals()) //9
num = 1.234212344244323e-7
console.log(num.countDecimals()) //22
Based on gion_13's answer I came up with this:
function decimalPlaces(n) {
  let result = /^-?[0-9]+\.([0-9]+)$/.exec(n);
  return result === null ? 0 : result[1].length;
}

for (const x of [
  0, 1.0, 1.00, 0.123, 1e-3, 3.14, 2.e-3, -3.14e-21,
  5555.0, 5555, 555.5, 555.50, 0.0000005, 5e-7, 0.00000055, 5e-8,
  0.000006, 0.0000007,
  0.123, 0.121, 0.1215
]) console.log(x, '->', decimalPlaces(x));
It fixes that answer returning 1 when there are no decimal places. As far as I can tell this works without errors.