Fast nearest power of 2 in JavaScript?

Is there any faster alternative to the following expression:
Math.pow(2,Math.floor(Math.log(x)/Math.log(2)))
That is, taking the closest (smaller) integer power of 2 of a double? I have such an expression in an inner loop. I suspect it could be much faster, considering one could just read the exponent field from the IEEE 754 representation of the double.
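A minimal sketch of that idea (my own illustration, not from the original post), assuming a normal (not subnormal) positive finite double, reading the exponent bits through a DataView:
const buf = new DataView(new ArrayBuffer(8));
function flooredPowerOf2(x) {
  buf.setFloat64(0, x); // big-endian by default
  const exponent = ((buf.getUint32(0) >>> 20) & 0x7ff) - 1023; // unbiased exponent
  return Math.pow(2, exponent); // exact for integer exponents
}
console.log(flooredPowerOf2(9)); // 8
console.log(flooredPowerOf2(0.75)); // 0.5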

Making use of ES6's Math.clz32(n) to count leading zeros of a 32-bit integer:
// Compute nearest lower power of 2 for n in [1, 2**31-1]:
function nearestPowerOf2(n) {
return 1 << 31 - Math.clz32(n);
}
// Examples:
console.log(nearestPowerOf2(9)); // 8
console.log(nearestPowerOf2(33)); // 32
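If n can exceed 2**31 - 1 (where 32-bit shifts stop working), one possible fallback (my addition, not part of the original answer) is to switch to exponentiation for large inputs:
function nearestPowerOf2Safe(n) {
  // assumes n is a positive finite number
  if (n < 2 ** 31) return 1 << (31 - Math.clz32(n));
  return 2 ** Math.floor(Math.log2(n));
}
console.log(nearestPowerOf2Safe(2 ** 40 + 5)); // 1099511627776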

Here's another alternative, with benchmarks. While both seem to be comparable, I like being able to floor or ceil.
function blpo2(x) {
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
x = x | (x >> 32); // no-op in JavaScript: shift counts are taken mod 32
return x - (x >> 1);
}
function pow2floor(v) {
var p = 1;
while (v >>= 1) {
p <<= 1;
}
return p;
}
function pow2ceil(v) {
var p = 2;
while (v >>= 1) {
p <<= 1;
}
return p;
}
function MATHpow2(v) {
return Math.pow(2, Math.floor(Math.log(v) / Math.log(2)))
}
function nearestPowerOf2(n) {
return 1 << 31 - Math.clz32(n);
}
function runner(fn, val) {
var res;
var st = new Date().getTime()
for (var i = 0; i < 100000000; i++) {
fn(val);
}
return (new Date().getTime() - st)
}
var source = 300000;
var a = runner(pow2floor, source);
console.log("\n--- pow2floor ---");
console.log(" result: " + pow2floor(source));
console.log(" time: " + a + " ms");
var b = runner(MATHpow2, source);
console.log("\n--- MATHpow2 ---");
console.log(" result: " + MATHpow2(source));
console.log(" time: " + b + " ms");
var b = runner(nearestPowerOf2, source);
console.log("\n--- nearestPowerOf2 ---");
console.log(" result: " + nearestPowerOf2(source));
console.log(" time: " + b + " ms");
var b = runner(blpo2, source);
console.log("\n--- blpo2 ---");
console.log(" result: " + blpo2(source));
console.log(" time: " + b + " ms");
// pow2floor: 1631 ms
// MATHpow2: 13942 ms
// nearestPowerOf2: 937 ms
// blpo2 : 919 ms **WINNER**

Here is also a branchless 32-bit version, which is the fastest so far (roughly 9x faster than the Math.pow approach, and even more so on cellphones).
Note that JavaScript's bitwise operators work on 32-bit integers and shift counts are taken mod 32, so the x >> 32 line is effectively a no-op. The trick can only be scaled to 64 or 128 bits in languages with wider integer types (or with BigInt, sketched after the benchmark below), by adding one or two lines:
x = x | (x >> 32);
x = x | (x >> 64);
on my computer:
2097152,blpo2: 118 ms **FASTEST**
2097152,nearestPowerOf2: 973 ms
2097152,pow2floor: 2033 ms
on my phone:
2097152,blpo2: 216 ms **FASTEST**
2097152,nearestPowerOf2: 1259 ms
2097152,pow2floor: 2904 ms
function blpo2(x) {
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
x = x | (x >> 32); // no-op in JavaScript: shift counts are taken mod 32
return x - (x >> 1);
}
function pow2floor(v) {
var p = 1;
while (v >>= 1) {
p <<= 1;
}
return p;
}
function nearestPowerOf2(n) {
return 1 << 31 - Math.clz32(n);
}
function runner(fn, val) {
var res;
var st = new Date().getTime()
for (var i = 0; i < 100000000; i++) {
res = fn(val);
}
var dt = new Date().getTime() - st;
return res + "," + fn.name + ": " + dt + " ms"
}
var source = 3000000;
console.log(runner(blpo2, source), "**FASTEST**")
console.log(runner(nearestPowerOf2, source))
console.log(runner(pow2floor, source))
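As noted above, plain JavaScript numbers cannot widen this trick beyond 32 bits, but a BigInt variant can. A minimal sketch (my addition, not part of the benchmark above; BigInt operations are generally slower than 32-bit integer operations):
function blpo2Big(x) {
  // x must be a positive BigInt
  x |= x >> 1n;
  x |= x >> 2n;
  x |= x >> 4n;
  x |= x >> 8n;
  x |= x >> 16n;
  x |= x >> 32n; // covers values up to 64 bits; add >> 64n for 128 bits
  return x - (x >> 1n);
}
console.log(blpo2Big(2n ** 40n + 123n)); // 1099511627776n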

Unfortunately, you would need an equivalent of the C function frexp. The best I've been able to find is in JSFiddle, and its code uses Math.pow.
There are a couple of alternatives you could benchmark, using real data, along with your current attempt:
Starting at 1.0, multiply repeatedly by 2.0 until it is greater than or equal to the input, then multiply by 0.5 until it is less than or equal to the input. You would need special handling for values at the ends of the double range.
Store an ascending value array of all the exact powers of two in the double range, and do a binary search.
The first one is likely to be fastest if your data is typically close to 1.0. The second one requires up to 11 conditional branches.
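A minimal sketch of the first alternative (my own, with the end-of-range handling mentioned above omitted; it assumes a finite input x >= Number.MIN_VALUE):
function pow2floorByScaling(x) {
  var p = 1.0;
  while (p * 2.0 <= x) p *= 2.0; // grow while the next doubling still fits
  while (p > x) p *= 0.5; // shrink back below x if we started above it
  return p;
}
console.log(pow2floorByScaling(300000)); // 262144
console.log(pow2floorByScaling(0.3)); // 0.25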

Without ES6...
x=Math.floor(Math.random()*500000); //for example
nearestpowerof2=2**(x.toString(2).length-1);
console.log(x,">>>",nearestpowerof2);
In other words: the result is 2 raised to one less than the length of the number's binary representation.

And this is another.
function nP2(n) {
return 1 << Math.log2(n);
}
console.log(nP2(345367));
console.log(nP2(34536));
console.log(nP2(3453));
console.log(nP2(345));
console.log(nP2(34));

And another way (this one is slow but it's fun to code recursive ones):
function calc(n, c) {
c = c || 0;
n = n >> 1;
return (n > 0) ? calc(n, c + 1) : 2 ** c;
}
console.log(calc(345367));
console.log(calc(34536));
console.log(calc(3453));
console.log(calc(345));
console.log(calc(34));

Oh and I forgot the one-liner:
a=3764537465
console.log(2**~~Math.log2(a))
In other words, here we raise 2 to the power of the truncated base-2 logarithm of the number. But alas, this is 140 times slower than the winner: blpo2 https://stackoverflow.com/a/74916422/236062

Related

javascript program to convert binary to decimal and back

I have tried this
function binToDec(num) {
let dec = 0;
for(let i = 0; i < num.length; i++) {
if(num[num.length - (i + 1)] === '1') {
dec += 2 ** i;
}
}
return dec;
}
console.log(binToDec('1010'));
This code is not mine, and it works, but I want to know how it converts the binary number to decimal, and it would be very helpful if you could tell me another way to do it.
I have also tried this
function binToDec(num) {
let bin = parseInt(num, 2);
return bin;
}
console.log(binToDec(1010));
I know this also works, but I am not looking for this answer.
Thank you for your help.
It just starts with the last character of the string and adds the value of that position to the result.
string   dec
------   ------
1010     0
   0     0
  1      0 + 2
 0       2
1        2 + 8
------   ------
         10
function binToDec(num) {
let dec = 0;
for (let i = 0; i < num.length; i++) {
if (num[num.length - (i + 1)] === '1') {
dec += 2 ** i;
}
}
return dec;
}
console.log(binToDec('1010')); // 10
Another way is to start at the left side of the string,
multiply the value converted so far by the base (2), and
add the value of the current digit.
The result is then the converted number. This works for all bases, as long as the value at each index is converted to a number.
function binToDec(num) {
let dec = 0;
for (let i = 0; i < num.length; i++) {
dec *= 2;
dec += +num[i];
}
return dec;
}
console.log(binToDec('1101')); // 13
Explanation
Think of how base 10 works.
909 = 900 + 9
= (9 * 100) + (0 * 10) + (9 * 1)
= (9 * 10**2) + (0 * 10**1) + (9 * 10**0)
As you can see, a natural number in base 10 can be seen as a sum where each term is in the form of:
digit * base**digit_position
This is true for any base:
base 2 : 0b101 = (0b1 * 2**2) + (0b0 * 2**1) + (0b1 * 2**0)
base 16 : 0xF0F = (0xF * 16**2) + (0x0 * 16**1) + (0xF * 16**0)
Therefore, here is a possible abstraction of a natural number:
function natural_number (base, digits) {
var sum = 0;
for (var i = 0; i < digits.length; i++) {
var digit = digits[i];
var digit_position = digits.length - (i + 1);
sum += digit * base**digit_position;
}
return sum;
}
> | natural_number(2, [1, 0, 1]) // 1 * 2**2 + 1 * 2**0
< | 5
> | natural_number(10, [1, 0, 1]) // 1 * 10**2 + 1 * 10**0
< | 101
> | natural_number(16, [1, 0, 1]) // 1 * 16**2 + 1 * 16**0
< | 257
Your own function takes only binary numbers (base 2). In this case digit can be either 0 or 1, that's all. We know that it's useless to multiply something by 0 or 1, so the addition can be replaced with:
if (digit === 1) {
sum += 2**digit_position;
}
Which is the equivalent of:
if (num[num.length - (i + 1)] === '1') {
dec += 2 ** i;
}
Do you get it? :-)
Alternative
You don't feel comfortable with the exponentiation operator (**)? There is a workaround. Did you ever notice that multiplying a number by 10 is nothing more than shifting its digits once to the left?
909 * 10 = 9090
Actually, shifting a number to the left boils down to multiplying this number by its base:
number *= base
This is true for any base:
base 2 : 0b11 * 2 = 0b110
base 16 : 0xBEE * 16 + 0xF = 0xBEE0 + 0xF = 0xBEEF
Based on this, we can build an algorithm to convert an array of digits into a number. A trace of execution with [9,0,9] in base 10 as input would look like this:
init | 0 | n = 0
add 9 | 9 | n += 9
shift | 90 | n *= 10
add 0 | 90 | n += 0
shift | 900 | n *= 10
add 9 | 909 | n += 9
Here is a possible implementation:
function natural_number (base, digits) {
var n = 0;
for (var i = 0; i < digits.length; i++) {
n += digits[i];
if (i + 1 < digits.length) {
n *= base;
}
}
return n;
}
Of course this function works the same as before, and there is a good reason for that. Indeed, if you unroll the for loop that computes [9,0,9] in base 10, you get this:
return ((0 + 9) * 10 + 0) * 10 + 9;
Then expand this expression:
((0 + 9) * 10 + 0) * 10 + 9
= (0 + 9) * 10 * 10 + 0 * 10 + 9
= 9 * 10 * 10 + 0 * 10 + 9
= 9 * 10**2 + 0 * 10**1 + 9 * 10**0
Do you recognize the equation discussed earlier? :-)
Bonus
Reverse function:
function explode_natural_number (base, number) {
var remainder, exploded = [];
while (number) {
remainder = number % base;
exploded.unshift(remainder);
number = (number - remainder) / base;
}
return exploded.length ? exploded : [0];
}
> | explode_natural_number(2, 5)
< | [1, 0, 1]
> | explode_natural_number(3, 5) // base 3 (5 = 1 * 3**1 + 2 * 3**0) :-)
< | [1, 2]
> | explode_natural_number(16, natural_number(16, [11, 14, 14, 15])) // 0xBEEF
< | [11, 14, 14, 15]
String to number and number to string:
function parse_natural_number (number, base) {
var ZERO = 48, A = 65; // ASCII codes
return natural_number(base, number.split("").map(function (digit) {
return digit.toUpperCase().charCodeAt(0);
}).map(function (code) {
return code - (code < A ? ZERO : A - 10);
}));
}
function stringify_natural_number (number, base) {
var ZERO = 48, A = 65; // ASCII codes
return String.fromCharCode.apply(
String, explode_natural_number(base, number).map(function (digit) {
return digit + (digit < 10 ? ZERO : A - 10);
})
);
}
> | stringify_natural_number(parse_natural_number("48879", 10), 16)
< | "BEEF"
> | parse_natural_number("10", 8)
< | 8
More levels of abstraction for convenience:
function bin_to_dec (number) {
return parse_natural_number(number, 2);
}
function oct_to_dec (number) {
return parse_natural_number(number, 8);
}
function dec_to_dec (number) {
return parse_natural_number(number, 10);
}
function hex_to_dec (number) {
return parse_natural_number(number, 16);
}
function num_to_dec (number) {
switch (number[0] + number[1]) {
case "0b" : return bin_to_dec(number.slice(2));
case "0x" : return hex_to_dec(number.slice(2));
default : switch (number[0]) {
case "0" : return oct_to_dec(number.slice(1));
default : return dec_to_dec(number);
}
}
}
> | oct_to_dec("10")
< | 8
> | num_to_dec("010")
< | 8
> | 010 // :-)
< | 8
function dec_to_bin (number) {
return stringify_natural_number(number, 2);
}
> | dec_to_bin(8)
< | "1000"

I'm trying to port a 1D Perlin noise tutorial to C++

I'm trying to port a 1D Perlin noise tutorial (written in JavaScript) to C++ using the SFML library: https://codepen.io/Tobsta/post/procedural-generation-part-1-1d-perlin-noise
However, this doesn't work. I don't get any error, but this is what I get: https://i.imgur.com/2tAPhsH.png
Basically a straight line.
And this is what i should get : https://i.imgur.com/GPnfsuK.png
Here's the ported code from the above link :
TerrainBorder constructor:
TerrainBorder::TerrainBorder(sf::RenderWindow &window) {
M = 4294967296;
A = 1664525;
C = 1;
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_int_distribution<int> dist(0, M);
Z = floor(dist(rng) * M);
x = 0;
y = window.getSize().y / 2.0f;
amp = 100;
wl = 100;
fq = 1.0f / wl;
a = rand();
b = rand();
ar = sf::VertexArray(sf::Points);
}
Functions:
double TerrainBorder::rand()
{
Z = (A * Z + C) % M;
return Z / M - 0.5;
}
double TerrainBorder::interpolate(double pa, double pb , double px) {
double ft = px * PI,
f = (1 - cos(ft)) * 0.5;
return pa * (1 - f) + pb * f;
}
void TerrainBorder::drawPoints(sf::RenderWindow &window) {
while (x < window.getSize().x) {
if (static_cast<int> (x) % wl == 0) {
a = b;
b = rand();
y = window.getSize().y / 2 + a * amp;
} else {
y = window.getSize().y / 2 + interpolate(a, b, static_cast<int> (x)
% wl / wl) * amp;
}
ar.append(sf::Vertex(sf::Vector2f(x, y)));
x += 1;
}
}
Then I'm drawing the sf::VertexArray (which contains all the sf::Vertex objects) in the game loop.
I solved my problem, thanks for the answer anyway :)
I had to deal with type problems :p
I figured out that:
double c = x % 100 / 100;
std::cout << c << std::endl; // 0
!=
double c = x % 100;
std::cout << c / 100 << std::endl; // Some numbers depending on x
If it can help anyone in the future :)
C++ requires a careful choice of types for the numeric variables, to avoid overflows and unexpected conversions.
The snippet shown in the OP's question doesn't specify the types of M, A and Z, but uses a std::uniform_int_distribution of int, while M is initialized with a value that is out of the range of an int in most implementations.
It's also worth noting that the Standard Library already provides a std::linear_congruential_engine:
#include <iostream>
#include <random>
int main()
{
std::random_device rd;
std::mt19937 rng(rd());
// Calculate the first value
constexpr std::size_t M = 4294967296;
std::uniform_int_distribution<std::size_t> ui_dist(0, M);
std::size_t Z = ui_dist(rng);
// Initialize the engine
static std::linear_congruential_engine<std::size_t, 1664525, 1, M> lcg_dist(Z);
// Generate the values
for (int i = 0; i < 10; ++i)
{
Z = lcg_dist();
std::cout << Z / double(M) << '\n'; // <- To avoid an integer division
}
}

How to manipulate bits in Javascript?

Let's say I have a variable X = 4.
How do I create a number whose binary representation is X ones (1111 in this case) using bitwise operators (& | ~ << ^), and then take a position and set the bit at that position to zero (0)?
Example:
X = initial(4) // X should be : 1111
Y = solution(X, 2) // Y should be 1101
Z = solution(Y, 3) // Z should be 1001
Yes, you'd use Math.pow (or on modern browsers, the exponentiation operator, **) and the bitwise operators to do that.
function initial(digits) {
return Math.pow(2, digits) - 1;
}
function solution(value, bit) {
return value & ~(1 << (bit - 1)); // () around `bit - 1` aren't necessary,
// but I find it clearer
}
var X = initial(4); // X should be : 1111
console.log(X.toString(2));
var Y = solution(X, 2); // Y should be 1101
console.log(Y.toString(2));
var Z = solution(Y, 3); // Z should be 1001
console.log(Z.toString(2));
Or — doh! — comments on the question point out that you can create the initial number without Math.pow or exponentiation:
function initial(digits) {
return (1 << digits) - 1;
}
function solution(value, bit) {
return value & ~(1 << (bit - 1)); // () around `bit - 1` aren't necessary,
// but I find it clearer
}
var X = initial(4); // X should be : 1111
console.log(X.toString(2));
var Y = solution(X, 2); // Y should be 1101
console.log(Y.toString(2));
var Z = solution(Y, 3); // Z should be 1001
console.log(Z.toString(2));
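Not part of the question, but the same pattern covers the other single-bit operations; a quick sketch for reference (the 1-based bit position matches the solution function above):
function setBit(value, bit) {
  return value | (1 << (bit - 1)); // force the bit to 1
}
function toggleBit(value, bit) {
  return value ^ (1 << (bit - 1)); // flip the bit
}
console.log(setBit(0b1001, 2).toString(2)); // "1011"
console.log(toggleBit(0b1001, 4).toString(2)); // "1"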

How to calculate the square root without using library and built-in methods in Javascript?

Please help me to write a function to compute the square root of positive real numbers using the formula:
x_(i+1) = (1/2) * (x_i + (A / x_i)),
where A is the input real number.
On the zeroth iteration, take x_0 = A.
The error should be at most 10^-6.
Output
sqrt (2) = 1.414
sqrt (9) = 3
sqrt (25) = 5
You could take x_i (x) and the new value x_(i+1) (x1) and check whether the two values are equal; then end the iteration and return that value.
As a starting point, you need an appropriate value, such as half of the given value.
function sqrt(a) {
var x,
x1 = a / 2;
do {
x = x1;
x1 = (x + (a / x)) / 2;
} while (x !== x1);
return x;
}
console.log(sqrt (2)); // 1.414
console.log(sqrt (9)); // 3
console.log(sqrt (25)); // 5
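For illustration (not part of the original answer), a trace for a = 2 with the starting value a / 2 = 1:
x1 = 1
x1 = (1 + 2/1) / 2 = 1.5
x1 = (1.5 + 2/1.5) / 2 = 1.4166667
x1 = (1.4166667 + 2/1.4166667) / 2 = 1.4142157
x1 = (1.4142157 + 2/1.4142157) / 2 = 1.4142136
Each step roughly doubles the number of correct digits, which is why the loop converges quickly.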
You can also use bisection - a more general method for solving problems:
var sqrt = function(n) {
if (n<0) {
throw "are you kidding?! we are REAL here.";
}
if (n === 0) {
return 0;
}
var bisect = function(l,r) {
var avg = (l+r)/2;
if (r-l<0.00000001) {
return avg;
}
if (avg*avg > n) {
return bisect(l, avg);
} else { // also covers the exact case avg*avg === n
return bisect(avg, r);
}
}
return bisect(0, n < 1 ? 1 : n);
}

De-interlace bytes

Given an interlaced bit sequence of:
ABABABABABABABAB
What javascript bitwise operation can I use to convert it to be in the sequence:
AAAAAAAABBBBBBBB
That's known as an unshuffle (see also Hacker's Delight 7.2, shuffling bits).
The algorithm given in Hacker's Delight is:
t = (x ^ (x >> 1)) & 0x22222222; x = x ^ t ^ (t << 1);
t = (x ^ (x >> 2)) & 0x0C0C0C0C; x = x ^ t ^ (t << 2);
t = (x ^ (x >> 4)) & 0x00F000F0; x = x ^ t ^ (t << 4);
t = (x ^ (x >> 8)) & 0x0000FF00; x = x ^ t ^ (t << 8);
Those right shifts can be either logical or arithmetic; the AND with the mask ensures that bits affected by that difference do not appear in t anyway.
This is for 32-bit numbers; for 16-bit numbers you can chop off the left half of every mask and skip the last step.
This is a sequence of delta swaps, see The Art of Computer Programming volume 4A, Bitwise tricks and techniques, bitswapping.
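For reference, here is the 16-bit variant just described, written out as a JavaScript function (a sketch based on the masks above, not taken verbatim from Hacker's Delight):
function unshuffle16(x) {
  var t;
  t = (x ^ (x >>> 1)) & 0x2222; x = x ^ t ^ (t << 1);
  t = (x ^ (x >>> 2)) & 0x0C0C; x = x ^ t ^ (t << 2);
  t = (x ^ (x >>> 4)) & 0x00F0; x = x ^ t ^ (t << 4);
  return x;
}
console.log(unshuffle16(0b1010101010101010).toString(2)); // "1111111100000000"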
Check out this algorithm and see if it works for you:
function deinterlace(input) {
  // input: 16 bits interleaved as ABABABABABABABAB
  // (A bits in the odd positions, B bits in the even positions, counting from bit 0)
  // output: AAAAAAAABBBBBBBB
  var a = 0;
  var b = 0;
  for (var i = 0; i < 8; i++) {
    a |= ((input >> (2 * i + 1)) & 1) << i; // collect the A bits
    b |= ((input >> (2 * i)) & 1) << i; // collect the B bits
  }
  return (a << 8) | b;
}
Working fiddle.
