Related
I tried this sort of implementation, but it doesn't appear to be working.
function urs32(n, amount) {
  const mask = (1 << (32 - amount)) - 1
  return (n >> amount) & mask
}
function flip32(n) {
  const mask = (1 << 32) - 1
  return ~n & mask
}
log(~0b10101010 >>> 0, urs32(~0b10101010, 0))
log(~0b10101010 >>> 0, flip32(0b10101010))
function log(a, b) {
  console.log(a.toString(2), b.toString(2))
}
I would expect a to equal b in both cases, if done right. Basically I am trying to flip 32 bits (so 1s become 0s and 0s become 1s). I see that 1 << 32 === 0, so to get the value I do 2 ** 32, but it still doesn't work.
How do you implement the equivalent of ~n >>> 0 on a BigInt?
Basically what I am trying to do is create the countLeadingOnes functions (out of the countLeadingZeroes functions), like so:
const LEADING_ZERO_BIT_TABLE = makeLeadingZeroTable()
function makeLeadingZeroTable() {
  let i = 0
  const table = new Uint8Array(256).fill(0)
  while (i < 256) {
    let count = 8
    let index = i
    while (index > 0) {
      index = (index / 2) | 0
      count--
    }
    table[i] = count
    i++
  }
  return table
}
function countLeadingZeroes32JS(n)
{
  let accum = LEADING_ZERO_BIT_TABLE[n >>> 24];
  if (accum === 8) {
    accum += LEADING_ZERO_BIT_TABLE[(n >>> 16)]
  }
  if (accum === 16) {
    accum += LEADING_ZERO_BIT_TABLE[(n >>> 8)]
  }
  if (accum === 24) {
    accum += LEADING_ZERO_BIT_TABLE[ n ]
  }
  return accum;
}
function countLeadingZeroes16JS(n)
{
  let accum = LEADING_ZERO_BIT_TABLE[n >>> 8]
  if (accum === 8) {
    accum += LEADING_ZERO_BIT_TABLE[n]
  }
  return accum;
}
function countLeadingZeroes8JS(n)
{
  return LEADING_ZERO_BIT_TABLE[n]
}
console.log('countLeadingZeroes32JS', countLeadingZeroes32JS(0b10100010001000100010001000100010))
console.log('countLeadingZeroes32JS', countLeadingZeroes32JS(0b00100010001000100010001000100010))
console.log('countLeadingZeroes32JS', countLeadingZeroes32JS(0b00000010001000100010001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b1010001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b0010001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b0000001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b0000000000100010))
console.log('countLeadingZeroes8JS', countLeadingZeroes8JS(0b10100010))
console.log('countLeadingZeroes8JS', countLeadingZeroes8JS(0b00100010))
console.log('countLeadingZeroes8JS', countLeadingZeroes8JS(0b00000010))
function countLeadingOnes32JS(n) {
  return countLeadingZeroes32JS(~n >>> 0)
}
function countLeadingOnes16JS(n) {
  return countLeadingZeroes16JS(~n >>> 0)
}
function countLeadingOnes8JS(n) {
  return countLeadingZeroes8JS(~n >>> 0)
}
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b00100010001000100010001000100010))
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b11100010001000100010001000100010))
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b11111100001000100010001000100010))
console.log('countLeadingOnes16JS', countLeadingOnes16JS(0b0100001000100010))
console.log('countLeadingOnes16JS', countLeadingOnes16JS(0b1111110000100010))
console.log('countLeadingOnes16JS', countLeadingOnes16JS(0b1111111111000010))
console.log('countLeadingOnes8JS', countLeadingOnes8JS(0b01000010))
console.log('countLeadingOnes8JS', countLeadingOnes8JS(0b11000010))
console.log('countLeadingOnes8JS', countLeadingOnes8JS(0b11111100))
But it appears that ~n >>> 0 doesn't work on 32-bit integers. How to get this working properly?
How to implement unsigned right shift for BigInt in JavaScript?
Unsigned right-shift is difficult to define meaningfully for arbitrary-size integers, so before you (or anyone) can implement it, you'll have to decide how you want it to behave.
That said, considering the rest of this question, I don't see why you would even need this.
I would expect a to equal b in both cases
Why would it? Unsigned right-shift and bit flipping are different operations and produce different results.
I see that 1 << 32 === 0
Nope, 1 << 32 === 1. JavaScript (like x86 CPUs) performs an implicit &31 on the shift amount, so since 32 & 31 === 0, ... << 32 is the same as ... << 0.
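You can see the masking directly:
console.log(1 << 32); // 1 (32 & 31 === 0, so this is 1 << 0)
console.log(1 << 33); // 2 (33 & 31 === 1, so this is 1 << 1)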
How do you implement the equivalent of ~n >>> 0 on a BigInt?
The equivalent of ~n is ~n. (That's not a typo. It's literally the same thing.)
The equivalent of ... >>> 0 is BigInt.asUintN(32, ...). (Note that neither the Number version nor the BigInt version shifts anything, so this doesn't answer your headline question "how to implement USR for BigInt".)
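For example, a 32-bit flip of a BigInt could look like this (a minimal sketch; the name flip32n is mine):
function flip32n(n) {
  // ~n flips all bits of the BigInt; BigInt.asUintN(32, ...) then keeps
  // the low 32 bits and interprets them as an unsigned value.
  return BigInt.asUintN(32, ~n);
}
console.log(flip32n(0b10101010n).toString(2));
// 11111111111111111111111101010101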
it appears that ~n >>> 0 doesn't work on 32-bit integers.
It sure does work. In fact, it only works on 32-bit integers.
The >>> 0 part is completely unnecessary though, you could just drop it.
The reason why this line:
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b00100010001000100010001000100010))
isn't producing the number of leading ones is because the function it's calling is ...Zeroes...; an apparent copy-paste bug.
The reason why countLeadingOnes16JS isn't working correctly is because ~ in JavaScript always flips 32 bits. Since a 16-bit number's 32-bit representation has (at least) 16 leading zeros, those all become ones after flipping, and countLeadingZeroes16JS gets an input that's far bigger than it can handle: LEADING_ZERO_BIT_TABLE[n >>> 8] looks up an element that doesn't exist in the table, because the result of n >>> 8 is a 24-bit number in this case, not an 8-bit number. The solution is to use a mask after flipping; a valid implementation of clo16 might be:
function countLeadingOnes16(n) {
  return countLeadingZeroes16(~n & 0xFFFF);
}
No BigInts and no >>> 0 required.
countLeadingOnes8 is similar.
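For example, following the same pattern and reusing the question's countLeadingZeroes8JS:
function countLeadingOnes8(n) {
  // flip the low 8 bits, mask off the 24 flipped upper bits
  return countLeadingZeroes8JS(~n & 0xFF);
}
console.log(countLeadingOnes8(0b11111100)); // 6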
You may want to read https://en.wikipedia.org/wiki/Two%27s_complement (or some other description of that concept) to understand what's going on with bitwise operations on negative numbers.
You may also want to learn how to debug your own code. There's a range of techniques: for example, you could have:
inserted console.log statements for intermediate results,
or stepped through execution in a debugger,
or simply evaluated small snippets in the console,
any of which would have made it very easy for you to see what's happening on the path from input number to end result.
For anyone else reading this: there's Math.clz32, which is highly efficient because it gets compiled to a machine instruction, so implementing countLeadingZeros by hand is unnecessary and wasteful. For smaller widths, just subtract: function clz8(n) { return Math.clz32(n) - 24; }
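Putting that together, a sketch of clz/clo helpers for all three widths built on Math.clz32 (the names clz8, clo8, etc. are mine, not a standard API):
const clz16 = n => Math.clz32(n & 0xFFFF) - 16;
const clz8  = n => Math.clz32(n & 0xFF) - 24;
const clo32 = n => Math.clz32(~n);     // Math.clz32 applies ToUint32 itself, no >>> 0 needed
const clo16 = n => clz16(~n & 0xFFFF); // mask off the flipped high bits
const clo8  = n => clz8(~n & 0xFF);
console.log(clo32(0b11111100001000100010001000100010)); // 6
console.log(clo16(0b1111111111000010)); // 10
console.log(clo8(0b11111100)); // 6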
I came across this challenge on Edabit and couldn't work out this bitwise operation solution.
notNotNot = (a,b) => !!(a%2 >> b)
The challenge:
//Something which is not true is false, but something which is not not true is true!
//Create a function where given n number of "not", evaluate whether it's true or false.
//Examples:
notNotNot(1, true) ➞ false
// Not true
notNotNot(2, false) ➞ false
// Not not false
notNotNot(6, true) ➞ true
// Not not not not not not true
I did some research on that operator:
Shifts right by pushing copies of the leftmost bit in from the left, and letting the rightmost bits fall off.
That much I reckon I understood (e.g. 5 >> 1 is the same as 0101 >> 1, which evaluates to 0010), but I can't see how that works with a boolean. I know true evaluates to 1 and false to 0.
The function you gave does not satisfy the challenge; right shifting will not do what is asked for. For example, notNotNot(6, true) comes out as false, not true, when put through your function.
Your question is about bitwise operations on a boolean, though. Since operators like >> and << work on integers, JavaScript first converts the boolean value to an integer. So true becomes 1 and false becomes 0. To see this you can shift by zero:
console.log("true is",true >> 0)
console.log("false is", false >> 0)
So a bitwise operation on booleans is really just a bitwise operation on either 0 or 1.
Using !! is a handy way to convert anything into a boolean. It takes anything that would be considered equivalent to false (such as 0, null, undefined or "") and gives back false. Similarly, it takes anything that is truthy (like 14, "hello", [4], {a:1}) and gives back true. !! works because the first exclamation mark gives the 'not' of the expression, which is always true or false; the second exclamation mark then gives the opposite of that (false or true).
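For example:
console.log(!!0, !!"", !!null);      // false false false
console.log(!!14, !!"hello", !![4]); // true true true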
Getting back to the challenge: it wants the not-operator applied 'a' times to the value 'b'. So something like this would work:
function notNotNot(a, b) { return !!(a%2 - b); }
console.log("notNotNot(1, true)",notNotNot(1, true));
console.log("notNotNot(2, false)",notNotNot(2, false));
console.log("notNotNot(6, true)",notNotNot(6, true));
Bitwise operators always convert their operands to an integer. So, 4 >> true is the same as 4 >> 1, which will do a bit shift right by one position:
(decimal) 4 = (binary) 100
(binary) 100 >> 1 = (binary) 010
(binary) 010 = (decimal) 2
console.log(4 >> true);
So, using true or false is just a roundabout way to use 1 or 0.
Overall, the notNotNot function's operation is very simple:
a%2 converts the first number into 0 for even or 1 for odd.
>> b shifts right by either 0 positions for false or 1 position for true.
a is odd (1) and b is false = 1
there are zero shifts to the right, so the number remains the same.
a is odd (1) and b is true = 0
the only set bit (1) is shifted right and discarded.
a is even (0) and b is false = 0
there are zero shifts to the right, so the number remains the same.
a is even (0) and b is true = 0
the base number is 0, which doesn't have any bits set, so shifting right by any amount does not change it.
!!() converts the result to a boolean.
With that said, the solution here is wrong, since notNotNot(2, true) will produce false (a is even and b is true). The expectation is that it will produce true, since !!true = true. The same problem is present for any even number combined with true.
It can be easily fixed by using bitwise XOR instead of right shift:
a is odd (1) and b is false = 1
they don't match, so we get 1
a is odd (1) and b is true = 0
both match, so they cancel out to 0
a is even (0) and b is false = 0
both match, so we get 0
a is even (0) and b is true = 1
they don't match, so we get 1
notNotNot = (a,b) => !!(a%2 ^ b);
console.log("!!true = ", notNotNot(2, true))
console.log("!!!true =", notNotNot(3, true))
console.log("!!false = ", notNotNot(2, false))
console.log("!!!false = ", notNotNot(3, false))
//bonus
console.log("true = ", notNotNot(0, true))
console.log("false = ", notNotNot(0, false))
Just for completeness' sake, in case you want a fully bitwise operation: the modulo operation %2 can be changed to a bitwise AND &1 to get the lowest bit. For even numbers, this yields 0, since you'd be computing
xxx0
&
0001
which is zero. And for odd numbers the same applies but you'd get one as a result:
xxx1
&
0001
So the results of a&1 and a%2 are identical. Furthermore, even though bitwise operations convert the number to a 32-bit signed integer, that doesn't matter here, as the parity is preserved.
//larger than 31 bits
const largeValue = 2**31 + 1;
//larger than 32 bits
const veryLargeValue = 2**32 + 1
console.log("2**31 + 1 =", largeValue);
console.log("2**32 + 1 =", veryLargeValue);
console.log("2**31 + 1 to 32-bit signed integer =", largeValue | 0);
console.log("2**32 + 1 to 32-bit signed integer = ", veryLargeValue | 0);
const isOddModulo = number =>
  console.log(`(${number} % 2) can detect an odd number: ${(number % 2) === 1}`);
const isOddBitwise = number =>
  console.log(`(${number} & 1) can detect an odd number: ${(number & 1) === 1}`);
isOddModulo(largeValue);
isOddBitwise(largeValue);
isOddModulo(veryLargeValue);
isOddBitwise(veryLargeValue);
Firstly, (a,b) => !!(a%2 >> b) does not match the results of the examples. I will break down exactly what it's doing using notNotNot(6, true) ➞ true.
First, a%2 simply divides a by 2 and returns the remainder, so we get 0 for an even number and 1 for an odd number. Here a = 6, so a%2 = 0.
Then 0 >> b shifts one bit off from the right, because, as you said, true evaluates to 1. So we get 0 >> 1 = 0.
Last, !!(0) is simple and can be broken down like so: !0 = true, then !true = false.
So, thinking it through: as long as b is true, we will always get false back. Say we have a = 5, b = true: that evaluates to 5%2 = 1, then 1 >> 1 = 0. You can see that because of the mod (%2) we will only ever have 1 or 0 (a single bit), and true will always shift off the 1 when we have it.
A simple way to look at this problem is as an isEvenOrNot function: a is the number we are checking and b is a boolean for whether it's even (true) or not even (false). This works because every second "not" added brings you back to the original boolean.
So a solution using bitwise could be something like: (a,b) => !!(a&1 ^ b).
I will let you have the fun of breaking down why it works! :)
A little more on how shift works with a boolean: true, as you said, will be 1 and false will be 0. So, as shown in your example, 0101 >> true is the same as 0101 >> 1.
I hope this helps.
I used the following as a reference for bitwise: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Bitwise_Operators
(a%2) //ignore all but the least significant bit (LSB)
(a%2 >> b ) //if TRUE, shifts right, resolves to 0
//if FALSE, no shift, resolves to LSB
// 0 and LSB are both integers so convert to boolean by using logical/boolean NOT
!(a%2 >> b ) //resolves to the boolean which it is NOT
!!(a%2 >> b ) //resolves to the boolean which it is NOT NOT
NB For either boolean,
an even number of NOTs results in the original boolean
an odd number of NOTs results in the opposite boolean
The LSB of any number dictates whether the number is odd or even (0 = even, 1 = odd).
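A quick check of that last point:
console.log(6 & 1, 7 & 1); // 0 1 (even numbers have LSB 0, odd numbers LSB 1)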
I see that your task is:
/* Create a function where given n number of "not",
evaluate whether it's true or false.*/
I don't know why you are writing a notNotNot function; to me, that is not what the task asks. So, according to the task, I made a function not that accepts a number of "not"s and evaluates them.
The first way
function not(n) {
  // subtract the largest even number not exceeding n;
  // the remainder (0 or 1) reflects the parity of the "not" count
  return Boolean(n - 2 * Math.floor(n / 2));
}
The second way, using XOR (^)
function not(n) {
  // n & ~1 clears the lowest bit; XORing with n leaves only that bit
  return Boolean(n ^ (n & ~1));
}
The third way, using Mod (%), pointed out by @VLAZ
function not(n) {
  return Boolean(n % 2);
}
The fourth way, using bitwise And (&)
function not(n) {
  return Boolean(n & 1);
}
Test
not(0)
//> false
not(1)
//> true
not(2)
//> false
not(515)
//> true
Let's analyze the expected behavior first
notNotNot(oddNumber, true) ➞ false
notNotNot(evenNumber, true) ➞ true
notNotNot(oddNumber, false) ➞ true
notNotNot(evenNumber, false) ➞ false
Now analyze (a,b) => !!(a%2 >> b):
a%2 == 0 ➞ even number
a%2 == 1 ➞ odd number
// For a%2 == 0
a%2 >> b ➞ if b is true ➞ 0 >> 1 ➞ 0 // Not working
a%2 >> b ➞ if b is false ➞ 0 >> 0 ➞ 0
// For a%2 == 1
a%2 >> b ➞ if b is true ➞ 1 >> 1 ➞ 0
a%2 >> b ➞ if b is false ➞ 1 >> 0 ➞ 1
That means this does not work: notNotNot(6, true) should be true, but the current solution gives false.
We can use the ^ (XOR) operator to make it correct, like (a,b) => !!(a%2 ^ b).
Now analyze (a,b) => !!(a%2 ^ b):
a%2 == 0 ➞ even number
a%2 == 1 ➞ odd number
// For a%2 == 0
a%2 ^ b ➞ if b is true ➞ 0 ^ 1 ➞ 1 // Now working
a%2 ^ b ➞ if b is false ➞ 0 ^ 0 ➞ 0
// For a%2 == 1
a%2 ^ b ➞ if b is true ➞ 1 ^ 1 ➞ 0
a%2 ^ b ➞ if b is false ➞ 1 ^ 0 ➞ 1
!(a%2 ^ b) uses `!` to turn the int into a boolean, but then the result is reversed
!!(a%2 ^ b) uses `!` again to reverse it back and make it correct.
Example:
notNotNot = (a,b) => !!(a%2 ^ b);
console.log("!!!!true = ", notNotNot(4, true))
console.log("!!!!false = ", notNotNot(4, false))
console.log("!!!true =", notNotNot(3, true))
console.log("!!!false = ", notNotNot(3, false))
I need to do a left shift operation that behaves the exact same way as JavaScript's. The problem is that this:
a << 16
behaves like Clojure's "bit-shift-left" only if a <= 32767:
// JS
32767 << 16 // 2147418112
32768 << 16 // -2147483648
567890 << 16 // -1437466624
;; CLJ
(bit-shift-left 32767 16) ;= 2147418112
(bit-shift-left 32768 16) ;= 2147483648
(bit-shift-left 567890 16) ;= 37217239040
I noticed that, when doing "37431 << 16", JS does something completely different from Clojure at a binary level. While Clojure transforms 1001001000110111 into 10010010001101110000000000000000, JS transforms 1001001000110111 into 1101101110010010000000000000000:
// CLJ, then JS
10 01001 00011 01110 00000 00000 00000
1 10110 11100 10010 00000 00000 00000
I notice this is two's complement, and I notice that JS may be doing this because it cannot (for some reason) use more than 32 bits for this (all bit-level operations done on 32 bits, maybe?), so I wonder if I should apply two's complement to the number if it is above 32767. But then again, I'm a Clojure newbie so I'm not very sure on how to do this.
Firstly, clojure.core/bit-shift-left will treat its left input as a long. You can use clojure.lang.Numbers/shiftLeftInt to shift a number as an int:
(clojure.lang.Numbers/shiftLeftInt 567890 16)
;= -1437466624
This matches the result you get in JavaScript. There's no wrapper around this static method in clojure.core, but you can provide your own.
Secondly, (clojure.lang.Numbers/shiftLeftInt 37431 16) evaluates to -1841889280 in Clojure (1.8.0) and 37431 << 16 evaluates to the same number, -1841889280, in Node (4.4.5), so I don't think there's any problem there. You'll have to apply >>> 0 to your number in JavaScript to get the expected bits in the string representation, though:
// Node 4.4.5
> ((37431 << 16) >>> 0).toString(2)
'10010010001101110000000000000000'
It's good to note that fishing out individual bits with & works fine without the >>> 0 "unsigned cast":
> (37431 << 16) & (1 << 31)
-2147483648
> (37431 << 16) & (1 << 30)
0
> (37431 << 16) & (1 << 29)
0
> (37431 << 16) & (1 << 28)
268435456
And you can compute both string representations in Clojure:
(Integer/toString (clojure.lang.Numbers/shiftLeftInt 37431 16) 2)
;= "-1101101110010010000000000000000"
(Integer/toBinaryString (clojure.lang.Numbers/shiftLeftInt 37431 16))
;= "10010010001101110000000000000000"
Note that in Java bit shift operators take only the rightmost 5 or 6 bits (for ints and longs, respectively) of the right operand into account, so if you try shifting an int or long by more than 31/63 bits, you won't get the result you expect. java.lang.BigInteger has a shiftLeft method that does not have this limitation.
Are there any side effects if I convert a string to a number like below?
var numb = str * 1;
If I check with the below code, it says this is a number:
var str="123";
str=str*1;
if (!isNaN(str))
{
  alert('Hello');
}
Please let me know if there are any concerns with using this method.
When you use parseFloat or parseInt, the conversion is less strict: '1b5' -> 1.
Using 1*number or +number to convert will instead result in NaN when the input is not a valid number, though unlike parseInt, floating-point numbers are parsed correctly.
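For example:
console.log(parseInt('1b5'), +'1b5'); // 1 NaN (parseInt stops at the 'b'; + rejects the whole string)
console.log(parseInt('1.5'), +'1.5'); // 1 1.5 (parseInt stops at the '.'; + keeps the fraction)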
Table covering all possible relevant options.
//Variables     // parseInt   parseFloat   + 1* /1      ~~ |0 ^0 >>0 & <<0   >>>0
var a = '123,', //  123        123          NaN          0                    0
    b = '1.e3', //  1          1000         1000         1000                 1000
    c = '1.21', //  1          1.21         1.21         1                    1
    d = '0020', //  16         20           20           20                   20
    e = '0x10', //  16         0            16           16                   16
    f = '3e9',  //  3          3000000000   3000000000   -1294967296          3000000000
    g = '3e10', //  3          30000000000  30000000000  -64771072            4230196224
    h = 3e25  , //  3          3e+25        3e+25        0                    0
    i = '3e25', //  3          3e+25        3e+25        0                    0
    j = 'a123', //  NaN        NaN          NaN          0                    0
    k = ' 1 ',  //  1          1            1            1                    1
    l = ' ',    //  NaN        NaN          0            0                    0
    m = '.1 ',  //  NaN        0.1          0.1          0                    0
    n = '1. ',  //  1          1            1            1                    1
    o = '1e999',//  1          Infinity     Infinity     0                    0
    p = '1e-999',// 1          0            0            0                    0
    q = false , //  NaN        NaN          0            0                    0
    r = void 0, //  NaN        NaN          NaN          0                    0
    _ = function(){return 1;}, /* Function _ used below */
    s = {valueOf:_},  // NaN   NaN          1            1                    1
    t = {toString:_}; // 1     1            1            1                    1
// Intervals:   // (-1e+20, +1e+20)  (-∞,+∞)  (-∞,+∞)    (-2³¹,+2³¹)          [0, 2³²)
// In FF9 and Chrome 17, Infinity === Math.pow(2, 1024), approx. 1.7976e+308
// In FF9 and Chrome 17, bitwise operators always return 0 after about ±1e+25
Notes on number conversion methods:
The number conversion always fails if the first character, after trimming white-space, is not a number.
parseInt returns an integer representation of the first argument. When the radix (second argument) is omitted, the radix depends on the given input.
0_ = octal (base-8), 0x_ = hexadecimal (base-16). Default: base-10.
parseInt ignores any non-digit characters, even if the argument was actually a number: See h, i.
To avoid unexpected results, always specify the radix, usually 10: parseInt(number, 10).
parseFloat is the most tolerant converter. It always interprets input as base-10, regardless of the prefix (unlike parseInt). For the exact parsing rules, see here.
The following methods will always fail to return a meaningful value if the string contains any non-number characters. (valid examples: 1.e+0 .1e-1)
+n, 1*n, n*1, n/1 and Number(n) are equivalent.
~~n, 0|n, n|0, n^0, 0^n, n&n, n<<0 and n>>0 are equivalent. These are signed bitwise operations, and will always return a numeric value (zero instead of NaN).
n>>>0 is also a bitwise operation, but does not reserve a sign bit. Consequently, only positive numbers can be represented, and the upper bound is 2³² instead of 2³¹.
When passed an object, parseFloat and parseInt will only look at the .toString() method. The other methods first look for .valueOf(), then .toString(). See q - t.
NaN, "Not A Number": typeof NaN === 'number'
NaN !== NaN. Because of this awkwardness, use isNaN() to check whether a value is NaN.
When to use which method?
parseFloat( x ) when you want to get as much numeric results as possible (for a given string).
parseFloat( (x+'').replace(/^[^0-9.-]+/,'') ) when you want even more numeric results.
parseInt( x, 10 ) if you want to get integers.
+x, 1*x .. if you're only concerned about getting true numeric values of an object, rejecting any invalid numbers (as NaN).
~~, 0| .. if you want to always get a numeric result (zero for invalid).
>>>0 if negative numbers cannot occur in your input.
The last two methods have a limited range. Have a look at the footer of the table.
The shortest way to test whether a given parameter is a real number is explained at this answer:
function isNumber(n) {
  return typeof n == 'number' && !isNaN(n - n);
}
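For example:
console.log(isNumber(123));      // true
console.log(isNumber('123'));    // false (a string, even though numeric)
console.log(isNumber(NaN));      // false
console.log(isNumber(Infinity)); // false (Infinity - Infinity is NaN)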
I am receiving and sending a decimal representation of two little endian numbers. I would like to:
shift one variable 8 bits left
OR them
shift a variable number of bits
create two 8-bit numbers representing the first and second halves of the 16-bit number.
JavaScript (according to https://developer.mozilla.org/en/JavaScript/Reference/Operators/Bitwise_Operators) uses big-endian representation when shifting...
Endianness is a bit foreign to me (I am only 90 percent sure that my outlined steps are what I want), so swapping is a bit dizzying. Please help! I only really need to know how to swap the order in an efficient manner. (I can only think of using a for loop on a toString() return value.)
function swap16(val) {
  return ((val & 0xFF) << 8)
       | ((val >> 8) & 0xFF);
}
Explanation:
Let's say that val is, for example, 0xAABB.
Mask val to get the LSB by &ing with 0xFF: result is 0xBB.
Shift that result 8 bits to the left: result is 0xBB00.
Shift val 8 bits to the right: result is 0xAA (the LSB has "dropped off" the right-hand side).
Mask that result to get the LSB by &ing with 0xFF: result is 0xAA.
Combine the results from steps 3 and 5 by |ing them together:
0xBB00 | 0xAA is 0xBBAA.
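Usage:
console.log(swap16(0xAABB).toString(16)); // "bbaa"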
function swap32(val) {
  return ((val & 0xFF) << 24)
       | ((val & 0xFF00) << 8)
       | ((val >> 8) & 0xFF00)
       | ((val >> 24) & 0xFF);
}
Explanation:
Let's say that val is, for example, 0xAABBCCDD.
Mask val to get the LSB by &ing with 0xFF: result is 0xDD.
Shift that result 24 bits to the left: result is 0xDD000000.
Mask val to get the second byte by &ing with 0xFF00: result is 0xCC00.
Shift that result 8 bits to the left: result is 0xCC0000.
Shift val 8 bits to the right: result is 0xAABBCC (the LSB has "dropped off" the right-hand side).
Mask that result to get the second byte by &ing with 0xFF00: result is 0xBB00.
Shift val 24 bits to the right: result is 0xAA (everything except the MSB has "dropped off" the right-hand side).
Mask that result to get the LSB by &ing with 0xFF: result is 0xAA.
Combine the results from steps 3, 5, 7 and 9 by |ing them together:
0xDD000000 | 0xCC0000 | 0xBB00 | 0xAA is 0xDDCCBBAA.
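One caveat worth noting: if the low byte of the input is 0x80 or higher, it lands in the sign-bit position, so the result comes back as a negative signed 32-bit integer; apply >>> 0 when you need the unsigned view:
console.log(swap32(0xAABBCCDD)); // -573785174 (signed view of the bit pattern 0xDDCCBBAA)
console.log((swap32(0xAABBCCDD) >>> 0).toString(16)); // "ddccbbaa"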
Such a function can be used to change endianness in JS:
const changeEndianness = (string) => {
  const result = [];
  let len = string.length - 2;
  while (len >= 0) {
    result.push(string.substr(len, 2));
    len -= 2;
  }
  return result.join('');
}
changeEndianness('AA00FF1234'); /// '3412FF00AA'
Use the << (bit shift) operator. Ex: 1 << 2 == 4.
I really think that the underlying implementation of JavaScript will use whatever endianness the platform it is running on uses. Since you cannot directly access memory in JavaScript, you won't ever have to worry about how numbers are physically represented in memory. Bit-shifting integer values always yields the same result no matter the endianness. You only see a difference when looking at individual bytes in memory using pointers.
Here is a one-liner for arrays to swap between big and little endian (and vice versa). The swapping is done using reverse at the byte level. I guess for large arrays, it is more efficient than looping over a scalar swap function.
function swapbyte(x) {
  return new Float64Array(new Int8Array(x.buffer).reverse().buffer).reverse()
}
// Example
buf = new ArrayBuffer(16); // for 2 float64 numbers
enBig = new Float64Array(buf);
enBig[0] = 3.2073756306779606e-192;
enBig[1] = 2.7604354232023903e+199;
enLittle = swapbyte(enBig)
// two famous numbers are revealed
console.log(enLittle)
// Float64Array [ 6.283185307179586, 2.718281828459045 ]
// swapping again yields the original input
console.log(swapbyte(enLittle))
// Float64Array [ 3.2073756306779606e-192, 2.7604354232023903e+199 ]