I have a very large number represented as binary in JavaScript:
var largeNumber = '11010011010110100001010011111010010111011111000010010111000111110011111011111000001100000110000011000001100111010100111010101110100010001011010101110011110000011000001100000110000011001001100000110000011000001100000110000111000011100000110000011000001100000110000011000010101100011001110101101001100110100100000110000011000001100000110001001101011110110010001011010001101011010100011001001110001110010100111011011111010000110001110010101010001111010010000101100001000001100001011000011011111000011110001110111110011111111000100011110110101000101100000110000011000001100000110000011010011101010110101101001111101001010010111101011000011101100110010011001001111101'
When I convert it to decimal with parseInt(largeNumber, 2), it gives me 1.5798770299367407e+199, but when I try to convert it back to binary:
parseInt(`1.5798770299367407e+199`, 2)
it returns 1 (which I think is related to how parseInt rounds values) when I was expecting to see my original binary representation of largeNumber. Can you explain this behavior? And how can I convert it back to the original state in JavaScript?
EDIT: This question is the result of an experiment where I was playing around with storing and transferring large amounts of boolean data. The largeNumber is a representation of a collection [true,true,false,true ...] of boolean values which has to be shared between client, client worker, and server.
As noted in Andrew L.'s answer, and by several commenters, your largeNumber exceeds what JavaScript can represent as an integer in an ordinary number without loss of precision—which is 9.007199254740991e+15.
If you want to work with larger integers, you will need a BigInt library or other special-purpose code.
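You can see that cutoff for yourself in the console:
console.log(Number.MAX_SAFE_INTEGER); // 9007199254740991, i.e. 2^53 - 1
// beyond it, adjacent integers become indistinguishable:
console.log(9007199254740992 === 9007199254740993); // true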
Below is some code demonstrating how to convert arbitrarily large positive integers between different base representations, showing that the exact decimal representation of your largeNumber is
15 798 770 299 367 407 029 725 345 423 297 491 683 306 908 462 684 165 669 735 033 278 996 876 231 474 309 788 453 071 122 111 686 268 816 862 247 538 905 966 252 886 886 438 931 450 432 740 640 141 331 094 589 505 960 171 298 398 097 197 475 262 433 234 991 526 525
function parseBigInt(bigint, base) {
  //convert bigint string to array of digit values
  for (var values = [], i = 0; i < bigint.length; i++) {
    values[i] = parseInt(bigint.charAt(i), base);
  }
  return values;
}

function formatBigInt(values, base) {
  //convert array of digit values to bigint string
  for (var bigint = '', i = 0; i < values.length; i++) {
    bigint += values[i].toString(base);
  }
  return bigint;
}

function convertBase(bigint, inputBase, outputBase) {
  //takes a bigint string and converts it to a different base
  var inputValues = parseBigInt(bigint, inputBase),
      outputValues = [], //output array, little-endian/lsd order
      remainder,
      len = inputValues.length,
      pos = 0,
      i;
  while (pos < len) { //while digits are left in the input array
    remainder = 0; //set remainder to 0
    for (i = pos; i < len; i++) {
      //long integer division of the input values by the output base;
      //the remainder is then pushed onto the output array
      remainder = inputValues[i] + remainder * inputBase;
      inputValues[i] = Math.floor(remainder / outputBase);
      remainder -= inputValues[i] * outputBase;
      if (inputValues[i] == 0 && i == pos) {
        pos++;
      }
    }
    outputValues.push(remainder);
  }
  outputValues.reverse(); //transform to big-endian/msd order
  return formatBigInt(outputValues, outputBase);
}
var largeNumber =
'1101001101011010000101001111101001011101' +
'1111000010010111000111110011111011111000' +
'0011000001100000110000011001110101001110' +
'1010111010001000101101010111001111000001' +
'1000001100000110000011001001100000110000' +
'0110000011000001100001110000111000001100' +
'0001100000110000011000001100001010110001' +
'1001110101101001100110100100000110000011' +
'0000011000001100010011010111101100100010' +
'1101000110101101010001100100111000111001' +
'0100111011011111010000110001110010101010' +
'0011110100100001011000010000011000010110' +
'0001101111100001111000111011111001111111' +
'1000100011110110101000101100000110000011' +
'0000011000001100000110100111010101101011' +
'0100111110100101001011110101100001110110' +
'0110010011001001111101';
//convert largeNumber from base 2 to base 10
var largeIntDecimal = convertBase(largeNumber, 2, 10);
function groupDigits(bigint) { //3-digit grouping
  return bigint.replace(/(\d)(?=(\d{3})+$)/g, "$1 ");
}
//show decimal result in console:
console.log(groupDigits(largeIntDecimal));
//converting back to base 2:
var restoredOriginal = convertBase(largeIntDecimal, 10, 2);
//check that it matches the original:
console.log(restoredOriginal === largeNumber);
If you're looking to transfer a large amount of binary data, you should use BigInt. BigInt allows you to represent an arbitrary number of bits.
// parse large number from string
let numString = '1101001101011010000101001111101001011101111100001001'
// as number
let num = BigInt('0b' + numString)
// now num holds large number equivalent to numString
console.log(num) // 3718141639515913n
// print as base 2
console.log(num.toString(2)) // 1101001101011010000101001111101001011101111100001001
Helper functions
// some helper functions

// get kth bit from right
function getKthBit(x, k) {
  return (x & (1n << k)) >> k;
}

// set kth bit from right to 1
function setKthBit(x, k) {
  return (1n << k) | x;
}

// set kth bit from right to 0
function unsetKthBit(x, k) {
  return x & ~(1n << k);
}
getKthBit(num, 0n);
// 1n
getKthBit(num, 5n);
// 0n
setKthBit(num, 1n).toString(2);
// 1101001101011010000101001111101001011101111100001011
setKthBit(num, 4n).toString(2);
// 1101001101011010000101001111101001011101111100011001
unsetKthBit(num, 0n).toString(2);
// 1101001101011010000101001111101001011101111100001000
unsetKthBit(num, 3n).toString(2);
// 1101001101011010000101001111101001011101111100000001
For convenience you may want to add this to BigInt.prototype if you're going to be serializing back to the client; then you can read it back as a string. Otherwise you will get "Uncaught TypeError: Do not know how to serialize a BigInt", because for some reason JSON doesn't know how to serialize one of JavaScript's own types.
Object.defineProperty(BigInt.prototype, "toJSON", {
  get() {
    "use strict";
    return () => this.toString() + 'n';
  }
});
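With that property defined, stringifying works (a quick check):
console.log(JSON.stringify({ big: 123n })); // {"big":"123n"}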
BigInt is built into JS:
function parseBigInt(str, base = 10) {
  base = BigInt(base)
  var bigint = BigInt(0)
  for (var i = 0; i < str.length; i++) {
    // digit value: '0'-'9' map to 0-9, 'a'-'z' map to 10-35
    var code = str[str.length - 1 - i].charCodeAt(0) - 48
    if (code >= 10) code -= 39
    bigint += base ** BigInt(i) * BigInt(code)
  }
  return bigint
}
parseBigInt('11010011010110100001010011111010010111011111000010010111000111110011111011111000001100000110000011000001100111010100111010101110100010001011010101110011110000011000001100000110000011001001100000110000011000001100000110000111000011100000110000011000001100000110000011000010101100011001110101101001100110100100000110000011000001100000110001001101011110110010001011010001101011010100011001001110001110010100111011011111010000110001110010101010001111010010000101100001000001100001011000011011111000011110001110111110011111111000100011110110101000101100000110000011000001100000110000011010011101010110101101001111101001010010111101011000011101100110010011001001111101', 2)
// 15798770299367407029725345423297491683306908462684165669735033278996876231474309788453071122111686268816862247538905966252886886438931450432740640141331094589505960171298398097197475262433234991526525n
When you convert it back to binary, you can't parse the decimal string as base 2; that's wrong. With this line:
parseInt(`1.5798770299367407e+199`, 2)
you're telling JS to parse base-10 text as base 2! parseInt reads the leading "1", stops at the "." (which isn't a valid base-2 digit), and returns 1. Note also that parseFloat takes no radix argument, so the call below silently reads the whole binary string as one huge base-10 number:
var largeNumber = '11010011010110100001010011111010010111011111000010010111000111110011111011111000001100000110000011000001100111010100111010101110100010001011010101110011110000011000001100000110000011001001100000110000011000001100000110000111000011100000110000011000001100000110000011000010101100011001110101101001100110100100000110000011000001100000110001001101011110110010001011010001101011010100011001001110001110010100111011011111010000110001110010101010001111010010000101100001000001100001011000011011111000011110001110111110011111111000100011110110101000101100000110000011000001100000110000011010011101010110101101001111101001010010111101011000011101100110010011001001111101';
//intLN is the large number as a Number
var intLN = parseFloat(largeNumber); //parseFloat ignores any radix; the string is read as base 10
console.log(intLN);
var largeNumberConvert = intLN.toString(2); //convert back to binary with toString(radix)
console.log(largeNumberConvert);
To turn a number back into binary you call toString(radix) on it, so:
var binaryRepresentation = integerFormOfLargeNumber.toString(2);
If you look at the output, you see:
Infinity
Infinity
Since your binary string is quite large, reading it as a base-10 number gives a value with over 600 digits, far beyond Number.MAX_VALUE (about 1.8e308), so intLN overflows to Infinity, and Infinity.toString(2) is just the string "Infinity". Trying to re-parse that:
parseInt(largeNumberConvert, 10);
returns NaN, because "Infinity" contains no digits. Even parseInt(largeNumber, 2), which stays finite at about 1.58e199, keeps only the 53 most significant bits, so a plain Number cannot round-trip a value of this size.
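You can verify the precision loss directly (a quick console check, using largeNumber from above):
var n = parseInt(largeNumber, 2); // finite, about 1.58e199, but rounded to 53 significant bits
var back = n.toString(2);
console.log(back.length === largeNumber.length); // true: the magnitude survives
console.log(back === largeNumber);               // false: the low bits are gone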
Related
I need to get the bytes of a big integer in JavaScript.
I've tried a couple of big integer libraries, but the one that actually offered this function wouldn't work.
I am not quite sure how to implement this myself, given a string containing a large number, which is generally what the libraries give access to.
Is there a library that works and allows doing this?
Or is it actually not hard, and I am just missing something?
I was googling for a quick and elegant solution to this problem in JavaScript, but all I found was a conversion method based on an intermediate hex string, which is surely suboptimal, and that code didn't work for me anyway. So I implemented my own code and wanted to post it as an answer to my own question, but found this one.
Explanation
First of all, I will answer the opposite question, since it is more illustrative.
Reading BigInteger from a bytes array
What is an array of bytes for us? It is a number in a base-256 numeral system, which we want to convert to the more convenient base-10 (decimal) system.
For instance, let's take an array of bytes
[AA][BB][CC][DD] (1 byte is 8 bits or 2 hexadecimal digits).
Depending on the side we start from (see https://en.wikipedia.org/wiki/Endianness), we can read it as:
(AA*1 + BB*256 + CC*256^2 + DD*256^3) in little-endian
or (DD*1 + CC*256 + BB*256^2 + AA*256^3) in big-endian.
Let's use little-endian here. So, our number encoded by the array [AA][BB][CC][DD] is:
AA + BB*256 + CC*256^2 + DD*256^3
= 170 + 187*256 + 204*65536 + 221*16777216
= 170 + 47872 + 13369344 + 3707764736
= 3721182122
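A one-line sanity check of that arithmetic (the value still fits in an ordinary JavaScript number):
console.log(0xAA + 0xBB * 256 + 0xCC * 256 ** 2 + 0xDD * 256 ** 3); // 3721182122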
Writing BigInteger to a bytes array
For writing a number into an array of bytes we have to perform the opposite operation, i.e. given a number in the decimal system, find all of its digits in the base-256 numeral system. Let's take the same number: 3721182122
To find its least significant byte (https://en.wikipedia.org/wiki/Bit_numbering#Least_significant_byte), we just divide it by 256: the remainder is that byte, and the quotient carries the higher digits. So we divide the quotient by 256 again, and so on, until the quotient reaches 0:
3721182122 = 14535867*256 + 170
14535867 = 56780*256 + 187
56780 = 221*256 + 204
221 = 0*256 + 221
So, the result is [170][187][204][221] in decimal, [AA][BB][CC][DD] in hex.
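The same long-division loop in plain JavaScript, for numbers that still fit in an ordinary Number (toBytesLE is a name I made up for this sketch):
function toBytesLE(n) {
  // repeated division by 256; the remainders are the bytes, least significant first
  var bytes = [];
  while (n > 0) {
    bytes.push(n % 256);
    n = Math.floor(n / 256);
  }
  return bytes;
}
console.log(toBytesLE(3721182122)); // [170, 187, 204, 221]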
Solution in JavaScript
Now, here is this algorithm implemented in Node.js with the big-integer library.
const BigInteger = require('big-integer');

const zero = BigInteger(0);
const one = BigInteger(1);
const n256 = BigInteger(256);

function fromLittleEndian(bytes) {
  let result = zero;
  let base = one;
  bytes.forEach(function (byte) {
    result = result.add(base.multiply(BigInteger(byte)));
    base = base.multiply(n256);
  });
  return result;
}

function fromBigEndian(bytes) {
  return fromLittleEndian(bytes.reverse());
}

function toLittleEndian(bigNumber) {
  let result = new Uint8Array(32); // fixed 32-byte buffer; enlarge if your numbers are bigger
  let i = 0;
  while (bigNumber.greater(zero)) {
    result[i] = bigNumber.mod(n256);
    bigNumber = bigNumber.divide(n256);
    i += 1;
  }
  return result;
}

function toBigEndian(bigNumber) {
  return toLittleEndian(bigNumber).reverse();
}
console.log('Reading BigInteger from an array of bytes');
let bigInt = fromLittleEndian(new Uint8Array([170, 187, 204, 221]));
console.log(bigInt.toString());
console.log('Writing BigInteger to an array of bytes');
let bytes = toLittleEndian(bigInt);
console.log(bytes);
Benchmark
I have written a small benchmark for this approach. Anybody is welcome to modify it for their own conversion method and compare it with mine.
https://repl.it/repls/EvenSturdyEquipment
Set "i" to be your BigInt's value. You can see the bytes by looking at "a" after running this:
i=11111n;n=1500;a=new Uint8Array(n);while(i>0){a[--n]=Number(i&255n);i>>=8n}
You can also extract the BigInt back out from the Uint8Array:
a.reduce((p,c)=>BigInt(p)*256n+BigInt(c))
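Wrapped into small reusable functions (a sketch; the names are mine, not from any library):
// big-endian encode into a fixed-size buffer
function bigIntToBytes(value, length) {
  var out = new Uint8Array(length);
  for (var i = length - 1; i >= 0 && value > 0n; i--) {
    out[i] = Number(value & 255n);
    value >>= 8n;
  }
  return out;
}
// big-endian decode
function bytesToBigInt(bytes) {
  return bytes.reduce((acc, b) => acc * 256n + BigInt(b), 0n);
}
console.log(bytesToBigInt(bigIntToBytes(11111n, 4))); // 11111n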
I've got a version that works with the browser's built-in BigInt:
const big0 = BigInt(0)
const big1 = BigInt(1)
const big8 = BigInt(8)
function bigToUint8Array(big: bigint): Uint8Array {
  if (big < big0) {
    // two's-complement encode negative values
    const bits: bigint = (BigInt(big.toString(2).length) / big8 + big1) * big8
    const prefix1: bigint = big1 << bits
    big += prefix1
  }
  let hex = big.toString(16)
  if (hex.length % 2) {
    hex = '0' + hex
  }
  const len = hex.length / 2
  const u8 = new Uint8Array(len)
  var i = 0
  var j = 0
  while (i < len) {
    u8[i] = parseInt(hex.slice(j, j + 2), 16)
    i += 1
    j += 2
  }
  return u8
}
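A quick check of the happy path (value chosen arbitrarily):
console.log(bigToUint8Array(BigInt(0xAABB))) // Uint8Array(2) [ 170, 187 ]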
I've got a BigDecimal implementation that works for sending & receiving bytes as an arbitrary-precision big decimal: https://jackieli.dev/posts/bigint-to-uint8array/
For instance, 10100 would be inverted to 01011; 010 would be inverted to 101; and 101 would be inverted to 010.
The problem is that when I use ~5, it becomes -6, because JS uses 32-bit signed integers for its bitwise operations.
How do I invert an unsigned, arbitrary-bit binary number?
I would like to create a function that takes in this unsigned arbitrary-bit binary number and returns its inverted form (101 -> 010).
I want to convert the string 101 to 010.
You can create a function that flips the required number of digits like so
var flipbits = function (v, digits) {
  return ~v & (Math.pow(2, digits) - 1);
}
console.log(flipbits(5, 3)); // outputs 2
console.log(flipbits(2, 3)); // outputs 5
note - this isn't "arbitrary number of bits" ... it's 32 at best
working with strings, you can have arbitrary bit length (this one won't work without transpiling in Internet Exploder)
var flipbits = str => str.split('').map(b => (1 - b).toString()).join('');
console.log(flipbits('010')); // outputs 101
console.log(flipbits('101')); // outputs 010
The above in ES5
var flipbits = function flipbits(str) {
  return str.split('').map(function (b) {
    return (1 - b).toString();
  }).join('');
};
console.log(flipbits('010')); // outputs 101
console.log(flipbits('101')); // outputs 010
Inverting the bits is always the same operation, but to view the signed result as an unsigned integer you can use the unsigned right-shift operator >>> with a shift of 0:
console.log(~5); // -6
console.log(~5>>>0); // 4294967290
If you want to make sure you only flip the significant bits in the number, you'll instead want to mask it via an & operation with a mask covering only the significant bits you need. Here is an example of the significant-bit masking:
function invert(x) {
  let significant = 0;
  let test = x;
  while (test > 1) {
    test = test >> 1;
    significant = (significant << 1) | 1;
  }
  return (~x) & significant;
}
console.log(invert(5)); // 2 (010 in binary)
In JavaScript, ~ (tilde) does this:
-(N+1)
So your current operation is correct but not what you are looking for:
~5
-(5 + 1)
-6
Reference
You can use String.prototype.replace() with RegExp /(0)|(1)/
function toggle(n) {
  return n.replace(/(0)|(1)/g, function(m, p1, p2) { return p2 ? 0 : 1 });
}

console.log(
  toggle("10100"),
  toggle("101")
)
You can use a function that converts numbers to binary as a string, flips the 0s and 1s, then converts back to a number. It seems to give the expected results, but looks pretty ugly:
function flipBits(n) {
  return parseInt(n.toString(2).split('').map(bit => 1 - bit).join(''), 2)
}

[0,1,2,3,4,5,123,987679876,987679875].forEach(
  n => console.log(n + ' -> ' + flipBits(n))
);
Maybe there's a mix of bitwise operators to do the same thing.
Edit
It seems you're working with strings, so just split, flip and join again:
// Requires support for ECMAScript ed 5.1 for map and
// ECMAScript 2015 for arrow functions
function flipStringBits(s) {
  return s.split('').map(c => 1 - c).join('');
}

['0','010','110','10011100110'].forEach(
  v => console.log(v + ' -> ' + flipStringBits(v))
);
Basic function for ECMAScript ed 3 (works everywhere, even IE 4).
function flipStringBitsEd3(s) {
  var b = s.split('');
  for (var i = 0, iLen = b.length; i < iLen; i++) {
    b[i] = 1 - b[i];
  }
  return b.join('');
}
// Tests
console.log('Ed 3 version');
var data = ['0', '010', '110', '10011100110'];
for (var i = 0, iLen = data.length; i < iLen; i++) {
  console.log(data[i] + ' ->\n' + flipStringBitsEd3(data[i]) + '\n');
}
Works with any length string. The ed 3 version will work everywhere and is probably faster than functions using newer features.
You can create a mask for the number's width and take XOR to flip the bits.
/**
 * @param {number} num
 * @return {number}
 */
var findComplement = function(num) {
  let len = num.toString(2).length;
  let mask = Math.pow(2, len) - 1;
  return num ^ mask;
};
console.log(findComplement(5));
For integer values, you can use this JavaScript function to reverse the order of the bits in a given integer and return the resulting new integer, as described below:
function binaryReverse(value) {
  return parseInt(value.toString(2).split('').reverse().join(''), 2);
}
console.log(binaryReverse(25));
console.log(binaryReverse(19));
Output:
19
25
I encountered this curious phenomenon while trying to implement a UUID generator in JavaScript.
Basically, in JavaScript, if I generate a large list of random numbers with the built-in Math.random() on Node 4.2.2:
var records = {};
var l;
for (var i = 0; i < 1e6; i += 1) {
  l = String(Math.random()).length;
  if (records[l]) {
    records[l] += 1;
  } else {
    records[l] = 1;
  }
}
console.log(records);
The string lengths show a strange pattern:
{ '12': 1,
'13': 11,
'14': 65,
'15': 663,
'16': 6619,
'17': 66378,
'18': 611441,
'19': 281175,
'20': 30379,
'21': 2939,
'22': 282,
'23': 44,
'24': 3 }
I thought this was a quirk of V8's random number generator, but a similar pattern appears in Python 3.4.3:
12 : 2
13 : 5
14 : 64
15 : 672
16 : 6736
17 : 66861
18 : 610907
19 : 280945
20 : 30455
21 : 3129
22 : 224
And the Python code is as follows:
import random

random.seed()
records = {}
for i in range(0, 1000000):
    n = random.random()
    l = len(str(n))
    try:
        records[l] += 1
    except KeyError:
        records[l] = 1
for i in sorted(records):
    print(i, ':', records[i])
The pattern for lengths 18 and below is expected: if a random number should have, say, 20 digits but its last digit happens to be 0, it effectively has only 19 digits. If the random number generator is good, the probability of that happening is roughly 1/10.
But why is the pattern reversed for 19 and beyond?
I guess this is related to the binary representation of floating point numbers, but I can't figure out exactly why.
The reason is indeed related to floating point representation. A floating point number representation has a maximum number of (binary) digits it can represent, and a limited exponent value range. Now when you print this out without using scientific notation, you might in some cases need to have some zeroes after the decimal point before the significant digits start to follow.
You can visualize this effect by printing those random numbers which have the longest length when converted to string:
var records = {};
var l, r;
for (var i = 0; i < 1e6; i += 1) {
  r = Math.random();
  l = String(r).length;
  if (l === 23) {
    console.log(r);
  }
  if (records[l]) {
    records[l] += 1;
  } else {
    records[l] = 1;
  }
}
This prints only the 23-long strings, and you will get numbers like these:
0.000007411070483631654
0.000053944830052166104
0.000018188989763578967
0.000029525788901141325
0.000009613635131744402
0.000005937417234758158
0.000021099748521158368
Notice the zeroes before the first non-zero digit. These are actually not stored in the number part of a floating point representation, but implied by its exponent part.
If you were to take out the leading zeroes, and then make a count:
var records = {};
var l, r, s;
for (var i = 0; i < 1e6; i += 1) {
  r = Math.random();
  s = String(r).replace(/^[0\.]+/, '');
  l = s.length;
  if (records[l]) {
    records[l] += 1;
  } else {
    records[l] = 1;
  }
}
... you'll get results which are less strange.
However, you will see some irregularity that is due to how JavaScript converts tiny numbers to strings: when they get too small, scientific notation is used in the string representation. You can see this with the following script (not sure if every browser has the same breaking point, so you may need to play a bit with the number):
var i = 0.00000123456789012345678;
console.log(String(i), String(i/10));
This gives me the following output:
0.0000012345678901234567 1.2345678901234568e-7
So very small numbers get a more fixed string length as a result, quite often 22 characters, while in the non-scientific notation a length of 23 is common. This also influences the second script I provided: length 22 will get more hits than 23.
It should be noted that JavaScript does not switch to scientific notation when converting to a string in binary representation:
var i = 0.1234567890123456789e-120;
console.log(i.toString(2));
The above will print a string of over 450 binary digits!
It's because some of the values are like this:
0.00012345...
And thus they're longer.
It is said that
all numbers in JavaScript are 64-bit floating point numbers.
I am wondering whether numbers always use 64 bits in memory.
I have a data structure like this (in C-style code)
{
  int x; // [0-9]
  int y; // [0-9]
  int d; // [0-3]
}
x and y will always be within the range [0-9], and the only possible values of d are 0, 1, 2, 3.
If I store them as 3 separate numbers, will the structure use 64 bits * 3 = 192 bits = 24 bytes?
If so, I would like to store it in one number, x * 100 + y * 10 + d, which should use only 64 bits (8 bytes). Is this better, without considering CPU usage?
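A sketch of that packing and its inverse (pack/unpack are hypothetical names; the ranges are as stated above):
function pack(x, y, d) {
  return x * 100 + y * 10 + d; // e.g. pack(2, 3, 3) === 233
}
function unpack(n) {
  return {
    x: Math.floor(n / 100),
    y: Math.floor(n / 10) % 10,
    d: n % 10
  };
}
console.log(unpack(pack(7, 5, 0))); // { x: 7, y: 5, d: 0 }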
And I also considered a string solution:
x.toString() + y.toString() + d.toString();
because x, y, and d are all less than 10, each should be only 1 character, i.e. 16 bits.
So the structure becomes 16 bits * 3 = 48 bits = 6 bytes.
Is this the most storage-optimized solution?
And how about the storage in MongoDB? If I store the data structure into MongoDB, is it the same situation?
I wrote a snippet to test the storage in Mongo.
The final structure includes 3 instances of the structure above, and the total count is 66816.
I stored them into 3 separate databases:
layout-full (I lost an 's'): an array of 3 objects {x: valueX, y: valueY, d: valueD}
layouts-int: xydxydxyd (in decimal), e.g. 233250750 means {x:2,y:3,d:3},{x:2,y:5,d:0},{x:7,y:5,d:0}
layouts-str: the int above converted to a 2-char string: String.fromCharCode(i >> 16) + String.fromCharCode(i & 0xFFFF) (see the decode sketch below)
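For reference, decoding that 2-char string back into the integer looks like this (a minimal sketch; decodePair is a name I made up):
function decodePair(s) {
  // undo String.fromCharCode(i >> 16) + String.fromCharCode(i & 0xFFFF)
  return (s.charCodeAt(0) << 16) | s.charCodeAt(1);
}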
And the result is...
> show dbs
layout-full 0.03125GB
layouts-int 0.03125GB
layouts-str 0.03125GB
But the details are...
collection in layout-full
"size" : 8017920,
"avgObjSize" : 120,
"storageSize" : 11182080,
collection in layouts-int
"size" : 2138112,
"avgObjSize" : 32,
"storageSize" : 5591040,
collection in layouts-str
"size" : 2405396,
"avgObjSize" : 36.000299329501914,
"storageSize" : 5591040,
From these results, I found that the int storage is the most space-saving method.
I also did this:
> db.tiny.save({})
> db.tiny.stats().avgObjSize
24
> db.tiny.remove()
> db.tiny.save({l:null})
> db.tiny.stats().avgObjSize
28
> db.tiny.remove()
> db.tiny.save({l:[{x:null,y:null,d:null},{x:null,y:null,d:null},{x:null,y:null,d:null}]})
> db.tiny.stats().avgObjSize
84
So the _id will use 24 bytes, and the key part, {l:, will use 28 - 24 = 4 bytes.
And you can find that an integer uses 32 - 28 = 4 bytes, so integers less than 2^31 seem to be stored as 32-bit integers in MongoDB.
Also, in the string solution, a 2-char string uses 36 - 28 = 8 bytes, which is exactly the value I guessed.
And for the full-structure solution, from the last tiny-db test you can see that a structure without data uses 84 bytes, so the data uses 120 - 84 = 36 bytes = 9 * 4 bytes. And I have exactly 9 integers in my final data structure (triple x, y, d). This also confirms that integers are stored as 32-bit integers.
And why does the empty structure use 84 bytes?
Through some more experiments, I found that an array or an empty JSON object uses 4 bytes, and a key uses 4 bytes per character.
So, the empty structure actually is
{ // 1 object = 4 bytes
  '_id': ObjectId('0123456789abcdef01234567'), // 3-char key + 12-byte id = 24 bytes
  'l': [ // 1-char key + 1 array = 8 bytes
    {'x': null, 'y': null, 'd': null}, // 1 object + 3 keys = 16 bytes
    {'x': null, 'y': null, 'd': null}, // 1 object + 3 keys = 16 bytes
    {'x': null, 'y': null, 'd': null}  // 1 object + 3 keys = 16 bytes
  ]
}
The result is 4 + 24 + 8 + 16 * 3 = 84 bytes.
I hope my experiments are useful for others.
This will store the three numbers in a single-character string. The only way to do better is to use typed arrays, but I'm not sure that's an option here.
function showMeSomeMagic(x, y, d) {
  // we assume that:
  // - x, y are integers in the range [0 9]
  // - d is an integer in the range [0 3]
  // if not, add the appropriate checks/casting/coercing
  var n = (d << 8) + (y << 4) + x;
  // return a String made of a single Unicode code point in
  // the range [0xE000 0xE399], i.e. inside the Unicode BMP PUA
  return String.fromCharCode(n + 0xE000);
}

function showMeSomeInverseMagic(s) {
  // we assume that s is a String created by showMeSomeMagic
  var n = s.charCodeAt(0) - 0xE000;
  var x = n & 15;
  var y = (n >> 4) & 15;
  var d = (n >> 8) & 15;
  return { x: x, y: y, d: d };
}
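A quick round trip with one sample triple (values chosen arbitrarily):
var s = showMeSomeMagic(7, 5, 0);
console.log(s.charCodeAt(0).toString(16)); // "e057"
console.log(showMeSomeInverseMagic(s));    // { x: 7, y: 5, d: 0 }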
edit: updated according to the OP comment
I'm trying to write a function that is the inverse of the function below.
So that I can take the output of the function foo and generate its input parameter.
I'm not entirely sure if it's possible.
function foo(str) {
  var hexMap = {
    "0": 0, "1": 1, "2": 2, "3": 3,
    "4": 4, "5": 5, "6": 6, "7": 7,
    "8": 8, "9": 9, "A": 10, "B": 11,
    "C": 12, "D": 13, "E": 14, "F": 15
  };
  var charList = [];
  str = str.toUpperCase();
  for (var i = 0; i < str.length; i += 2) {
    charList.push(hexMap[str.charAt(i)] * 16 + hexMap[str.charAt(i + 1)]);
  }
  charList.splice(0, 8);
  charList.splice(0, 123);
  var sliceEnd = charList[0] + charList[1] * 256;
  charList.splice(0, 4);
  charList = charList.slice(0, sliceEnd);
  return charList;
}
Your function takes in a string that is hopefully a hexadecimal string using only the characters [0-9a-fA-F]. Then it makes an array where every two hex characters are converted to a decimal integer between 0 and 255. Then the function immediately throws away the first 131 elements of this array. This means that the first 262 characters of your string have no impact on the output of the function (the first 262 characters can be anything).
Then there is this line:
var sliceEnd = charList[0] + charList[1] * 256;
sliceEnd becomes a number between 0 and 65535 (the maximum size of the resulting array), based on the characters at indices 262-265 in the input string: two two-digit hex values converted to two integers, where the value at position 264 is multiplied by 256 and added to the value at position 262.
The resulting array then contains the integers converted with the same method from the characters at positions 270 through 270 + sliceEnd*2.
MSN is correct that this function is not 1-to-1 and therefore not mathematically invertible, but you can write a function which, given an array of fewer than 65536 integers between 0 and 255, generates an input string for foo which will give back that array. Specifically, the following function does just that:
function bar(arr) {
  var sliceEnd = arr.length;
  var temp = '00' + (sliceEnd & 255).toString(16);
  var first = temp.substring(temp.length - 2);
  temp = '00' + Math.floor(sliceEnd / 256).toString(16);
  var second = temp.substring(temp.length - 2);
  // 262 filler zeroes, then the two length bytes, then 4 more filler hex digits
  var str = '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' + first + second + '0000';
  for (var i = 0; i < arr.length; i++) {
    temp = '00' + arr[i].toString(16);
    str += temp.substring(temp.length - 2);
  }
  return str;
}
This gives you the property that foo(bar(x)) === x (if x is an array of less than 65536 integers between 0 and 255 as stated previously), but not the property bar(foo(x)) === x because as MSN pointed out that property is impossible to achieve for your function.
E.g. bar([17,125,12,11]) gives the string:
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000117d0c0b"
which, if you give it as input to your function foo, gets you back the original array [17,125,12,11]. But there are many other inputs that yield the same output: at least 268 of those 0's can be any other of the values in [0-9a-fA-F], and the 04 can be anything greater than 04, which means 22^268 * (255 - 4) different strings, multiplied by a bit more, since that only takes into account either lower case or capitals, but not both, when multiplying by 255 - 4. Regardless, 22^268 is a ridiculous number of inputs for one output anyway, and that's ignoring the fact that there are infinitely many strings which begin with the string above and have any other hexadecimal string appended to them, all of which give the same output from foo because of the sliceEnd variable.
That function is not a 1 to 1 function, i.e., many inputs will generate the same output.