How to reverse 32 bits in JavaScript using the "table" optimization?

So I finally figured out how to reverse 8 bits and 16 bits, and possibly 32 bits. However, it doesn't appear to work in the 32-bit case in JavaScript. How do you do this in JavaScript using an optimized table-based approach?
Table Solution in 32 Bits
const BIT_REVERSAL_TABLE = new Array(256)

for (var i = 0; i < 256; ++i) {
  var v = i, r = i, s = 7;
  for (v >>>= 1; v; v >>>= 1) {
    r <<= 1;
    r |= v & 1;
    --s;
  }
  BIT_REVERSAL_TABLE[i] = (r << s) & 0xff;
}

function reverseBits32(n) {
  return (BIT_REVERSAL_TABLE[n & 0xff] << 24) |
    (BIT_REVERSAL_TABLE[(n >>> 8) & 0xff] << 16) |
    (BIT_REVERSAL_TABLE[(n >>> 16) & 0xff] << 8) |
    BIT_REVERSAL_TABLE[(n >>> 24) & 0xff];
}

log32(0b11110010111110111100110010101011)

function log32(n) {
  console.log(`${bits(n, 32)} => ${bits(reverseBits32(n), 32)}`)
}

function bits(n, size) {
  return `0b${n.toString(2).padStart(size, '0')}`
}
Notice the logs are saying:
0b11110010111110111100110010101011 => 0b0-101010110011000010000010110001
When it should say:
0b11110010111110111100110010101011 => 0b11010101001100111101111101001111
Non-table Solutions in 32 Bits (that don't work)
// brev_knuth
function reverseBits32(a) {
  let t
  a = (a << 15) | (a >> 17);
  t = (a ^ (a >> 10)) & 0x003f801f;
  a = (t + (t << 10)) ^ a;
  t = (a ^ (a >> 4)) & 0x0e038421;
  a = (t + (t << 4)) ^ a;
  t = (a ^ (a >> 2)) & 0x22488842;
  a = (t + (t << 2)) ^ a;
  return a;
}

function reverseBits32(x) {
  x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
  x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
  x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
  x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
  return ((x >> 16) | (x << 16));
}
I think these probably don't work because the bit-shifting creates a number larger than 32 bits? Not sure. Any hints would be appreciated.
Non-table Solution in 32 Bits (that works)
function reverseBits32(n) {
  let res = 0;
  for (let i = 0; i < 32; i++) {
    res = (res << 1) + (n & 1);
    n = n >>> 1;
  }
  return res >>> 0;
}

log32(0b11110010111110111100110010101011)

function log32(n) {
  console.log(`${bits(n, 32)} => ${bits(reverseBits32(n), 32)}`)
}

function bits(n, size) {
  return `0b${n.toString(2).padStart(size, '0')}`
}
First, why doesn't my 32-bit table solution work? Then, how can you make this work using the table approach in JavaScript (without using BigInt, and without using "convert to string and reverse" hacks) on a 32-bit integer?
I will note that this "BigInt 64" version in JavaScript works, so yay.
function reverseBits64(n) {
  return (BigInt(BIT_REVERSAL_TABLE[Number(n & 255n)]) << 56n) |
    (BigInt(BIT_REVERSAL_TABLE[Number((n >> 8n) & 255n)]) << 48n) |
    (BigInt(BIT_REVERSAL_TABLE[Number((n >> 16n) & 255n)]) << 40n) |
    (BigInt(BIT_REVERSAL_TABLE[Number((n >> 24n) & 255n)]) << 32n) |
    (BigInt(BIT_REVERSAL_TABLE[Number((n >> 32n) & 255n)]) << 24n) |
    (BigInt(BIT_REVERSAL_TABLE[Number((n >> 40n) & 255n)]) << 16n) |
    (BigInt(BIT_REVERSAL_TABLE[Number((n >> 48n) & 255n)]) << 8n) |
    BigInt(BIT_REVERSAL_TABLE[Number((n >> 56n) & 255n)]);
}

// 0b1111001011111011110011001010101111110010111110111100110010101011 =>
// 0b1101010100110011110111110100111111010101001100111101111101001111

The issue is that in your reverseBits32 the bitwise OR produces a signed 32-bit number, which comes out negative about half the time.
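A quick check of the top byte shows it; the reversed low byte lands in bits 24-31, and << 24 yields a signed 32-bit result:

// 0xab reversed is 0xd5; shifting it into the top byte sets the sign bit
console.log(0xd5 << 24);         // -721420288 (0xd5000000 as a signed 32-bit int)
console.log((0xd5 << 24) >>> 0); //  3573547008 (the same bits read as unsigned)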
One way to deal with it is to use BigInt:
const reverseBits32 = (x) => {
  x = BigInt(x);
  x = (((x & 0xaaaaaaaan) >> 1n) | ((x & 0x55555555n) << 1n));
  x = (((x & 0xccccccccn) >> 2n) | ((x & 0x33333333n) << 2n));
  x = (((x & 0xf0f0f0f0n) >> 4n) | ((x & 0x0f0f0f0fn) << 4n));
  x = (((x & 0xff00ff00n) >> 8n) | ((x & 0x00ff00ffn) << 8n));
  x = ((x >> 16n) | (x << 16n)) & 0xffffffffn;
  return Number(x);
}
Now you're using bit manipulation as you wanted. A similar fix works for the other non-working functions.
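For instance, a minimal sketch of the mask-and-swap version without BigInt: keep the right shifts unsigned (>>>) and coerce the final result with >>> 0:

function reverseBits32(x) {
  x = ((x & 0xaaaaaaaa) >>> 1) | ((x & 0x55555555) << 1);
  x = ((x & 0xcccccccc) >>> 2) | ((x & 0x33333333) << 2);
  x = ((x & 0xf0f0f0f0) >>> 4) | ((x & 0x0f0f0f0f) << 4);
  x = ((x & 0xff00ff00) >>> 8) | ((x & 0x00ff00ff) << 8);
  return ((x >>> 16) | (x << 16)) >>> 0; // >>> 0 reads the 32 bits as unsigned
}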
Table-based solution
const BIT_REVERSAL_TABLE = new Array(256)

for (var i = 0; i < 256; ++i) {
  var v = i, r = i, s = 7;
  for (v >>>= 1; v; v >>>= 1) {
    r <<= 1;
    r |= v & 1;
    --s;
  }
  BIT_REVERSAL_TABLE[i] = (r << s) & 0xff;
}

function reverseBits32(n) {
  return (BIT_REVERSAL_TABLE[n & 0xff] * 2 ** 24) +
    (BIT_REVERSAL_TABLE[(n >>> 8) & 0xff] * 2 ** 16) +
    (BIT_REVERSAL_TABLE[(n >>> 16) & 0xff] * 2 ** 8) +
    BIT_REVERSAL_TABLE[(n >>> 24) & 0xff];
}

log32(0b11110010111110111100110010101011)

function log32(n) {
  console.log(`${bits(n, 32)} => ${bits(reverseBits32(n), 32)}`)
}

function bits(n, size) {
  return `0b${n.toString(2).padStart(size, '0')}`
}
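The multiplications and additions (rather than << and |) keep every intermediate value a plain non-negative Number, so the sign bit never comes into play. An equivalent sketch, if you prefer the bitwise form from the question, is to coerce the final OR to unsigned:

function reverseBits32(n) {
  return ((BIT_REVERSAL_TABLE[n & 0xff] << 24) |
    (BIT_REVERSAL_TABLE[(n >>> 8) & 0xff] << 16) |
    (BIT_REVERSAL_TABLE[(n >>> 16) & 0xff] << 8) |
    BIT_REVERSAL_TABLE[(n >>> 24) & 0xff]) >>> 0; // unsigned 32-bit result
}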

Related

How to correctly translate this block CRC32 from Go to JavaScript?

I have this function in Go:
package main

import (
    "fmt"
    "github.com/snksoft/crc"
)

var crcTable *crc.Table

func init() {
    params := crc.CRC32
    params.FinalXor = 0
    params.ReflectOut = false
    crcTable = crc.NewTable(params)
}

func crcCalculateBlock(data []byte) uint32 {
    if len(data)%4 > 0 {
        panic("block size needs to be a multiple of 4")
    }
    h := crc.NewHashWithTable(crcTable)
    var buf [4]byte
    for i := 0; i < len(data); i += 4 {
        buf[0] = data[i+3]
        buf[1] = data[i+2]
        buf[2] = data[i+1]
        buf[3] = data[i+0]
        h.Update(buf[:])
    }
    return h.CRC32()
}

func main() {
    data := []byte{1, 2, 3, 4, 5, 6, 7, 8}
    crc := crcCalculateBlock([]byte(data))
    fmt.Printf("CRC is 0x%04X\n", crc)
}
The result is: 0x948B389D
I am trying to translate it to JavaScript but I am missing something:
var makeCRCTable = function () {
  var c;
  var crcTable = [];
  for (var n = 0; n < 256; n++) {
    c = n;
    for (var k = 0; k < 8; k++) {
      c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));
    }
    crcTable[n] = c;
  }
  return crcTable;
}

var crc32 = function (u8array) {
  var crcTable = window.crcTable || (window.crcTable = makeCRCTable());
  var crc = 0 ^ (-1);
  for (var i = 0; i < u8array.length; i += 4) {
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i+3]) & 0xFF];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i+2]) & 0xFF];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i+1]) & 0xFF];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i]) & 0xFF];
  }
  return (crc ^ (-1)) >>> 0;
};

console.log(crc32(Uint8Array.from([1,2,3,4,5,6,7,8])).toString(16))
But the result is different: 46e32ed6. Even without the final XOR I get b91cd129.
Can anyone explain how to correct this and why it is wrong?
There are two differences:
1. The Go implementation applies reflect to the result, because ReflectOut differs from ReflectIn (see https://github.com/snksoft/crc/blob/03404db21ad4e7182edf4843b51f6252799f7140/crc.go#L168-L170):
   if t.crcParams.ReflectOut != t.crcParams.ReflectIn {
       ret = reflect(ret, t.crcParams.Width)
   }
2. The FinalXor in Go is 0 (params.FinalXor = 0), while in the JS version it is -1 (return (crc ^ (-1)) >>> 0;).
Here is the updated JS implementation that generates the same hash value.
var makeCRCTable = function () {
  var c;
  var crcTable = [];
  for (var n = 0; n < 256; n++) {
    c = n;
    for (var k = 0; k < 8; k++) {
      c = c & 1 ? 0xedb88320 ^ (c >>> 1) : c >>> 1;
    }
    crcTable[n] = c;
  }
  return crcTable;
};

var crc32 = function (u8array) {
  var crcTable = window.crcTable || (window.crcTable = makeCRCTable());
  var crc = 0 ^ -1;
  for (var i = 0; i < u8array.length; i += 4) {
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i + 3]) & 0xff];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i + 2]) & 0xff];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i + 1]) & 0xff];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i]) & 0xff];
  }
  crc = reverseBits(crc, 32);
  return (crc ^ 0) >>> 0;
};

function reverseBits(integer, bitLength) {
  if (bitLength > 32) {
    throw Error(
      'Bit manipulation is limited to <= 32 bit numbers in JavaScript.'
    );
  }
  let result = 0;
  for (let i = 0; i < bitLength; i++) {
    result |= ((integer >> i) & 1) << (bitLength - 1 - i);
  }
  return result >>> 0; // >>> 0 makes it unsigned even if bit 32 (the sign bit) was set
}

console.log(crc32(Uint8Array.from([1, 2, 3, 4, 5, 6, 7, 8])).toString(16));
Note: the reverseBits function is copied from this answer: https://stackoverflow.com/a/67064710/1369400
Thanks to Zeke Lu, I now prefer this version:
function rev(x) {
  x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
  x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
  x = ((x >> 4) & 0x0F0F0F0F) | ((x & 0x0F0F0F0F) << 4);
  x = ((x >> 8) & 0x00FF00FF) | ((x & 0x00FF00FF) << 8);
  x = (x >>> 16) | (x << 16);
  return x >>> 0;
}

var makeCRCTable = function () {
  var c;
  var crcTable = [];
  for (var n = 0; n < 256; n++) {
    c = n;
    for (var k = 0; k < 8; k++) {
      c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));
    }
    crcTable[n] = c;
  }
  return crcTable;
}

var crc32 = function (u8array) {
  var crcTable = window.crcTable || (window.crcTable = makeCRCTable());
  var crc = 0 ^ (-1);
  for (var i = 0; i < u8array.length; i += 4) {
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i+3]) & 0xFF];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i+2]) & 0xFF];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i+1]) & 0xFF];
    crc = (crc >>> 8) ^ crcTable[(crc ^ u8array[i]) & 0xFF];
  }
  return rev(crc);
};

console.log(crc32(Uint8Array.from([1,2,3,4,5,6,7,8])).toString(16))
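As a quick sanity check against the Go output quoted in the question (0x948B389D), this should print true:

// assumes makeCRCTable, crc32 and rev from the snippet above are loaded
const hash = crc32(Uint8Array.from([1, 2, 3, 4, 5, 6, 7, 8]));
console.log(hash.toString(16).toUpperCase() === '948B389D');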

How to understand Ternary JavaScript expression?

I'm not good with JS yet, especially with the "?" operator, and I'm trying to understand the following code.
Maybe it could be written in a friendlier way?
So, if I don't want to use this operator, what would the code look like?
JavaScript code:
function(t) {
    for (var e, r = t.length, n = "", i = 0, s = 0, a = 0; i < r; )
        (s = t.charCodeAt(i)) < 128 ? (n += String.fromCharCode(s),
        i++) : s > 191 && s < 224 ? (a = t.charCodeAt(i + 1),
        n += String.fromCharCode((31 & s) << 6 | 63 & a),
        i += 2) : (a = t.charCodeAt(i + 1),
        e = t.charCodeAt(i + 2),
        n += String.fromCharCode((15 & s) << 12 | (63 & a) << 6 | 63 & e),
        i += 3);
    return n
}
It seems like this is the same as:
function(t) {
    for (var e, r = t.length, n = "", i = 0, s = 0, a = 0; i < r; )
        if ((s = t.charCodeAt(i)) < 128) {
            n += String.fromCharCode(s);
            i++;
        } else if (s > 191 && s < 224) {
            a = t.charCodeAt(i + 1);
            n += String.fromCharCode((31 & s) << 6 | 63 & a);
            i += 2;
        } else {
            a = t.charCodeAt(i + 1);
            e = t.charCodeAt(i + 2);
            n += String.fromCharCode((15 & s) << 12 | (63 & a) << 6 | 63 & e);
            i += 3;
        }
    return n
}
This is an overly complicated expression involving multiple ternary operators.
I think it should be simplified.
A ternary operator behaves like an if but it is an expression that returns one value out of 2 options, depending on the first operand.
For example:
operand ? valueIfTrue : valueIfFalse is a ternary expression that returns valueIfTrue if operand is "truthy" and returns valueIfFalse if operand is "falsey".
You can substitute any expression in place of valueIfTrue and valueIfFalse and this way you can get really complicated expressions, sometimes unnecessarily complex.
As an example of how expressions get complicated, let's consider again: operand ? valueIfTrue : valueIfFalse
If we then replace valueIfTrue with another ternary operator, e.g. myOtherOperand ? myOtherIfTrue : myOtherIfFalse then the original expression becomes:
operand ? myOtherOperand ? myOtherIfTrue : myOtherIfFalse: valueIfFalse
This is not a nice way to write it; it can be improved just by adding parentheses:
operand ? (myOtherOperand ? myOtherIfTrue : myOtherIfFalse) : valueIfFalse
It can be improved again by formatting like this:
operand
  ? myOtherOperand
    ? myOtherIfTrue   // if both operand and myOtherOperand are true
    : myOtherIfFalse  // if operand is true and myOtherOperand is false
  : valueIfFalse      // this will be returned if operand is false
This shows that code formatting is essential for understanding it. But of course the first step is to have simple code. Anyway, here is how I would format the code in the question so it is easier to understand:
function myFunction(t) {
  for (var e, r = t.length, n = "", i = 0, s = 0, a = 0; i < r; ) {
    (s = t.charCodeAt(i)) < 128
      ? (n += String.fromCharCode(s), i++)
      : s > 191 && s < 224
        ? (a = t.charCodeAt(i + 1), n += String.fromCharCode((31 & s) << 6 | 63 & a), i += 2)
        : (a = t.charCodeAt(i + 1),
           e = t.charCodeAt(i + 2),
           n += String.fromCharCode((15 & s) << 12 | (63 & a) << 6 | 63 & e),
           i += 3); // end of ternary operators
  }
  return n;
}
Now it is clearer, and we can see statements separated by commas inside the two ternary operators that are used. Commas are used to execute multiple things in the same expression; e.g. (n += String.fromCharCode(s), i++) appends to n and also increments i. In this case, it is better to move those out of the ternary and into a normal if statement like this:
function myFunction(t) {
  for (var e, r = t.length, n = "", i = 0, s = 0, a = 0; i < r;) {
    const firstCheck = (s = t.charCodeAt(i)) < 128;
    const secondCheck = s > 191 && s < 224;
    if (firstCheck) {
      n += String.fromCharCode(s);
      i++;
    } else if (secondCheck) {
      // This is originally: (a = t.charCodeAt(i + 1), n += String.fromCharCode((31 & s) << 6 | 63 & a), i += 2);
      a = t.charCodeAt(i + 1);
      n += String.fromCharCode((31 & s) << 6 | 63 & a);
      i += 2;
    } else {
      // this is originally:
      // (a = t.charCodeAt(i + 1),
      //  e = t.charCodeAt(i + 2),
      //  n += String.fromCharCode((15 & s) << 12 | (63 & a) << 6 | 63 & e),
      //  i += 3);
      a = t.charCodeAt(i + 1);
      e = t.charCodeAt(i + 2);
      n += String.fromCharCode((15 & s) << 12 | (63 & a) << 6 | 63 & e);
      i += 3;
    }
  }
  return n;
}
So basically break it down and take it step by step to understand it, then you can change it because you understand it.
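As a side note, the function looks like a hand-rolled UTF-8 decoder for a "binary string" whose character codes are UTF-8 bytes (this is an assumption about its intent, not something stated in the question). If that is the case, a modern equivalent for comparison would be:

function decodeUtf8String(t) {
  // one byte per character code, then let TextDecoder do the UTF-8 work
  const bytes = Uint8Array.from(t, ch => ch.charCodeAt(0));
  return new TextDecoder('utf-8').decode(bytes);
}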

how to generate SHA256 with 32 bytes for a given string?

I am looking for JavaScript code for generating a SHA256 with 32 bytes for a given string.
This code is supposed to work inside a browser, so it should work without any dependencies.
So far I have found the following function, which gives a 64-byte SHA256:
/**
* Secure Hash Algorithm (SHA256)
* http://www.webtoolkit.info/
* Original code by Angel Marin, Paul Johnston
**/
function SHA256(s){
var chrsz = 8;
var hexcase = 0;
function safe_add (x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
function S (X, n) { return ( X >>> n ) | (X << (32 - n)); }
function R (X, n) { return ( X >>> n ); }
function Ch(x, y, z) { return ((x & y) ^ ((~x) & z)); }
function Maj(x, y, z) { return ((x & y) ^ (x & z) ^ (y & z)); }
function Sigma0256(x) { return (S(x, 2) ^ S(x, 13) ^ S(x, 22)); }
function Sigma1256(x) { return (S(x, 6) ^ S(x, 11) ^ S(x, 25)); }
function Gamma0256(x) { return (S(x, 7) ^ S(x, 18) ^ R(x, 3)); }
function Gamma1256(x) { return (S(x, 17) ^ S(x, 19) ^ R(x, 10)); }
function core_sha256 (m, l) {
var K = new Array(0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0xFC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x6CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2);
var HASH = new Array(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19);
var W = new Array(64);
var a, b, c, d, e, f, g, h, i, j;
var T1, T2;
m[l >> 5] |= 0x80 << (24 - l % 32);
m[((l + 64 >> 9) << 4) + 15] = l;
for ( var i = 0; i<m.length; i+=16 ) {
a = HASH[0];
b = HASH[1];
c = HASH[2];
d = HASH[3];
e = HASH[4];
f = HASH[5];
g = HASH[6];
h = HASH[7];
for ( var j = 0; j<64; j++) {
if (j < 16) W[j] = m[j + i];
else W[j] = safe_add(safe_add(safe_add(Gamma1256(W[j - 2]), W[j - 7]), Gamma0256(W[j - 15])), W[j - 16]);
T1 = safe_add(safe_add(safe_add(safe_add(h, Sigma1256(e)), Ch(e, f, g)), K[j]), W[j]);
T2 = safe_add(Sigma0256(a), Maj(a, b, c));
h = g;
g = f;
f = e;
e = safe_add(d, T1);
d = c;
c = b;
b = a;
a = safe_add(T1, T2);
}
HASH[0] = safe_add(a, HASH[0]);
HASH[1] = safe_add(b, HASH[1]);
HASH[2] = safe_add(c, HASH[2]);
HASH[3] = safe_add(d, HASH[3]);
HASH[4] = safe_add(e, HASH[4]);
HASH[5] = safe_add(f, HASH[5]);
HASH[6] = safe_add(g, HASH[6]);
HASH[7] = safe_add(h, HASH[7]);
}
return HASH;
}
function str2binb (str) {
var bin = Array();
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz) {
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (24 - i % 32);
}
return bin;
}
function Utf8Encode(string) {
string = string.replace(/\r\n/g,'\n');
var utftext = '';
for (var n = 0; n < string.length; n++) {
var c = string.charCodeAt(n);
if (c < 128) {
utftext += String.fromCharCode(c);
}
else if((c > 127) && (c < 2048)) {
utftext += String.fromCharCode((c >> 6) | 192);
utftext += String.fromCharCode((c & 63) | 128);
}
else {
utftext += String.fromCharCode((c >> 12) | 224);
utftext += String.fromCharCode(((c >> 6) & 63) | 128);
utftext += String.fromCharCode((c & 63) | 128);
}
}
return utftext;
}
function binb2hex (binarray) {
var hex_tab = hexcase ? '0123456789ABCDEF' : '0123456789abcdef';
var str = '';
for(var i = 0; i < binarray.length * 4; i++) {
str += hex_tab.charAt((binarray[i>>2] >> ((3 - i % 4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((3 - i % 4)*8 )) & 0xF);
}
return str;
}
s = Utf8Encode(s);
return binb2hex(core_sha256(str2binb(s), s.length * chrsz));
}
The result you're receiving actually is 32 bytes.
Let's have a look at an example:
console.log(SHA256("test"));
returns:
9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08
What we see is a string representation of 32 bytes, which takes 64 individual characters. However, a group of two characters is actually one byte, ranging from 0 up to 255 in decimal.
9f (hex) -> 10011111 (binary) -> 159 (decimal)
86 (hex) -> 10000110 (binary) -> 134 (decimal)
and so on.
If you want to store the 64-character sequence as 'real' 32 bytes, you need to convert each pair of characters to an 8-bit unsigned integer value (0-255) and put those in a JavaScript typed array.
For example ...
let hexString = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08";
let unsignedIntegers = hexString.match(/[\dA-F]{2}/gi).map(function(s) {
return parseInt(s, 16);
});
let typedArray = new Uint8Array(unsignedIntegers);
console.log(typedArray);
... gives a 32-byte Uint8Array.
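As an aside, modern browsers can also produce the 32 raw bytes directly with the built-in Web Crypto API (no dependencies, but it is asynchronous and requires a secure context such as HTTPS or localhost). A minimal sketch:

async function sha256Bytes(str) {
  const data = new TextEncoder().encode(str);                  // UTF-8 encode the input
  const digest = await crypto.subtle.digest('SHA-256', data);  // ArrayBuffer of 32 bytes
  return new Uint8Array(digest);
}

sha256Bytes('test').then(bytes => console.log(bytes)); // Uint8Array(32) [159, 134, ...]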

Integrating Old Javascript Functions into React-Native

I'm currently using this package of Javascript on my website. And I need to be able to use it in the app that I'm trying to build (I'm VERY new to react-native--a PHP, javascript developer here).
How do I integrate it? Do I have to convert it or is it possible to just include it and call the functions? I have no idea. Sigh. I've searched but I didn't find anything that answered my question--though that could be because I'm new to RN and don't know what the heck I'm doing :(.
Thanks in advance for your advice!
To use the functions, I currently just call them from JavaScript like this (the second line):
var strINeedHashed = "Hash Me Please";
var hash = hex_sha256(strINeedHashed);
Here's the JS I need access to:
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
function safe_add (x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
function S (X, n) {return ( X >>> n ) | (X << (32 - n));}
function R (X, n) {return ( X >>> n );}
function Ch(x, y, z) {return ((x & y) ^ ((~x) & z));}
function Maj(x, y, z) {return ((x & y) ^ (x & z) ^ (y & z));}
function Sigma0256(x) {return (S(x, 2) ^ S(x, 13) ^ S(x, 22));}
function Sigma1256(x) {return (S(x, 6) ^ S(x, 11) ^ S(x, 25));}
function Gamma0256(x) {return (S(x, 7) ^ S(x, 18) ^ R(x, 3));}
function Gamma1256(x) {return (S(x, 17) ^ S(x, 19) ^ R(x, 10));}
function core_sha256 (m, l) {
var K = new Array(0x428A2F98,0x71374491,0xB5C0FBCF,0xE9B5DBA5,0x3956C25B,0x59F111F1,0x923F82A4,0xAB1C5ED5,0xD807AA98,0x12835B01,0x243185BE,0x550C7DC3,0x72BE5D74,0x80DEB1FE,0x9BDC06A7,0xC19BF174,0xE49B69C1,0xEFBE4786,0xFC19DC6,0x240CA1CC,0x2DE92C6F,0x4A7484AA,0x5CB0A9DC,0x76F988DA,0x983E5152,0xA831C66D,0xB00327C8,0xBF597FC7,0xC6E00BF3,0xD5A79147,0x6CA6351,0x14292967,0x27B70A85,0x2E1B2138,0x4D2C6DFC,0x53380D13,0x650A7354,0x766A0ABB,0x81C2C92E,0x92722C85,0xA2BFE8A1,0xA81A664B,0xC24B8B70,0xC76C51A3,0xD192E819,0xD6990624,0xF40E3585,0x106AA070,0x19A4C116,0x1E376C08,0x2748774C,0x34B0BCB5,0x391C0CB3,0x4ED8AA4A,0x5B9CCA4F,0x682E6FF3,0x748F82EE,0x78A5636F,0x84C87814,0x8CC70208,0x90BEFFFA,0xA4506CEB,0xBEF9A3F7,0xC67178F2);
var HASH = new Array(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19);
var W = new Array(64);
var a, b, c, d, e, f, g, h, i, j;
var T1, T2;
/* append padding */
m[l >> 5] |= 0x80 << (24 - l % 32);
m[((l + 64 >> 9) << 4) + 15] = l;
for ( var i = 0; i<m.length; i+=16 ) {
a = HASH[0]; b = HASH[1]; c = HASH[2]; d = HASH[3]; e = HASH[4]; f = HASH[5]; g = HASH[6]; h = HASH[7];
for ( var j = 0; j<64; j++) {
if (j < 16) W[j] = m[j + i];
else W[j] = safe_add(safe_add(safe_add(Gamma1256(W[j - 2]), W[j - 7]), Gamma0256(W[j - 15])), W[j - 16]);
T1 = safe_add(safe_add(safe_add(safe_add(h, Sigma1256(e)), Ch(e, f, g)), K[j]), W[j]);
T2 = safe_add(Sigma0256(a), Maj(a, b, c));
h = g; g = f; f = e; e = safe_add(d, T1); d = c; c = b; b = a; a = safe_add(T1, T2);
}
HASH[0] = safe_add(a, HASH[0]); HASH[1] = safe_add(b, HASH[1]); HASH[2] = safe_add(c, HASH[2]); HASH[3] = safe_add(d, HASH[3]); HASH[4] = safe_add(e, HASH[4]); HASH[5] = safe_add(f, HASH[5]); HASH[6] = safe_add(g, HASH[6]); HASH[7] = safe_add(h, HASH[7]);
}
return HASH;
}
function str2binb (str) {
var bin = Array();
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (24 - i%32);
return bin;
}
function binb2hex (binarray) {
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for (var i = 0; i < binarray.length * 4; i++) {
str += hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8+4)) & 0xF) + hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8 )) & 0xF);
}
return str;
}
function hex_sha256(s){return binb2hex(core_sha256(str2binb(s),s.length * chrsz));}
If that is working JavaScript, you can just add it to your project's code.
The fastest (if a bit ugly) way is to include that code in the file where you use it. A more proper way would be to make a separate file for it and add ES6 exports for the functions that you need to access, for example:
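A minimal sketch of that approach (sha256.js and HashScreen.js are hypothetical file names):

// sha256.js -- paste the functions from the question above this line, unchanged,
// then export only the entry point the app needs:
export { hex_sha256 };

// HashScreen.js (or any other React Native module)
import { hex_sha256 } from './sha256';

const strINeedHashed = 'Hash Me Please';
const hash = hex_sha256(strINeedHashed);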
React Native uses JavaScript, so in theory, if your function is written in JavaScript, you should have no issues integrating it into your React Native project. Simply add your old function to the React Native project and call it as you normally would :)

Failed to decode base64 in javascript

I receive an id_token as part of my current href. It is encoded in base64. I try to decode it using atob(extractedIdToken), but get the following error:
Failed to execute 'atob' on 'Window': The string to be decoded is not correctly encoded
When I copy the extracted id_token and paste it into an online decoding site, it decodes correctly. Do you have any suggestions?
I always use this to decode and encode Base64; try it:
var Base64 = {
_keyStr: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",
encode: function (e) {
var t = "";
var n, r, i, s, o, u, a;
var f = 0;
e = Base64._utf8_encode(e);
while (f < e.length) {
n = e.charCodeAt(f++);
r = e.charCodeAt(f++);
i = e.charCodeAt(f++);
s = n >> 2;
o = (n & 3) << 4 | r >> 4;
u = (r & 15) << 2 | i >> 6;
a = i & 63;
if (isNaN(r)) {
u = a = 64
} else if (isNaN(i)) {
a = 64
}
t = t + this._keyStr.charAt(s) + this._keyStr.charAt(o) + this._keyStr.charAt(u) + this._keyStr.charAt(a)
}
return t
},
decode: function (e) {
var t = "";
var n, r, i;
var s, o, u, a;
var f = 0;
e = e.replace(/[^A-Za-z0-9+/=]/g, "");
while (f < e.length) {
s = this._keyStr.indexOf(e.charAt(f++));
o = this._keyStr.indexOf(e.charAt(f++));
u = this._keyStr.indexOf(e.charAt(f++));
a = this._keyStr.indexOf(e.charAt(f++));
n = s << 2 | o >> 4;
r = (o & 15) << 4 | u >> 2;
i = (u & 3) << 6 | a;
t = t + String.fromCharCode(n);
if (u != 64) {
t = t + String.fromCharCode(r)
}
if (a != 64) {
t = t + String.fromCharCode(i)
}
}
t = Base64._utf8_decode(t);
return t
},
_utf8_encode: function (e) {
e = e.replace(/rn/g, "n");
var t = "";
for (var n = 0; n < e.length; n++) {
var r = e.charCodeAt(n);
if (r < 128) {
t += String.fromCharCode(r)
} else if (r > 127 && r < 2048) {
t += String.fromCharCode(r >> 6 | 192);
t += String.fromCharCode(r & 63 | 128)
} else {
t += String.fromCharCode(r >> 12 | 224);
t += String.fromCharCode(r >> 6 & 63 | 128);
t += String.fromCharCode(r & 63 | 128)
}
}
return t
},
_utf8_decode: function (e) {
var t = "";
var n = 0;
var r = c1 = c2 = 0;
while (n < e.length) {
r = e.charCodeAt(n);
if (r < 128) {
t += String.fromCharCode(r);
n++
} else if (r > 191 && r < 224) {
c2 = e.charCodeAt(n + 1);
t += String.fromCharCode((r & 31) << 6 | c2 & 63);
n += 2
} else {
c2 = e.charCodeAt(n + 1);
c3 = e.charCodeAt(n + 2);
t += String.fromCharCode((r & 15) << 12 | (c2 & 63) << 6 | c3 & 63);
n += 3
}
}
return t
}
};
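For example, a quick usage check:

console.log(Base64.encode('Hello')); // "SGVsbG8="
console.log(Base64.decode('SGVsbG8=')); // "Hello"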
Thanks all for your answers. I ended up moving the Base64 decoding process to the Java backend, using the Java package java.util.Base64.
In my case, the issue was that the encoded string should have been treated as separate segments. Split the string on the separator "." (or whatever applies in your case) and decode each part separately. This resolved my issue and I was able to decode using window.atob(). Hope this helps.
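For what it's worth, if the id_token is a JWT (which the dot-separated segments suggest), the segments are base64url encoded, which is why a plain atob() can fail: base64url uses '-' and '_' instead of '+' and '/' and may omit the '=' padding. A minimal sketch for decoding just the payload segment:

function decodeJwtPayload(idToken) {
  const payload = idToken.split('.')[1];                        // header.payload.signature
  const base64 = payload.replace(/-/g, '+').replace(/_/g, '/'); // base64url -> base64
  const padded = base64.padEnd(Math.ceil(base64.length / 4) * 4, '=');
  return JSON.parse(atob(padded));
}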
