How to decode byte of array data using javascript [duplicate] - javascript

How do I convert a byte array into a string?
I have found these functions that do the reverse:
// Converts an ASCII string into an array of 8-bit binary strings.
// Non-ASCII characters (code >= 128) trigger an alert and are stored as -1.
function string2Bin(s) {
  var bits = [];
  for (var idx = 0; idx < s.length; idx++) {
    var code = s.charCodeAt(idx);
    if (code < 128) {
      bits[idx] = dec2Bin(code);
    } else {
      alert(s.charAt(idx) + ' is NOT an ASCII character');
      bits[idx] = -1;
    }
  }
  return bits;
}

// Renders a number as an 8-character binary string (its low 8 bits).
function dec2Bin(d) {
  var digits = [];
  for (var bit = 0; bit < 8; bit++) {
    digits.unshift(d % 2);
    d = Math.floor(d / 2);
  }
  return digits.join('');
}
But how do I get the functions working the other way?
Thanks.
Shao

You need to parse each octet back to number, and use that value to get a character, something like this:
// Decodes an array of binary strings (e.g. "01100110") back into text:
// each entry is parsed base-2 and mapped to its character.
function bin2String(array) {
  var chars = [];
  for (var idx = 0; idx < array.length; idx++) {
    chars.push(String.fromCharCode(parseInt(array[idx], 2)));
  }
  return chars.join("");
}
bin2String(["01100110", "01101111", "01101111"]); // "foo"
// Using your string2Bin function to test:
bin2String(string2Bin("hello world")) === "hello world";
Edit: Yes, your current string2Bin can be written more concisely:
// Converts a string into an array of (unpadded) binary strings,
// one entry per character.
function string2Bin(str) {
  var out = [];
  for (var pos = 0, len = str.length; pos < len; pos++) {
    out.push(str.charCodeAt(pos).toString(2));
  }
  return out;
}
But by looking at the documentation you linked, I think that the setBytesParameter method expects that the blob array contains the decimal numbers, not a bit string, so you could write something like this:
// Converts a string into an array of character codes (decimal numbers),
// the format the setBytesParameter blob expects.
function string2Bin(str) {
  var codes = [];
  for (var k = 0; k < str.length; k++) {
    codes[codes.length] = str.charCodeAt(k);
  }
  return codes;
}

// Rebuilds a string from an array of character codes in one call.
function bin2String(array) {
  return String.fromCharCode.apply(String, array);
}
string2Bin('foo'); // [102, 111, 111]
bin2String(string2Bin('foo')) === 'foo'; // true

ES6 update
Now, string 'foo' also equals
String.fromCharCode(...[102, 111, 111])
Original answer
Simply apply your byte array to String.fromCharCode. For example
String.fromCharCode.apply(null, [102, 111, 111])
equals 'foo'.
MDN docs here.
Caveat: works for arrays shorter than 65535 - MDN docs here.

Try the new Text Encoding API:
// Build a typed-array view over some raw bytes.
let bytesView = new Uint8Array([104, 101, 108, 108, 111]);
console.log(bytesView);

// Bytes → string. The decoder defaults to UTF-8 (of which ASCII is a subset);
// other encodings can be passed to the constructor.
let str = new TextDecoder().decode(bytesView);
console.log(str);

// String → bytes. TextEncoder always emits UTF-8.
let bytes2 = new TextEncoder().encode(str);

// Round trip: both views hold the same bytes.
console.log(bytes2);
console.log(bytesView);

This should work:
String.fromCharCode(...array);
Or
String.fromCodePoint(...array)

That string2Bin can be written even more succinctly, and without any loops, to boot!
// Maps every character of `str` to its char code — no explicit loop needed.
function string2Bin ( str ) {
  return str.split("").map(function (character) {
    return character.charCodeAt(0);
  });
}

String to byte array: "FooBar".split('').map(c => c.charCodeAt(0));
Byte array to string: [102, 111, 111, 98, 97, 114].map(c => String.fromCharCode(c)).join('');

I think this would be more efficient:
// Turns an array of binary strings into text. Safe for large inputs:
// String.fromCharCode is applied in 0xffff-sized chunks, because passing
// too many arguments in one call overflows the argument/stack limit.
function toBinString (arr) {
  var bytes = new Uint8Array(arr.map(function (bits) { return parseInt(bits, 2); }));
  var chunksize = 0xffff;
  var pieces = [];
  for (var n = 0; n * chunksize < bytes.length; n++) {
    pieces.push(String.fromCharCode.apply(null, bytes.subarray(n * chunksize, (n + 1) * chunksize)));
  }
  return pieces.join('');
}

Even if I'm a bit late, I thought it would be interesting for future users to share some one-liners implementations I did using ES6.
One thing that I consider important depending on your environment and/or what you will do with the data is to preserve the full byte value. For example, (5).toString(2) will give you 101, but the complete binary conversion is in reality 00000101, and that's why you might need to create a leftPad implementation to fill the string byte with leading zeros. But you may not need it at all, like other answers demonstrated.
If you run the below code snippet, you'll see the first output being the conversion of the abc string to a byte array and right after that the re-transformation of said array to its corresponding string.
// Decode: parse each binary string base-2 and map the value to a character.
const binArrayToString = array =>
  array.map(bits => String.fromCharCode(parseInt(bits, 2))).join('')

// Ensure a binary string is exactly 8 characters wide (leading zeros).
const leftPad = str => (str.length < 8 ? str.padStart(8, '0') : str)

// Encode: char code of every character, rendered as a zero-padded byte.
const stringToBinArray = str =>
  str.split('').map(c => leftPad(c.charCodeAt(0).toString(2)))

const array = stringToBinArray('abc')
console.log(array)
console.log(binArrayToString(array))

If your array is encoded in UTF-8 and you can't use the TextDecoder API because it is not supported on IE:
You can use the FastestSmallestTextEncoderDecoder polyfill recommended by the Mozilla Developer Network website;
You can use this function also provided at the MDN website:
// Decodes a UTF-8 byte array into a string without TextDecoder (IE fallback,
// from MDN). The lead byte's range selects the sequence length; the ++nIdx
// side effects inside the ternary chain consume the continuation bytes,
// folding 6 bits from each (value - 128) into the code point.
// NOTE(review): 5- and 6-byte forms are not valid UTF-8 today, and code
// points above 0xFFFF would need surrogate-pair handling — String.fromCharCode
// truncates to 16 bits. Verify inputs stay within the BMP.
function utf8ArrayToString(aBytes) {
var sView = "";
for (var nPart, nLen = aBytes.length, nIdx = 0; nIdx < nLen; nIdx++) {
nPart = aBytes[nIdx];
sView += String.fromCharCode(
nPart > 251 && nPart < 254 && nIdx + 5 < nLen ? /* six bytes */
/* (nPart - 252 << 30) may be not so safe in ECMAScript! So...: */
(nPart - 252) * 1073741824 + (aBytes[++nIdx] - 128 << 24) + (aBytes[++nIdx] - 128 << 18) + (aBytes[++nIdx] - 128 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 247 && nPart < 252 && nIdx + 4 < nLen ? /* five bytes */
(nPart - 248 << 24) + (aBytes[++nIdx] - 128 << 18) + (aBytes[++nIdx] - 128 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 239 && nPart < 248 && nIdx + 3 < nLen ? /* four bytes */
(nPart - 240 << 18) + (aBytes[++nIdx] - 128 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 223 && nPart < 240 && nIdx + 2 < nLen ? /* three bytes */
(nPart - 224 << 12) + (aBytes[++nIdx] - 128 << 6) + aBytes[++nIdx] - 128
: nPart > 191 && nPart < 224 && nIdx + 1 < nLen ? /* two bytes */
(nPart - 192 << 6) + aBytes[++nIdx] - 128
: /* nPart < 127 ? */ /* one byte */
nPart
);
}
return sView;
}
// Demo: a UTF-8 byte sequence containing multi-byte characters.
let str = utf8ArrayToString([50,72,226,130,130,32,43,32,79,226,130,130,32,226,135,140,32,50,72,226,130,130,79]);
// Must show 2H₂ + O₂ ⇌ 2H₂O
console.log(str);

If you are using node.js you can do this:
yourByteArray.toString('base64');

Too late to answer but if your input is in form of ASCII bytes, then you could try this solution:
// Joins the character for every ASCII code in rArr into a single string.
function convertArrToString(rArr){
  var pieces = [];
  // Step 1: one character per element
  rArr.forEach(function (code) {
    pieces[pieces.length] = String.fromCharCode(code);
  });
  // Step 2: concatenate them all
  return pieces.join("");
}

// Interprets the array's characters as one hexadecimal number.
function convertArrToHexNumber(rArr){
  return parseInt(convertArrToString(rArr), 16);
}

I had some decrypted byte arrays with padding characters and other stuff I didn't need, so I did this (probably not perfect, but it works for my limited use)
// Keeps only printable ASCII (codes 32..127) from the decoded bytes; padding
// and control characters are replaced with '' and thus dropped.
// NOTE(review): `res` must be an array-like of byte values, defined elsewhere.
var junk = String.fromCharCode.apply(null, res).split('').map(char => char.charCodeAt(0) <= 127 && char.charCodeAt(0) >= 32 ? char : '').join('');

> const stringToBin = (str) => [...str].map(item=>item.charCodeAt())
> undefined
> stringToBin('hello')
> (5) [104, 101, 108, 108, 111]
> const binToString = (array) => String.fromCharCode(...array)
> undefined
> binToString(stringToBin('hello'))
> 'hello'

What you are looking for is String.fromCharCode
What you want to do is loop through the array of bytes (represented as integers), create the string equivalent and add it to the result:
// Builds a string by appending the character for each byte value in turn.
function bin2String(array) {
  var text = "";
  array.forEach(function (code) {
    text += String.fromCharCode(code);
  });
  return text;
}
console.log(bin2String([116, 104, 101, 32, 114, 101, 115, 117, 108, 116]));
// Same conversion expressed with map + join.
// (Despite its name, this one also decodes a byte array into a string.)
function string2Bin(array) {
  return array.map(function (byte) { return String.fromCharCode(byte); }).join("");
}
console.log(string2Bin([116, 104, 101, 32, 114, 101, 115, 117, 108, 116]));

UPDATE
#rosberg-linhares posted best solution so far to handle UTF8.
Didn't find any solution that would work with UTF-8 characters. String.fromCharCode is good until you meet 2 byte character.
For example word Hüser can come over the wire in form of arraybuffer as [0x48,0xc3,0xbc,0x73,0x65,0x72] (e.g. through websocket connection)
But if you go through it with String.fromCharCode you will have the garbled text "HÃ¼ser", as each byte will be converted to a char separately, and letter ü is encoded in two bytes.
Solution
Currently I'm using following solution:
// Left-pads a hex byte string to two digits.
function pad(n) { return n.length < 2 ? '0' + n : n; }

// Decodes UTF-8 bytes by building a %XX%XX escape sequence and letting
// decodeURIComponent perform the actual multi-byte interpretation.
function decodeUtf8(data) {
  const escaped = data.map(byte => '%' + pad(byte.toString(16))).join('');
  return decodeURIComponent(escaped);
}

The simplest solution I've found is:
// NOTE(review): atob() decodes a *base64 string*, not a byte array — this
// only works if byteArray already holds (or coerces to) base64 text; verify.
var text = atob(byteArray);

Related

Compress alphanumeric characters with a limit of 8 bytes hex character

My application has a limit input of 16 hex characters to represent 8 ASCII characters. The only characters I need are A-Z and 0-9. I do not need lower cases or any non-standard alphanumeric characters. A 2 character hex can represent all of the characters on the keyboard, but I don't need all of it.
Is there some type of library that can compress the alphanumeric string to be able to fit it in 16 hex characters?
Example:
12345678 = 31 32 33 34 35 36 37 38
I want to be able to pack in more alphanumeric characters like this:
ABCDEFGHI12345678 = 31 32 33 34 35 36 37 38
Is there a library that does this?
After some experimentation, here's my compression algorithm.
It takes a 6-character message containing A-Z and 0-9 and turns it into 8 hex characters.
It can be used twice to compress as 12-character string into 16 hex characters.
If my math is correct, this is the best compression you can achieve, because 16^16 is almost equal to 36^(12.38), which means that you can fit at most 12 characters of a 36-character set (A-Z, 0-9) into 16 hex characters.
I hope it'll be useful for your application.
const testMessage = '6CHARS';

// Maps one character (0-9, A-Z) to its base-36 value, scaled by 36^shift.
function charToInt(char, shift) {
  let charCode = char.charCodeAt(0) - 48; // '0'..'9' -> 0..9
  if (charCode > 9) {
    charCode -= 7; // 'A'..'Z' -> 10..35 (skip the ':'..'@' gap)
  }
  charCode *= 36 ** shift;
  return charCode;
}

// Inverse of charToInt: recovers the character for the digit at `shift`.
function intToChar(int, shift) {
  let number = int / (36 ** shift);
  if (number > 9) {
    number += 7; // values 10..35 map back to 'A'..'Z'
  }
  number += 48;
  return String.fromCharCode(number);
}

// Packs a 6-character A-Z/0-9 string into a single base-36 integer.
function stringToInt(stringWithSixCharacters) {
  let result = 0;
  for (let index = 0; index < 6; index++) {
    result += charToInt(stringWithSixCharacters.charAt(index), 5 - index);
  }
  return result;
}

// Unpacks the base-36 integer back into its 6-character string,
// peeling off one base-36 digit per iteration (lowest first).
function intToString(intFromSixCharacters) {
  let number = intFromSixCharacters;
  let result = '';
  for (let index = 0; index < 6; index++) {
    const mod = number % (36 ** (index + 1));
    const char = intToChar(mod, index);
    result = char + result;
    number = number - mod;
  }
  return result;
}

// Renders the packed integer as 8 uppercase hex characters.
function intToHex(int) {
  return int.toString(16).padStart(8, '0').toUpperCase();
}

// Parses 8 hex characters back into the packed integer.
// Fixed: the original read the outer `messageHex` variable instead of its
// own `hex` parameter, so it only worked by accident in this demo script.
function hexToInt(hex) {
  return parseInt(hex, 16);
}

console.log('testMessage:', testMessage);
const messageCode = stringToInt(testMessage);
const messageHex = intToHex(messageCode);
console.log('messageCode:', messageCode);
console.log('hex:', messageHex); // prints '16DFB4C8'
const extractedMessageCode = hexToInt(messageHex);
const extractedMessage = intToString(extractedMessageCode);
console.log('extractedMessageCode:', extractedMessageCode);
console.log('extractedMessage:', extractedMessage); // prints '6CHARS'

Byte Array to Uint64 as a String

Let's think about the following situation.
The Go routine creates a byte array where packs a Uint64 number 5577006791947779410 in 8 bytes Big Endian [77, 101, 130, 33, 7, 252, 253, 82].
In JavaScript code I receive these bytes as Uint8Array. We know that JavaScript doesn't currently support Uint64 as safe numeric type and cannot perform bitwise operations on integers larger than 32 bits, so things like buf[0] << 56 will never work.
So what is the process of decoding these bytes directly to numeric string "5577006791947779410"?
P.S. I know there are plenty of libraries for working with big integers in JavaScript, but generally they are huge and provide lots of mathematical operations, which I don't need here. I am looking for a simple modern straightforward solution for just decoding BE-packed Uint64 and Int64 bytes to numeric string. Do you have anything in mind?
EDIT: For converting (U)int64 I would now definitely recommend #LS_DEV's solution. I would use my solution only when having an unknown or larger amount of bytes.
I started with https://stackoverflow.com/a/21668344/3872370 and modified it:
// Converts a big-endian byte array to its decimal string representation.
// Signed mode applies two's complement: every byte is negated as
// 0x100 - 1 - b, with the complement's +1 folded into the last byte,
// and the result is prefixed with '-'.
function Int64ToString(bytes, isSigned) {
  const isNegative = isSigned && bytes.length > 0 && bytes[0] >= 0x80;
  // Little-endian array of decimal digits of the value accumulated so far.
  const digits = [];
  for (let j = 0; j < bytes.length; j++) {
    let byte = bytes[j];
    if (isNegative) {
      byte = 0x100 - (j === bytes.length - 1 ? 0 : 1) - byte;
    }
    // digits = digits * 256 + byte, carried out digit-by-digit in base 10.
    for (let i = 0; byte > 0 || i < digits.length; i++) {
      byte += (digits[i] || 0) * 0x100;
      digits[i] = byte % 10;
      byte = (byte - digits[i]) / 10;
    }
  }
  return (isNegative ? '-' : '') + digits.reverse().join('');
}

const tests = [
  {
    inp: [77, 101, 130, 33, 7, 252, 253, 82],
    signed: false,
    expectation: '5577006791947779410'
  },
  {
    inp: [255, 255, 255, 255, 255, 255, 255, 255],
    signed: true,
    expectation: '-1'
  },
];
for (const test of tests) {
  const result = Int64ToString(test.inp, test.signed);
  console.log(`${result} ${result !== test.expectation ? '!' : ''}=== ${test.expectation}`);
}
At first the sign gets calculated by checking if the topmost bit is set (bytes[0] >= 128). For negative numbers the bits have to be negated (255 - byte) and 1 has to be added to the number (therefore 256 instead of 255 for the last byte).
The basic idea of the forEach loop is to split each byte into its decimal digits (byte % 10 and calculating the overhead (byte - digits[i]) / 10 resp. Math.floor(byte / 10) for the next digit). For the next byte one has to add the shifted result of the last bytes' digits (byte += digits[i] * 256 resp. digits[i] << 8).
That code is optimized for shortness, simplicity and flexibility. If you are working with strings instead of bytes or numbers and don't want to use any libraries it appears that conversion performance doesn't really matter. Otherwise the function could be optimized for performance: Up to four bytes could be treated simultaneously, one only has to replace the 0x100 and 0x80, additionally (with only two byte groups remaining in the case of an (U)Int64) the forEach loop can be unrolled. Grouping the decimal digits probably won't increase performance since the resulting strings would have to be padded with zeros, introducing the need of removing leading zeros in the end result.
Another approach: divide problem in two uint32 to keep calculations manageable.
Consider lower and higher uint32 (l and h). Full number could be written as h*0x100000000+l. Considering decimal, one could also consider lower 9 digits and remaining higher digits (ld and hd): ld=(h*0x100000000+l)%1000000000 and hd=(h*0x100000000+l)/1000000000. With some arithmetic and algebra operators properties, one can break those operation into safe "half" 64bit operations and compose string at ending.
// Decimal string of a big-endian 8-byte (U)Int64, computed via two uint32
// halves so every intermediate value stays within double precision.
function int64_to_str(a, signed) {
  const negative = signed && a[0] >= 128;
  const H = 0x100000000, D = 1000000000;
  // Assemble the high (bytes 0-3) and low (bytes 4-7) 32-bit halves.
  let h = 0, l = 0;
  for (let i = 0; i < 4; i++) {
    h = h * 0x100 + a[i];
    l = l * 0x100 + a[i + 4];
  }
  if (negative) {
    // Two's complement split across the halves; the +1 is folded into l.
    h = H - 1 - h;
    l = H - l;
  }
  // hd: the digits above 10^9; ld: the 9 low decimal digits.
  const hd = Math.floor(h * H / D + l / D);
  const ld = (((h % D) * (H % D)) % D + l) % D;
  const ldStr = ld + '';
  // Zero-pad ld to 9 digits whenever a high part exists.
  return (negative ? '-' : '') +
    (hd != 0 ? hd + '0'.repeat(9 - ldStr.length) : '') + ldStr;
}

let result = int64_to_str([77, 101, 130, 33, 7, 252, 253, 82], false);
let expectation = '5577006791947779410';
console.log(result + ' ' + (result === expectation ? '===' : '!==') + ' ' + expectation);
result = int64_to_str([255, 255, 255, 255, 255, 255, 255, 255], true);
expectation = '-1';
console.log(result + ' ' + (result === expectation ? '===' : '!==') + ' ' + expectation);
As detailed in the comments that algorithm works even though (h % D) * (H % D) can get larger than Number.MAX_SAFE_INTEGER, because the lost bits were nevertheless zero.
Here is my solution. The general strategy is this:
If number is negative, negate it using 2's complement and add negative sign back in at the end
Represent arbitrary size numbers as LE arrays of digits from 0 to 9
For each byte in the Uint8Array (from most to least significant), multiply running total by 256 and add to it the value of the new byte
To multiply a number by 256, double it 8 times (since 2 ** 8 == 256)
To add two numbers, use the elementary school algorithm:
Start with least significant digit
Add corresponding digits of the two numbers
Resulting digit is the sum mod 10; carry is 1 if the sum is 10 or more, otherwise 0
Continue adding corresponding digits with the carry until we add the most significant digits and carry is 0
A few notes about shorthand:
n1[i] || 0 gets the ith digit of n1. If this is past the end of i, we treat it as a 0 (imagine numbers represented with infinite 0s in front of them). Same with n2.
added > 9 produces a boolean, which is automatically converted to a number (1 if added >= 10, 0 otherwise)
i < n1.length || i < n2.length || carry checks whether there are more digits in either of the addends or the carry is still nonzero
String(b).split('').map(Number).reverse() converts, e.g. 100 to '100', then ['1', '0', '0'], then [1, 0, 0], then [0, 0, 1] so it is represented in LE base-10
result.reverse().join('') converts, e.g. [0, 0, 1] to [1, 0, 0], then '100'
Code:
// Adds two numbers represented as little-endian arrays of decimal digits.
// Missing digits are treated as 0; the loop runs until both inputs and the
// carry are exhausted.
function add(n1, n2) {
const sum = []
let carry = 0
for (let i = 0; i < n1.length || i < n2.length || carry; i++) {
const added = (n1[i] || 0) + (n2[i] || 0) + carry
sum[i] = added % 10
carry = added > 9 //floor(added / 10)
}
return sum
}
// Multiplies a digit-array number by 256 by doubling it 8 times (2^8 = 256).
function times256(n1) {
for (let i = 8; i; i--) n1 = add(n1, n1)
return n1
}
// Decimal string of a big-endian byte buffer, signed (two's complement).
// NOTE(review): the negation step relies on `buffer.map(b => ~b)` wrapping
// back into 0..255, i.e. `buffer` must be a Uint8Array (the answer's prose
// says so); a plain Array would produce negative values here — verify at
// the call site.
function toString(buffer) {
const isNegative = buffer[0] & 128 //check if high bit is set
if (isNegative) { //convert to positive, using 2's complement
buffer = buffer.map(b => ~b) //invert all bits
let i = buffer.length - 1
while (buffer[i] === 255) { //add 1 to the number, carrying if necessary
buffer[i] = 0
i--
}
buffer[i]++
}
// Fold each byte in: running total = total * 256 + byte, in base-10 digits.
const result = buffer.reduce((sum, b) =>
add(
times256(sum), //multiply sum by 256
String(b).split('').map(Number).reverse() //then add b
),
[]
)
const stringResult = result.reverse().join('')
if (isNegative) return '-' + stringResult
else return stringResult
}
This does the UInt64 version - I can't imagine that an interchange is that difficult:
<!DOCTYPE html>
<html>
<body>
<span id='out1'></span>
<br>
<span id='out2'></span>
<br>
<span id='out3'></span>
</body>
<script>
// Decodes a big-endian UInt64 byte array to decimal using only per-digit
// arithmetic (JS numbers cannot represent 64-bit integers exactly).
// NOTE(review): all variables here are implicit globals — fine for a demo
// page, but this script would throw in strict mode / ES modules.
fnl='';
be=[77, 101, 130, 33, 7, 252, 253, 82];
// Returns n (0..255) as a zero-padded 8-bit binary string.
function paddedBinary(n) {
pad='';
sv=128;
while (sv>n) {pad+='0';sv/=2;}
return pad+n.toString(2);
}
// fnl becomes the full 64-bit binary string of the big-endian input.
for (let i=0;i<8;i++)
fnl+=paddedBinary(be[i]);
out1.textContent=fnl;
// dec[i] holds 2^i as 21 decimal digits, least-significant digit first.
dec=new Array(64);
for (let i=0;i<64;i++) dec[i]=new Array(21).fill(0);
// Fills dec[] by doubling the previous power of two and carrying tens.
function make2s() {
dec[0][0]=1;
for (let i=1;i<64;i++) {
for (let j=0;j<21;j++)
dec[i][j]=2*dec[i-1][j];
for (let j=0;j<21;j++)
if (dec[i][j]>9) {
dec[i][j]-=10;
dec[i][j+1]++;
}
}
}
// Adds two 21-digit numbers, then normalizes the carries.
function int64add(v1,v2) {
var res=new Array(21).fill(0);
for (let i=0;i<21;i++)
res[i]=v1[i]+v2[i];
for (let i=0;i<21;i++)
if (res[i]>9) {
res[i]-=10;
res[i+1]++;
}
return res;
}
make2s();
for (let i=0;i<64;i++)
out2.textContent+=dec[i]+' :: ';
// Sum the powers of two for every set bit (digits end up in reverse order).
cv=new Array(21).fill(0);
for (let i=0;i<fnl.length;i++)
if (fnl[i]=='1') cv=int64add(cv,dec[63-i]);
out3.textContent=cv;
</script>
</html>
The paddedBinary() function returns a 'full' 8-bit binary number, so we can create 'fnl' as a 64-bit string of the BigEndian.
As JavaScript doesn't do full 64-bit arithmetic, I create the dec[] array to store each power of 2 as individual digits, by doubling each previous digit and smoothing the tens out.
Then all is left is to add the bits that we want, which uses a similar method to smooth out the tens.
(and the answer is given in reverse!)

Converting between base-2 (binary), base-16 (hexadecimal), and base-64 in Javascript

Converting ASCII strings to and from hex and base-64 is fairly simple in Javascript. However, dealing with binary and UTF-encoded strings throws a wrench into this.
Javascript's built-in atob() and btoa() functions do not work with UTF-encoded strings, which is a major problem for strings coming from elements (e.g. inputs) in an HTML document that declares a UTF-8 charset. Additionally, it seems that base-64 can only be directly encoded using strings that are already in ASCII-encoded hex, with no direct way provided to convert a binary string (either ASCII or UTF-8-encoded) to base-64.
To compound the issue further, it appears that nearly all of the questions posted on SO and elsewhere assume that "binary string" is equivalent to binary data represented in a hex-encoded string, as opposed to a string consisting of base-2 numbers.
Given a UTF-8 or ASCII-encoded string consisting of binary, hex, or base-64 characters, how would you convert between the three?
This is the answer I have come up with so far. I have not yet looked into eliminating the intermediary steps for hex ⇌ base-64, so those functions involve a conversion to binary.
// Binary → Hex: consumes the bit string in 4-bit nibbles.
// Fixed: declared with `const` — the original assigned an implicit global,
// which throws a ReferenceError in strict mode / ES modules.
const binToHex = (value) => {
  let hexString = '';
  for (let i = 0; i < value.length / 4; i++) {
    const piece = value.substr(4 * i, 4);
    hexString += parseInt(piece, 2).toString(16);
  }
  return hexString;
};
// Binary → Base-64.
// Fixed: every binding is now declared with `const`/`let` (the original
// created implicit globals, which throw in strict mode / ES modules), and
// an unused `new ArrayBuffer(...)` allocation was removed.
const binToB64 = (value) => {
  // Split the bit string into byte values (last byte may be short).
  const bitsToDecArray = (bits) => {
    const decArray = [];
    for (let i = 0; i < Math.ceil(value.length / 8); i++) {
      const length = 8 * i + 8 > value.length ? value.length - 8 * i : 8;
      const bin = value.substr(8 * i, length);
      decArray.push(parseInt(bin, 2).toString(10));
    }
    return decArray;
  };
  const uint8DecArray = new Uint8Array(bitsToDecArray(value));

  // From http://stackoverflow.com/a/7372816/4111381
  // Standard 3-bytes → 4-chars base64 encoder over a byte array.
  const base64ArrayBuffer = (arrayBuffer) => {
    let base64 = '';
    const encodings = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
    const bytes = new Uint8Array(arrayBuffer);
    const byteLength = bytes.byteLength;
    const byteRemainder = byteLength % 3;
    const mainLength = byteLength - byteRemainder;
    let a, b, c, d;
    let chunk;
    // Main loop deals with bytes in chunks of 3
    for (let i = 0; i < mainLength; i = i + 3) {
      // Combine the three bytes into a single integer
      chunk = (bytes[i] << 16) | (bytes[i + 1] << 8) | bytes[i + 2];
      // Use bitmasks to extract 6-bit segments from the triplet
      a = (chunk & 16515072) >> 18; // 16515072 = (2^6 - 1) << 18
      b = (chunk & 258048) >> 12;   // 258048 = (2^6 - 1) << 12
      c = (chunk & 4032) >> 6;      // 4032 = (2^6 - 1) << 6
      d = chunk & 63;               // 63 = 2^6 - 1
      // Convert the raw binary segments to the appropriate ASCII encoding
      base64 += encodings[a] + encodings[b] + encodings[c] + encodings[d];
    }
    // Deal with the remaining bytes and padding
    if (byteRemainder == 1) {
      chunk = bytes[mainLength];
      a = (chunk & 252) >> 2; // 252 = (2^6 - 1) << 2
      // Set the 4 least significant bits to zero
      b = (chunk & 3) << 4;   // 3 = 2^2 - 1
      base64 += encodings[a] + encodings[b] + '==';
    } else if (byteRemainder == 2) {
      chunk = (bytes[mainLength] << 8) | bytes[mainLength + 1];
      a = (chunk & 64512) >> 10; // 64512 = (2^6 - 1) << 10
      b = (chunk & 1008) >> 4;   // 1008 = (2^6 - 1) << 4
      // Set the 2 least significant bits to zero
      c = (chunk & 15) << 2;     // 15 = 2^4 - 1
      base64 += encodings[a] + encodings[b] + encodings[c] + '=';
    }
    return base64;
  };

  return base64ArrayBuffer(uint8DecArray);
};
// Hex → Binary: each hex digit expands to 4 bits, left-padded with zeros.
// Fixed: declared with `const` (the original was an implicit global,
// which throws in strict mode / ES modules).
const hexToBin = (value) => {
  let binString = '';
  for (let i = 0; i < value.length; i++) {
    const bin = parseInt(value[i], 16).toString(2);
    binString += ('0000' + bin).slice(-4);
  }
  return binString;
};
// Hex → Base-64, via the intermediate binary representation.
// Fixed: declared with `const` (the original was an implicit global,
// which throws in strict mode / ES modules).
const hexToB64 = (value) => binToB64(hexToBin(value));
// Base-64 → Binary: 6 bits per character; '=' padding contributes zero bits
// and the resulting trailing NUL bytes are stripped afterwards.
// Fixed: declared with `const` (the original was an implicit global, which
// throws in strict mode / ES modules) and removed a leftover debug
// console.log(i+j) inside the loop.
const b64ToBin = (value) => {
  const base64chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
  let bitString = '';
  for (let i = 0; i < value.length; i += 4) {
    let segment = '';
    for (let j = 0; j < 4; j++) {
      if (value[i + j] != '=') {
        const bin = base64chars.indexOf(value[i + j]).toString(2);
        segment += ('000000' + bin).slice(-6);
      } else {
        segment += '000000';
      }
    }
    bitString += segment;
  }
  // Strip ending null bytes
  while (bitString.endsWith('00000000')) {
    bitString = bitString.substr(0, bitString.length - 8);
  }
  return bitString;
};
// Base-64 → Hex, via the intermediate binary representation.
// Fixed: declared with `const` (the original was an implicit global,
// which throws in strict mode / ES modules).
const b64ToHex = (value) => binToHex(b64ToBin(value));
You could try using the turbocommons library. Just download the minified js file (turbocommons-es5.zip) and write the following code:
<script src="../yourpathtothelibrary/turbocommons-es5.js"></script>
<script>
var ConversionUtils = org_turbocommons.ConversionUtils;
ConversionUtils.stringToBase64('your binary string here');
</script>
Or you can look at the code on how it is done by the library here:
https://github.com/edertone/TurboCommons/blob/master/TurboCommons-TS/src/main/ts/utils/ConversionUtils.ts
More info here:
https://turboframework.org/en/blog/2022-10-26/encode-decode-base64-strings-javascript-typescript-php
Or you can test it online here:
https://turboframework.org/en/app/stringutils/base64-encode

How can I encode/decode this proprietary checksum (Athena 16C PID controller)?

I have an Athena 16C controller that I am controlling via RS232. Its messaging protocol requires a proprietary checksum:
"CHKSUM: This is a two character Message Code Numbering System, representing the sum of all the ASCII values of all the characters (excluding the START, CHAR, the END CHAR, and the CHKSM themselves) in the message. The sum is computed using the following formula:
CHKSM = SUM(All Message Characters)%256 where % represents the modulus operator."
An example message (from their documentation) is this:
$Ø1Ø1RØ5C1<CR>
and can be broken down as:
$ [START CHAR] 01 [ID] 01 [ZONE] R [TYPE] 05 [PARAM] C1 [CHKSM] <CR> [END CHAR]
I have sent this message to the controller and it works as expected.
I am writing my code in JS and have the following that is supposed to calculate the CHKSM to put at the end of the message:
// Sums the char codes of every character in the message, then mod 256.
// NOTE(review): as rendered here the literal contains 'Ø' (U+00D8, code 216)
// rather than the digit '0' (code 48) — likely a transcription of the
// documentation's slashed-zero glyph. Run verbatim, the sum would differ
// from the 377 described below; with plain '0' characters it matches.
var sum = 'Ø1Ø1RØ5'
.split('')
.map(function(char) {
return char.charCodeAt(0);
})
.reduce(function(current, previous) {
return previous + current;
});
// The remainder is then (incorrectly, per the vendor's scheme) printed as hex.
var chksm = (sum % 256);
console.log(chksm.toString(16));
The checksum should be 'C1' according to the message format. But the calculated sum is 377 which results in a checksum of 121 which equals 79 in hex.
// 0 = 48, 1 = 49, R = 82, 5 = 53 (ASCII values)
// 48 + 49 + 48 + 49 + 82 + 48 + 53 = 377
// 377 % 256 = 121 (decimal) = 79 (hex)
An engineer from Athena sent me the following VB code but I cannot understand the logic, nor the syntax particularly. Is there something basic I am missing with this problem in general?
' Covert the mod % 256 checksum to the 2 chars:
' Will set First and Second chars for encoded value. Pass in the value (Checksum mod 256)
' and where to return the 1st and 2nd chars to.
' Scheme: first char is the "tens" (digits '0'-'9', then 'A'-'Z' once the
' value exceeds 99, i.e. tens 10..35); second char is the ones digit.
Public Sub EncodeIt(ByVal Value As Integer, ByRef FirstChar As Integer, ByRef SecondChar As Integer)
If Value > 359 Then 'Z9 = 359, absolute max possible
Value = 359
End If
'Note: backslash '\' means integer divide, not floating point!!
If Value > 99 Then
FirstChar = (Value \ 10) + 65 - 10 '65 = ascii "A"
Else
FirstChar = (Value \ 10) + 48 '48 = ascii "0"
End If
SecondChar = (Value Mod 10) + 48
End Sub
' Convert the two chars received in a message back to normal integer.
' Take the 2 chars and return a decoded integer value
' (exact inverse of EncodeIt: letters decode to tens 10..35, digits to 0..9).
Public Function DecodeIt(ByVal FirstChar As Integer, ByVal SecondChar As Integer) As Integer
'65 = ascii "A", 48 = ascii "0"
If FirstChar > 57 Then '57 = ascii "9"
Return ((FirstChar - 65 + 10) * 10) + (SecondChar - 48)
Else
Return ((FirstChar - 48) * 10) + (SecondChar - 48)
End If
End Function
The encoding from decimal to string is cutom made and not base16. This is why (121).toString(16) is not equal to C1.
From the VBA of your post the encoding/decoding functions should be:
// Sum of all character codes in the message, modulo 256.
function compute_checksum(message) {
  var total = 0;
  for (var pos = 0; pos < message.length; pos++) {
    total += message.charCodeAt(pos);
  }
  return total % 256;
}

// Encodes a 0..359 checksum as two characters: the tens part first
// ('0'-'9', then 'A'-'Z' above 99), the ones digit second.
function encode_checksum(value) {
  value = Math.min(value, 359);
  var tens = ((value / 10) | 0) + (value > 99 ? 55 : 48);
  var ones = (value % 10) + 48;
  return String.fromCharCode(tens, ones);
}

// Inverse of encode_checksum: turns the two characters back into an integer.
function decode_checksum(text) {
  var first = text.charCodeAt(0);
  var second = text.charCodeAt(1);
  return (first > 57 ? first - 55 : first - 48) * 10 + (second - 48)
}
Here is a usage example:
// Round trip for the documented example message: 121 -> 'C1' -> 121.
var checksum = compute_checksum('0101R05');
console.log('checksum: ' + checksum);
var CHKSM = encode_checksum(checksum);
console.log('encoded checksum: ' + CHKSM);
console.log('decoded checksum: ' + decode_checksum(CHKSM));
I just went through this for C# and with the help of above, came up with this code that works (for me):
'reqtemp' is the string that contains controller ID#, Zone, Parameter, etc. without the start and checksum characters.
// generate checksum for Athena
// Sums the char codes of the request string, takes mod 256, then encodes
// the result as two characters per the vendor scheme (tens as '0'-'9'/'A'-'Z',
// ones as '0'-'9') and appends them plus a trailing CR to the message.
int x = 0;
int sl = reqtemp.Length;
int FirstChar = 0; //checksum 1st character
int SecondChar = 0; //checksum 2nd char
string crcr; // crc for requests
for (int c = 0; c < sl; c++)
{
string sel = reqtemp.Substring(c, 1);
x = x + Convert.ToChar(sel);
}
x = x % 256; //modular 256
x = Math.Min(x, 359); // don't allow > 359
if (x > 99)
{ FirstChar = (x / 10) + 65 - 10; }
else
{ FirstChar = (x / 10) + 48; }
SecondChar = (x % 10) + 48;
crcr = Char.ConvertFromUtf32(FirstChar) + Char.ConvertFromUtf32(SecondChar);
// MessageBox.Show(crcr);
// Full frame: start char + message + checksum chars + CR terminator.
string reqtempfull = "$"+ reqtemp + crcr + (char)13;
crc.Text = reqtempfull; //display the full sp string
if (ComPort.IsOpen)
{
ComPort.Write(reqtempfull); // send it
}

Javascript - convert integer to array of bits

I am trying in javascript to convert an integer (which I know will be between 0 and 32), to an array of 0s and 1s. I have looked around but couldn't find something that works..
So, if I have an integer as 22 (binary 10110), I would like to access it as:
Bitarr[0] = 0
Bitarr[1] = 1
Bitarr[2] = 1
Bitarr[3] = 0
Bitarr[4] = 1
Any suggestions?
Many thanks
convert to base 2:
var base2 = (yourNumber).toString(2);
access the characters (bits):
base2[0], base2[1], base2[3], etc...
Short (ES6)
Shortest (32 chars) version which fill last bits by zero. I assume that n is your number, b is base (number of output bits):
[...Array(b)].map((x,i)=>n>>i&1)
// bits(n, b): the b least-significant bits of n, least-significant first.
let bits = (n, b = 32) => Array.from({ length: b }, (_, i) => (n >> i) & 1);
let Bitarr = bits(22, 8);
console.log(Bitarr[0]); // = 0
console.log(Bitarr[1]); // = 1
console.log(Bitarr[2]); // = 1
console.log(Bitarr[3]); // = 0
console.log(Bitarr[4]); // = 1
var a = 22;
var b = [];
// Extract the 5 low bits of `a`, least-significant bit first.
for (var i = 0; i < 5; i++)
b[i] = (a >> i) & 1;
// NOTE(review): alert() is browser-only; use console.log elsewhere.
alert(b);
Assuming 5 bits (it seemed from your question), so 0 <= a < 32. If you like you can make the 5 larger, upto 32 (bitshifting in JavaScript works with 32 bit integer).
This should do
// NOTE(review): `int` is not valid JavaScript — this is C/Java-style
// pseudocode; in JS the loop variable would be declared with `let i = 0`.
for(int i = 0; i < 32; ++i)
Bitarr[i] = (my_int >> i) & 1;
You can convert your integer to a binary String like this. Note the base 2 parameter.
var i = 20;
// The argument is the radix: base-2 gives the binary representation.
var str = i.toString(2); // 10100
You can access chars in a String as if it were an array:
alert(str[0]); // 1
alert(str[1]); // 0
etc...
Building up on previous answers: you may want your array to be an array of integers, not strings, so here is a one-liner:
(1234).toString(2).split('').map(function(s) { return parseInt(s); });
Note, that shorter version, (11).toString(2).split('').map(parseInt) will not work (chrome), for unknown to me reason it converts "0"s to NaNs
In addition, this code gives 32length array
// Returns the 32-bit binary representation of `value` as a string,
// least-significant bit first.
// Fixed: the original padded with `new Array(32 - len).join("0")`, which
// yields one zero too few (Array(n).join produces n-1 separators), so the
// result was 31 characters instead of the advertised 32.
function get_bits(value){
  var reversed = (value).toString(2).split("").reverse().join("");
  return reversed.padEnd(32, "0");
}
1 => 10000000000000000000000000000000
2 => 01000000000000000000000000000000
3 => 11000000000000000000000000000000
You might do as follows;
// Bit array of n, most-significant bit first, sized to n's bit length.
var n = 1071;
var b = Array(Math.floor(Math.log2(n)) + 1).fill()
  .map((_, i, a) => (n >> (a.length - 1 - i)) & 1);
console.log(b);
just for the sake of refernce:
// Decomposes 121231241 into its constituent powers of two, joined by ' + '.
(121231241).toString(2).split('').reverse().map((x, index) => x === '1' ? 1 << index : 0).reverse().filter(x => x > 0).join(' + ');
would give you:
67108864 + 33554432 + 16777216 + 2097152 + 1048576 + 524288 + 65536 + 32768 + 16384 + 4096 + 1024 + 512 + 256 + 128 + 8 + 1

Categories