I am trying to sign the value eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZSI6InRzIiwibm9uY2UiOiI4MTZkMWVmNi0zYjNlLTQ1MmEtOWM5Yi0xNDYyZWIzNWZlNzUiLCJpc3MiOiIwYzE3MjYwNDEwMjhkMTI5ZGI3YjU4NzUzYzU2OTYwYyIsImV4cCI6MTU4ODUyOTE3OCwibmJmIjoxNTg4NTI4ODE4LCJpYXQiOjE1ODg1Mjg4MTgsImp0aSI6IjE2ZGExZGZiLTkyYjQtNDI0ZS04ZTU5LWIyNzZmYmQ3MWVkYSIsInJlZ2lvbiI6Im55IiwibWV0aG9kIjoiR0VUIiwicGF0aCI6Ii9hdXRoZW50aWNhdGUvY29ubmVjdC9kZXZpY2UiLCJob3N0IjoiaHR0cHM6Ly9hcGkuYmxvb21iZXJnLmNvbSIsImNsaWVudF9pZCI6IjBjMTcyNjA0MTAyOGQxMjlkYjdiNTg3NTNjNTY5NjBjIn0 with key a1b2c3a4b5c6. The key is hexadecimal.
This should generate the signature 8Wspda1l2Z3-hLwvMI_5Q8AQic59oclZAav7kWVtGHw, which was provided by my service provider as a sample.
I tried the following:
var signature = Utilities.computeHmacSha256Signature("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZSI6InRzIiwibm9uY2UiOiI4MTZkMWVmNi0zYjNlLTQ1MmEtOWM5Yi0xNDYyZWIzNWZlNzUiLCJpc3MiOiIwYzE3MjYwNDEwMjhkMTI5ZGI3YjU4NzUzYzU2OTYwYyIsImV4cCI6MTU4ODUyOTE3OCwibmJmIjoxNTg4NTI4ODE4LCJpYXQiOjE1ODg1Mjg4MTgsImp0aSI6IjE2ZGExZGZiLTkyYjQtNDI0ZS04ZTU5LWIyNzZmYmQ3MWVkYSIsInJlZ2lvbiI6Im55IiwibWV0aG9kIjoiR0VUIiwicGF0aCI6Ii9hdXRoZW50aWNhdGUvY29ubmVjdC9kZXZpY2UiLCJob3N0IjoiaHR0cHM6Ly9hcGkuYmxvb21iZXJnLmNvbSIsImNsaWVudF9pZCI6IjBjMTcyNjA0MTAyOGQxMjlkYjdiNTg3NTNjNTY5NjBjIn0", "a1b2c3a4b5c6");
var encodedSignature = Utilities.base64EncodeWebSafe(signature).replace(/=+$/, '');
but I got a different result: vnshuAi4GtWTZ3VxmPgLzJ--V18mm-r4cBUNA0FIvTs.
Since computeHmacSha256Signature(value, key) also accepts a pair of Byte[] arguments (in addition to a pair of strings), I tried the following:
function toUTF8Array(str) {
  var utf8 = [];
  for (var i = 0; i < str.length; i++) {
    var charcode = str.charCodeAt(i);
    if (charcode < 0x80) utf8.push(charcode);
    else if (charcode < 0x800) {
      utf8.push(0xc0 | (charcode >> 6),
                0x80 | (charcode & 0x3f));
    }
    else if (charcode < 0xd800 || charcode >= 0xe000) {
      utf8.push(0xe0 | (charcode >> 12),
                0x80 | ((charcode >> 6) & 0x3f),
                0x80 | (charcode & 0x3f));
    }
    // surrogate pair
    else {
      i++;
      // UTF-16 encodes 0x10000-0x10FFFF by
      // subtracting 0x10000 and splitting the
      // 20 bits of 0x0-0xFFFFF into two halves
      charcode = 0x10000 + (((charcode & 0x3ff) << 10)
                 | (str.charCodeAt(i) & 0x3ff));
      utf8.push(0xf0 | (charcode >> 18),
                0x80 | ((charcode >> 12) & 0x3f),
                0x80 | ((charcode >> 6) & 0x3f),
                0x80 | (charcode & 0x3f));
    }
  }
  return utf8;
}
function resecret(s) {
  var bin_secret = "";
  var duo = "";
  var new_secret_array = [];
  var j = 0;
  var len = s.length;
  var element = 0;
  for (var i = 0; i < len; i++) {
    duo = s.charAt(i) + s.charAt(i + 1);
    element = parseInt(duo, 16);
    // element = "0x"+duo;
    bin_secret = bin_secret + element;
    new_secret_array[j] = element;
    j++;
    i++;
  }
  return new_secret_array;
}
function test() {
var value="eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZSI6InRzIiwibm9uY2UiOiI4MTZkMWVmNi0zYjNlLTQ1MmEtOWM5Yi0xNDYyZWIzNWZlNzUiLCJpc3MiOiIwYzE3MjYwNDEwMjhkMTI5ZGI3YjU4NzUzYzU2OTYwYyIsImV4cCI6MTU4ODUyOTE3OCwibmJmIjoxNTg4NTI4ODE4LCJpYXQiOjE1ODg1Mjg4MTgsImp0aSI6IjE2ZGExZGZiLTkyYjQtNDI0ZS04ZTU5LWIyNzZmYmQ3MWVkYSIsInJlZ2lvbiI6Im55IiwibWV0aG9kIjoiR0VUIiwicGF0aCI6Ii9hdXRoZW50aWNhdGUvY29ubmVjdC9kZXZpY2UiLCJob3N0IjoiaHR0cHM6Ly9hcGkuYmxvb21iZXJnLmNvbSIsImNsaWVudF9pZCI6IjBjMTcyNjA0MTAyOGQxMjlkYjdiNTg3NTNjNTY5NjBjIn0";
var key = "a1b2c3a4b5c6";
var signature = Utilities.computeHmacSha256Signature(toUTF8Array(value),resecret(key));
}
This time, I get an error:
Cannot convert Array to (class)[].
What am I doing wrong here? Has anyone been able to run computeHmacSha256Signature(value, key) using byte arrays?
How do I get the signature 8Wspda1l2Z3-hLwvMI_5Q8AQic59oclZAav7kWVtGHw?
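For reference, here is a minimal sketch of one direction that might get past the "Cannot convert Array to (class)[]" error. It rests on two assumptions: that Apps Script Byte[] elements are signed (-128..127), so hex pairs above 0x7F have to be shifted into the negative range, and that the value also has to be supplied as bytes, e.g. via Utilities.newBlob(...).getBytes(). The hexToSignedBytes helper is hypothetical, not part of the Apps Script API.

// Sketch only: convert a hex string to signed bytes (-128..127),
// which appears to be the range Apps Script byte arrays require.
function hexToSignedBytes(hex) {
  var bytes = [];
  for (var i = 0; i < hex.length; i += 2) {
    var b = parseInt(hex.substr(i, 2), 16);
    bytes.push(b < 0x80 ? b : b - 0x100);
  }
  return bytes;
}

function testSigned() {
  var value = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9...."; // same value as above
  var key = "a1b2c3a4b5c6";
  var signature = Utilities.computeHmacSha256Signature(
      Utilities.newBlob(value).getBytes(),  // string -> Byte[]
      hexToSignedBytes(key));
  return Utilities.base64EncodeWebSafe(signature).replace(/=+$/, '');
}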
I am base64-encoding an executable in Go and trying to decode it in JavaScript.
Decoding the encoded string in JavaScript does not result in a matching file. I am able to encode a string like "this is a test string", decode it in JavaScript, and it works fine. But when I take an executable application and do the same thing, the decoded file is larger than the file before encoding.
What am I doing wrong? Thanks!
Here is the test executable I am using. It is in C++; compile it with g++ and use the output.
#include <iostream>

int main(void) {
    char test1[] = "hello";
    std::cout << "test1: " << test1 << std::endl;
    char test2[] = "world";
    std::cout << "test2: " << test2 << std::endl;
    char test3[] = "foobar";
    std::cout << "test3: " << test3 << std::endl;
    return 0;
}
Here is the Go app I am using to read the file and base64-encode the bytes.
package main

import (
    "encoding/base64"
    "fmt"
    "io/ioutil"

    "github.com/atotto/clipboard"
)

func main() {
    bytes, err := ioutil.ReadFile("/path/to/file/a.out")
    if err != nil {
        fmt.Println(err)
    }
    enc := make([]byte, base64.RawStdEncoding.EncodedLen(len(bytes)))
    base64.RawStdEncoding.Encode(enc, bytes)
    fmt.Println("byte size: ", len(bytes))
    fmt.Println("encoded byte size: ", len(enc))
    clipboard.WriteAll(string(enc))
}
Here is how I am attempting to decode and save the file in JavaScript.
let decodedBytes = atob("put the bytes here from your clipboard from running the go app");
fs.writeFileSync(
"/destination/to/save/file",
decodedBytes
);
I figured it out. After some research and reading I found this question and this article. Initially the question did not help me, but after reading the article for some time I tried a few of the examples and was able to get solution 1 to work. Here is the JavaScript I have now to get this working.
The saved file is exactly the same as the source.
function b64ToUint6(nChr) {
  return nChr > 64 && nChr < 91
    ? nChr - 65
    : nChr > 96 && nChr < 123
    ? nChr - 71
    : nChr > 47 && nChr < 58
    ? nChr + 4
    : nChr === 43
    ? 62
    : nChr === 47
    ? 63
    : 0;
}
function base64DecToArr(sBase64, nBlockSize) {
  var sB64Enc = sBase64.replace(/[^A-Za-z0-9\+\/]/g, ""),
      nInLen = sB64Enc.length,
      nOutLen = nBlockSize
        ? Math.ceil(((nInLen * 3 + 1) >>> 2) / nBlockSize) * nBlockSize
        : (nInLen * 3 + 1) >>> 2,
      aBytes = new Uint8Array(nOutLen);
  for (
    var nMod3, nMod4, nUint24 = 0, nOutIdx = 0, nInIdx = 0;
    nInIdx < nInLen;
    nInIdx++
  ) {
    nMod4 = nInIdx & 3;
    nUint24 |= b64ToUint6(sB64Enc.charCodeAt(nInIdx)) << (18 - 6 * nMod4);
    if (nMod4 === 3 || nInLen - nInIdx === 1) {
      for (nMod3 = 0; nMod3 < 3 && nOutIdx < nOutLen; nMod3++, nOutIdx++) {
        aBytes[nOutIdx] = (nUint24 >>> ((16 >>> nMod3) & 24)) & 255;
      }
      nUint24 = 0;
    }
  }
  return aBytes;
}
let decodedBytes = base64DecToArr("bytes to decode");
fs.writeFileSync(
"/destination/to/save/file",
decodedBytes
);
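As an aside, since the snippet above uses fs and therefore runs under Node (an assumption based on the code shown), the same decode can also be done natively with Buffer, without hand-rolling the base64 handling:

const fs = require("fs");

// Buffer.from(..., "base64") decodes straight to raw bytes, and
// writeFileSync writes a Buffer without any text re-encoding.
const decoded = Buffer.from("bytes to decode", "base64");
fs.writeFileSync("/destination/to/save/file", decoded);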
I have to validate whether a provided parameter is a byte array or not using JavaScript. How can I achieve this? Please advise.
Because it's not clear which type or instance of a typed array you are trying to check, here is a universal check: if byteLength is present, it should be a byte array.
function isByteArray(array) {
  if (array && array.byteLength !== undefined) return true;
  return false;
}
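For example (Uint8Array, DataView and ArrayBuffer all expose byteLength; a plain array does not):

isByteArray(new Uint8Array([1, 2, 3])); // true
isByteArray(new ArrayBuffer(3));        // true
isByteArray([1, 2, 3]);                 // false - plain arrays have no byteLength
isByteArray(null);                      // false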
Modified version of Joni's answer (this returns an ArrayBuffer, which is designed to contain bytes). Since an ArrayBuffer cannot be indexed directly, the bytes are written through a Uint8Array view, and multi-byte characters are tracked with a separate output index:
function toUTF8Array(str) {
  // Worst case: 4 bytes per UTF-16 code unit.
  var buffer = new ArrayBuffer(str.length * 4);
  var utf8 = new Uint8Array(buffer);
  var p = 0;
  for (var i = 0; i < str.length; i++) {
    var charcode = str.charCodeAt(i);
    if (charcode < 0x80) {
      utf8[p++] = charcode;
      continue;
    }
    if (charcode < 0x800) {
      utf8[p++] = 0xc0 | (charcode >> 6);
      utf8[p++] = 0x80 | (charcode & 0x3f);
      continue;
    }
    if (charcode < 0xd800 || charcode >= 0xe000) {
      utf8[p++] = 0xe0 | (charcode >> 12);
      utf8[p++] = 0x80 | ((charcode >> 6) & 0x3f);
      utf8[p++] = 0x80 | (charcode & 0x3f);
      continue;
    }
    // surrogate pair
    charcode = 0x10000 + (((charcode & 0x3ff) << 10)
               | (str.charCodeAt(++i) & 0x3ff));
    utf8[p++] = 0xf0 | (charcode >> 18);
    utf8[p++] = 0x80 | ((charcode >> 12) & 0x3f);
    utf8[p++] = 0x80 | ((charcode >> 6) & 0x3f);
    utf8[p++] = 0x80 | (charcode & 0x3f);
  }
  // Return only the bytes that were actually written.
  return buffer.slice(0, p);
}
The .charCodeAt function returns the Unicode code of the character, but I would like to get the byte array instead. I know that if the char code is over 127, the character is stored in two or more bytes.
var arr = [];
for (var i = 0; i < str.length; i++) {
  arr.push(str.charCodeAt(i));
}
The logic of encoding Unicode in UTF-8 is basically:
Up to 4 bytes per character can be used. The fewest number of bytes possible is used.
Characters up to U+007F are encoded with a single byte.
For multibyte sequences, the number of leading 1 bits in the first byte gives the number of bytes for the character. The rest of the bits of the first byte can be used to encode bits of the character.
The continuation bytes begin with 10, and the other 6 bits encode bits of the character.
Here's a function I wrote a while back for encoding a JavaScript UTF-16 string in UTF-8:
function toUTF8Array(str) {
  var utf8 = [];
  for (var i = 0; i < str.length; i++) {
    var charcode = str.charCodeAt(i);
    if (charcode < 0x80) utf8.push(charcode);
    else if (charcode < 0x800) {
      utf8.push(0xc0 | (charcode >> 6),
                0x80 | (charcode & 0x3f));
    }
    else if (charcode < 0xd800 || charcode >= 0xe000) {
      utf8.push(0xe0 | (charcode >> 12),
                0x80 | ((charcode >> 6) & 0x3f),
                0x80 | (charcode & 0x3f));
    }
    // surrogate pair
    else {
      i++;
      // UTF-16 encodes 0x10000-0x10FFFF by
      // subtracting 0x10000 and splitting the
      // 20 bits of 0x0-0xFFFFF into two halves
      charcode = 0x10000 + (((charcode & 0x3ff) << 10)
                 | (str.charCodeAt(i) & 0x3ff));
      utf8.push(0xf0 | (charcode >> 18),
                0x80 | ((charcode >> 12) & 0x3f),
                0x80 | ((charcode >> 6) & 0x3f),
                0x80 | (charcode & 0x3f));
    }
  }
  return utf8;
}
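A couple of sample outputs:

toUTF8Array("hello");     // [104, 101, 108, 108, 111]
toUTF8Array("€");         // [226, 130, 172]
toUTF8Array("\u{1F4A9}"); // [240, 159, 146, 169] (4-byte sequence via the surrogate-pair branch)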
JavaScript Strings are stored in UTF-16. To get UTF-8, you'll have to convert the String yourself.
One way is to mix encodeURIComponent(), which will output UTF-8 bytes URL-encoded, with unescape, as mentioned on ecmanaut.
var utf8 = unescape(encodeURIComponent(str));
var arr = [];
for (var i = 0; i < utf8.length; i++) {
arr.push(utf8.charCodeAt(i));
}
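Wrapped up as a function, with the reverse direction for completeness (the decode side mirrors the trick using escape and decodeURIComponent; note that escape/unescape are deprecated, so treat this as a legacy-compatible sketch):

function toUtf8Bytes(str) {
  var utf8 = unescape(encodeURIComponent(str));
  var arr = [];
  for (var i = 0; i < utf8.length; i++) {
    arr.push(utf8.charCodeAt(i));
  }
  return arr;
}

function fromUtf8Bytes(arr) {
  return decodeURIComponent(escape(String.fromCharCode.apply(null, arr)));
}

toUtf8Bytes("€");               // [226, 130, 172]
fromUtf8Bytes([226, 130, 172]); // "€"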
The Encoding API lets you both encode and decode UTF-8 easily (using typed arrays):
var encoded = new TextEncoder().encode("Γεια σου κόσμε");
var decoded = new TextDecoder("utf-8").decode(encoded);
console.log(encoded, decoded);
Browser support isn't too bad, and there's a polyfill that should work in IE11 and older versions of Edge.
While TextEncoder can only encode to UTF-8, TextDecoder supports other encodings. I used it to decode Japanese text (Shift-JIS) in this way:
// Shift-JIS encoded text; must be a byte array due to values 129 and 130.
var arr = [130, 108, 130, 102, 130, 80, 129, 64, 130, 102, 130, 96, 130, 108, 130, 100,
129, 64, 130, 99, 130, 96, 130, 115, 130, 96, 129, 124, 130, 79, 130, 80];
// Convert to byte array
var data = new Uint8Array(arr);
// Decode with TextDecoder
var decoded = new TextDecoder("shift-jis").decode(data.buffer);
console.log(decoded);
The Google Closure library has functions to convert to/from UTF-8 and byte arrays. If you don't want to use the whole library, you can copy the functions from here. For completeness, the code to convert a string to a UTF-8 byte array is:
goog.crypt.stringToUtf8ByteArray = function(str) {
  // TODO(user): Use native implementations if/when available
  var out = [], p = 0;
  for (var i = 0; i < str.length; i++) {
    var c = str.charCodeAt(i);
    if (c < 128) {
      out[p++] = c;
    } else if (c < 2048) {
      out[p++] = (c >> 6) | 192;
      out[p++] = (c & 63) | 128;
    } else if (
        ((c & 0xFC00) == 0xD800) && (i + 1) < str.length &&
        ((str.charCodeAt(i + 1) & 0xFC00) == 0xDC00)) {
      // Surrogate Pair
      c = 0x10000 + ((c & 0x03FF) << 10) + (str.charCodeAt(++i) & 0x03FF);
      out[p++] = (c >> 18) | 240;
      out[p++] = ((c >> 12) & 63) | 128;
      out[p++] = ((c >> 6) & 63) | 128;
      out[p++] = (c & 63) | 128;
    } else {
      out[p++] = (c >> 12) | 224;
      out[p++] = ((c >> 6) & 63) | 128;
      out[p++] = (c & 63) | 128;
    }
  }
  return out;
};
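For example (the library also provides goog.crypt.utf8ByteArrayToString for the reverse direction):

goog.crypt.stringToUtf8ByteArray("hello"); // [104, 101, 108, 108, 111]
goog.crypt.stringToUtf8ByteArray("€");     // [226, 130, 172]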
Assuming the question is about a DOMString as input and the goal is to get an Array that, when interpreted as a string (e.g. written to a file on disk), would be UTF-8 encoded:
Now that nearly all modern browsers support typed arrays, it would be a shame if this approach were not listed:
According to the W3C, software supporting the File API should accept DOMStrings in their Blob constructor (see also: String encoding when constructing a Blob)
Blobs can be converted to an ArrayBuffer using the .readAsArrayBuffer() function of a File Reader
Using a DataView or constructing a Typed Array with the buffer read by the File Reader, one can access every single byte of the ArrayBuffer
Example:
// Create a Blob with an Euro-char (U+20AC)
var b = new Blob(['€']);
var fr = new FileReader();

fr.onload = function() {
  var ua = new Uint8Array(fr.result);
  // This will log "3|226|130|172"
  // E2 82 AC
  // In UTF-16, it would be only 2 bytes long
  console.log(
    fr.result.byteLength + '|' +
    ua[0] + '|' +
    ua[1] + '|' +
    ua[2] + ''
  );
};

fr.readAsArrayBuffer(b);
Play with that on JSFiddle. I haven't benchmarked this yet but I can imagine this being efficient for large DOMStrings as input.
You can save a string raw, as is, by using FileReader.
Save the string in a Blob and call readAsArrayBuffer(). The onload event then yields an ArrayBuffer, which can be converted into a Uint8Array.
Unfortunately this call is asynchronous.
This little function will help you:
function stringToBytes(str)
{
let reader = new FileReader();
let done = () => {};
reader.onload = event =>
{
done(new Uint8Array(event.target.result), str);
};
reader.readAsArrayBuffer(new Blob([str], { type: "application/octet-stream" }));
return { done: callback => { done = callback; } };
}
Call it like this:
stringToBytes("\u{1f4a9}").done(bytes =>
{
console.log(bytes);
});
output: [240, 159, 146, 169]
explanation:
JavaScript uses UTF-16 and surrogate pairs to store Unicode characters in memory. To save Unicode characters to a raw binary byte stream, an encoding is necessary.
Usually UTF-8 is used for this. If you do not use an encoding, you cannot save Unicode characters, just ASCII up to 0x7F.
The Blob constructor stores the string as UTF-8, which is what FileReader.readAsArrayBuffer() then hands back.
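In current browsers the same Blob trick can be written without the callback plumbing, because Blob has a promise-based arrayBuffer() method. A sketch, assuming Blob.prototype.arrayBuffer is available in the target environment:

async function stringToBytes(str) {
  // The Blob constructor stores the string as UTF-8; arrayBuffer() returns those bytes.
  const buffer = await new Blob([str]).arrayBuffer();
  return new Uint8Array(buffer);
}

stringToBytes("\u{1f4a9}").then(bytes => console.log(bytes)); // Uint8Array [240, 159, 146, 169]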
As there is no pure byte type in JavaScript we can represent a byte array as an array of numbers, where each number represents a byte and thus will have an integer value between 0 and 255 inclusive.
Here is a simple function that converts a JavaScript string into an array of numbers containing the UTF-8 encoding of the string:
function toUtf8(str) {
  var value = [];
  var destIndex = 0;
  for (var index = 0; index < str.length; index++) {
    var code = str.charCodeAt(index);
    if (code <= 0x7F) {
      value[destIndex++] = code;
    } else if (code <= 0x7FF) {
      value[destIndex++] = ((code >> 6 ) & 0x1F) | 0xC0;
      value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else if (code <= 0xFFFF) {
      value[destIndex++] = ((code >> 12) & 0x0F) | 0xE0;
      value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else if (code <= 0x1FFFFF) {
      value[destIndex++] = ((code >> 18) & 0x07) | 0xF0;
      value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else if (code <= 0x03FFFFFF) {
      value[destIndex++] = ((code >> 24) & 0x03) | 0xF8;
      value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else if (code <= 0x7FFFFFFF) {
      value[destIndex++] = ((code >> 30) & 0x01) | 0xFC;
      value[destIndex++] = ((code >> 24) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
      value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else {
      throw new Error("Unsupported Unicode character \""
        + str.charAt(index) + "\" with code " + code + " (binary: "
        + toBinary(code) + ") at index " + index
        + ". Cannot represent it as UTF-8 byte sequence.");
    }
  }
  return value;
}
function toBinary(byteValue) {
  if (byteValue < 0) {
    byteValue = byteValue & 0x00FF;
  }
  var str = byteValue.toString(2);
  var len = str.length;
  var prefix = "";
  for (var i = len; i < 8; i++) {
    prefix += "0";
  }
  return prefix + str;
}
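A quick check of the output (note that charCodeAt() never returns values above 0xFFFF, so the 4-, 5- and 6-byte branches are never reached; astral characters come out as two 3-byte surrogate encodings rather than a single 4-byte sequence):

toUtf8("A"); // [65]
toUtf8("€"); // [226, 130, 172]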
I was using Joni's solution and it worked fine, but this one is much shorter.
This was inspired by the atobUTF16() function of Solution #3 of Mozilla's Base64 Unicode discussion
function convertStringToUTF8ByteArray(str) {
let binaryArray = new Uint8Array(str.length)
Array.prototype.forEach.call(binaryArray, function (el, idx, arr) { arr[idx] = str.charCodeAt(idx) })
return binaryArray
}
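Note that this copies raw charCodeAt() values into the Uint8Array, so it only agrees with a real UTF-8 encoder for characters below U+0080; higher code points get truncated to their low 8 bits. A quick comparison (the toUTF8Array call refers to Joni's function above):

convertStringToUTF8ByteArray("abc"); // Uint8Array [97, 98, 99], same as toUTF8Array("abc")
convertStringToUTF8ByteArray("€");   // Uint8Array [172], whereas toUTF8Array("€") gives [226, 130, 172]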
function convertByte() {
  var c = document.getElementById("str").value;
  var arr = [];
  for (var ind = 0; ind < c.length; ind++) {
    arr[ind] = c.charCodeAt(ind);
  }
  document.getElementById("result").innerHTML = "The converted value is " + arr.join("");
}
The JavaScript method String.fromCharCode() behaves equivalently to Python's unichr() in the following sense:
print unichr(213) # prints Õ on the console
console.log(String.fromCharCode(213)); // prints Õ on the console as well
For my purposes, however, I need a JavaScript equivalent to the Python function chr(). Is there such a JavaScript function or a way to make String.fromCharCode() behave like chr()?
That is, I need something in JavaScript that mimics
print chr(213) # prints � on the console
So it turns out you just want to work with raw bytes in Node.js; there's a built-in module for that (Buffer). If you are a real wizard, you can get this stuff to work with JavaScript strings alone, but it's harder and far less efficient.
var b = Buffer.alloc(1);
b[0] = 213;
console.log(b.toString()); //�
var b = Buffer.alloc(3);
b[0] = 0xE2;
b[1] = 0x98;
b[2] = 0x85;
console.log(b.toString()); //★
print chr(213) # prints � on the console
So this prints a raw byte (0xD5), which is then (most likely) interpreted as UTF-8; a lone 0xD5 is not a valid UTF-8 byte sequence, so it is displayed as the replacement character (�).
The interpretation as UTF-8 is not relevant here; you most likely just want raw bytes.
To create raw bytes in JavaScript you could use Uint8Array.
var a = new Uint8Array(1);
a[0] = 213;
You could optionally then interpret the raw bytes as utf-8:
console.log( utf8decode(a)); // "�"
//Not recommended for production use ;D
//Doesn't handle > BMP to keep the answer shorter
function utf8decode(uint8array) {
  var codePoints = [],
      i = 0,
      byte, codePoint, len = uint8array.length;
  for (i = 0; i < len; ++i) {
    byte = uint8array[i];
    if ((byte & 0xF8) === 0xF0 && len > i + 3) {
      codePoint = ((byte & 0x7) << 18) | ((uint8array[++i] & 0x3F) << 12) | ((uint8array[++i] & 0x3F) << 6) | (uint8array[++i] & 0x3F);
      if (!(0xFFFF < codePoint && codePoint <= 0x10FFFF)) {
        codePoints.push(0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD);
      } else {
        codePoints.push(codePoint);
      }
    } else if ((byte & 0xF0) === 0xE0 && len > i + 2) {
      codePoint = ((byte & 0xF) << 12) | ((uint8array[++i] & 0x3F) << 6) | (uint8array[++i] & 0x3F);
      if (!(0x7FF < codePoint && codePoint <= 0xFFFF)) {
        codePoints.push(0xFFFD, 0xFFFD, 0xFFFD);
      } else {
        codePoints.push(codePoint);
      }
    } else if ((byte & 0xE0) === 0xC0 && len > i + 1) {
      codePoint = ((byte & 0x1F) << 6) | ((uint8array[++i] & 0x3F));
      if (!(0x7F < codePoint && codePoint <= 0x7FF)) {
        codePoints.push(0xFFFD, 0xFFFD);
      } else {
        codePoints.push(codePoint);
      }
    } else if ((byte & 0x80) === 0x00) {
      codePoints.push(byte & 0x7F);
    } else {
      codePoints.push(0xFFFD);
    }
  }
  return String.fromCharCode.apply(String, codePoints);
}
What you are most likely trying to do has nothing to do with interpreting the bytes as UTF-8, though.
Another example:
//UTF-8 For the black star U+2605 ★:
var a = new Uint8Array(3);
a[0] = 0xE2;
a[1] = 0x98;
a[2] = 0x85;
utf8decode(a) === String.fromCharCode(0x2605) //True
utf8decode(a) // ★
In Python 2.7 (Ubuntu):
print chr(0xE2) + chr(0x98) + chr(0x85)
#prints ★
If you want this "question mark in a box" (�) for every number that is not in the standard ASCII table, how about this little function?
function chr(c) {
  return (c < 0 || c > 126) ? '�' : String.fromCharCode(c);
}
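For example:

chr(65);  // "A"
chr(126); // "~"
chr(213); // "�"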