How to convert UTF8 string to byte array? - javascript

The .charCodeAt function returns the Unicode code of the character, but I would like to get the byte array instead. I know that if the char code is over 127, the character is stored in two or more bytes.
var arr = [];
for (var i = 0; i < str.length; i++) {
    arr.push(str.charCodeAt(i));
}

The logic of encoding Unicode in UTF-8 is basically:
Up to 4 bytes per character can be used; the fewest bytes possible are used.
Characters up to U+007F are encoded with a single byte.
For multibyte sequences, the number of leading 1 bits in the first byte gives the number of bytes for the character. The rest of the bits of the first byte can be used to encode bits of the character.
The continuation bytes begin with 10, and the other 6 bits encode bits of the character.
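For example, the euro sign U+20AC falls in the three-byte range; a rough sketch of how its bits are distributed:
// U+20AC (€) = 0010 0000 1010 1100 (16 bits, so it needs a 3-byte sequence)
// byte 1: 1110xxxx -> 1110 0010 = 0xE2 (top 4 bits: 0010)
// byte 2: 10xxxxxx -> 10 000010 = 0x82 (middle 6 bits: 000010)
// byte 3: 10xxxxxx -> 10 101100 = 0xAC (low 6 bits: 101100)
// so "€" encodes to the bytes [0xE2, 0x82, 0xAC]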
Here's a function I wrote a while back for encoding a JavaScript UTF-16 string in UTF-8:
function toUTF8Array(str) {
    var utf8 = [];
    for (var i = 0; i < str.length; i++) {
        var charcode = str.charCodeAt(i);
        if (charcode < 0x80) utf8.push(charcode);
        else if (charcode < 0x800) {
            utf8.push(0xc0 | (charcode >> 6),
                      0x80 | (charcode & 0x3f));
        }
        else if (charcode < 0xd800 || charcode >= 0xe000) {
            utf8.push(0xe0 | (charcode >> 12),
                      0x80 | ((charcode >> 6) & 0x3f),
                      0x80 | (charcode & 0x3f));
        }
        // surrogate pair
        else {
            i++;
            // UTF-16 encodes 0x10000-0x10FFFF by
            // subtracting 0x10000 and splitting the
            // 20 bits of 0x0-0xFFFFF into two halves
            charcode = 0x10000 + (((charcode & 0x3ff) << 10)
                      | (str.charCodeAt(i) & 0x3ff));
            utf8.push(0xf0 | (charcode >> 18),
                      0x80 | ((charcode >> 12) & 0x3f),
                      0x80 | ((charcode >> 6) & 0x3f),
                      0x80 | (charcode & 0x3f));
        }
    }
    return utf8;
}

JavaScript Strings are stored in UTF-16. To get UTF-8, you'll have to convert the String yourself.
One way is to mix encodeURIComponent(), which will output UTF-8 bytes URL-encoded, with unescape, as mentioned on ecmanaut.
var utf8 = unescape(encodeURIComponent(str));
var arr = [];
for (var i = 0; i < utf8.length; i++) {
    arr.push(utf8.charCodeAt(i));
}

The Encoding API lets you both encode and decode UTF-8 easily (using typed arrays):
var encoded = new TextEncoder().encode("Γεια σου κόσμε");
var decoded = new TextDecoder("utf-8").decode(encoded);
console.log(encoded, decoded);
Browser support isn't too bad, and there's a polyfill that should work in IE11 and older versions of Edge.
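If a plain Array of numbers is needed, as the question asks, rather than a Uint8Array, the typed array can be converted; a minimal sketch:
// Convert the Uint8Array returned by TextEncoder into a plain number array
var bytes = Array.from(new TextEncoder().encode("€"));
console.log(bytes); // [226, 130, 172]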
While TextEncoder can only encode to UTF-8, TextDecoder supports other encodings. I used it to decode Japanese text (Shift-JIS) in this way:
// Shift-JIS encoded text; must be a byte array due to values 129 and 130.
var arr = [130, 108, 130, 102, 130, 80, 129, 64, 130, 102, 130, 96, 130, 108, 130, 100,
129, 64, 130, 99, 130, 96, 130, 115, 130, 96, 129, 124, 130, 79, 130, 80];
// Convert to byte array
var data = new Uint8Array(arr);
// Decode with TextDecoder
var decoded = new TextDecoder("shift-jis").decode(data.buffer);
console.log(decoded);

The Google Closure library has functions to convert to/from UTF-8 and byte arrays. If you don't want to use the whole library, you can copy the functions from here. For completeness, the code to convert a string to a UTF-8 byte array is:
goog.crypt.stringToUtf8ByteArray = function(str) {
  // TODO(user): Use native implementations if/when available
  var out = [], p = 0;
  for (var i = 0; i < str.length; i++) {
    var c = str.charCodeAt(i);
    if (c < 128) {
      out[p++] = c;
    } else if (c < 2048) {
      out[p++] = (c >> 6) | 192;
      out[p++] = (c & 63) | 128;
    } else if (
        ((c & 0xFC00) == 0xD800) && (i + 1) < str.length &&
        ((str.charCodeAt(i + 1) & 0xFC00) == 0xDC00)) {
      // Surrogate Pair
      c = 0x10000 + ((c & 0x03FF) << 10) + (str.charCodeAt(++i) & 0x03FF);
      out[p++] = (c >> 18) | 240;
      out[p++] = ((c >> 12) & 63) | 128;
      out[p++] = ((c >> 6) & 63) | 128;
      out[p++] = (c & 63) | 128;
    } else {
      out[p++] = (c >> 12) | 224;
      out[p++] = ((c >> 6) & 63) | 128;
      out[p++] = (c & 63) | 128;
    }
  }
  return out;
};
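A quick usage sketch, assuming the Closure library is loaded; the expected bytes follow from the euro sign's three-byte UTF-8 encoding:
// '€' (U+20AC) is a three-byte character in UTF-8
console.log(goog.crypt.stringToUtf8ByteArray('€')); // [226, 130, 172]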

Assuming the question is about a DOMString as input and the goal is to get an Array that, when interpreted as a string (e.g. written to a file on disk), would be UTF-8 encoded:
Now that nearly all modern browsers support Typed Arrays, it'd be a shame if this approach were not listed:
According to the W3C, software supporting the File API should accept DOMStrings in their Blob constructor (see also: String encoding when constructing a Blob)
Blobs can be converted to an ArrayBuffer using the .readAsArrayBuffer() method of a FileReader
Using a DataView or constructing a Typed Array with the buffer read by the FileReader, one can access every single byte of the ArrayBuffer
Example:
// Create a Blob with a Euro char (U+20AC)
var b = new Blob(['€']);
var fr = new FileReader();

fr.onload = function() {
    var ua = new Uint8Array(fr.result);
    // This will log "3|226|130|172"
    // E2 82 AC
    // In UTF-16, it would be only 2 bytes long
    console.log(
        fr.result.byteLength + '|' +
        ua[0] + '|' +
        ua[1] + '|' +
        ua[2] + ''
    );
};
fr.readAsArrayBuffer(b);
Play with that on JSFiddle. I haven't benchmarked this yet but I can imagine this being efficient for large DOMStrings as input.

You can save a string raw as-is by using FileReader.
Save the string in a Blob and call readAsArrayBuffer(). The onload event then delivers an ArrayBuffer, which can be converted into a Uint8Array.
Unfortunately this call is asynchronous.
This little function will help you:
function stringToBytes(str) {
    let reader = new FileReader();
    let done = () => {};
    reader.onload = event => {
        done(new Uint8Array(event.target.result), str);
    };
    reader.readAsArrayBuffer(new Blob([str], { type: "application/octet-stream" }));
    return { done: callback => { done = callback; } };
}
Call it like this:
stringToBytes("\u{1f4a9}").done(bytes =>
{
console.log(bytes);
});
output: [240, 159, 146, 169]
explanation:
JavaScript uses UTF-16 and surrogate pairs to store Unicode characters in memory. To store Unicode characters in a raw binary byte stream, an encoding is necessary.
Usually, and in most cases, UTF-8 is used for this. Without an encoding you can't store Unicode characters, just ASCII up to 0x7F.
The Blob constructor encodes a DOMString as UTF-8, so FileReader.readAsArrayBuffer() yields UTF-8 bytes.
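A minimal Promise-based sketch of the same idea (the function name here is made up for illustration):
function stringToBytesAsync(str) {
    return new Promise(resolve => {
        const reader = new FileReader();
        // The Blob constructor encodes the DOMString as UTF-8
        reader.onload = () => resolve(new Uint8Array(reader.result));
        reader.readAsArrayBuffer(new Blob([str]));
    });
}

stringToBytesAsync("\u{1f4a9}").then(bytes => console.log(bytes)); // Uint8Array [240, 159, 146, 169]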

As there is no pure byte type in JavaScript, we can represent a byte array as an array of numbers, where each number represents a byte and thus has an integer value between 0 and 255 inclusive.
Here is a simple function that converts a JavaScript string into an Array of numbers containing the UTF-8 encoding of the string:
function toUtf8(str) {
    var value = [];
    var destIndex = 0;
    for (var index = 0; index < str.length; index++) {
        var code = str.charCodeAt(index);
        if (code <= 0x7F) {
            value[destIndex++] = code;
        } else if (code <= 0x7FF) {
            value[destIndex++] = ((code >> 6) & 0x1F) | 0xC0;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0xFFFF) {
            value[destIndex++] = ((code >> 12) & 0x0F) | 0xE0;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0x1FFFFF) {
            value[destIndex++] = ((code >> 18) & 0x07) | 0xF0;
            value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0x03FFFFFF) {
            value[destIndex++] = ((code >> 24) & 0x03) | 0xF0;
            value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0x7FFFFFFF) {
            value[destIndex++] = ((code >> 30) & 0x01) | 0xFC;
            value[destIndex++] = ((code >> 24) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else {
            throw new Error("Unsupported Unicode character \""
                + str.charAt(index) + "\" with code " + code + " (binary: "
                + toBinary(code) + ") at index " + index
                + ". Cannot represent it as UTF-8 byte sequence.");
        }
    }
    return value;
}

function toBinary(byteValue) {
    if (byteValue < 0) {
        byteValue = byteValue & 0x00FF;
    }
    var str = byteValue.toString(2);
    var len = str.length;
    var prefix = "";
    for (var i = len; i < 8; i++) {
        prefix += "0";
    }
    return prefix + str;
}
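A quick usage sketch; the expected output is simply the UTF-8 encoding of the sample string:
// "ö" and "ß" each take two UTF-8 bytes, the ASCII letters take one
console.log(toUtf8("Größe")); // [71, 114, 195, 182, 195, 159, 101]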

I was using Joni's solution and it worked fine, but this one is much shorter.
This was inspired by the atobUTF16() function of Solution #3 of Mozilla's Base64 Unicode discussion
function convertStringToUTF8ByteArray(str) {
    let binaryArray = new Uint8Array(str.length)
    Array.prototype.forEach.call(binaryArray, function (el, idx, arr) { arr[idx] = str.charCodeAt(idx) })
    return binaryArray
}

function convertByte() {
    var c = document.getElementById("str").value;
    var arr = [];
    var i = 0;
    for (var ind = 0; ind < c.length; ind++) {
        arr[ind] = c.charCodeAt(i);
        i++;
    }
    document.getElementById("result").innerHTML = "The converted value is " + arr.join("");
}

Related

Is function computeHmacSha256Signature(value, key) broken?

I am trying to sign the value eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZSI6InRzIiwibm9uY2UiOiI4MTZkMWVmNi0zYjNlLTQ1MmEtOWM5Yi0xNDYyZWIzNWZlNzUiLCJpc3MiOiIwYzE3MjYwNDEwMjhkMTI5ZGI3YjU4NzUzYzU2OTYwYyIsImV4cCI6MTU4ODUyOTE3OCwibmJmIjoxNTg4NTI4ODE4LCJpYXQiOjE1ODg1Mjg4MTgsImp0aSI6IjE2ZGExZGZiLTkyYjQtNDI0ZS04ZTU5LWIyNzZmYmQ3MWVkYSIsInJlZ2lvbiI6Im55IiwibWV0aG9kIjoiR0VUIiwicGF0aCI6Ii9hdXRoZW50aWNhdGUvY29ubmVjdC9kZXZpY2UiLCJob3N0IjoiaHR0cHM6Ly9hcGkuYmxvb21iZXJnLmNvbSIsImNsaWVudF9pZCI6IjBjMTcyNjA0MTAyOGQxMjlkYjdiNTg3NTNjNTY5NjBjIn0 with key a1b2c3a4b5c6. The key is hexadecimal.
This should generate signature 8Wspda1l2Z3-hLwvMI_5Q8AQic59oclZAav7kWVtGHw. This signature is provided from my service provider as a sample.
I tried the following:
var signature = Utilities.computeHmacSha256Signature("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZSI6InRzIiwibm9uY2UiOiI4MTZkMWVmNi0zYjNlLTQ1MmEtOWM5Yi0xNDYyZWIzNWZlNzUiLCJpc3MiOiIwYzE3MjYwNDEwMjhkMTI5ZGI3YjU4NzUzYzU2OTYwYyIsImV4cCI6MTU4ODUyOTE3OCwibmJmIjoxNTg4NTI4ODE4LCJpYXQiOjE1ODg1Mjg4MTgsImp0aSI6IjE2ZGExZGZiLTkyYjQtNDI0ZS04ZTU5LWIyNzZmYmQ3MWVkYSIsInJlZ2lvbiI6Im55IiwibWV0aG9kIjoiR0VUIiwicGF0aCI6Ii9hdXRoZW50aWNhdGUvY29ubmVjdC9kZXZpY2UiLCJob3N0IjoiaHR0cHM6Ly9hcGkuYmxvb21iZXJnLmNvbSIsImNsaWVudF9pZCI6IjBjMTcyNjA0MTAyOGQxMjlkYjdiNTg3NTNjNTY5NjBjIn0", "a1b2c3a4b5c6");
var encodedSignature = Utilities.base64EncodeWebSafe(signature).replace(/=+$/, '');
but I got a different result: vnshuAi4GtWTZ3VxmPgLzJ--V18mm-r4cBUNA0FIvTs.
Since computeHmacSha256Signature(value, key) also allows the input to be a pair of byte[] (in addition to a pair of strings), I tried the following:
function toUTF8Array(str) {
    var utf8 = [];
    for (var i = 0; i < str.length; i++) {
        var charcode = str.charCodeAt(i);
        if (charcode < 0x80) utf8.push(charcode);
        else if (charcode < 0x800) {
            utf8.push(0xc0 | (charcode >> 6),
                      0x80 | (charcode & 0x3f));
        }
        else if (charcode < 0xd800 || charcode >= 0xe000) {
            utf8.push(0xe0 | (charcode >> 12),
                      0x80 | ((charcode >> 6) & 0x3f),
                      0x80 | (charcode & 0x3f));
        }
        // surrogate pair
        else {
            i++;
            // UTF-16 encodes 0x10000-0x10FFFF by
            // subtracting 0x10000 and splitting the
            // 20 bits of 0x0-0xFFFFF into two halves
            charcode = 0x10000 + (((charcode & 0x3ff) << 10)
                      | (str.charCodeAt(i) & 0x3ff));
            utf8.push(0xf0 | (charcode >> 18),
                      0x80 | ((charcode >> 12) & 0x3f),
                      0x80 | ((charcode >> 6) & 0x3f),
                      0x80 | (charcode & 0x3f));
        }
    }
    return utf8;
}
function resecret(s) {
    var bin_secret = "";
    var duo = "";
    var new_secret_array = [];
    var j = 0;
    var len = s.length;
    var element = 0;
    for (var i = 0; i < len; i++) {
        duo = s.charAt(i) + s.charAt(i + 1);
        element = parseInt(duo, 16);
        // element = "0x" + duo;
        bin_secret = bin_secret + element;
        new_secret_array[j] = element;
        j++;
        i++;
    }
    return new_secret_array;
}
function test() {
var value="eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzY29wZSI6InRzIiwibm9uY2UiOiI4MTZkMWVmNi0zYjNlLTQ1MmEtOWM5Yi0xNDYyZWIzNWZlNzUiLCJpc3MiOiIwYzE3MjYwNDEwMjhkMTI5ZGI3YjU4NzUzYzU2OTYwYyIsImV4cCI6MTU4ODUyOTE3OCwibmJmIjoxNTg4NTI4ODE4LCJpYXQiOjE1ODg1Mjg4MTgsImp0aSI6IjE2ZGExZGZiLTkyYjQtNDI0ZS04ZTU5LWIyNzZmYmQ3MWVkYSIsInJlZ2lvbiI6Im55IiwibWV0aG9kIjoiR0VUIiwicGF0aCI6Ii9hdXRoZW50aWNhdGUvY29ubmVjdC9kZXZpY2UiLCJob3N0IjoiaHR0cHM6Ly9hcGkuYmxvb21iZXJnLmNvbSIsImNsaWVudF9pZCI6IjBjMTcyNjA0MTAyOGQxMjlkYjdiNTg3NTNjNTY5NjBjIn0";
var key = "a1b2c3a4b5c6";
var signature = Utilities.computeHmacSha256Signature(toUTF8Array(value),resecret(key));
}
This time, I get an error:
Cannot convert Array to (class)[].
What am I doing wrong here? Has anyone been able to run computeHmacSha256Signature(value, key) using a byte array?
How do I get the signature 8Wspda1l2Z3-hLwvMI_5Q8AQic59oclZAav7kWVtGHw?

How to convert a UTF16 file into a UTF8 file in nodejs

I have an xml file encoded in UTF16, and I would like to convert it to UTF8 in order to process it. If I use this command:
iconv -f UTF-16 -t UTF-8 file.xml > converted_file.xml
The file is converted correctly and I'm able to process it. I want to do the same in nodejs.
Currently I have a buffer of my file and I've tried everything I could think of and everything I could find on the internet, but without success.
Here are some examples of what I've tried so far:
content = new Buffer((new Buffer(content, 'ucs2')).toString('utf8'));
I've also tried using those functions:
http://jonisalonen.com/2012/from-utf-16-to-utf-8-in-javascript/
https://stackoverflow.com/a/14601808/1405208
The first one doesn't change anything and the links only give me Chinese characters.
var content = fs.readFileSync('myfile.xml', {encoding:'ucs2'});
fs.writeFileSync('myfile.xml', content, {encoding:'utf8'});
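A Buffer-based sketch of the same conversion, assuming the input is little-endian UTF-16 with an optional BOM (the file names are placeholders):
const fs = require('fs');

// Decode the UTF-16LE bytes into a JS string, dropping a leading BOM if present
const utf16Buffer = fs.readFileSync('myfile.xml');
const text = utf16Buffer.toString('utf16le').replace(/^\uFEFF/, '');

// Re-encode as UTF-8 and write it back out
fs.writeFileSync('converted_file.xml', Buffer.from(text, 'utf8'));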
While the answer above is the best answer for the question asked, I'm hoping that this answer will help some folks who need to read a file as a binary string:
const reader = new FileReader();
reader.readAsBinaryString(this.fileToImport);
In my case the file was in utf-16 and I tried to read it into XLSX:
const wb = XLSX.read(bstr, { type: "binary" });
Combining both links from above, I first removed the first two chars that signaled it was UTF-16 (the 0xFFFE byte order mark),
then used this link to create the right numbers (though I think it actually produces UTF-7 encoding):
https://stackoverflow.com/a/14601808/1405208
Lastly, I applied the second link to get the right set of UTF-8 numbers: https://stackoverflow.com/a/14601808/1405208
The Code that I ended up with:
decodeUTF16LE(binaryStr) {
    // Bail out if there is no UTF-16LE BOM (0xFF 0xFE)
    if (binaryStr.charCodeAt(0) != 255 || binaryStr.charCodeAt(1) != 254) {
        return binaryStr;
    }
    const utf8 = [];
    for (var i = 2; i < binaryStr.length; i += 2) {
        let charcode = binaryStr.charCodeAt(i) | (binaryStr.charCodeAt(i + 1) << 8);
        if (charcode < 0x80) utf8.push(charcode);
        else if (charcode < 0x800) {
            utf8.push(0xc0 | (charcode >> 6), 0x80 | (charcode & 0x3f));
        } else if (charcode < 0xd800 || charcode >= 0xe000) {
            utf8.push(0xe0 | (charcode >> 12), 0x80 | ((charcode >> 6) & 0x3f), 0x80 | (charcode & 0x3f));
        }
        // surrogate pair
        else {
            // UTF-16 encodes 0x10000-0x10FFFF by subtracting 0x10000 and
            // splitting the 20 bits of 0x0-0xFFFFF into two halves;
            // the low surrogate sits in the next two bytes
            i += 2;
            const low = binaryStr.charCodeAt(i) | (binaryStr.charCodeAt(i + 1) << 8);
            charcode = 0x10000 + (((charcode & 0x3ff) << 10) | (low & 0x3ff));
            utf8.push(
                0xf0 | (charcode >> 18),
                0x80 | ((charcode >> 12) & 0x3f),
                0x80 | ((charcode >> 6) & 0x3f),
                0x80 | (charcode & 0x3f)
            );
        }
    }
    return String.fromCharCode.apply(String, utf8);
},

Decode UTF-8 with Javascript

I have Javascript in an XHTML web page that is passing UTF-8 encoded strings. It needs to continue to pass the UTF-8 version, as well as decode it. How is it possible to decode a UTF-8 string for display?
<script type="text/javascript">
// <![CDATA[
function updateUser(usernameSent){
var usernameReceived = usernameSent; // Current value: Größe
var usernameDecoded = usernameReceived; // Decode to: Größe
var html2id = '';
html2id += 'Encoded: ' + usernameReceived + '<br />Decoded: ' + usernameDecoded;
document.getElementById('userId').innerHTML = html2id;
}
// ]]>
</script>
To answer the original question: here is how you decode UTF-8 in JavaScript:
http://ecmanaut.blogspot.ca/2006/07/encoding-decoding-utf8-in-javascript.html
Specifically,
function encode_utf8(s) {
    return unescape(encodeURIComponent(s));
}

function decode_utf8(s) {
    return decodeURIComponent(escape(s));
}
We have been using this in our production code for 6 years, and it has worked flawlessly.
Note, however, that escape() and unescape() are deprecated. See this.
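Since escape()/unescape() are deprecated, here is a rough equivalent built on the Encoding API (assuming the input byte string holds one byte per code unit; the function names are made up):
function decode_utf8_modern(s) {
    // Turn the byte string back into actual bytes, then decode them as UTF-8
    return new TextDecoder().decode(Uint8Array.from(s, c => c.charCodeAt(0)));
}

function encode_utf8_modern(s) {
    // Encode as UTF-8, then represent each byte as one code unit
    return String.fromCharCode(...new TextEncoder().encode(s));
}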
This should work:
// http://www.onicos.com/staff/iz/amuse/javascript/expert/utf.txt
/* utf.js - UTF-8 <=> UTF-16 convertion
*
* Copyright (C) 1999 Masanao Izumo <iz#onicos.co.jp>
* Version: 1.0
* LastModified: Dec 25 1999
* This library is free. You can redistribute it and/or modify it.
*/
function Utf8ArrayToStr(array) {
    var out, i, len, c;
    var char2, char3;

    out = "";
    len = array.length;
    i = 0;
    while (i < len) {
        c = array[i++];
        switch (c >> 4) {
            case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
                // 0xxxxxxx
                out += String.fromCharCode(c);
                break;
            case 12: case 13:
                // 110x xxxx 10xx xxxx
                char2 = array[i++];
                out += String.fromCharCode(((c & 0x1F) << 6) | (char2 & 0x3F));
                break;
            case 14:
                // 1110 xxxx 10xx xxxx 10xx xxxx
                char2 = array[i++];
                char3 = array[i++];
                out += String.fromCharCode(((c & 0x0F) << 12) |
                                           ((char2 & 0x3F) << 6) |
                                           ((char3 & 0x3F) << 0));
                break;
        }
    }
    return out;
}
Check out the JSFiddle demo.
Also see the related questions: here and here
Perhaps using TextDecoder will be sufficient.
It's not supported in IE, though.
var decoder = new TextDecoder('utf-8'),
    decodedMessage;

decodedMessage = decoder.decode(message.data);
Handling non-UTF8 text
In this example, we decode the Russian text "Привет, мир!", which means "Hello, world." In our TextDecoder() constructor, we specify the Windows-1251 character encoding, which is appropriate for Cyrillic script.
let win1251decoder = new TextDecoder('windows-1251');
let bytes = new Uint8Array([207, 240, 232, 226, 229, 242, 44, 32, 236, 232, 240, 33]);
console.log(win1251decoder.decode(bytes)); // Привет, мир!
The interface for the TextDecoder is described here.
Retrieving a byte array from a string is equally simple:
const decoder = new TextDecoder();
const encoder = new TextEncoder();
const byteArray = encoder.encode('Größe');
// converted it to a byte array
// now we can decode it back to a string if desired
console.log(decoder.decode(byteArray));
If you have it in a different encoding then you must compensate for that upon encoding.
The parameter in the constructor for the TextEncoder is any one of the valid encodings listed here.
Updating @Albert's answer, adding a condition for emoji:
function Utf8ArrayToStr(array) {
    var out, i, len, c;
    var char2, char3, char4;

    out = "";
    len = array.length;
    i = 0;
    while (i < len) {
        c = array[i++];
        switch (c >> 4) {
            case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
                // 0xxxxxxx
                out += String.fromCharCode(c);
                break;
            case 12: case 13:
                // 110x xxxx 10xx xxxx
                char2 = array[i++];
                out += String.fromCharCode(((c & 0x1F) << 6) | (char2 & 0x3F));
                break;
            case 14:
                // 1110 xxxx 10xx xxxx 10xx xxxx
                char2 = array[i++];
                char3 = array[i++];
                out += String.fromCharCode(((c & 0x0F) << 12) |
                                           ((char2 & 0x3F) << 6) |
                                           ((char3 & 0x3F) << 0));
                break;
            case 15:
                // 1111 0xxx 10xx xxxx 10xx xxxx 10xx xxxx
                char2 = array[i++];
                char3 = array[i++];
                char4 = array[i++];
                out += String.fromCodePoint(((c & 0x07) << 18) | ((char2 & 0x3F) << 12) | ((char3 & 0x3F) << 6) | (char4 & 0x3F));
                break;
        }
    }
    return out;
}
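A quick check with the 4-byte sequence produced earlier in this thread for U+1F4A9:
console.log(Utf8ArrayToStr([240, 159, 146, 169])); // "💩"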
Here is a solution handling all Unicode code points, including upper (4-byte) values, and supported by all modern browsers (IE and others > 5.5). It uses decodeURIComponent(), but NOT the deprecated escape/unescape functions:
function utf8_to_str(a) {
    for (var i = 0, s = ''; i < a.length; i++) {
        var h = a[i].toString(16)
        if (h.length < 2) h = '0' + h
        s += '%' + h
    }
    return decodeURIComponent(s)
}
Tested and available on GitHub
To create UTF-8 from a string:
function utf8_from_str(s) {
    for (var i = 0, enc = encodeURIComponent(s), a = []; i < enc.length;) {
        if (enc[i] === '%') {
            a.push(parseInt(enc.substr(i + 1, 2), 16))
            i += 3
        } else {
            a.push(enc.charCodeAt(i++))
        }
    }
    return a
}
Tested and available on GitHub
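A round-trip sketch using the two functions above; the byte values follow from the euro sign's UTF-8 encoding:
var bytes = utf8_from_str('€');   // [226, 130, 172]
console.log(utf8_to_str(bytes));  // "€"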
This is what I found after a more specific Google search than just UTF-8 encode/decode. So for those who are looking for a conversion library to convert between encodings, here you go.
https://github.com/inexorabletash/text-encoding
var uint8array = new TextEncoder().encode(str);
var str = new TextDecoder(encoding).decode(uint8array);
Pasted from the repo readme:
All encodings from the Encoding specification are supported:
utf-8 ibm866 iso-8859-2 iso-8859-3 iso-8859-4 iso-8859-5 iso-8859-6 iso-8859-7 iso-8859-8 iso-8859-8-i iso-8859-10 iso-8859-13 iso-8859-14 iso-8859-15 iso-8859-16 koi8-r koi8-u macintosh windows-874 windows-1250 windows-1251 windows-1252 windows-1253 windows-1254 windows-1255 windows-1256 windows-1257 windows-1258 x-mac-cyrillic gb18030 hz-gb-2312 big5 euc-jp iso-2022-jp shift_jis euc-kr replacement utf-16be utf-16le x-user-defined
(Some encodings may be supported under other names, e.g. ascii, iso-8859-1, etc. See Encoding for additional labels for each encoding.)
@Albert's solution was the closest, I think, but it can only parse up to 3-byte UTF-8 characters.
function utf8ArrayToStr(array) {
    var out, i, len, c;
    var char2, char3;

    out = "";
    len = array.length;
    i = 0;

    // XXX: Invalid bytes are ignored
    while (i < len) {
        c = array[i++];
        if (c >> 7 == 0) {
            // 0xxx xxxx
            out += String.fromCharCode(c);
            continue;
        }

        // Invalid starting byte
        if (c >> 6 == 0x02) {
            continue;
        }

        // #### MULTIBYTE ####
        // How many bytes left for this character?
        var extraLength = null;
        if (c >> 5 == 0x06) {
            extraLength = 1;
        } else if (c >> 4 == 0x0e) {
            extraLength = 2;
        } else if (c >> 3 == 0x1e) {
            extraLength = 3;
        } else if (c >> 2 == 0x3e) {
            extraLength = 4;
        } else if (c >> 1 == 0x7e) {
            extraLength = 5;
        } else {
            continue;
        }

        // Do we have enough bytes in our data?
        if (i + extraLength > len) {
            var leftovers = array.slice(i - 1);

            // If there is an invalid byte in the leftovers we might want to
            // continue from there.
            for (; i < len; i++) if (array[i] >> 6 != 0x02) break;
            if (i != len) continue;

            // All leftover bytes are valid.
            return {result: out, leftovers: leftovers};
        }

        // Remove the UTF-8 prefix from the char (res)
        var mask = (1 << (8 - extraLength - 1)) - 1,
            res = c & mask, nextChar, count;

        for (count = 0; count < extraLength; count++) {
            nextChar = array[i++];

            // Is the char valid multibyte part?
            if (nextChar >> 6 != 0x02) { break; }
            res = (res << 6) | (nextChar & 0x3f);
        }

        if (count != extraLength) {
            i--;
            continue;
        }

        if (res <= 0xffff) {
            out += String.fromCharCode(res);
            continue;
        }

        res -= 0x10000;
        var high = ((res >> 10) & 0x3ff) + 0xd800,
            low = (res & 0x3ff) + 0xdc00;

        out += String.fromCharCode(high, low);
    }

    return {result: out, leftovers: []};
}
This returns {result: "parsed string", leftovers: [list of invalid bytes at the end]} in case you are parsing the string in chunks.
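A sketch of how the leftovers could be carried between chunks (the chunk handling and names here are assumptions, not part of the original answer):
var pending = [];
var text = "";

function onChunk(chunkBytes) {
    // Prepend bytes left over from the previous chunk before parsing
    var parsed = utf8ArrayToStr(pending.concat(Array.from(chunkBytes)));
    text += parsed.result;
    pending = parsed.leftovers;
}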
EDIT: fixed the issue that @unhammer found.
// String to UTF-8 ByteBuffer
function strToUTF8(str) {
    return Uint8Array.from(encodeURIComponent(str).replace(/%(..)/g, (m, v) => String.fromCodePoint(parseInt(v, 16))), c => c.codePointAt(0))
}

// UTF-8 ByteArray to string
function UTF8toStr(ba) {
    // Pad each byte to two hex digits so decodeURIComponent always gets valid %XX escapes
    return decodeURIComponent(ba.reduce((p, c) => p + '%' + c.toString(16).padStart(2, '0'), ''))
}
Using my 1.6KB library, you can do
ToString(FromUTF8(Array.from(usernameReceived)))
This is a solution with extensive error reporting.
It takes a UTF-8 encoded byte array (where the byte array is represented as an
array of numbers, each an integer between 0 and 255 inclusive)
and produces a JavaScript string of Unicode characters.
function getNextByte(value, startByteIndex, startBitsStr, additional, index) {
    if (index >= value.length) {
        var startByte = value[startByteIndex];
        throw new Error("Invalid UTF-8 sequence. Byte " + startByteIndex
            + " with value " + startByte + " (" + String.fromCharCode(startByte)
            + "; binary: " + toBinary(startByte)
            + ") starts with " + startBitsStr + " in binary and thus requires "
            + additional + " bytes after it, but we only have "
            + (value.length - startByteIndex) + ".");
    }
    var byteValue = value[index];
    checkNextByteFormat(value, startByteIndex, startBitsStr, additional, index);
    return byteValue;
}

function checkNextByteFormat(value, startByteIndex, startBitsStr, additional, index) {
    if ((value[index] & 0xC0) != 0x80) {
        var startByte = value[startByteIndex];
        var wrongByte = value[index];
        throw new Error("Invalid UTF-8 byte sequence. Byte " + startByteIndex
            + " with value " + startByte + " (" + String.fromCharCode(startByte)
            + "; binary: " + toBinary(startByte) + ") starts with "
            + startBitsStr + " in binary and thus requires " + additional
            + " additional bytes, each of which should start with 10 in binary."
            + " However byte " + (index - startByteIndex)
            + " after it with value " + wrongByte + " ("
            + String.fromCharCode(wrongByte) + "; binary: " + toBinary(wrongByte)
            + ") does not start with 10 in binary.");
    }
}
function fromUtf8(str) {
    var value = [];
    var destIndex = 0;
    for (var index = 0; index < str.length; index++) {
        var code = str.charCodeAt(index);
        if (code <= 0x7F) {
            value[destIndex++] = code;
        } else if (code <= 0x7FF) {
            value[destIndex++] = ((code >> 6) & 0x1F) | 0xC0;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0xFFFF) {
            value[destIndex++] = ((code >> 12) & 0x0F) | 0xE0;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0x1FFFFF) {
            value[destIndex++] = ((code >> 18) & 0x07) | 0xF0;
            value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0x03FFFFFF) {
            value[destIndex++] = ((code >> 24) & 0x03) | 0xF0;
            value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else if (code <= 0x7FFFFFFF) {
            value[destIndex++] = ((code >> 30) & 0x01) | 0xFC;
            value[destIndex++] = ((code >> 24) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 6) & 0x3F) | 0x80;
            value[destIndex++] = ((code >> 0) & 0x3F) | 0x80;
        } else {
            throw new Error("Unsupported Unicode character \""
                + str.charAt(index) + "\" with code " + code + " (binary: "
                + toBinary(code) + ") at index " + index
                + ". Cannot represent it as UTF-8 byte sequence.");
        }
    }
    return value;
}
You can use decodeURI for it.
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURI
As simple as this:
decodeURI('https://developer.mozilla.org/ru/docs/JavaScript_%D1%88%D0%B5%D0%BB%D0%BB%D1%8B');
// "https://developer.mozilla.org/ru/docs/JavaScript_шеллы"
Consider using it inside a try/catch block so you don't miss a URIError.
It also has full browser support.
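A small sketch of that try/catch guard (the malformed input mentioned in the comment is only an illustration):
try {
    console.log(decodeURI('JavaScript_%D1%88%D0%B5%D0%BB%D0%BB%D1%8B')); // "JavaScript_шеллы"
} catch (e) {
    if (e instanceof URIError) {
        console.log('Malformed URI sequence'); // thrown e.g. for a truncated escape like '%D1%8'
    }
}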
const decoder = new TextDecoder();
console.log(decoder.decode(new Uint8Array([97])));
MDN resource link
I reckon the easiest way would be to use the built-in JS functions decodeURI() / encodeURI().
function (usernameSent) {
    var usernameEncoded = usernameSent;               // Current value: utf8
    var usernameDecoded = decodeURI(usernameEncoded); // Decoded
    // do stuff
}
I searched for a simple solution and this works well for me:
// input data
var view = new Uint8Array(data);
// output string
var serialString = ua2text(view);

// convert UTF8 to string
function ua2text(ua) {
    var s = "";
    for (var i = 0; i < ua.length; i++) {
        s += String.fromCharCode(ua[i]);
    }
    return s;
}
The only issue I have is that sometimes I get one character at a time. This might be by design with my source of the ArrayBuffer. I'm using https://github.com/xseignard/cordovarduino to read serial data on an Android device.
Preferably, as others have suggested, use the Encoding API. But if you need to support IE (for some strange reason), MDN recommends this repo: FastestSmallestTextEncoderDecoder
If you need to make use of the polyfill library:
import {encode, decode} from "fastestsmallesttextencoderdecoder";
Then (regardless of the polyfill) for encoding and decoding:
// takes in USVString and returns a Uint8Array object
const encoded = new TextEncoder().encode('€')
console.log(encoded);
// takes in an ArrayBuffer or an ArrayBufferView and returns a DOMString
const decoded = new TextDecoder().decode(encoded);
console.log(decoded);

Counterpart to Python's chr() in JavaScript

The JavaScript method String.fromCharCode() behaves equivalently to Python's unichr() in the following sense:
print unichr(213) # prints Õ on the console
console.log(String.fromCharCode(213)); // prints Õ on the console as well
For my purposes, however, I need a JavaScript equivalent to the Python function chr(). Is there such a JavaScript function or a way to make String.fromCharCode() behave like chr()?
That is, I need something in JavaScript that mimics
print chr(213) # prints � on the console
So it turns out you just want to work with raw bytes in node.js; there's a module for that. If you are a real wizard, you can get this stuff to work with JavaScript strings alone, but it's harder and far less efficient.
var b = new Buffer(1);
b[0] = 213;
console.log(b.toString()); //�
var b = new Buffer(3);
b[0] = 0xE2;
b[1] = 0x98;
b[2] = 0x85;
console.log(b.toString()); //★
print chr(213) # prints � on the console
So this prints a raw byte (0xD5) that is then interpreted (most likely) as UTF-8; it is not a valid UTF-8 byte sequence and is therefore displayed as the replacement character (�).
The interpretation as UTF-8 is not relevant here; you most likely just want raw bytes.
To create raw bytes in JavaScript you could use Uint8Array.
var a = new Uint8Array(1);
a[0] = 213;
You could optionally then interpret the raw bytes as utf-8:
console.log( utf8decode(a)); // "�"
//Not recommended for production use ;D
//Doesn't handle > BMP to keep the answer shorter
function utf8decode(uint8array) {
    var codePoints = [],
        i = 0,
        byte, codePoint,
        len = uint8array.length;
    for (i = 0; i < len; ++i) {
        byte = uint8array[i];
        if ((byte & 0xF8) === 0xF0 && len > i + 3) {
            codePoint = ((byte & 0x7) << 18) | ((uint8array[++i] & 0x3F) << 12) | ((uint8array[++i] & 0x3F) << 6) | (uint8array[++i] & 0x3F);
            if (!(0xFFFF < codePoint && codePoint <= 0x10FFFF)) {
                codePoints.push(0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD);
            } else {
                codePoints.push(codePoint);
            }
        } else if ((byte & 0xF0) === 0xE0 && len > i + 2) {
            codePoint = ((byte & 0xF) << 12) | ((uint8array[++i] & 0x3F) << 6) | (uint8array[++i] & 0x3F);
            if (!(0x7FF < codePoint && codePoint <= 0xFFFF)) {
                codePoints.push(0xFFFD, 0xFFFD, 0xFFFD);
            } else {
                codePoints.push(codePoint);
            }
        } else if ((byte & 0xE0) === 0xC0 && len > i + 1) {
            codePoint = ((byte & 0x1F) << 6) | ((uint8array[++i] & 0x3F));
            if (!(0x7F < codePoint && codePoint <= 0x7FF)) {
                codePoints.push(0xFFFD, 0xFFFD);
            } else {
                codePoints.push(codePoint);
            }
        } else if ((byte & 0x80) === 0x00) {
            codePoints.push(byte & 0x7F);
        } else {
            codePoints.push(0xFFFD);
        }
    }
    return String.fromCharCode.apply(String, codePoints);
}
What you are most likely trying to do has nothing to do with trying to interpret the bytes as utf8 though.
Another example:
//UTF-8 For the black star U+2605 ★:
var a = new Uint8Array(3);
a[0] = 0xE2;
a[1] = 0x98;
a[2] = 0x85;
utf8decode(a) === String.fromCharCode(0x2605) //True
utf8decode(a) // ★
In python 2.7 (Ubuntu):
print chr(0xE2) + chr(0x98) + chr(0x85)
#prints ★
If you want this "Questionmark in a box" for every number that is not in the standard ASCII table, how about this little function?
function chr(c) {
return (c < 0 || c > 126) ? '�' : String.fromCharCode(c);
}

Javascript Base64 encoding UTF8 string fails in webkit/safari

I'm trying to base64 encode a utf8 string containing Thai characters. I'm using the browser's built in btoa function. It works for ascii text, however Thai is causing it to throw a INVALID_CHARACTER_ERR: DOM Exception 5 exception.
Here's a sample that fails (the character that looks like an "n" is Thai)
btoa('aก')
What do I need to do to base64 encode non-ascii strings?
var Base64 = {
    encode: function(s) {
        return btoa(unescape(encodeURIComponent(s)));
    },
    decode: function(s) {
        return decodeURIComponent(escape(atob(s)));
    }
};
Unfortunately btoa/atob aren't specified in any standard, but the implementations in firefox and webkit both fail on multibyte characters so even if they were now specified those builtin functions would not be able to support multibyte characters (as the input and output strings would necessarily change).
It would seem your only option would be to roll your own base64 encode+decode routines
Check this workaround:
http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html
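In modern browsers, a sketch that avoids the deprecated escape/unescape pair (assuming the Encoding API is available; the helper name is made up):
function base64EncodeUnicode(str) {
    // UTF-8 encode first, then map each byte to a code unit btoa can handle
    var bytes = new TextEncoder().encode(str);
    return btoa(String.fromCharCode(...bytes));
}

console.log(base64EncodeUnicode('aก')); // "YeC4gQ=="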
I know this is old, but I was recently looking for a UTF8-to-Base64 encoder as well. I found a handy little script at http://www.webtoolkit.info/javascript-base64.html, and a performance improved version at http://jsbase64.codeplex.com/.
Here is the script:
var B64 = {
    alphabet: 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=',
    lookup: null,
    ie: /MSIE /.test(navigator.userAgent),
    ieo: /MSIE [67]/.test(navigator.userAgent),
    encode: function (s) {
        var buffer = B64.toUtf8(s),
            position = -1,
            len = buffer.length,
            nan0, nan1, nan2, enc = [, , , ];
        if (B64.ie) {
            var result = [];
            while (++position < len) {
                nan0 = buffer[position];
                nan1 = buffer[++position];
                enc[0] = nan0 >> 2;
                enc[1] = ((nan0 & 3) << 4) | (nan1 >> 4);
                if (isNaN(nan1))
                    enc[2] = enc[3] = 64;
                else {
                    nan2 = buffer[++position];
                    enc[2] = ((nan1 & 15) << 2) | (nan2 >> 6);
                    enc[3] = (isNaN(nan2)) ? 64 : nan2 & 63;
                }
                result.push(B64.alphabet.charAt(enc[0]), B64.alphabet.charAt(enc[1]), B64.alphabet.charAt(enc[2]), B64.alphabet.charAt(enc[3]));
            }
            return result.join('');
        } else {
            var result = '';
            while (++position < len) {
                nan0 = buffer[position];
                nan1 = buffer[++position];
                enc[0] = nan0 >> 2;
                enc[1] = ((nan0 & 3) << 4) | (nan1 >> 4);
                if (isNaN(nan1))
                    enc[2] = enc[3] = 64;
                else {
                    nan2 = buffer[++position];
                    enc[2] = ((nan1 & 15) << 2) | (nan2 >> 6);
                    enc[3] = (isNaN(nan2)) ? 64 : nan2 & 63;
                }
                result += B64.alphabet[enc[0]] + B64.alphabet[enc[1]] + B64.alphabet[enc[2]] + B64.alphabet[enc[3]];
            }
            return result;
        }
    },
    decode: function (s) {
        if (s.length % 4)
            throw new Error("InvalidCharacterError: 'B64.decode' failed: The string to be decoded is not correctly encoded.");
        var buffer = B64.fromUtf8(s),
            position = 0,
            len = buffer.length;
        if (B64.ieo) {
            var result = [];
            while (position < len) {
                if (buffer[position] < 128)
                    result.push(String.fromCharCode(buffer[position++]));
                else if (buffer[position] > 191 && buffer[position] < 224)
                    result.push(String.fromCharCode(((buffer[position++] & 31) << 6) | (buffer[position++] & 63)));
                else
                    result.push(String.fromCharCode(((buffer[position++] & 15) << 12) | ((buffer[position++] & 63) << 6) | (buffer[position++] & 63)));
            }
            return result.join('');
        } else {
            var result = '';
            while (position < len) {
                if (buffer[position] < 128)
                    result += String.fromCharCode(buffer[position++]);
                else if (buffer[position] > 191 && buffer[position] < 224)
                    result += String.fromCharCode(((buffer[position++] & 31) << 6) | (buffer[position++] & 63));
                else
                    result += String.fromCharCode(((buffer[position++] & 15) << 12) | ((buffer[position++] & 63) << 6) | (buffer[position++] & 63));
            }
            return result;
        }
    },
    toUtf8: function (s) {
        var position = -1,
            len = s.length,
            chr, buffer = [];
        if (/^[\x00-\x7f]*$/.test(s)) while (++position < len)
            buffer.push(s.charCodeAt(position));
        else while (++position < len) {
            chr = s.charCodeAt(position);
            if (chr < 128)
                buffer.push(chr);
            else if (chr < 2048)
                buffer.push((chr >> 6) | 192, (chr & 63) | 128);
            else
                buffer.push((chr >> 12) | 224, ((chr >> 6) & 63) | 128, (chr & 63) | 128);
        }
        return buffer;
    },
    fromUtf8: function (s) {
        var position = -1,
            len, buffer = [],
            enc = [, , , ];
        if (!B64.lookup) {
            len = B64.alphabet.length;
            B64.lookup = {};
            while (++position < len)
                B64.lookup[B64.alphabet.charAt(position)] = position;
            position = -1;
        }
        len = s.length;
        while (++position < len) {
            enc[0] = B64.lookup[s.charAt(position)];
            enc[1] = B64.lookup[s.charAt(++position)];
            buffer.push((enc[0] << 2) | (enc[1] >> 4));
            enc[2] = B64.lookup[s.charAt(++position)];
            if (enc[2] == 64)
                break;
            buffer.push(((enc[1] & 15) << 4) | (enc[2] >> 2));
            enc[3] = B64.lookup[s.charAt(++position)];
            if (enc[3] == 64)
                break;
            buffer.push(((enc[2] & 3) << 6) | enc[3]);
        }
        return buffer;
    }
};
Disclaimer: I haven't tested this with Thai characters specifically, but assume it will work.
