I am encoding an executable in Go and trying to decode it in JavaScript.
Decoding the encoded string in JavaScript does not result in a matching file. I can encode a string like "this is a test string", decode it in JavaScript, and it works fine. But when I take an executable application and do the same thing, the decoded file is larger than the file before encoding.
What am I doing wrong? Thanks!
Here is the test executable I am using. It is in C++; compile it with g++ and use the output.
#include <iostream>

int main(void) {
    char test1[] = "hello";
    std::cout << "test1: " << test1 << std::endl;
    char test2[] = "world";
    std::cout << "test2: " << test2 << std::endl;
    char test3[] = "foobar";
    std::cout << "test3: " << test3 << std::endl;
    return 0;
}
Here is the Go app I am using to convert the file to bytes.
package main

import (
    "encoding/base64"
    "fmt"
    "io/ioutil"

    "github.com/atotto/clipboard"
)

func main() {
    bytes, err := ioutil.ReadFile("/path/to/file/a.out")
    if err != nil {
        fmt.Println(err)
    }
    // RawStdEncoding is standard base64 without '=' padding
    enc := make([]byte, base64.RawStdEncoding.EncodedLen(len(bytes)))
    base64.RawStdEncoding.Encode(enc, bytes)
    fmt.Println("byte size: ", len(bytes))
    fmt.Println("encoded byte size: ", len(enc))
    clipboard.WriteAll(string(enc))
}
Here is how I am attempting to decode and save the file in JavaScript.
let decodedBytes = atob("put the bytes here from your clipboard from running the go app");
fs.writeFileSync(
"/destination/to/save/file",
decodedBytes
);
I figured it out. After some research and reading, I found this question and this article. Initially the question did not help me, but after reading the article for some time I tried a few of the examples and got one of them (solution 1) to work. The root cause: atob() returns a binary string with one character per byte, and fs.writeFileSync() encodes a plain string as UTF-8 by default, so every byte above 0x7F gets expanded into a two-byte sequence. A plain text string survives this, but an executable is full of such bytes, which is why the decoded file came out larger than the original. Here is the JavaScript I have now to get this working.
The saved file is exactly the same as the source.
function b64ToUint6(nChr) {
  return nChr > 64 && nChr < 91
    ? nChr - 65 // 'A'-'Z' -> 0-25
    : nChr > 96 && nChr < 123
    ? nChr - 71 // 'a'-'z' -> 26-51
    : nChr > 47 && nChr < 58
    ? nChr + 4 // '0'-'9' -> 52-61
    : nChr === 43
    ? 62 // '+' -> 62
    : nChr === 47
    ? 63 // '/' -> 63
    : 0; // anything else -> 0
}
function base64DecToArr(sBase64, nBlockSize) {
var sB64Enc = sBase64.replace(/[^A-Za-z0-9\+\/]/g, ""),
nInLen = sB64Enc.length,
nOutLen = nBlockSize
? Math.ceil(((nInLen * 3 + 1) >>> 2) / nBlockSize) * nBlockSize
: (nInLen * 3 + 1) >>> 2,
aBytes = new Uint8Array(nOutLen);
for (
var nMod3, nMod4, nUint24 = 0, nOutIdx = 0, nInIdx = 0;
nInIdx < nInLen;
nInIdx++
) {
nMod4 = nInIdx & 3;
nUint24 |= b64ToUint6(sB64Enc.charCodeAt(nInIdx)) << (18 - 6 * nMod4);
if (nMod4 === 3 || nInLen - nInIdx === 1) {
for (nMod3 = 0; nMod3 < 3 && nOutIdx < nOutLen; nMod3++, nOutIdx++) {
aBytes[nOutIdx] = (nUint24 >>> ((16 >>> nMod3) & 24)) & 255;
}
nUint24 = 0;
}
}
return aBytes;
}
const fs = require("fs");

let decodedBytes = base64DecToArr("bytes to decode");
fs.writeFileSync("/destination/to/save/file", decodedBytes);
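Side note: since this runs in Node.js anyway, Buffer can decode base64 natively; a minimal sketch, assuming the same pasted string (Buffer accepts both padded and raw, unpadded base64):

const fs = require("fs");

// Buffer.from decodes the base64 text straight to raw bytes
const decoded = Buffer.from("bytes to decode", "base64");
fs.writeFileSync("/destination/to/save/file", decoded);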
Related
I have an XML file encoded in UTF-16, and I would like to convert it to UTF-8 in order to process it. If I use this command:
iconv -f UTF-16 -t UTF-8 file.xml > converted_file.xml
the file is converted correctly and I'm able to process it. I want to do the same in Node.js.
Currently I have a buffer of my file, and I've tried everything I could think of and what I could find on the internet, but unsuccessfully.
Here are some examples of what I've tried so far:
content = new Buffer((new Buffer(content, 'ucs2')).toString('utf8'));
I've also tried using those functions:
http://jonisalonen.com/2012/from-utf-16-to-utf-8-in-javascript/
https://stackoverflow.com/a/14601808/1405208
The first one doesn't change anything, and the links only give me Chinese characters.
Read the file as UCS-2 (UTF-16LE) and write it back out as UTF-8:
var content = fs.readFileSync('myfile.xml', {encoding:'ucs2'});
fs.writeFileSync('myfile.xml', content, {encoding:'utf8'});
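If you already have the file's contents in a Buffer, as the question mentions, a sketch of the same conversion without re-reading from disk (assuming the input really is UTF-16LE; a leading BOM survives as U+FEFF):

// buf holds the UTF-16LE bytes; decode them to a string, then re-encode as UTF-8
const converted = Buffer.from(buf.toString('utf16le'), 'utf8');
fs.writeFileSync('converted_file.xml', converted);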
While the answer above is the best answer for the question asked, I'm hoping this answer will help some folks who need to read a file as a binary string:
const reader = new FileReader();
reader.readAsBinaryString(this.fileToImport);
In my case the file was in utf-16 and I tried to read it into XLSX:
const wb = XLSX.read(bstr, { type: "binary" });
Combining both links from above, I first removed the first two characters, the byte-order mark that signals UTF-16LE (0xFF 0xFE),
then used this link to create the right numbers (though I think it actually produces UTF-7 encoding):
https://stackoverflow.com/a/14601808/1405208
Lastly, I applied the second link to get the right set of UTF-8 numbers: https://stackoverflow.com/a/14601808/1405208
The code that I ended up with:
decodeUTF16LE(binaryStr) {
  // Require the UTF-16LE byte-order mark (0xFF 0xFE); otherwise return the input as-is.
  if (binaryStr.charCodeAt(0) != 255 || binaryStr.charCodeAt(1) != 254) {
    return binaryStr;
  }
  const utf8 = [];
  for (var i = 2; i < binaryStr.length; i += 2) {
    // Each UTF-16LE code unit is two bytes, low byte first.
    let charcode = binaryStr.charCodeAt(i) | (binaryStr.charCodeAt(i + 1) << 8);
    if (charcode < 0x80) utf8.push(charcode);
    else if (charcode < 0x800) {
      utf8.push(0xc0 | (charcode >> 6), 0x80 | (charcode & 0x3f));
    } else if (charcode < 0xd800 || charcode >= 0xe000) {
      utf8.push(0xe0 | (charcode >> 12), 0x80 | ((charcode >> 6) & 0x3f), 0x80 | (charcode & 0x3f));
    }
    // surrogate pair
    else {
      // The low surrogate is the next code unit, i.e. the next two bytes.
      const low = binaryStr.charCodeAt(i + 2) | (binaryStr.charCodeAt(i + 3) << 8);
      i += 2;
      // UTF-16 encodes 0x10000-0x10FFFF by subtracting 0x10000 and
      // splitting the 20 bits of 0x0-0xFFFFF into two halves
      charcode = 0x10000 + (((charcode & 0x3ff) << 10) | (low & 0x3ff));
      utf8.push(
        0xf0 | (charcode >> 18),
        0x80 | ((charcode >> 12) & 0x3f),
        0x80 | ((charcode >> 6) & 0x3f),
        0x80 | (charcode & 0x3f)
      );
    }
  }
  return String.fromCharCode.apply(String, utf8);
},
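A quick sanity check with a hypothetical input ("\xFF\xFE" is the BOM, followed by "Hi" in UTF-16LE):

decodeUTF16LE("\xFF\xFEH\x00i\x00"); // "Hi"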
The .charCodeAt function returns the Unicode code of the character, but I would like to get the byte array instead. I know that if the char code is over 127, the character is stored in two or more bytes.
var arr=[];
for(var i=0; i<str.length; i++) {
arr.push(str.charCodeAt(i))
}
The logic of encoding Unicode in UTF-8 is basically:
Up to 4 bytes per character can be used. The fewest number of bytes possible is used.
Characters up to U+007F are encoded with a single byte.
For multibyte sequences, the number of leading 1 bits in the first byte gives the number of bytes for the character. The rest of the bits of the first byte can be used to encode bits of the character.
The continuation bytes begin with 10, and the other 6 bits encode bits of the character.
Here's a function I wrote a while back for encoding a JavaScript UTF-16 string in UTF-8:
function toUTF8Array(str) {
var utf8 = [];
for (var i=0; i < str.length; i++) {
var charcode = str.charCodeAt(i);
if (charcode < 0x80) utf8.push(charcode);
else if (charcode < 0x800) {
utf8.push(0xc0 | (charcode >> 6),
0x80 | (charcode & 0x3f));
}
else if (charcode < 0xd800 || charcode >= 0xe000) {
utf8.push(0xe0 | (charcode >> 12),
0x80 | ((charcode>>6) & 0x3f),
0x80 | (charcode & 0x3f));
}
// surrogate pair
else {
i++;
// UTF-16 encodes 0x10000-0x10FFFF by
// subtracting 0x10000 and splitting the
// 20 bits of 0x0-0xFFFFF into two halves
charcode = 0x10000 + (((charcode & 0x3ff)<<10)
| (str.charCodeAt(i) & 0x3ff));
utf8.push(0xf0 | (charcode >>18),
0x80 | ((charcode>>12) & 0x3f),
0x80 | ((charcode>>6) & 0x3f),
0x80 | (charcode & 0x3f));
}
}
return utf8;
}
JavaScript Strings are stored in UTF-16. To get UTF-8, you'll have to convert the String yourself.
One way is to mix encodeURIComponent(), which will output UTF-8 bytes URL-encoded, with unescape, as mentioned on ecmanaut.
var utf8 = unescape(encodeURIComponent(str));
var arr = [];
for (var i = 0; i < utf8.length; i++) {
arr.push(utf8.charCodeAt(i));
}
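For example, with str = '€' (U+20AC), utf8 becomes the three-character binary string "\xE2\x82\xAC" and arr ends up as [226, 130, 172].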
The Encoding API lets you both encode and decode UTF-8 easily (using typed arrays):
var encoded = new TextEncoder().encode("Γεια σου κόσμε");
var decoded = new TextDecoder("utf-8").decode(encoded);
console.log(encoded, decoded);
Browser support isn't too bad, and there's a polyfill that should work in IE11 and older versions of Edge.
While TextEncoder can only encode to UTF-8, TextDecoder supports other encodings. I used it to decode Japanese text (Shift-JIS) in this way:
// Shift-JIS encoded text; must be a byte array due to values 129 and 130.
var arr = [130, 108, 130, 102, 130, 80, 129, 64, 130, 102, 130, 96, 130, 108, 130, 100,
129, 64, 130, 99, 130, 96, 130, 115, 130, 96, 129, 124, 130, 79, 130, 80];
// Convert to byte array
var data = new Uint8Array(arr);
// Decode with TextDecoder
var decoded = new TextDecoder("shift-jis").decode(data.buffer);
console.log(decoded);
The Google Closure library has functions to convert to/from UTF-8 and byte arrays. If you don't want to use the whole library, you can copy the functions from here. For completeness, the code to convert a string to a UTF-8 byte array is:
goog.crypt.stringToUtf8ByteArray = function(str) {
// TODO(user): Use native implementations if/when available
var out = [], p = 0;
for (var i = 0; i < str.length; i++) {
var c = str.charCodeAt(i);
if (c < 128) {
out[p++] = c;
} else if (c < 2048) {
out[p++] = (c >> 6) | 192;
out[p++] = (c & 63) | 128;
} else if (
((c & 0xFC00) == 0xD800) && (i + 1) < str.length &&
((str.charCodeAt(i + 1) & 0xFC00) == 0xDC00)) {
// Surrogate Pair
c = 0x10000 + ((c & 0x03FF) << 10) + (str.charCodeAt(++i) & 0x03FF);
out[p++] = (c >> 18) | 240;
out[p++] = ((c >> 12) & 63) | 128;
out[p++] = ((c >> 6) & 63) | 128;
out[p++] = (c & 63) | 128;
} else {
out[p++] = (c >> 12) | 224;
out[p++] = ((c >> 6) & 63) | 128;
out[p++] = (c & 63) | 128;
}
}
return out;
};
Assuming the question is about a DOMString as input and the goal is to get an Array that, when interpreted as a string (e.g. written to a file on disk), would be UTF-8 encoded:
Now that nearly all modern browsers support Typed Arrays, it'd be a shame if this approach were not listed:
According to the W3C, software supporting the File API should accept DOMStrings in their Blob constructor (see also: String encoding when constructing a Blob)
Blobs can be converted to an ArrayBuffer using the .readAsArrayBuffer() function of a File Reader
Using a DataView or constructing a Typed Array with the buffer read by the File Reader, one can access every single byte of the ArrayBuffer
Example:
// Create a Blob with an Euro-char (U+20AC)
var b = new Blob(['€']);
var fr = new FileReader();
fr.onload = function() {
var ua = new Uint8Array(fr.result);
// This will log "3|226|130|172"
// E2 82 AC
// In UTF-16, it would be only 2 bytes long
console.log(
fr.result.byteLength + '|' +
ua[0] + '|' +
ua[1] + '|' +
ua[2] + ''
);
};
fr.readAsArrayBuffer(b);
Play with that on JSFiddle. I haven't benchmarked this yet but I can imagine this being efficient for large DOMStrings as input.
You can save a string raw, as-is, by using FileReader.
Save the string in a Blob and call readAsArrayBuffer(). The onload event then yields an ArrayBuffer, which can be converted into a Uint8Array.
Unfortunately this call is asynchronous.
This little function will help you:
function stringToBytes(str)
{
let reader = new FileReader();
let done = () => {};
reader.onload = event =>
{
done(new Uint8Array(event.target.result), str);
};
reader.readAsArrayBuffer(new Blob([str], { type: "application/octet-stream" }));
return { done: callback => { done = callback; } };
}
Call it like this:
stringToBytes("\u{1f4a9}").done(bytes =>
{
console.log(bytes);
});
output: [240, 159, 146, 169]
Explanation:
JavaScript uses UTF-16 and surrogate pairs to store Unicode characters in memory. To save Unicode characters in raw binary byte streams, an encoding is necessary.
Usually, and in most cases, UTF-8 is used for this. Without an encoding you can't save Unicode characters, just ASCII up to 0x7F.
The Blob constructor encodes strings as UTF-8, so readAsArrayBuffer() yields the UTF-8 bytes.
As there is no pure byte type in JavaScript, we can represent a byte array as an array of numbers, where each number represents a byte and thus has an integer value between 0 and 255 inclusive.
Here is a simple function that converts a JavaScript string into an array of numbers containing the UTF-8 encoding of the string:
function toUtf8(str) {
var value = [];
var destIndex = 0;
for (var index = 0; index < str.length; index++) {
var code = str.charCodeAt(index);
if (code <= 0x7F) {
value[destIndex++] = code;
} else if (code <= 0x7FF) {
value[destIndex++] = ((code >> 6 ) & 0x1F) | 0xC0;
value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
} else if (code <= 0xFFFF) {
value[destIndex++] = ((code >> 12) & 0x0F) | 0xE0;
value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else if (code <= 0x1FFFFF) {
        // (Unreachable for native JS strings: charCodeAt() never exceeds 0xFFFF.)
        value[destIndex++] = ((code >> 18) & 0x07) | 0xF0;
        value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
        value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
        value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
    } else if (code <= 0x03FFFFFF) {
        value[destIndex++] = ((code >> 24) & 0x03) | 0xF8;
value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
} else if (code <= 0x7FFFFFFF) {
value[destIndex++] = ((code >> 30) & 0x01) | 0xFC;
value[destIndex++] = ((code >> 24) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 18) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 12) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 6 ) & 0x3F) | 0x80;
value[destIndex++] = ((code >> 0 ) & 0x3F) | 0x80;
} else {
throw new Error("Unsupported Unicode character \""
+ str.charAt(index) + "\" with code " + code + " (binary: "
+ toBinary(code) + ") at index " + index
+ ". Cannot represent it as UTF-8 byte sequence.");
}
}
return value;
}
function toBinary(byteValue) {
if (byteValue < 0) {
byteValue = byteValue & 0x00FF;
}
var str = byteValue.toString(2);
var len = str.length;
var prefix = "";
for (var i = len; i < 8; i++) {
prefix += "0";
}
return prefix + str;
}
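A quick usage sketch ('€' is U+20AC, whose UTF-8 bytes are E2 82 AC):

toUtf8("A");   // [65]
toUtf8("€");   // [226, 130, 172]
toBinary(172); // "10101100"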
I was using Joni's solution and it worked fine, but this one is much shorter. It was inspired by the atobUTF16() function of Solution #3 of Mozilla's Base64 Unicode discussion. (Note that it only copies the low byte of each char code, so it is only correct for strings whose characters are all at or below U+00FF.)
function convertStringToUTF8ByteArray(str) {
let binaryArray = new Uint8Array(str.length)
Array.prototype.forEach.call(binaryArray, function (el, idx, arr) { arr[idx] = str.charCodeAt(idx) })
return binaryArray
}
function convertByte() {
  var c = document.getElementById("str").value;
  var arr = [];
  for (var ind = 0; ind < c.length; ind++) {
    arr[ind] = c.charCodeAt(ind);
  }
  document.getElementById("result").innerHTML = "The converted value is " + arr.join(" ");
}
I have Javascript in an XHTML web page that is passing UTF-8 encoded strings. It needs to continue to pass the UTF-8 version, as well as decode it. How is it possible to decode a UTF-8 string for display?
<script type="text/javascript">
// <![CDATA[
function updateUser(usernameSent){
var usernameReceived = usernameSent; // Current value: GrÃ¶ÃŸe
var usernameDecoded = usernameReceived; // Decode to: Größe
var html2id = '';
html2id += 'Encoded: ' + usernameReceived + '<br />Decoded: ' + usernameDecoded;
document.getElementById('userId').innerHTML = html2id;
}
// ]]>
</script>
To answer the original question: here is how you decode UTF-8 in JavaScript:
http://ecmanaut.blogspot.ca/2006/07/encoding-decoding-utf8-in-javascript.html
Specifically,
function encode_utf8(s) {
return unescape(encodeURIComponent(s));
}
function decode_utf8(s) {
return decodeURIComponent(escape(s));
}
We have been using this in our production code for 6 years, and it has worked flawlessly.
Note, however, that escape() and unescape() are deprecated. See this.
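If the deprecation is a concern, a rough modern equivalent of the same pair can be built on the Encoding API instead (a sketch; note the spread can hit argument-count limits on very long strings):

function encode_utf8(s) {
  // UTF-8 bytes, re-expressed as a binary string (one char per byte)
  return String.fromCharCode(...new TextEncoder().encode(s));
}
function decode_utf8(s) {
  // binary string back to bytes, then decoded as UTF-8
  return new TextDecoder().decode(Uint8Array.from(s, c => c.charCodeAt(0)));
}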
This should work:
// http://www.onicos.com/staff/iz/amuse/javascript/expert/utf.txt
/* utf.js - UTF-8 <=> UTF-16 convertion
*
* Copyright (C) 1999 Masanao Izumo <iz#onicos.co.jp>
* Version: 1.0
* LastModified: Dec 25 1999
* This library is free. You can redistribute it and/or modify it.
*/
function Utf8ArrayToStr(array) {
var out, i, len, c;
var char2, char3;
out = "";
len = array.length;
i = 0;
while(i < len) {
c = array[i++];
switch(c >> 4)
{
case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
// 0xxxxxxx
out += String.fromCharCode(c);
break;
case 12: case 13:
// 110x xxxx 10xx xxxx
char2 = array[i++];
out += String.fromCharCode(((c & 0x1F) << 6) | (char2 & 0x3F));
break;
case 14:
// 1110 xxxx 10xx xxxx 10xx xxxx
char2 = array[i++];
char3 = array[i++];
out += String.fromCharCode(((c & 0x0F) << 12) |
((char2 & 0x3F) << 6) |
((char3 & 0x3F) << 0));
break;
}
}
return out;
}
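For instance, the three UTF-8 bytes of the Euro sign (E2 82 AC) decode back to it:

Utf8ArrayToStr([226, 130, 172]); // "€"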
Check out the JSFiddle demo.
Also see the related questions: here and here
Perhaps using TextDecoder will be sufficient.
It is not supported in IE, though.
var decoder = new TextDecoder('utf-8'),
decodedMessage;
decodedMessage = decoder.decode(message.data);
Handling non-UTF8 text
In this example, we decode the Russian text "Привет, мир!", which means "Hello, world." In our TextDecoder() constructor, we specify the Windows-1251 character encoding, which is appropriate for Cyrillic script.
let win1251decoder = new TextDecoder('windows-1251');
let bytes = new Uint8Array([207, 240, 232, 226, 229, 242, 44, 32, 236, 232, 240, 33]);
console.log(win1251decoder.decode(bytes)); // Привет, мир!
The interface for the TextDecoder is described here.
Retrieving a byte array from a string is equally simple:
const decoder = new TextDecoder();
const encoder = new TextEncoder();
const byteArray = encoder.encode('Größe');
// converted it to a byte array
// now we can decode it back to a string if desired
console.log(decoder.decode(byteArray));
If you have data in a different encoding, you must compensate for that when decoding.
The parameter in the constructor for the TextDecoder is any one of the valid encoding labels listed here.
Updating #Albert's answer by adding a condition for emoji (4-byte sequences):
function Utf8ArrayToStr(array) {
var out, i, len, c;
var char2, char3, char4;
out = "";
len = array.length;
i = 0;
while(i < len) {
c = array[i++];
switch(c >> 4)
{
case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
// 0xxxxxxx
out += String.fromCharCode(c);
break;
case 12: case 13:
// 110x xxxx 10xx xxxx
char2 = array[i++];
out += String.fromCharCode(((c & 0x1F) << 6) | (char2 & 0x3F));
break;
case 14:
// 1110 xxxx 10xx xxxx 10xx xxxx
char2 = array[i++];
char3 = array[i++];
out += String.fromCharCode(((c & 0x0F) << 12) |
((char2 & 0x3F) << 6) |
((char3 & 0x3F) << 0));
break;
case 15:
// 1111 0xxx 10xx xxxx 10xx xxxx 10xx xxxx
char2 = array[i++];
char3 = array[i++];
char4 = array[i++];
out += String.fromCodePoint(((c & 0x07) << 18) | ((char2 & 0x3F) << 12) | ((char3 & 0x3F) << 6) | (char4 & 0x3F));
break;
    }
  }
  return out;
}
Here is a solution that handles all Unicode code points, including the upper (4-byte) values, and is supported by all modern browsers (IE and others > 5.5). It uses decodeURIComponent(), but NOT the deprecated escape/unescape functions:
function utf8_to_str(a) {
for(var i=0, s=''; i<a.length; i++) {
var h = a[i].toString(16)
if(h.length < 2) h = '0' + h
s += '%' + h
}
return decodeURIComponent(s)
}
Tested and available on GitHub
To create UTF-8 from a string:
function utf8_from_str(s) {
for(var i=0, enc = encodeURIComponent(s), a = []; i < enc.length;) {
if(enc[i] === '%') {
a.push(parseInt(enc.substr(i+1, 2), 16))
i += 3
} else {
a.push(enc.charCodeAt(i++))
}
}
return a
}
Tested and available on GitHub
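A round-trip sketch ('€' is U+20AC, UTF-8 bytes E2 82 AC):

utf8_from_str('€');           // [226, 130, 172]
utf8_to_str([226, 130, 172]); // '€'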
This is what I found after a more specific Google search than just UTF-8 encode/decode. So, for those who are looking for a conversion library to convert between encodings, here you go.
https://github.com/inexorabletash/text-encoding
var uint8array = new TextEncoder().encode(str);
var str = new TextDecoder(encoding).decode(uint8array);
Pasted from the repo readme:
All encodings from the Encoding specification are supported:
utf-8 ibm866 iso-8859-2 iso-8859-3 iso-8859-4 iso-8859-5 iso-8859-6 iso-8859-7 iso-8859-8 iso-8859-8-i iso-8859-10 iso-8859-13 iso-8859-14 iso-8859-15 iso-8859-16 koi8-r koi8-u macintosh windows-874 windows-1250 windows-1251 windows-1252 windows-1253 windows-1254 windows-1255 windows-1256 windows-1257 windows-1258 x-mac-cyrillic gb18030 hz-gb-2312 big5 euc-jp iso-2022-jp shift_jis euc-kr replacement utf-16be utf-16le x-user-defined
(Some encodings may be supported under other names, e.g. ascii, iso-8859-1, etc. See Encoding for additional labels for each encoding.)
#albert's solution was the closest, I think, but it can only parse up to 3-byte UTF-8 characters.
function utf8ArrayToStr(array) {
var out, i, len, c;
var char2, char3;
out = "";
len = array.length;
i = 0;
// XXX: Invalid bytes are ignored
while(i < len) {
c = array[i++];
if (c >> 7 == 0) {
// 0xxx xxxx
out += String.fromCharCode(c);
continue;
}
// Invalid starting byte
if (c >> 6 == 0x02) {
continue;
}
// #### MULTIBYTE ####
// How many bytes are left for this character?
var extraLength = null;
if (c >> 5 == 0x06) {
extraLength = 1;
} else if (c >> 4 == 0x0e) {
extraLength = 2;
} else if (c >> 3 == 0x1e) {
extraLength = 3;
} else if (c >> 2 == 0x3e) {
extraLength = 4;
} else if (c >> 1 == 0x7e) {
extraLength = 5;
} else {
continue;
}
// Do we have enough bytes in our data?
if (i+extraLength > len) {
var leftovers = array.slice(i-1);
// If there is an invalid byte in the leftovers we might want to
// continue from there.
for (; i < len; i++) if (array[i] >> 6 != 0x02) break;
if (i != len) continue;
// All leftover bytes are valid.
return {result: out, leftovers: leftovers};
}
// Remove the UTF-8 prefix from the char (res)
var mask = (1 << (8 - extraLength - 1)) - 1,
res = c & mask, nextChar, count;
for (count = 0; count < extraLength; count++) {
nextChar = array[i++];
// Is the char valid multibyte part?
if (nextChar >> 6 != 0x02) {break;};
res = (res << 6) | (nextChar & 0x3f);
}
if (count != extraLength) {
i--;
continue;
}
if (res <= 0xffff) {
out += String.fromCharCode(res);
continue;
}
res -= 0x10000;
var high = ((res >> 10) & 0x3ff) + 0xd800,
low = (res & 0x3ff) + 0xdc00;
out += String.fromCharCode(high, low);
}
return {result: out, leftovers: []};
}
This returns {result: "parsed string", leftovers: [list of invalid bytes at the end]} in case you are parsing the string in chunks.
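For example (a sketch): a chunk that ends mid-character comes back in leftovers, while a complete sequence parses cleanly:

utf8ArrayToStr([226, 130]);      // {result: "", leftovers: [226, 130]}
utf8ArrayToStr([226, 130, 172]); // {result: "€", leftovers: []}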
EDIT: fixed the issue that #unhammer found.
// String to UTF-8 ByteBuffer
function strToUTF8(str) {
  return Uint8Array.from(
    encodeURIComponent(str).replace(/%(..)/g, (m, v) => String.fromCodePoint(parseInt(v, 16))),
    c => c.codePointAt(0)
  );
}
// UTF-8 ByteArray to string
function UTF8toStr(ba) {
  // Pad each byte to two hex digits: '%a' would throw a URIError, '%0a' won't.
  return decodeURIComponent(ba.reduce((p, c) => p + '%' + c.toString(16).padStart(2, '0'), ''));
}
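A round-trip sketch:

strToUTF8('€');             // Uint8Array [226, 130, 172]
UTF8toStr([226, 130, 172]); // '€'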
Using my 1.6KB library, you can do
ToString(FromUTF8(Array.from(usernameReceived)))
This is a solution with extensive error reporting.
It takes a UTF-8 encoded byte array (where the byte array is represented as an
array of numbers, each an integer between 0 and 255 inclusive)
and produces a JavaScript string of Unicode characters.
function getNextByte(value, startByteIndex, startBitsStr,
additional, index)
{
if (index >= value.length) {
var startByte = value[startByteIndex];
throw new Error("Invalid UTF-8 sequence. Byte " + startByteIndex
+ " with value " + startByte + " (" + String.fromCharCode(startByte)
+ "; binary: " + toBinary(startByte)
+ ") starts with " + startBitsStr + " in binary and thus requires "
+ additional + " bytes after it, but we only have "
+ (value.length - startByteIndex) + ".");
}
var byteValue = value[index];
checkNextByteFormat(value, startByteIndex, startBitsStr, additional, index);
return byteValue;
}
function checkNextByteFormat(value, startByteIndex, startBitsStr,
additional, index)
{
if ((value[index] & 0xC0) != 0x80) {
var startByte = value[startByteIndex];
var wrongByte = value[index];
throw new Error("Invalid UTF-8 byte sequence. Byte " + startByteIndex
+ " with value " + startByte + " (" +String.fromCharCode(startByte)
+ "; binary: " + toBinary(startByte) + ") starts with "
+ startBitsStr + " in binary and thus requires " + additional
+ " additional bytes, each of which shouls start with 10 in binary."
+ " However byte " + (index - startByteIndex)
+ " after it with value " + wrongByte + " ("
+ String.fromCharCode(wrongByte) + "; binary: " + toBinary(wrongByte)
+") does not start with 10 in binary.");
}
}
function fromUtf8(value) {
    var result = "";
    var byte2, byte3, byte4, codePoint;
    for (var index = 0; index < value.length; ) {
        var startByteIndex = index;
        var code = value[index++];
        if (code <= 0x7F) {
            // 0xxxxxxx: plain ASCII byte
            result += String.fromCharCode(code);
        } else if ((code & 0xE0) == 0xC0) {
            // 110xxxxx: start of a two-byte sequence
            byte2 = getNextByte(value, startByteIndex, "110", 1, index++);
            result += String.fromCharCode(((code & 0x1F) << 6) | (byte2 & 0x3F));
        } else if ((code & 0xF0) == 0xE0) {
            // 1110xxxx: start of a three-byte sequence
            byte2 = getNextByte(value, startByteIndex, "1110", 2, index++);
            byte3 = getNextByte(value, startByteIndex, "1110", 2, index++);
            result += String.fromCharCode(((code & 0x0F) << 12)
                | ((byte2 & 0x3F) << 6) | (byte3 & 0x3F));
        } else if ((code & 0xF8) == 0xF0) {
            // 11110xxx: start of a four-byte sequence; becomes a UTF-16 surrogate pair
            byte2 = getNextByte(value, startByteIndex, "11110", 3, index++);
            byte3 = getNextByte(value, startByteIndex, "11110", 3, index++);
            byte4 = getNextByte(value, startByteIndex, "11110", 3, index++);
            codePoint = ((code & 0x07) << 18) | ((byte2 & 0x3F) << 12)
                | ((byte3 & 0x3F) << 6) | (byte4 & 0x3F);
            codePoint -= 0x10000;
            result += String.fromCharCode(
                0xD800 + ((codePoint >> 10) & 0x3FF),
                0xDC00 + (codePoint & 0x3FF));
        } else {
            throw new Error("Invalid UTF-8 sequence. Byte " + startByteIndex
                + " with value " + code + " (binary: " + toBinary(code)
                + ") is not a valid start byte.");
        }
    }
    return result;
}
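A quick check with the Euro sign's UTF-8 bytes:

fromUtf8([226, 130, 172]); // "€"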
You can use decodeURI for it.
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURI
As simple as this:
decodeURI('https://developer.mozilla.org/ru/docs/JavaScript_%D1%88%D0%B5%D0%BB%D0%BB%D1%8B');
// "https://developer.mozilla.org/ru/docs/JavaScript_шеллы"
Consider using it inside a try/catch block so you don't miss a URIError.
It also has full browser support.
const decoder = new TextDecoder();
console.log(decoder.decode(new Uint8Array([97])));
MDN resource link
I reckon the easiest way would be to use the built-in JS functions decodeURI() / encodeURI().
function updateUser(usernameSent) {
  var usernameEncoded = usernameSent; // Current value: utf8
  var usernameDecoded = decodeURI(usernameEncoded); // Decoded
  // do stuff
}
I searched for a simple solution and this works well for me:
//input data
view = new Uint8Array(data);
//output string
serialString = ua2text(view);
//convert byte array to string (one char code per byte; only safe for ASCII/Latin-1 values)
function ua2text(ua) {
  var s = "";
  for (var i = 0; i < ua.length; i++) {
    s += String.fromCharCode(ua[i]);
  }
  return s;
}
The only issue I have is that sometimes I get one character at a time. This might be by design with my source of the ArrayBuffer. I'm using https://github.com/xseignard/cordovarduino to read serial data on an Android device.
Preferably, as others have suggested, use the Encoding API. But if you need to support IE (for some strange reason), MDN recommends the FastestSmallestTextEncoderDecoder repo.
If you need to make use of the polyfill library:
import {encode, decode} from "fastestsmallesttextencoderdecoder";
Then (regardless of the polyfill) for encoding and decoding:
// takes in USVString and returns a Uint8Array object
const encoded = new TextEncoder().encode('€')
console.log(encoded);
// takes in an ArrayBuffer or an ArrayBufferView and returns a DOMString
const decoded = new TextDecoder().decode(encoded);
console.log(decoded);
The JavaScript method String.fromCharCode() behaves equivalently to Python's unichr() in the following sense:
print unichr(213) # prints Õ on the console
console.log(String.fromCharCode(213)); // prints Õ on the console as well
For my purposes, however, I need a JavaScript equivalent to the Python function chr(). Is there such a JavaScript function or a way to make String.fromCharCode() behave like chr()?
That is, I need something in JavaScript that mimics
print chr(213) # prints � on the console
So it turns out you just want to work with raw bytes in Node.js; there's a module for that. If you are a real wizard, you can get this stuff to work with JavaScript strings alone, but it's harder and far less efficient.
var b = Buffer.alloc(1); // Buffer.alloc replaces the deprecated new Buffer(size)
b[0] = 213;
console.log(b.toString()); //�
var b2 = Buffer.from([0xE2, 0x98, 0x85]); // Buffer.from replaces new Buffer(array)
console.log(b2.toString()); //★
print chr(213) # prints � on the console
So this prints a raw byte (0xD5) that is then interpreted as UTF-8 (most likely), which is not a valid UTF-8 byte sequence, so it is displayed as the replacement character (�).
The interpretation as UTF-8 is not relevant here; you most likely just want raw bytes.
To create raw bytes in JavaScript you could use Uint8Array.
var a = new Uint8Array(1);
a[0] = 213;
You could optionally then interpret the raw bytes as utf-8:
console.log( utf8decode(a)); // "�"
//Not recommended for production use ;D
//Doesn't handle > BMP to keep the answer shorter
function utf8decode(uint8array) {
var codePoints = [],
i = 0,
byte, codePoint, len = uint8array.length;
for (i = 0; i < len; ++i) {
byte = uint8array[i];
if ((byte & 0xF8) === 0xF0 && len > i + 3) {
codePoint = ((byte & 0x7) << 18) | ((uint8array[++i] & 0x3F) << 12) | ((uint8array[++i] & 0x3F) << 6) | (uint8array[++i] & 0x3F);
if (!(0xFFFF < codePoint && codePoint <= 0x10FFFF)) {
codePoints.push(0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD);
} else {
codePoints.push(codePoint);
}
} else if ((byte & 0xF0) === 0xE0 && len > i + 2) {
codePoint = ((byte & 0xF) << 12) | ((uint8array[++i] & 0x3F) << 6) | (uint8array[++i] & 0x3F);
if (!(0x7FF < codePoint && codePoint <= 0xFFFF)) {
codePoints.push(0xFFFD, 0xFFFD, 0xFFFD);
} else {
codePoints.push(codePoint);
}
} else if ((byte & 0xE0) === 0xC0 && len > i + 1) {
codePoint = ((byte & 0x1F) << 6) | ((uint8array[++i] & 0x3F));
if (!(0x7F < codePoint && codePoint <= 0x7FF)) {
codePoints.push(0xFFFD, 0xFFFD);
} else {
codePoints.push(codePoint);
}
} else if ((byte & 0x80) === 0x00) {
codePoints.push(byte & 0x7F);
} else {
codePoints.push(0xFFFD);
}
}
return String.fromCharCode.apply(String, codePoints);
}
What you are most likely trying to do has nothing to do with trying to interpret the bytes as utf8 though.
Another example:
//UTF-8 For the black star U+2605 ★:
var a = new Uint8Array(3);
a[0] = 0xE2;
a[1] = 0x98;
a[2] = 0x85;
utf8decode(a) === String.fromCharCode(0x2605) //True
utf8decode(a) // ★
In python 2.7 (Ubuntu):
print chr(0xE2) + chr(0x98) + chr(0x85)
#prints ★
If you want this "Questionmark in a box" for every number that is not in the standard ASCII table, how about this little function?
function chr(c) {
return (c < 0 || c > 126) ? '�' : String.fromCharCode(c);
}
When I get a JSON feed from a Cyrillic site, the data is in a \ufffd format instead of Cyrillic chars.
(example feed: http://jsonduit.com/v1/f/l/7sg?cb=getJsonP_1284131679846_0)
So when I set the source HTML to the input, I get weird boxes instead of characters.
I tried to unescape the input, but that won't work either.
How do I revert the feed back to Cyrillic?
(BTW, the source page encoding is set to UTF-8.)
decodeURIComponent("stringToDecodeToCyrillic")
Example:
decodeURIComponent("%D0%90%D0%BB%D0%B5%D0%BA%D1%81%D0%B5%D0%B9") === "Алексей"
Fastest way to encode cyrillic letters for url
It seems you are receiving a UTF-8 string. Use the following class to decode (note it only handles one- and two-byte sequences, i.e. code points up to U+07FF):
UTF8 = {
encode: function(s){
for(var c, i = -1, l = (s = s.split("")).length, o = String.fromCharCode; ++i < l;
s[i] = (c = s[i].charCodeAt(0)) >= 127 ? o(0xc0 | (c >>> 6)) + o(0x80 | (c & 0x3f)) : s[i]
);
return s.join("");
},
decode: function(s){
for(var a, b, i = -1, l = (s = s.split("")).length, o = String.fromCharCode, c = "charCodeAt"; ++i < l;
((a = s[i][c](0)) & 0x80) &&
(s[i] = (a & 0xfc) == 0xc0 && ((b = s[i + 1][c](0)) & 0xc0) == 0x80 ?
o(((a & 0x03) << 6) + (b & 0x3f)) : o(128), s[++i] = "")
);
return s.join("");
}
};
Usage:
var newString = UTF8.decode( yourString );