I need to convert a base64 encoded string into an ArrayBuffer.
The base64 strings are user input; they will be copied and pasted from an email, so they're not there when the page is loaded.
I would like to do this in JavaScript without making an Ajax call to the server, if possible.
I found these links interesting, but they didn't help me:
ArrayBuffer to base64 encoded string
This is about the opposite conversion, from ArrayBuffer to base64, not the other way round.
http://jsperf.com/json-vs-base64/2
This looks good, but I can't figure out how to use the code.
Is there an easy (maybe native) way to do the conversion? Thanks.
Try this:
function _base64ToArrayBuffer(base64) {
    var binary_string = window.atob(base64);
    var len = binary_string.length;
    var bytes = new Uint8Array(len);
    for (var i = 0; i < len; i++) {
        bytes[i] = binary_string.charCodeAt(i);
    }
    return bytes.buffer;
}
Using TypedArray.from:
Uint8Array.from(atob(base64_string), c => c.charCodeAt(0))
Performance should be compared with the for-loop version of Goran.it's answer.
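If performance matters for your input sizes, here is a rough timing sketch (the sample data and iteration counts are illustrative assumptions, not a rigorous benchmark):

// rough timing sketch; adjust iteration count for your environment
const sample = btoa("hello world".repeat(100));

console.time("for loop");
for (let n = 0; n < 10000; n++) {
    const s = atob(sample);
    const bytes = new Uint8Array(s.length);
    for (let i = 0; i < s.length; i++) bytes[i] = s.charCodeAt(i);
}
console.timeEnd("for loop");

console.time("Uint8Array.from");
for (let n = 0; n < 10000; n++) {
    Uint8Array.from(atob(sample), c => c.charCodeAt(0));
}
console.timeEnd("Uint8Array.from");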
For Node.js users:
const myBuffer = Buffer.from(someBase64String, 'base64');
myBuffer will be of type Buffer, which is a subclass of Uint8Array. Unfortunately, Uint8Array is NOT an ArrayBuffer, as the OP was asking for. But when manipulating an ArrayBuffer I almost always wrap it with a Uint8Array or something similar, so it should be close to what's being asked for.
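If you do need a genuine ArrayBuffer, note that a Node Buffer may be a view into a larger shared pool, so slicing by byteOffset/byteLength is safer than taking myBuffer.buffer directly:

const myBuffer = Buffer.from(someBase64String, 'base64');
// Buffer may share a pooled ArrayBuffer, so copy out only its own bytes
const arrayBuffer = myBuffer.buffer.slice(
    myBuffer.byteOffset,
    myBuffer.byteOffset + myBuffer.byteLength
);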
Goran.it's answer does not work because of the Unicode problem in JavaScript - https://developer.mozilla.org/en-US/docs/Web/API/WindowBase64/Base64_encoding_and_decoding.
I ended up using the function given on Daniel Guerrero's blog: http://blog.danguer.com/2011/10/24/base64-binary-decoding-in-javascript/
Function is listed on github link: https://github.com/danguer/blog-examples/blob/master/js/base64-binary.js
Use these lines:
var uintArray = Base64Binary.decode(base64_string);           // returns a Uint8Array
var byteArray = Base64Binary.decodeArrayBuffer(base64_string); // returns an ArrayBuffer
Async solution; it's better when the data is big:

// base64 to buffer
function base64ToBufferAsync(base64) {
    var dataUrl = "data:application/octet-stream;base64," + base64;
    // fetch can decode a data: URL natively; return the promise so callers can await it
    return fetch(dataUrl)
        .then(res => res.arrayBuffer())
        .then(buffer => {
            console.log("base64 to buffer: " + new Uint8Array(buffer));
            return buffer;
        });
}

// buffer to base64
function bufferToBase64Async(buffer) {
    return new Promise(function (resolve, reject) {
        var blob = new Blob([buffer], { type: 'application/octet-stream' });
        console.log("buffer to blob: " + blob);
        var fileReader = new FileReader();
        fileReader.onload = function () {
            var dataUrl = fileReader.result;
            console.log("blob to dataUrl: " + dataUrl);
            // strip the "data:...;base64," prefix
            var base64 = dataUrl.substr(dataUrl.indexOf(',') + 1);
            console.log("dataUrl to base64: " + base64);
            resolve(base64);
        };
        fileReader.onerror = reject;
        fileReader.readAsDataURL(blob);
    });
}
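A usage sketch, assuming the promise-returning versions above:

async function demo() {
    const buffer = await base64ToBufferAsync(btoa("hello")); // "aGVsbG8="
    const base64 = await bufferToBase64Async(buffer);
    console.log(base64); // "aGVsbG8="
}
demo();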
JavaScript is a fine development environment, so it seems odd that it doesn't provide a solution to this small problem. The solutions offered elsewhere on this page are potentially slow. Here is my solution. It employs the built-in functionality that decodes base64 image and sound data URLs.
var req = new XMLHttpRequest;
req.open('GET', "data:application/octet;base64," + base64Data);
req.responseType = 'arraybuffer';
req.onload = function fileLoaded(e)
{
    var byteArray = new Uint8Array(e.target.response);
    // var shortArray = new Int16Array(e.target.response);
    // var unsignedShortArray = new Uint16Array(e.target.response);
    // etc.
}
req.send();
The send request fails if the base64 string is badly formed.
The MIME type (application/octet) is probably unnecessary.
Tested in Chrome; it should work in other browsers.
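If you prefer a promise-based interface, here is a thin wrapper around the same trick (a sketch, not part of the original answer):

function base64ToArrayBufferAsync(base64Data) {
    return new Promise(function (resolve, reject) {
        var req = new XMLHttpRequest();
        req.open('GET', "data:application/octet;base64," + base64Data);
        req.responseType = 'arraybuffer';
        req.onload = function () { resolve(req.response); };
        req.onerror = reject; // fires for badly formed base64
        req.send();
    });
}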
Pure JS - no string middle step (no atob)
I wrote the following function, which converts base64 in a direct way (without conversion to a string at the middle step). IDEA:
get a 4-character base64 chunk
find the index of each character in the base64 alphabet
convert each index to a 6-bit number (binary string)
join the four 6-bit numbers, which gives a 24-bit number (stored as a binary string)
split the 24-bit string into three 8-bit parts, convert each to a number, and store them in the output array
corner case: if the input base64 string ends with one/two = chars, remove one/two numbers from the output array
The solution below can process large input base64 strings. A similar function for converting bytes to base64 without btoa is HERE.
function base64ToBytesArr(str) {
    const abc = [..."ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"]; // base64 alphabet
    let result = [];
    for (let i = 0; i < str.length / 4; i++) {
        let chunk = [...str.slice(4 * i, 4 * i + 4)];
        let bin = chunk.map(x => abc.indexOf(x).toString(2).padStart(6, '0')).join(''); // four 6-bit numbers
        let bytes = bin.match(/.{1,8}/g).map(x => +('0b' + x));                         // three 8-bit numbers
        result.push(...bytes.slice(0, 3 - (str[4 * i + 2] == "=") - (str[4 * i + 3] == "="))); // drop bytes for "=" padding
    }
    return result;
}
// --------
// TEST
// --------
let test = "Alice's Adventure in Wonderland.";
console.log('test string:', test.length, test);
let b64_btoa = btoa(test);
console.log('encoded string:', b64_btoa);
let decodedBytes = base64ToBytesArr(b64_btoa); // decode base64 to array of bytes
console.log('decoded bytes:', JSON.stringify(decodedBytes));
let decodedTest = decodedBytes.map(b => String.fromCharCode(b) ).join``;
console.log('Uint8Array', JSON.stringify(new Uint8Array(decodedBytes)));
console.log('decoded string:', decodedTest.length, decodedTest);
Caution!
If you want to decode base64 to a STRING (not a byte array) and you know that the result contains UTF-8 characters, then atob will fail in general; e.g., for the character 💩, atob("8J+SqQ==") will give a wrong result. In this case you can use the above solution and convert the resulting byte array to a string in the proper way, e.g.:
// base64ToBytesArr() - as defined above
// --------
// TEST
// --------
let testB64 = "8J+SqQ=="; // for string: "💩";
console.log('input base64 :', testB64);
let decodedBytes = base64ToBytesArr(testB64); // decode base64 to array of bytes
console.log('decoded bytes :', JSON.stringify(decodedBytes));
let result = new TextDecoder("utf-8").decode(new Uint8Array(decodedBytes));
console.log('properly decoded string :', result);
let result_atob = atob(testB64);
console.log('decoded by atob :', result_atob);
Snippets tested 2022-08-04 on: chrome 103.0.5060.134 (arm64), safari 15.2, firefox 103.0.1 (64 bit), edge 103.0.1264.77 (arm64), and node-js v12.16.1
I would strongly suggest using an npm package that correctly implements the base64 specification.
The best one I know is rfc4648.
The problem is that btoa and atob use binary strings instead of Uint8Array, and trying to convert to and from them is cumbersome. Also, there are a lot of bad packages on npm for this; I lost a lot of time before finding that one.
The creators of that specific package did a simple thing: they took the specification of Base64 (which is here, by the way) and implemented it correctly from beginning to end. (Including other formats in the specification that are also useful, like Base64url, Base32, etc.) That doesn't seem like much, but apparently it was too much to ask of the bunch of other libraries.
So yeah, I know I'm doing a bit of proselytism, but if you want to avoid wasting your time too, just use rfc4648.
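A minimal usage sketch (based on the package's documented parse/stringify API; check its README for exact details):

import { base64 } from "rfc4648";

const bytes = base64.parse("aGVsbG8=");   // Uint8Array of the decoded bytes
const buffer = bytes.buffer;              // the underlying ArrayBuffer
const str = base64.stringify(bytes);      // back to "aGVsbG8="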
I used the accepted answer to this question to create base64Url string <-> ArrayBuffer conversions, for base64Url data transmitted via an ASCII cookie [atob and btoa work with base64 (with +/) <-> JS binary strings], so I decided to post the code.
Many of us may want both conversions, and client-server communication may use the base64Url version (though a cookie may contain + and / as well as - and _ characters; if I understand correctly, only the ",;\ characters and some wicked characters from the 128-character ASCII set are disallowed). But a URL cannot contain the / character, hence the wider use of the base64Url version, which of course is not what atob/btoa supports...
Seeing other comments, I would like to stress that my use case here is base64Url data transmission via URL/cookie, and trying to use this crypto data with the JS Crypto API (2017), hence the need for the ArrayBuffer representation and b64u <-> arrBuff conversions. If the array buffers represent something other than base64 (a part of ASCII), this conversion won't work, since atob/btoa are limited to ASCII(128). Check out an appropriate converter like below:
The buff -> b64u version is from a tweet by Mathias Bynens, thanks for that one (too)! He also wrote a base64 encoder/decoder:
https://github.com/mathiasbynens/base64
Coming from Java, it may help when trying to understand the code that a Java byte[] is practically a JS Int8Array (signed int), but we use the unsigned version, Uint8Array, here, since JS conversions work with them. They both hold 8-bit values (256 possible values), so we call it byte[] in JS now...
The code is from a module class; that is why it's static.
//utility
/**
 * Array buffer to base64Url string
 * - arrBuff->byte[]->biStr->b64->b64u
 * @param arrayBuffer
 * @returns {string}
 * @private
 */
static _arrayBufferToBase64Url(arrayBuffer) {
    console.log('base64Url from array buffer:', arrayBuffer);
    let base64Url = window.btoa(String.fromCodePoint(...new Uint8Array(arrayBuffer)));
    base64Url = base64Url.replaceAll('+', '-');
    base64Url = base64Url.replaceAll('/', '_');
    console.log('base64Url:', base64Url);
    return base64Url;
}

/**
 * Base64Url string to array buffer
 * - b64u->b64->biStr->byte[]->arrBuff
 * @param base64Url
 * @returns {ArrayBufferLike}
 * @private
 */
static _base64UrlToArrayBuffer(base64Url) {
    console.log('array buffer from base64Url:', base64Url);
    let base64 = base64Url.replaceAll('-', '+');
    base64 = base64.replaceAll('_', '/');
    const binaryString = window.atob(base64);
    const length = binaryString.length;
    const bytes = new Uint8Array(length);
    for (let i = 0; i < length; i++) {
        bytes[i] = binaryString.charCodeAt(i);
    }
    console.log('array buffer:', bytes.buffer);
    return bytes.buffer;
}
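A round-trip usage sketch; "Converter" is a hypothetical name for the module class holding these statics:

const original = new Uint8Array([251, 255, 0, 16]).buffer;
const b64u = Converter._arrayBufferToBase64Url(original);  // "-_8AEA=="
const roundTrip = Converter._base64UrlToArrayBuffer(b64u);
console.log(new Uint8Array(roundTrip));                    // [251, 255, 0, 16]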
Making an ArrayBuffer from a base64 string:

function base64ToArrayBuffer(base64) {
    var binary_string = window.atob(base64);
    var len = binary_string.length;
    var bytes = new Uint8Array(len);
    for (var i = 0; i < len; i++) {
        bytes[i] = binary_string.charCodeAt(i);
    }
    return bytes.buffer;
}

I was trying to use the above code, and it's working fine.
Note that atob returns a plain binary string, not a comma-separated one in general; so this trick only works if the base64-encoded payload is itself a comma-separated list of numbers (e.g. "1,2,3"). In that case, a simple way is to wrap the decoded string in brackets and parse it as a JSON array. The code below converts such a base64 payload to an array of numbers:
let byteArray = JSON.parse('['+atob(base64)+']');
let buffer = new Uint8Array(byteArray);
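For example (assuming the encoded payload really is a comma-separated number list):

// only valid when the decoded text is a comma-separated list of numbers
const b64 = btoa("1,2,3");                       // "MSwyLDM="
const nums = JSON.parse('[' + atob(b64) + ']');  // [1, 2, 3]
const buf = new Uint8Array(nums);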
Solution without atob
I've seen many people complaining about the use of atob and btoa in the replies. There are some issues to take into account when using them.
There's a solution that avoids them on the MDN page about Base64. Below you can find the code to convert a base64 string into a Uint8Array, copied from the docs.
Note that the function below returns a Uint8Array. To get the ArrayBuffer version, you just need to do uintArray.buffer.
// map a base64 character code to its 6-bit value
function b64ToUint6(nChr) {
    return nChr > 64 && nChr < 91
        ? nChr - 65   // A-Z -> 0-25
        : nChr > 96 && nChr < 123
        ? nChr - 71   // a-z -> 26-51
        : nChr > 47 && nChr < 58
        ? nChr + 4    // 0-9 -> 52-61
        : nChr === 43
        ? 62          // +
        : nChr === 47
        ? 63          // /
        : 0;
}

function base64DecToArr(sBase64, nBlocksSize) {
    const sB64Enc = sBase64.replace(/[^A-Za-z0-9+/]/g, ""); // strip padding, whitespace, etc.
    const nInLen = sB64Enc.length;
    // 3 output bytes per 4 input chars, optionally rounded up to a block size
    const nOutLen = nBlocksSize
        ? Math.ceil(((nInLen * 3 + 1) >> 2) / nBlocksSize) * nBlocksSize
        : (nInLen * 3 + 1) >> 2;
    const taBytes = new Uint8Array(nOutLen);

    let nMod3;
    let nMod4;
    let nUint24 = 0;
    let nOutIdx = 0;
    for (let nInIdx = 0; nInIdx < nInLen; nInIdx++) {
        nMod4 = nInIdx & 3;
        // accumulate four 6-bit values into one 24-bit number
        nUint24 |= b64ToUint6(sB64Enc.charCodeAt(nInIdx)) << (6 * (3 - nMod4));
        if (nMod4 === 3 || nInLen - nInIdx === 1) {
            nMod3 = 0;
            while (nMod3 < 3 && nOutIdx < nOutLen) {
                taBytes[nOutIdx] = (nUint24 >>> ((16 >>> nMod3) & 24)) & 255;
                nMod3++;
                nOutIdx++;
            }
            nUint24 = 0;
        }
    }
    return taBytes;
}
If you're interested in the reverse operation, ArrayBuffer to base64, you can find how to do it in the same link.
I need to encrypt a string using AES encryption. This encryption was happening in C# earlier, but it needs to be converted into JavaScript (it will be run in a browser).
The current C# code for encryption is as follows -
public static string EncryptString(string plainText, string encryptionKey)
{
    byte[] clearBytes = Encoding.Unicode.GetBytes(plainText);
    using (Aes encryptor = Aes.Create())
    {
        Rfc2898DeriveBytes pdb = new Rfc2898DeriveBytes(encryptionKey, new byte[] { 0x49, 0x76, 0x61, 0x6e, 0x20, 0x4d, 0x65, 0x64, 0x76, 0x65, 0x64, 0x65, 0x76 });
        encryptor.Key = pdb.GetBytes(32);
        encryptor.IV = pdb.GetBytes(16);
        using (MemoryStream ms = new MemoryStream())
        {
            using (CryptoStream cs = new CryptoStream(ms, encryptor.CreateEncryptor(), CryptoStreamMode.Write))
            {
                cs.Write(clearBytes, 0, clearBytes.Length);
                cs.Close();
            }
            plainText = Convert.ToBase64String(ms.ToArray());
        }
    }
    return plainText;
}
I have tried to use CryptoJS to replicate the same functionality, but it's not giving me the equivalent encrypted base64 string. Here's my CryptoJS code -
function encryptString(encryptString, secretKey) {
    var iv = CryptoJS.enc.Hex.parse('Ivan Medvedev');
    var key = CryptoJS.PBKDF2(secretKey, iv, { keySize: 256 / 32, iterations: 500 });
    var encrypted = CryptoJS.AES.encrypt(encryptString, key, { iv: iv });
    return encrypted;
}
The encrypted string has to be sent to a server, which will be able to decrypt it. The server is able to decrypt the encrypted string generated from the C# code, but not the one generated from the JS code. I tried to compare the encrypted strings generated by both and found that the C# code generates longer encrypted strings. For example, keeping 'Example String' as plainText and 'Example Key' as the key, I get the following result -
C# - eAQO+odxOdGlNRB81SHR2XzJhyWtz6XmQDko9HyDe0w=
JS - 9ex5i2g+8iUCwdwN92SF+A==
The length of the JS encrypted string is always shorter than the C# one. Is there something I am doing wrong? I just have to replicate the C# code in the JS code.
Update:
My current code after Zergatul's answer is this -
function encryptString(encryptString, secretKey) {
    var keyBytes = CryptoJS.PBKDF2(secretKey, 'Ivan Medvedev', { keySize: 48 / 4, iterations: 1000 });
    console.log(keyBytes.toString());
    // take first 32 bytes as key (like in C# code)
    var key = new CryptoJS.lib.WordArray.init(keyBytes.words, 32);
    // skip first 32 bytes and take next 16 bytes as IV
    var iv = new CryptoJS.lib.WordArray.init(keyBytes.words.splice(32 / 4), 16);
    console.log(key.toString());
    console.log(iv.toString());
    var encrypted = CryptoJS.AES.encrypt(encryptString, key, { iv: iv });
    return encrypted;
}
As illustrated in his/her answer, if the C# code converts the plainText into bytes using ASCII instead of Unicode, both the C# and JS code produce exactly the same result. But since I am not able to modify the decryption code, I have to make the code equivalent to the original C# code, which was using Unicode.
So I tried to see what the difference is between the byte arrays produced by ASCII and Unicode conversion in C#. Here's what I found -
ASCII byte array: [69,120,97,109,112,108,101,32,83,116,114,105,110,103]
Unicode byte array: [69,0,120,0,97,0,109,0,112,0,108,0,101,0,32,0,83,0,116,0,114,0,105,0,110,0,103,0]
So there is an extra byte for each character in C#'s Unicode encoding (Encoding.Unicode is UTF-16 LE, which allocates twice as many bytes per character as ASCII).
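The same expansion can be reproduced in JS, since Encoding.Unicode in C# is UTF-16 LE; a sketch (for characters in the basic plane):

function toUtf16LeBytes(str) {
    // emit the UTF-16 LE byte pair (low byte, high byte) per code unit
    const bytes = [];
    for (let i = 0; i < str.length; i++) {
        const code = str.charCodeAt(i);
        bytes.push(code & 0xff, code >> 8);
    }
    return bytes;
}
console.log(toUtf16LeBytes("Ex")); // [69, 0, 120, 0]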
Here's the difference between the Unicode and ASCII conversions, respectively -
ASCII
clearBytes: [69,120,97,109,112,108,101,32,83,116,114,105,110,103,]
encryptor.Key: [123,213,18,82,141,249,182,218,247,31,246,83,80,77,195,134,230,92,0,125,232,210,135,115,145,193,140,239,228,225,183,13,]
encryptor.IV: [101,74,46,177,46,233,68,252,83,169,211,13,249,61,118,167,]
Result: eQus9GLPKULh9vhRWOJjog==
Unicode:
clearBytes: [69,0,120,0,97,0,109,0,112,0,108,0,101,0,32,0,83,0,116,0,114,0,105,0,110,0,103,0,]
encryptor.Key: [123,213,18,82,141,249,182,218,247,31,246,83,80,77,195,134,230,92,0,125,232,210,135,115,145,193,140,239,228,225,183,13,]
encryptor.IV: [101,74,46,177,46,233,68,252,83,169,211,13,249,61,118,167,]
Result: eAQO+odxOdGlNRB81SHR2XzJhyWtz6XmQDko9HyDe0w=
So since both the key and IV have exactly the same byte arrays in the Unicode and ASCII approaches, they should not have generated different output; but somehow they do. I think it's because of clearBytes' length, as it's that length that is used when writing to the CryptoStream.
I tried to see what the output of the generated bytes in the JS code is and found that it uses words, which need to be converted into strings using the toString() method.
keyBytes: 7bd512528df9b6daf71ff653504dc386e65c007de8d2877391c18cefe4e1b70d654a2eb12ee944fc53a9d30df93d76a7
key: 7bd512528df9b6daf71ff653504dc386e65c007de8d2877391c18cefe4e1b70d
iv: 654a2eb12ee944fc53a9d30df93d76a7
Since I am not able to affect the generated encrypted string's length in the JS code (no direct access to the write stream), I am still stuck here.
Here is an example of how to reproduce the same ciphertext between C# and CryptoJS:
static void Main(string[] args)
{
    byte[] plainText = Encoding.Unicode.GetBytes("Example String"); // this is UTF-16 LE
    string cipherText;
    using (Aes encryptor = Aes.Create())
    {
        var pdb = new Rfc2898DeriveBytes("Example Key", Encoding.ASCII.GetBytes("Ivan Medvedev"));
        encryptor.Key = pdb.GetBytes(32);
        encryptor.IV = pdb.GetBytes(16);
        using (MemoryStream ms = new MemoryStream())
        {
            using (CryptoStream cs = new CryptoStream(ms, encryptor.CreateEncryptor(), CryptoStreamMode.Write))
            {
                cs.Write(plainText, 0, plainText.Length);
                cs.Close();
            }
            cipherText = Convert.ToBase64String(ms.ToArray());
        }
    }
    Console.WriteLine(cipherText);
}
And JS:
var keyBytes = CryptoJS.PBKDF2('Example Key', 'Ivan Medvedev', { keySize: 48 / 4, iterations: 1000 });
// take first 32 bytes as key (like in C# code)
var key = new CryptoJS.lib.WordArray.init(keyBytes.words, 32);
// skip first 32 bytes and take next 16 bytes as IV
var iv = new CryptoJS.lib.WordArray.init(keyBytes.words.splice(32 / 4), 16);
// use the same encoding as in C# code, to convert string into bytes
var data = CryptoJS.enc.Utf16LE.parse("Example String");
var encrypted = CryptoJS.AES.encrypt(data, key, { iv: iv });
console.log(encrypted.toString());
Both codes return: eAQO+odxOdGlNRB81SHR2XzJhyWtz6XmQDko9HyDe0w=
TL;DR the final code looks like this -
function encryptString(encryptString, secretKey) {
    encryptString = addExtraByteToChars(encryptString);
    var keyBytes = CryptoJS.PBKDF2(secretKey, 'Ivan Medvedev', { keySize: 48 / 4, iterations: 1000 });
    console.log(keyBytes.toString());
    var key = new CryptoJS.lib.WordArray.init(keyBytes.words, 32);
    var iv = new CryptoJS.lib.WordArray.init(keyBytes.words.splice(32 / 4), 16);
    var encrypted = CryptoJS.AES.encrypt(encryptString, key, { iv: iv });
    return encrypted;
}

function addExtraByteToChars(str) {
    let strResult = '';
    for (var i = 0; i < str.length; ++i) {
        // append a NUL character after each char to mimic UTF-16 LE bytes
        strResult += str.charAt(i) + String.fromCharCode(0);
    }
    return strResult;
}
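For clarity, here is what addExtraByteToChars produces; for ASCII-range input this matches the UTF-16 LE byte layout that C#'s Encoding.Unicode emits:

const padded = addExtraByteToChars("Ex");
console.log(padded.length);                          // 4
console.log([...padded].map(c => c.charCodeAt(0)));  // [69, 0, 120, 0]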
Explanation:
The C# code in Zergatul's answer (thanks to him/her) was using ASCII to convert the plainText into bytes, while my C# code was using Unicode. Unicode assigns an extra byte to each character in the resulting byte array, which did not affect the generation of the key and IV bytes, but did affect the result, since the length of the encrypted string depends on the length of the bytes generated from the plainText.
This can be seen in the following bytes generated for each of them, using "Example String" and "Example Key" as the plainText and secretKey respectively -
ASCII
clearBytes: [69,120,97,109,112,108,101,32,83,116,114,105,110,103,]
encryptor.Key: [123,213,18,82,141,249,182,218,247,31,246,83,80,77,195,134,230,92,0,125,232,210,135,115,145,193,140,239,228,225,183,13,]
encryptor.IV: [101,74,46,177,46,233,68,252,83,169,211,13,249,61,118,167,]
Result: eQus9GLPKULh9vhRWOJjog==
Unicode:
clearBytes: [69,0,120,0,97,0,109,0,112,0,108,0,101,0,32,0,83,0,116,0,114,0,105,0,110,0,103,0,]
encryptor.Key: [123,213,18,82,141,249,182,218,247,31,246,83,80,77,195,134,230,92,0,125,232,210,135,115,145,193,140,239,228,225,183,13,]
encryptor.IV: [101,74,46,177,46,233,68,252,83,169,211,13,249,61,118,167,]
Result: eAQO+odxOdGlNRB81SHR2XzJhyWtz6XmQDko9HyDe0w=
The JS result was similar too, which confirmed that it was using ASCII byte conversion -
keyBytes: 7bd512528df9b6daf71ff653504dc386e65c007de8d2877391c18cefe4e1b70d654a2eb12ee944fc53a9d30df93d76a7
key: 7bd512528df9b6daf71ff653504dc386e65c007de8d2877391c18cefe4e1b70d
iv: 654a2eb12ee944fc53a9d30df93d76a7
Thus I just needed to increase the length of the plainText so that it produces the Unicode-equivalent bytes. Since Encoding.Unicode assigns two bytes per character in the byte array, keeping the second byte as 0 (for ASCII-range text), I basically created a gap between the plainText's characters and filled that gap with a character whose code is 0, using the addExtraByteToChars() function. And it made all the difference.
It's a workaround for sure, but it started working for my scenario. I suppose this may or may not prove useful to others, so I'm sharing the findings. If anyone can suggest a better implementation of the addExtraByteToChars() function (probably a proper term for this conversion, instead of "ASCII to Unicode", or a better, more efficient, and less hacky way to do it), please do.