I have a string containing binary data in JavaScript. Now I want to read, for example, an integer from it. So I get the first 4 characters, use charCodeAt, do some shifting, etc. to get an integer.
The problem is that strings in JavaScript are UTF-16 (rather than ASCII), so charCodeAt often returns values higher than 255.
The Mozilla reference states that "The first 128 Unicode code points are a direct match of the ASCII character encoding." (What about values above 127?)
How can I convert the result of charCodeAt to an ASCII value? Or is there a better way to convert a string of four characters to a 4 byte integer?
I believe you can do this with relatively simple bit operations:
function stringToBytes ( str ) {
    var ch, st, re = [];
    for (var i = 0; i < str.length; i++ ) {
        ch = str.charCodeAt(i);  // get char
        st = [];                 // set up "stack"
        do {
            st.push( ch & 0xFF ); // push byte to stack
            ch = ch >> 8;         // shift value down by 1 byte
        }
        while ( ch );
        // add stack contents to result
        // done because chars have "wrong" endianness
        re = re.concat( st.reverse() );
    }
    // return an array of bytes
    return re;
}

stringToBytes( "A\u1242B\u4123C" ); // [65, 18, 66, 66, 65, 35, 67]
From there it is a simple matter to read the byte array as if it were memory and combine the bytes into larger numbers:
function getIntAt ( arr, offs ) {
    return (arr[offs+0] << 24) +
           (arr[offs+1] << 16) +
           (arr[offs+2] << 8) +
            arr[offs+3];
}

function getWordAt ( arr, offs ) {
    return (arr[offs+0] << 8) +
            arr[offs+1];
}
'\\u' + getWordAt( stringToBytes( "A\u1242" ), 1 ).toString(16); // "\u1242"
Borgar's answer seems correct.
Just wanted to clarify one point. JavaScript performs bitwise operations on 32-bit signed integers, where the left-most bit is the sign bit. That is,
getIntAt([0x7f,0,0,0],0).toString(16) // "7f000000"
getIntAt([0x80,0,0,0],0).toString(16) // "-80000000"
However, for octet-data processing (e.g. network streams), you usually want the unsigned int representation. This can be accomplished by appending the '>>> 0' (zero-fill right shift) operator, which tells JavaScript to treat the value as unsigned.
function getUIntAt ( arr, offs ) {
    return (arr[offs+0] << 24) +
           (arr[offs+1] << 16) +
           (arr[offs+2] << 8) +
            arr[offs+3] >>> 0;
}
getUIntAt([0x80,0,0,0],0).toString(16) // "80000000"
Here are two methods for encoding a UTF-8 string to a byte array and decoding it back.
var utf8 = {};

utf8.toByteArray = function(str) {
    var byteArray = [];
    for (var i = 0; i < str.length; i++)
        if (str.charCodeAt(i) <= 0x7F)
            byteArray.push(str.charCodeAt(i));
        else {
            var h = encodeURIComponent(str.charAt(i)).substr(1).split('%');
            for (var j = 0; j < h.length; j++)
                byteArray.push(parseInt(h[j], 16));
        }
    return byteArray;
};

utf8.parse = function(byteArray) {
    var str = '';
    for (var i = 0; i < byteArray.length; i++)
        str += byteArray[i] <= 0x7F ?
                   byteArray[i] === 0x25 ? "%25" : // %
                   String.fromCharCode(byteArray[i]) :
               "%" + byteArray[i].toString(16).toUpperCase();
    return decodeURIComponent(str);
};
// sample
var str = "Да!";
var ba = utf8.toByteArray(str);
alert(ba); // 208, 148, 208, 176, 33
alert(ba.length); // 5
alert(utf8.parse(ba)); // Да!
While Borgar answers the question correctly, his solution is pretty slow. It took me a while to track it down (I used his function somewhere in a larger project), so I thought I would share my insight.
I ended up with something like Kadm's. It's not just a few percent faster, it's about 500 times faster (no exaggeration!). I wrote a little benchmark, so you can see it for yourself :)
function stringToBytesFaster ( str ) {
    var ch, st, re = [], j = 0;
    for (var i = 0; i < str.length; i++ ) {
        ch = str.charCodeAt(i);
        if (ch < 127) {
            re[j++] = ch & 0xFF;
        }
        else {
            st = []; // clear stack
            do {
                st.push( ch & 0xFF ); // push byte to stack
                ch = ch >> 8;         // shift value down by 1 byte
            }
            while ( ch );
            // add stack contents to result
            // done because chars have "wrong" endianness
            st = st.reverse();
            for (var k = 0; k < st.length; ++k)
                re[j++] = st[k];
        }
    }
    // return an array of bytes
    return re;
}
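For reference, a quick sanity check (my own example, not from the benchmark) showing that the fast path produces the same bytes as Borgar's original:

stringToBytesFaster( "A\u1242B" ); // [65, 18, 66, 66] -- same result as stringToBytes()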
Borgar's solution works perfectly. In case you want a more concrete implementation, you may want to have a look at the BinaryReader class from vjeux (which, for the record, is based on the binary-parser class from Jonas Raoni Soares Silva).
How did you get the binary data into the string in the first place? How the binary data gets encoded into a string is an IMPORTANT consideration, and you need an answer to that question before you can proceed.
One way I know of to get binary data into a string is to use the XHR object and set it to expect UTF-16.
Once it's in UTF-16, you can retrieve 16-bit numbers from the string using "....".charCodeAt(0), which will be a number between 0 and 65535.
Then, if you like, you can convert that number into two numbers between 0 and 255 like this:
var leftByte = mynumber >>> 8;
var rightByte = mynumber & 255;
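Going the other direction, here is a minimal sketch of my own (not part of this answer; the name int32FromUtf16 is made up and big-endian order is assumed) showing how two such 16-bit code units could be combined into one 32-bit integer:

function int32FromUtf16(str, offset) {
    var hi = str.charCodeAt(offset);     // first 16-bit code unit
    var lo = str.charCodeAt(offset + 1); // second 16-bit code unit
    return ((hi << 16) | lo) >>> 0;      // combine; >>> 0 forces an unsigned result
}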
An improvement to Borgar's solution:
...
do {
    st.unshift( ch & 0xFF ); // add byte to the front of the array
    ch = ch >> 8;            // shift value down by 1 byte
}
while ( ch );
// add contents to result -- no reverse() needed,
// since unshift already builds the bytes in the right order
re = re.concat( st );
...
One nice and quick hack is to use a combination of encodeURI and unescape:
t = [];
for (s = unescape(encodeURI("zażółć gęślą jaźń")), i = 0; i < s.length; ++i)
    t.push(s.charCodeAt(i));
t
[122, 97, 197, 188, 195, 179, 197, 130, 196, 135, 32, 103, 196, 153, 197, 155, 108, 196, 133, 32, 106, 97, 197, 186, 197, 132]
Perhaps some explanation is needed for why the heck this works, so let me break it into steps:
encodeURI("zażółć gęślą jaźń")
returns
"za%C5%BC%C3%B3%C5%82%C4%87%20g%C4%99%C5%9Bl%C4%85%20ja%C5%BA%C5%84"
which -- if you look closely -- is the original string in which every character with a value above 127 got replaced with (possibly more than one) percent-encoded hexadecimal byte.
For example, the letter "ż" became "%C5%BC". Note that encodeURI also escapes some regular ASCII characters, such as spaces, but that does not matter. What matters is that at this point each byte of the original string is either represented verbatim (as is the case with "z", "a", "g", or "j") or as a percent-encoded sequence of bytes (as with "ż", which was originally the two bytes 197 and 188 and got converted to %C5 and %BC).
Now, we apply unescape:
unescape("za%C5%BC%C3%B3%C5%82%C4%87%20g%C4%99%C5%9Bl%C4%85%20ja%C5%BA%C5%84")
which gives
"zażóÅÄ gÄÅlÄ jaźÅ"
If you are not a native Polish speaker you might not notice that this result is in fact quite different from the original "zażółć gęślą jaźń". For starters, it has a different number of characters :)
You can certainly tell that these strange versions of the capital letter A do not belong to the standard ASCII set. In fact, this "Å" has the value 197 (which is exactly C5 in hexadecimal).
Now, if you are like me, you would ask yourself: wait a minute... if this is really a sequence of bytes with values 122, 97, 197, 188, and JS really uses UTF, then why do I see these "Å¼" characters and not the original "ż"?
Well, the thing is (I believe) that this sequence 122, 97, 197, 188 (which we see when applying charCodeAt) is not a sequence of bytes but a sequence of codes. The character "Å" has the code 197, but it is actually a two-byte sequence: C3 85.
So the trick works because unescape treats the numbers occurring in a percent-encoded string as character codes, not as byte values. Or, to be more specific: unescape knows nothing about multibyte characters, so when it decodes bytes one by one it handles values lower than 128 just fine, but for values above 127 it simply returns a character whose code happens to equal the requested byte value. This "bug" is actually a useful feature.
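For completeness, the same built-ins can be used in the reverse direction. This is a sketch of my own (the helper name bytesToString is made up) that turns UTF-8 byte values back into a string with the complementary escape/decodeURIComponent combination:

function bytesToString(bytes) {
    // escape() percent-encodes every code unit above 0x7F, and decodeURIComponent()
    // then reassembles those percent-encoded UTF-8 sequences into proper characters
    var s = String.fromCharCode.apply(null, bytes);
    return decodeURIComponent(escape(s));
}

bytesToString([122, 97, 197, 188]); // "zaż"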
I'm going to assume for a second that your objective is to read arbitrary bytes from a string.
My first suggestion would be to make your string representation a hexadecimal representation of the binary data.
You can then read values by converting slices of the hex string back to numbers:
var BITS_PER_BYTE = 8;
var BITS_PER_CHAR = 4; // each hex digit encodes 4 bits

function readBytes(hexString, numBytes) {
    return Number( parseInt( hexString.substr(0, numBytes * (BITS_PER_BYTE / BITS_PER_CHAR)), 16 ) );
}

function removeBytes(hexString, numBytes) {
    return hexString.substr( numBytes * (BITS_PER_BYTE / BITS_PER_CHAR) );
}
The functions can then be used to read whatever you want:
var hex = '4ef2c3382fd';
alert( 'We had: ' + hex );
var intVal = readBytes(hex,2);
alert( 'Two bytes: ' + intVal.toString(2) );
hex = removeBytes(hex,2);
alert( 'Now we have: ' + hex );
You can then interpret the byte string however you want.
Hope this helps!
Cheers!
How do you convert decimal values to their hexadecimal equivalent in JavaScript?
Convert a number to a hexadecimal string with:
hexString = yourNumber.toString(16);
And reverse the process with:
yourNumber = parseInt(hexString, 16);
If you need to handle things like bit fields or 32-bit colors, then you need to deal with signed numbers. The JavaScript function toString(16) will return a negative hexadecimal number which is usually not what you want. This function does some crazy addition to make it a positive number.
function decimalToHexString(number)
{
    if (number < 0)
    {
        number = 0xFFFFFFFF + number + 1;
    }

    return number.toString(16).toUpperCase();
}
console.log(decimalToHexString(27));
console.log(decimalToHexString(48.6));
The code below will convert the decimal value d to hexadecimal. It also allows you to add padding to the hexadecimal result, so 0 will become 00 by default.
function decimalToHex(d, padding) {
    var hex = Number(d).toString(16);
    padding = typeof (padding) === "undefined" || padding === null ? padding = 2 : padding;

    while (hex.length < padding) {
        hex = "0" + hex;
    }

    return hex;
}
function toHex(d) {
    return ("0" + (Number(d).toString(16))).slice(-2).toUpperCase();
}
For completeness, if you want the two's-complement hexadecimal representation of a negative number, you can use the zero-fill right shift operator >>>. For instance:
> (-1).toString(16)
"-1"
> ((-2)>>>0).toString(16)
"fffffffe"
There is, however, one limitation: JavaScript bitwise operators treat their operands as a sequence of 32 bits, so what you get is the 32-bit two's complement.
With padding:
function dec2hex(i) {
    return (i + 0x10000).toString(16).substr(-4).toUpperCase();
}
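For example, assuming the function above:

dec2hex(255);  // "00FF"
dec2hex(4096); // "1000"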
The accepted answer did not take into account single-digit hexadecimal codes. This is easily adjusted by:
function numHex(s)
{
    var a = s.toString(16);
    if ((a.length % 2) > 0) {
        a = "0" + a;
    }
    return a;
}

and

function strHex(s)
{
    var a = "";
    for (var i = 0; i < s.length; i++) {
        a = a + numHex(s.charCodeAt(i));
    }
    return a;
}
I believe the above answers have been posted numerous times by others in one form or another. I wrap these in a toHex() function like so:
function toHex(s)
{
    var re = new RegExp(/^\s*(\+|-)?((\d+(\.\d+)?)|(\.\d+))\s*$/);
    if (re.test(s)) {
        return '#' + strHex( s.toString() );
    }
    else {
        return 'A' + strHex(s);
    }
}
Note that the numeric regular expression came from 10+ Useful JavaScript Regular Expression Functions to improve your web applications efficiency.
Update: After testing this thing several times I found an error (double quotes in the RegExp), so I fixed that. HOWEVER! After quite a bit of testing, and having read the post by almaz, I realized I could not get negative numbers to work.
Further, I did some reading up on this, and since all JavaScript numbers are stored as 64-bit words no matter what, I tried modifying the numHex code to get the 64-bit word. But it turns out you cannot do that. If you put "3.14159265" AS A NUMBER into a variable, all you will be able to get is the "3", because the fractional portion is only accessible by repeatedly multiplying the number by ten (i.e. 10.0). To put that another way: ANDing against the hexadecimal value 0xF causes the floating-point value to be converted to an integer first, which removes everything behind the decimal point, rather than taking the value as a whole (i.e. 3.14159265) and ANDing the floating-point value against 0xF.
So the best thing to do, in this case, is to convert the 3.14159265 into a string and then just convert the string. Because of the above, it also makes it easy to convert negative numbers, because the minus sign just becomes 0x2D on the front of the value.
So what I did, on determining that the variable contains a number, was simply convert it to a string and convert the string. This means that on the server side you will need to un-hex the incoming string and then determine whether the incoming information is numeric. You can do that easily by just adding a "#" to the front of numbers and an "A" to the front of a character string coming back. See the toHex() function.
Have fun!
After another year and a lot of thinking, I decided that the "toHex" function (and I also have a "fromHex" function) really needed to be revamped. The whole question was "How can I do this more efficiently?" I decided that a to/from hexadecimal function should not care whether something has a fractional part, but at the same time it should ensure that fractional parts are included in the string.
So then the question became, "How do you know you are working with a hexadecimal string?" The answer is simple: use the standard prefix that is already recognized around the world.
In other words, use "0x". So now my toHex function checks whether that prefix is already there, and if it is, it just returns the string that was sent to it. Otherwise, it converts the string, number, whatever. Here is the revised toHex function:
/////////////////////////////////////////////////////////////////////////////
// toHex(). Convert an ASCII string to hexadecimal.
/////////////////////////////////////////////////////////////////////////////
function toHex(s)
{
    if (s.substr(0,2).toLowerCase() == "0x") {
        return s;
    }

    var l = "0123456789ABCDEF";
    var o = "";

    if (typeof s != "string") {
        s = s.toString();
    }

    for (var i = 0; i < s.length; i++) {
        var c = s.charCodeAt(i);
        o = o + l.substr((c >> 4), 1) + l.substr((c & 0x0f), 1);
    }

    return "0x" + o;
}
This is a very fast function that takes into account single digits, floating point numbers, and even checks to see if the person is sending a hex value over to be hexed again. It only uses four function calls and only two of those are in the loop. To un-hex the values you use:
/////////////////////////////////////////////////////////////////////////////
// fromHex(). Convert a hex string to ASCII text.
/////////////////////////////////////////////////////////////////////////////
function fromHex(s)
{
    var start = 0;
    var o = "";

    if (s.substr(0,2).toLowerCase() == "0x") {
        start = 2;
    }

    if (typeof s != "string") {
        s = s.toString();
    }

    for (var i = start; i < s.length; i += 2) {
        var c = s.substr(i, 2);
        o = o + String.fromCharCode(parseInt(c, 16));
    }

    return o;
}
Like the toHex() function, the fromHex() function first looks for the "0x" and then translates the incoming information into a string if it isn't already one. I don't know how it wouldn't be a string, but just in case, I check. The function then goes through, grabbing two characters at a time and translating them into ASCII characters. If you want it to translate Unicode, you will need to change the loop to step by four characters at a time. But then you also need to ensure that the string is NOT divisible by four; if it is, then it is a standard hexadecimal string. (Remember the string has "0x" on the front of it.) A rough sketch of that variant follows.
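Here is that Unicode variant as a sketch of my own (the name fromHexUnicode is hypothetical; it assumes four hex digits per character and does no surrogate handling):

function fromHexUnicode(s)
{
    var start = (s.substr(0,2).toLowerCase() == "0x") ? 2 : 0;
    var o = "";

    for (var i = start; i < s.length; i += 4) {
        o = o + String.fromCharCode(parseInt(s.substr(i, 4), 16));
    }

    return o;
}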
A simple test script to show that -3.14159265, when converted to a string, is still -3.14159265.
<?php
echo <<<EOD
<html>
<head><title>Test</title>
<script>
var a = -3.14159265;
alert( "A = " + a );
var b = a.toString();
alert( "B = " + b );
</script>
</head>
<body>
</body>
</html>
EOD;
?>
Because of how JavaScript works with respect to the toString() function, all of those problems that were causing trouble before can be eliminated. Now all strings and numbers can be converted easily. Further, such things as objects will cause an error to be generated by JavaScript itself. I believe this is about as good as it gets. The only improvement left would be for W3C to just include a toHex() and fromHex() function in JavaScript.
Without the loop:
function decimalToHex(d) {
    var hex = Number(d).toString(16);
    hex = "000000".substr(0, 6 - hex.length) + hex;
    return hex;
}
// Or "#000000".substr(0, 7 - hex.length) + hex;
// Or whatever
// *Thanks to MSDN
Also, isn't it better to avoid loop tests that have to be re-evaluated on every iteration?
For example, instead of:
for (var i = 0; i < hex.length; i++){}
have
for (var i = 0, j = hex.length; i < j; i++){}
Combining some of these good ideas for an RGB-value-to-hexadecimal function (add the # elsewhere for HTML/CSS):
function rgb2hex(r, g, b) {
    if (g !== undefined)
        return Number(0x1000000 + r*0x10000 + g*0x100 + b).toString(16).substring(1);
    else
        return Number(0x1000000 + r[0]*0x10000 + r[1]*0x100 + r[2]).toString(16).substring(1);
}
Constrained/padded to a set number of characters:
function decimalToHex(decimal, chars) {
    return (decimal + Math.pow(16, chars)).toString(16).slice(-chars).toUpperCase();
}
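For example, assuming the function above:

decimalToHex(255, 2); // "FF"
decimalToHex(255, 4); // "00FF"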
For anyone interested, here's a JSFiddle comparing most of the answers given to this question.
And here's the method I ended up going with:
function decToHex(dec) {
    return (dec + Math.pow(16, 6)).toString(16).substr(-6);
}
Also, bear in mind that if you're looking to convert from decimal to hex for use in CSS as a color data type, you might instead prefer to extract the RGB values from the decimal and use rgb().
For example (JSFiddle):
let c = 4210330 // your color in decimal format
let rgb = [(c & 0xff0000) >> 16, (c & 0x00ff00) >> 8, (c & 0x0000ff)]
// Vanilla JS:
document.getElementById('some-element').style.color = 'rgb(' + rgb + ')'
// jQuery:
$('#some-element').css('color', 'rgb(' + rgb + ')')
This sets #some-element's CSS color property to rgb(64, 62, 154).
var number = 3200;
var hexString = number.toString(16);
The 16 is the radix: hexadecimal has 16 possible digit values :-)
function dec2hex(i)
{
    var result = "0000";
    if      (i >= 0    && i <= 15)    { result = "000" + i.toString(16); }
    else if (i >= 16   && i <= 255)   { result = "00"  + i.toString(16); }
    else if (i >= 256  && i <= 4095)  { result = "0"   + i.toString(16); }
    else if (i >= 4096 && i <= 65535) { result = i.toString(16); }
    return result;
}
If you want to convert a number to a hexadecimal representation of an RGBA color value, I've found this to be the most useful combination of several tips from here:
function toHexString(n) {
    if (n < 0) {
        n = 0xFFFFFFFF + n + 1;
    }
    return "0x" + ("00000000" + n.toString(16).toUpperCase()).substr(-8);
}
AFAIK comment 57807 is wrong and should be something like:
var hex = Number(d).toString(16);
instead of
var hex = parseInt(d, 16);
function decimalToHex(d, padding) {
    var hex = Number(d).toString(16);
    padding = typeof (padding) === "undefined" || padding === null ? padding = 2 : padding;

    while (hex.length < padding) {
        hex = "0" + hex;
    }

    return hex;
}
And if the number is negative?
Here is my version.
function hexdec(hex_string) {
    hex_string = ((hex_string.charAt(1) != 'X' && hex_string.charAt(1) != 'x') ? hex_string = '0X' + hex_string : hex_string);
    hex_string = (hex_string.charAt(2) < 8 ? hex_string = hex_string - 0x00000000 : hex_string = hex_string - 0xFFFFFFFF - 1);
    return parseInt(hex_string, 10);
}
As the accepted answer states, the easiest way to convert from decimal to hexadecimal is var hex = dec.toString(16). However, you may prefer to coerce the value to a number first, because a string such as "12" ignores the radix and "12".toString(16) would not be converted correctly.
// Avoids a hard-to-track-down bug by returning `c` instead of `12`
(+"12").toString(16);
To reverse the process you may also use the solution below, as it is even shorter.
var dec = +("0x" + hex);
It seems to be slower in Google Chrome and Firefox, but is significantly faster in Opera. See http://jsperf.com/hex-to-dec.
I'm doing conversion to hex string in a pretty large loop, so I tried several techniques in order to find the fastest one. My requirements were to have a fixed-length string as a result, and encode negative values properly (-1 => ff..f).
Simple .toString(16) didn't work for me since I needed negative values to be properly encoded. The following code is the quickest I've tested so far on 1-2 byte values (note that symbols defines the number of output symbols you want; for a 4-byte integer it should be 8):
var hex = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'];

function getHexRepresentation(num, symbols) {
    var result = '';
    while (symbols--) {
        result = hex[num & 0xF] + result;
        num >>= 4;
    }
    return result;
}
It performs faster than .toString(16) on 1-2 byte numbers and slower on larger numbers (when symbols >= 6), but still should outperform methods that encode negative values properly.
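For example (my own illustration, assuming the function above):

getHexRepresentation(255, 2); // "ff"
getHexRepresentation(-1, 4);  // "ffff"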
Converting hex color numbers to hex color strings:
A simple solution with toString and ES6 padStart for converting hex color numbers to hex color strings.
const string = `#${color.toString(16).padStart(6, '0')}`;
For example:
0x000000 will become #000000
0xFFFFFF will become #FFFFFF
How to convert decimal to hexadecimal in JavaScript
I wasn't able to find a brutally clean/simple decimal to hexadecimal conversion that didn't involve a mess of functions and arrays ... so I had to make this for myself.
function DecToHex(decimal) { // Data (decimal)
    var length = -1; // Base string length
    var string = ''; // Source 'string'
    var characters = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' ]; // Character array

    do { // Grab each nibble in reverse order because JavaScript has no unsigned left shift
        string += characters[decimal & 0xF]; // Mask the low nibble, get that character
        ++length;                            // Increment to length of string
    } while (decimal >>>= 4); // For next character shift right 4 bits, or break on 0

    decimal += 'x'; // decimal is now 0, so this turns it into the hex prefix string '0x'

    do
        decimal += string[length];
    while (length--); // Flip string forwards, with the prefixed '0x'

    return (decimal); // Return (hexadecimal)
}
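For example (my own check, assuming the function above):

DecToHex(3678); // "0xE5E"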
/* Original: */
D = 3678;    // Data (decimal)
C = 0xF;     // Check
A = D;       // Accumulate
B = -1;      // Base string length
S = '';      // Source 'string'
H = '0x';    // Destination 'string'

do {
    ++B;
    A &= C;
    switch (A) {
        case 0xA: A = 'A'; break;
        case 0xB: A = 'B'; break;
        case 0xC: A = 'C'; break;
        case 0xD: A = 'D'; break;
        case 0xE: A = 'E'; break;
        case 0xF: A = 'F'; break;
        default:  A = (A);
    }
    S += A;
    D >>>= 0x04;
    A = D;
} while (D);

do
    H += S[B];
while (B--);

S = B = A = C = D; // Zero out variables
alert(H);          // H holds the hexadecimal equivalent
You can do something like this in ECMAScript 6:
const toHex = num => (num).toString(16).toUpperCase();
If you are looking to convert large integers, i.e. numbers greater than Number.MAX_SAFE_INTEGER (9007199254740991), then you can use the following code:
const hugeNumber = "9007199254740991873839"; // Make sure it's a string
const hexOfHugeNumber = BigInt(hugeNumber).toString(16);
console.log(hexOfHugeNumber)
To sum it all up:
function toHex(i, pad) {
    if (typeof(pad) === 'undefined' || pad === null) {
        pad = 2;
    }

    var strToParse = i.toString(16);

    while (strToParse.length < pad) {
        strToParse = "0" + strToParse;
    }

    var finalVal = parseInt(strToParse, 16);

    if (finalVal < 0) {
        finalVal = 0xFFFFFFFF + finalVal + 1;
    }

    return finalVal;
}
However, if you don't need to convert it back to an integer at the end (i.e. for colors), then just making sure the values aren't negative should suffice.
I haven't found a clean answer that uses two's complement (negative numbers included) without checking whether the value is negative or positive. For that, here is my solution for one byte:
((0xFF + number +1) & 0x0FF).toString(16);
You can use this construction for any number of bytes; just add F's in the respective places. For example, for two bytes:
((0xFFFF + number +1) & 0x0FFFF).toString(16);
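For example (my own values, plugged into the expressions above):

((0xFF + (-1) + 1) & 0x0FF).toString(16);       // "ff"
((0xFFFF + (-300) + 1) & 0x0FFFF).toString(16); // "fed4"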
If you want to cast an integer array to a hexadecimal string:
s = "";
for(var i = 0; i < arrayNumber.length; ++i) {
s += ((0xFF + arrayNumber[i] +1) & 0x0FF).toString(16);
}
In case you're looking to convert to a 'full' JavaScript or CSS representation, you can use something like:
numToHex = function(num) {
    var r = ((0xff0000 & num) >> 16).toString(16),
        g = ((0x00ff00 & num) >> 8).toString(16),
        b = (0x0000ff & num).toString(16);
    if (r.length == 1) { r = '0' + r; }
    if (g.length == 1) { g = '0' + g; }
    if (b.length == 1) { b = '0' + b; }
    return '0x' + r + g + b; // ('#' instead of '0x' for CSS)
};

var dec = 5974678;
console.log( numToHex(dec) ); // 0x5b2a96
This is based on Prestaul's and Tod's solutions. However, it is a generalisation that accounts for the varying size of a variable (e.g. parsing a signed value from a microcontroller serial log).
function decimalToPaddedHexString(number, bitsize)
{
    let byteCount = Math.ceil(bitsize / 8);
    let maxBinValue = Math.pow(2, bitsize) - 1;

    /* In node.js this function fails for bitsize above 32 bits */
    if (bitsize > 32)
        throw "number above maximum value";

    /* Conversion to unsigned form based on */
    if (number < 0)
        number = maxBinValue + number + 1;

    return "0x" + (number >>> 0).toString(16).toUpperCase().padStart(byteCount * 2, '0');
}
Test script:
for (let n = 0; n < 64; n++) {
    let s = decimalToPaddedHexString(-1, n);
    console.log(`decimalToPaddedHexString(-1,${(n+"").padStart(2)}) = ${s.padStart(10)} = ${("0b"+parseInt(s).toString(2)).padStart(34)}`);
}
Test results:
decimalToPaddedHexString(-1, 0) = 0x0 = 0b0
decimalToPaddedHexString(-1, 1) = 0x01 = 0b1
decimalToPaddedHexString(-1, 2) = 0x03 = 0b11
decimalToPaddedHexString(-1, 3) = 0x07 = 0b111
decimalToPaddedHexString(-1, 4) = 0x0F = 0b1111
decimalToPaddedHexString(-1, 5) = 0x1F = 0b11111
decimalToPaddedHexString(-1, 6) = 0x3F = 0b111111
decimalToPaddedHexString(-1, 7) = 0x7F = 0b1111111
decimalToPaddedHexString(-1, 8) = 0xFF = 0b11111111
decimalToPaddedHexString(-1, 9) = 0x01FF = 0b111111111
decimalToPaddedHexString(-1,10) = 0x03FF = 0b1111111111
decimalToPaddedHexString(-1,11) = 0x07FF = 0b11111111111
decimalToPaddedHexString(-1,12) = 0x0FFF = 0b111111111111
decimalToPaddedHexString(-1,13) = 0x1FFF = 0b1111111111111
decimalToPaddedHexString(-1,14) = 0x3FFF = 0b11111111111111
decimalToPaddedHexString(-1,15) = 0x7FFF = 0b111111111111111
decimalToPaddedHexString(-1,16) = 0xFFFF = 0b1111111111111111
decimalToPaddedHexString(-1,17) = 0x01FFFF = 0b11111111111111111
decimalToPaddedHexString(-1,18) = 0x03FFFF = 0b111111111111111111
decimalToPaddedHexString(-1,19) = 0x07FFFF = 0b1111111111111111111
decimalToPaddedHexString(-1,20) = 0x0FFFFF = 0b11111111111111111111
decimalToPaddedHexString(-1,21) = 0x1FFFFF = 0b111111111111111111111
decimalToPaddedHexString(-1,22) = 0x3FFFFF = 0b1111111111111111111111
decimalToPaddedHexString(-1,23) = 0x7FFFFF = 0b11111111111111111111111
decimalToPaddedHexString(-1,24) = 0xFFFFFF = 0b111111111111111111111111
decimalToPaddedHexString(-1,25) = 0x01FFFFFF = 0b1111111111111111111111111
decimalToPaddedHexString(-1,26) = 0x03FFFFFF = 0b11111111111111111111111111
decimalToPaddedHexString(-1,27) = 0x07FFFFFF = 0b111111111111111111111111111
decimalToPaddedHexString(-1,28) = 0x0FFFFFFF = 0b1111111111111111111111111111
decimalToPaddedHexString(-1,29) = 0x1FFFFFFF = 0b11111111111111111111111111111
decimalToPaddedHexString(-1,30) = 0x3FFFFFFF = 0b111111111111111111111111111111
decimalToPaddedHexString(-1,31) = 0x7FFFFFFF = 0b1111111111111111111111111111111
decimalToPaddedHexString(-1,32) = 0xFFFFFFFF = 0b11111111111111111111111111111111
Thrown: 'number above maximum value'
Note: Not too sure why it fails above a bitsize of 32 (presumably because the >>> 0 step, like all JavaScript bitwise operations, only works on 32-bit values).
rgb(255, 255, 255) // returns FFFFFF
rgb(255, 255, 300) // returns FFFFFF
rgb(0,0,0) // returns 000000
rgb(148, 0, 211) // returns 9400D3
function rgb(...values) {
    return values.reduce((acc, cur) => {
        let val = cur >= 255 ? 'ff' : cur <= 0 ? '00' : Number(cur).toString(16);
        return acc + (val.length === 1 ? '0' + val : val);
    }, '').toUpperCase();
}
Arbitrary precision
This solution takes a decimal string as input and returns a hex string. Decimal fractions are supported. Algorithm:
- split the number into sign (s), integer part (i) and fractional part (f), e.g. for -123.75 we have s=true, i=123, f=75
- integer part to hex:
  - if i='0', stop
  - get the modulo: m=i%16 (in arbitrary precision)
  - convert m to a hex digit and prepend it to the result string
  - for the next step calculate the integer part i=i/16 (in arbitrary precision)
- fractional part:
  - count the fractional digits n
  - multiply: k=f*16 (in arbitrary precision)
  - split k into a right part with n digits, which becomes the new f, and a left part with the remaining digits, which goes to d
  - convert d to hex and append it to the result
  - finish when the number of result fractional digits is enough
// #param decStr - string with a decimal number (sign and fraction allowed)
// #param fracDigits - number of hex digits to generate after the point
function dec2HexArbitrary(decStr, fracDigits=0) {
    // Helper: divide arbitrary precision number by js number
    // #param decStr - string with non-negative integer
    // #param divisor - positive integer
    function arbDivision(decStr, divisor)
    {
        // algorithm https://www.geeksforgeeks.org/divide-large-number-represented-string/
        let ans = '';
        let idx = 0;
        let temp = +decStr[idx];

        while (temp < divisor) temp = temp * 10 + +decStr[++idx];

        while (decStr.length > idx) {
            ans += (temp / divisor) | 0;
            temp = (temp % divisor) * 10 + +decStr[++idx];
        }

        if (ans.length == 0) return "0";

        return ans;
    }

    // Helper: calc modulo of arbitrary precision number
    // #param decStr - string with non-negative integer
    // #param mod - positive integer
    function arbMod(decStr, mod) {
        // algorithm https://www.geeksforgeeks.org/how-to-compute-mod-of-a-big-number/
        let res = 0;

        for (let i = 0; i < decStr.length; i++)
            res = (res * 10 + +decStr[i]) % mod;

        return res;
    }

    // Helper: multiply arbitrary precision integer by js number
    // #param decStr - string with non-negative integer
    // #param mult - positive integer
    function arbMultiply(decStr, mult) {
        let r = '';
        let m = 0;

        for (let i = decStr.length - 1; i >= 0; i--) {
            let n = m + mult * (+decStr[i]);
            r = (i ? n % 10 : n) + r;
            m = n / 10 | 0;
        }

        return r;
    }

    // dec2hex algorithm starts here
    let h = '0123456789abcdef';                                        // hex 'alphabet'
    let m = decStr.match(/-?(.*?)\.(.*)?/) || decStr.match(/-?(.*)/);  // separate sign, integer, fractional
    let i = m[1].replace(/^0+/, '').replace(/^$/, '0');                // integer part (without sign and leading zeros)
    let f = (m[2] || '0').replace(/0+$/, '').replace(/^$/, '0');       // fractional part (without trailing zeros)
    let s = decStr[0] == '-';                                          // sign
    let r = '';                                                        // result

    if (i == '0') r = '0';

    while (i != '0') {            // integer part
        r = h[arbMod(i, 16)] + r;
        i = arbDivision(i, 16);
    }

    if (fracDigits) r += ".";

    let n = f.length;

    for (let j = 0; j < fracDigits; j++) {  // fractional part
        let k = arbMultiply(f, 16);
        f = k.slice(-n);
        let d = k.slice(0, k.length - n);
        r += d.length ? h[+d] : '0';
    }

    return (s ? '-' : '') + r;
}
// -----------
// TESTS
// -----------

let tests = [
    ["0", 2],
    ["000", 2],
    ["123", 0],
    ["-123", 0],
    ["00.000", 2],
    ["255.75", 5],
    ["-255.75", 5],
    ["127.999", 32],
];

console.log('Input      Standard          Arbitrary');

tests.forEach(t => {
    let nonArb = (+t[0]).toString(16).padEnd(17, ' ');
    let arb = dec2HexArbitrary(t[0], t[1]);
    console.log(t[0].padEnd(10, ' '), nonArb, arb);
});
// Long Example (40 digits after dot)
let example = "123456789012345678901234567890.09876543210987654321"
console.log(`\nLong Example:`);
console.log('dec:',example);
console.log('hex: ',dec2HexArbitrary(example,40));
The problem is basically how many padding zeros to expect.
If you expect the strings 01 and 11 from the numbers 1 and 17, it's better to use a Buffer as a bridge: the number is turned into bytes, and the hex is then just an output format of those bytes. The byte layout is well controlled by Buffer methods such as writeUInt32BE, writeInt16LE, etc.
import { Buffer } from 'buffer';

function toHex(n) { // 4 bytes
    const buff = Buffer.alloc(4);
    buff.writeInt32BE(n);
    return buff.toString('hex');
}
> toHex(1)
'00000001'
> toHex(17)
'00000011'
> toHex(-1)
'ffffffff'
> toHex(-1212)
'fffffb44'
> toHex(1212)
'000004bc'
Here's my solution:
hex = function(number) {
    return '0x' + Math.abs(number).toString(16);
};
The question says: "How to convert decimal to hexadecimal in JavaScript". While the question does not specify that the hexadecimal string should begin with a 0x prefix, anybody who writes code should know that 0x is added to hexadecimal codes to distinguish them from programmatic identifiers and other numbers (1234 could be hexadecimal, decimal, or even octal).
Therefore, to correctly answer this question, for the purpose of script-writing, you must add the 0x prefix.
The Math.abs(N) function converts negatives to positives, and as a bonus, it doesn't look like somebody ran it through a wood-chipper.
The answer I wanted would have had a field-width specifier, so we could, for example, show 8/16/32/64-bit values the way you would see them listed in a hexadecimal editing application. That is the actual, correct answer.
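Here is a rough sketch of that idea (my own addition; the name hexWidth and its bits parameter are made up), padding the absolute value to a fixed number of hex digits:

hexWidth = function(number, bits) {
    var digits = bits / 4;                               // 4 bits per hex digit
    var s = Math.abs(number).toString(16).toUpperCase();
    while (s.length < digits) { s = '0' + s; }
    return '0x' + s;
};

hexWidth(255, 16); // "0x00FF"
hexWidth(255, 32); // "0x000000FF"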