How to get value of a UInt64, toString only way? - javascript

I have this:
"ctypes.UInt64("7")"
It is returned by this:
var chars = SendMessage(hToolbar, TB_GETBUTTONTEXTW, local_tbb.idCommand, ctypes.voidptr_t(0));
so
console.log('chars=', chars, chars.toString(), uneval(chars));
gives
'chars=' 'UInt64 { }' "7" 'ctypes.UInt64("7")'
So I can get the value by calling chars.toString(), but then I have to run parseInt on that. Is there any way to read it like a property, like chars.UInt64?

The problem with 64-bit integers in js-ctypes is that Javascript lacks a compatible type. All Javascript numbers are IEEE double precision floating point numbers (double), and those can represent 53-bit integers at most. So you shouldn't even be trying to parse the int yourself, unless you know for a fact that the result would fit into a double. E.g. You cannot know this for pointers.
E.g. consider the following:
// 6 * 8-bit = 48 bit; 48 < 53, so this is OK
((parseInt("0xffffffffffff", 16) + 2) == parseInt("0xffffffffffff", 16)) == false
// However, 7 * 8-bit = 56 bit; 56 > 53, so this is not OK
((parseInt("0xffffffffffffff", 16) + 2) == parseInt("0xffffffffffffff", 16)) == true
// Oops, those compared equal, because a double precision floating point
// cannot actually hold the parseInt result, which is still well below 64-bit!
Let's deal with 64-bit integers in JS properly...
If you just want to do comparisons, use UInt64.compare()/Int64.compare(), e.g.
// number == another number
ctypes.UInt64.compare(ctypes.UInt64("7"), ctypes.UInt64("7")) == 0
// number != another number
ctypes.UInt64.compare(ctypes.UInt64("7"), ctypes.UInt64("6")) != 0
// number > another number
ctypes.UInt64.compare(ctypes.UInt64("7"), ctypes.UInt64("6")) > 0
// number < another number
ctypes.UInt64.compare(ctypes.UInt64("7"), ctypes.UInt64("8")) < 0
If you need the result, but are not sure it fits into a 32-bit unsigned integer, you can detect whether you're dealing with a 32-bit unsigned integer that is just packed into a UInt64:
ctypes.UInt64.compare(ctypes.UInt64("7"), ctypes.UInt64("0xffffffff")) <= 0
The analog for 32-bit signed integers uses Int64, but you need to compare against both the minimum and the maximum:
ctypes.Int64.compare(ctypes.Int64("7"), ctypes.Int64("2147483647")) <= 0 &&
ctypes.Int64.compare(ctypes.Int64("7"), ctypes.Int64("-2147483648")) >= 0
So, once you know or have detected that a value will fit into a JS double, it is safe to call parseInt on it:
var number = ...;
if (ctypes.UInt64.compare(number, ctypes.UInt64("0xffffffff")) > 0) {
throw Error("Whoops, unexpectedly large value that our code would not handle correctly");
}
number = parseInt(number.toString(), 10);
(For the sake of completeness: there are also UInt64.hi()/Int64.hi() and UInt64.lo()/Int64.lo() to get the high and low 32 bits of real 64-bit integers, so you can do 64-bit integer math yourself, but beware of endianness.)
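A rough sketch of what that looks like (assuming a privileged Firefox context where ctypes is available and an engine with BigInt support; the value here is made up for illustration):
// Recombine the two 32-bit halves of a ctypes.UInt64 into a BigInt.
var value = ctypes.UInt64("0x1122334455667788");
var hi = ctypes.UInt64.hi(value); // high 32 bits, as a JS number
var lo = ctypes.UInt64.lo(value); // low 32 bits, as a JS number
var big = (BigInt(hi) << 32n) | BigInt(lo >>> 0);
console.log(big.toString(16)); // "1122334455667788"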
PS: The return type of SendMessage is intptr_t, not uintptr_t, which is important here because SendMessage(hwnd, TB_GETBUTTONTEXT, ...) may return -1 on failure!
So putting all this together (untested):
var SendMessage = user32.declare(
'SendMessageW',
ctypes.winapi_abi,
ctypes.intptr_t,
ctypes.voidptr_t, // HWND
ctypes.uint32_t, // MSG
ctypes.uintptr_t, // WPARAM
ctypes.intptr_t // LPARAM
);
// ...
var chars = SendMessage(hToolbar, TB_GETBUTTONTEXTW, local_tbb.idCommand, ctypes.voidptr_t(0));
if (ctypes.Int64.compare(chars, ctypes.Int64("0")) < 0) {
throw new Error("TB_GETBUTTONTEXT returned a failure (negative value)");
}
if (ctypes.Int64.compare(chars, ctypes.Int64("32768")) > 0) {
throw new Error("TB_GETBUTTONTEXT returned unreasonably large number > 32KiB");
}
chars = parseInt(chars.toString(), 10);

Related

How to implement unsigned right shift for BigInt in JavaScript?

I tried this sort of implementation, but it doesn't appear to be working.
function urs32(n, amount) {
const mask = (1 << (32 - amount)) - 1
return (n >> amount) & mask
}
function flip32(n) {
const mask = (1 << 32) - 1
return ~n & mask
}
log(~0b10101010 >>> 0, urs32(~0b10101010, 0))
log(~0b10101010 >>> 0, flip32(0b10101010))
function log(a, b) {
console.log(a.toString(2), b.toString(2))
}
I would expect a to equal b in both cases, if done right. Basically I am trying to flip 32 bits (so 1s become 0s, and 0s become 1s). I see that 1 << 32 === 0, so to get the value I do 2 ** 32, but it still doesn't work.
How do you implement the equivalent of ~n >>> 0 on a BigInt?
Basically what I am trying to do is create the countLeadingOnes functions (out of the countLeadingZeroes functions), like so:
const LEADING_ZERO_BIT_TABLE = makeLeadingZeroTable()
function makeLeadingZeroTable() {
let i = 0
const table = new Uint8Array(256).fill(0)
while (i < 256) {
let count = 8
let index = i
while (index > 0) {
index = (index / 2) | 0
count--
}
table[i] = count
i++
}
return table
}
function countLeadingZeroes32JS(n)
{
let accum = LEADING_ZERO_BIT_TABLE[n >>> 24];
if (accum === 8) {
accum += LEADING_ZERO_BIT_TABLE[(n >>> 16)]
}
if (accum === 16) {
accum += LEADING_ZERO_BIT_TABLE[(n >>> 8)]
}
if (accum === 24) {
accum += LEADING_ZERO_BIT_TABLE[ n ]
}
return accum;
}
function countLeadingZeroes16JS(n)
{
let accum = LEADING_ZERO_BIT_TABLE[n >>> 8]
if (accum === 8) {
accum += LEADING_ZERO_BIT_TABLE[n]
}
return accum;
}
function countLeadingZeroes8JS(n)
{
return LEADING_ZERO_BIT_TABLE[n]
}
console.log('countLeadingZeroes32JS', countLeadingZeroes32JS(0b10100010001000100010001000100010))
console.log('countLeadingZeroes32JS', countLeadingZeroes32JS(0b00100010001000100010001000100010))
console.log('countLeadingZeroes32JS', countLeadingZeroes32JS(0b00000010001000100010001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b1010001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b0010001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b0000001000100010))
console.log('countLeadingZeroes16JS', countLeadingZeroes16JS(0b0000000000100010))
console.log('countLeadingZeroes8JS', countLeadingZeroes8JS(0b10100010))
console.log('countLeadingZeroes8JS', countLeadingZeroes8JS(0b00100010))
console.log('countLeadingZeroes8JS', countLeadingZeroes8JS(0b00000010))
function countLeadingOnes32JS(n) {
return countLeadingZeroes32JS(~n >>> 0)
}
function countLeadingOnes16JS(n) {
return countLeadingZeroes16JS(~n >>> 0)
}
function countLeadingOnes8JS(n) {
return countLeadingZeroes8JS(~n >>> 0)
}
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b00100010001000100010001000100010))
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b11100010001000100010001000100010))
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b11111100001000100010001000100010))
console.log('countLeadingOnes16JS', countLeadingOnes16JS(0b0100001000100010))
console.log('countLeadingOnes16JS', countLeadingOnes16JS(0b1111110000100010))
console.log('countLeadingOnes16JS', countLeadingOnes16JS(0b1111111111000010))
console.log('countLeadingOnes8JS', countLeadingOnes8JS(0b01000010))
console.log('countLeadingOnes8JS', countLeadingOnes8JS(0b11000010))
console.log('countLeadingOnes8JS', countLeadingOnes8JS(0b11111100))
But it appears that ~n >>> 0 doesn't work on 32-bit integers. How to get this working properly?
How to implement unsigned right shift for BigInt in JavaScript?
Unsigned right-shift is difficult to define meaningfully for arbitrary-size integers, so before you (or anyone) can implement it, you'll have to decide how you want it to behave.
That said, considering the rest of this question, I don't see why you would even need this.
I would expect for a to equal b in both cases
Why would it? Unsigned right-shift and bit flipping are different operations and produce different results.
I see that 1 << 32 === 0
Nope, 1 << 32 === 1. JavaScript (like x86 CPUs) performs an implicit &31 on the shift amount, so since 32 & 31 === 0, ... << 32 is the same as ... << 0.
How do you implement the equivalent of ~n >>> 0 on a BigInt?
The equivalent of ~n is ~n. (That's not a typo. It's literally the same thing.)
The equivalent of ... >>> 0 is BigInt.asUintN(32, ...). (Note that neither the Number version nor the BigInt version shifts anything, so this doesn't answer your headline question "how to implement USR for BigInt".)
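A minimal sketch of that equivalence, using only standard BigInt features:
// BigInt version of flipping the low 32 bits and reading them as unsigned:
const n = 0b10101010n;
const flipped = BigInt.asUintN(32, ~n); // reinterpret ~n as an unsigned 32-bit value
console.log(flipped.toString(2)); // 11111111111111111111111101010101
// Same bit pattern as the Number version:
console.log((~0b10101010 >>> 0).toString(2));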
it appears that ~n >>> 0 doesn't work on 32-bit integers.
It sure does work. In fact, it only works on 32-bit integers.
The >>> 0 part is completely unnecessary though, you could just drop it.
The reason why this line:
console.log('countLeadingOnes32JS', countLeadingZeroes32JS(0b00100010001000100010001000100010))
isn't producing the number of leading ones is because the function it's calling is ...Zeroes...; an apparent copy-paste bug.
The reason why countLeadingOnes16JS isn't working correctly is because ~ in JavaScript always flips 32 bits. Since a 16-bit number's 32-bit representation has (at least) 16 leading zeros, those all become ones after flipping, and countLeadingZeroes16JS gets an input that's far bigger than it can handle: LEADING_ZERO_BIT_TABLE[n >>> 8] looks up an element that doesn't exist in the table, because the result of n >>> 8 is a 24-bit number in this case, not an 8-bit number. The solution is to use a mask after flipping; a valid implementation of clo16 might be:
function countLeadingOnes16(n) {
return countLeadingZeroes16(~n & 0xFFFF);
}
No BigInts and no >>> 0 required.
countLeadingOnes8 is similar.
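That is, a sketch following the same masking pattern (reusing countLeadingZeroes8JS from the question):
function countLeadingOnes8(n) {
  return countLeadingZeroes8JS(~n & 0xFF); // flip, then keep only the low 8 bits
}
console.log(countLeadingOnes8(0b11000010)); // 2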
You may want to read https://en.wikipedia.org/wiki/Two%27s_complement (or some other description of that concept) to understand what's going on with bitwise operations on negative numbers.
You may also want to learn how to debug your own code. There's a range of techniques: for example, you could have:
inserted console.log statements for intermediate results,
or stepped through execution in a debugger,
or simply evaluated small snippets in the console,
any of which would have made it very easy for you to see what's happening on the path from input number to end result.
For anyone else reading this: there's Math.clz32, which is highly efficient because it gets compiled to a machine instruction, so implementing countLeadingZeros by hand is unnecessary and wasteful. For smaller widths, just subtract: function clz8(n) { return Math.clz32(n) - 24; }
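For example, the leading-ones counts then reduce to one-liners built on the same identity (a sketch using only the standard Math.clz32):
const clo32 = n => Math.clz32(~n);               // ~ flips all 32 bits, then count zeros
const clo16 = n => Math.clz32(~n & 0xFFFF) - 16; // mask to 16 bits, adjust for the width
console.log(clo32(0b11100010001000100010001000100010 | 0)); // 3
console.log(clo16(0b1111110000100010)); // 6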

How to convert a number starting with 0 to the string equivalent of its value?

I want to convert a number starting with 0 to the string equivalent of its value.
If I run
var num = 12;
var int = num.toString();
console.log(int);
it logs 12 as expected, but if I apply toString() to a number starting with 0, like
var num = 012;
var int = num.toString();
console.log(int);
it logs 10, why?
A number starting with 0 is interpreted as octal (base 8).
In sloppy mode (the default), numbers starting with 0 are interpreted as being written in octal (base 8) instead of decimal (base 10). It has been like that since the first released version of Javascript, and the syntax is shared with other programming languages. It is confusing, and has led to many hard-to-detect bugs.
You can enable strict mode by adding "use strict" as the first non-comment statement in your script or function. It removes some of the quirks. It is still possible to write octal numbers in strict mode, but you have to use the same scheme as with hexadecimal and binary: 0o20 is the octal representation of decimal 16.
The same problem exists with the function parseInt, which takes up to two parameters, the second being the radix. If it is not specified, numbers starting with 0 were treated as octal up to ECMAScript 5, where this was changed to decimal. So if you use parseInt, specify the radix to be sure you get what you expect.
"use strict";
// Different ways to write the same number:
const values = [
0b10000, // binary
0o20, // octal
16, // decimal,
0x10 // hexadecimal
];
console.log("As binary:", values.map( value => value.toString(2)).join());
console.log("As decimal:", values.join());
console.log("As ocal", values.map( value => value.toString(8)).join());
console.log("As hexadecimal:", values.map( value => value.toString(16)).join());
console.log("As base36:", values.map( value => value.toString(36)).join());
All you have to do is wrap the number in String(), that is:
var num = 12;
var int = String(num);
console.log(int);
And if you want it to look like 0012, all you have to do is:
var num = 12;
var int = String(num).padStart(4, '0');
console.log(int);

parseInt not converting decimal to binary?

From my understanding, the binary number system uses a set of two digits, 0's and 1's, to perform calculations.
Why does:
console.log(parseInt("11", 2)); return 3 and not 00001011?
http://www.binaryhexconverter.com/decimal-to-binary-converter
Use toString() instead of parseInt:
11..toString(2)
var str = "11";
var bin = (+str).toString(2);
console.log(bin)
According to JavaScript's documentation:
The following examples all return NaN:
parseInt("546", 2); // Digits are not valid for binary representations
parseInt(number, base) returns the decimal value of the number given in the number parameter, interpreted in base base.
And 11 in binary is the equivalent of 3 in the decimal number system.
var a = {};
window.addEventListener('input', function(e){
a[e.target.name] = e.target.value;
console.clear();
console.log( parseInt(a.number, a.base) );
}, false);
<input name='number' placeholder='number' value='1010'>
<input name='base' placeholder='base' size=3 value='2'>
As stated in the documentation for parseInt: The parseInt() function parses a string argument and returns an integer of the specified radix (the base in mathematical numeral systems).
So, it is doing exactly what it should do: converting a binary value of 11 to an integer value of 3.
If you are trying to convert an integer value of 11 to a binary value, then you need to use the Number.toString method:
console.log(11..toString(2)); // 1011
.toString(2) works when applied to a Number type.
255.toString(2) // syntax error
"255".toString(2); // 255
var n=255;
n.toString(2); // 11111111
// or in short
Number(255).toString(2) // 11111111
// or use two dots so that the parser doesn't
// mistake the dot for a decimal point, as in 250.x
255..toString(2) // 11111111
The parseInt() function parses a string argument and returns an integer of the specified radix (the base in mathematical numeral systems).
So you are telling the system you want to convert 11, as binary, to a decimal.
As for the website you are referring to: if you look closer, it is actually using JS to issue an HTTP GET and do the conversion on the web server side, something like the following:
http://www.binaryhexconverter.com/hesapla.php?fonksiyon=dec2bin&deger=11&pad=false
The shortest method I've found for converting a decimal string into a binary string is:
const input = "54654";
const output = (input*1).toString(2);
console.log(output);
I think you should understand the math behind decimal-to-binary conversion. Here is a simple implementation in javascript.
main();
function main() {
let input = 12;
let result = decimalToBinary(input);
console.log(result);
}
function decimalToBinary(input) {
let base = 2;
let inputNumber = input;
let quotient = 0;
let remainderArray = [];
let resultArray = [];
if (inputNumber) {
while (inputNumber) {
quotient = parseInt(inputNumber / base);
remainderArray.push(inputNumber % base);
inputNumber = quotient;
}
for (let i = remainderArray.length - 1; i >= 0; i--) {
resultArray.push(remainderArray[i]);
}
return parseInt(resultArray.join(''));
} else {
return `${input} is not a valid input`;
}
}
This is an old question, however I have another solution that might contribute a little bit. I usually use this function to convert a decimal number into a binary:
function dec2bin(dec) {
return (dec >>> 0).toString(2);
}
The dec >>> 0 part converts the number into an unsigned 32-bit integer, and then the toString(radix) function is called to return a binary string. It is simple and clean.
Note: a radix is used for representing a numeric value. Must be an integer between 2 and 36. For example:
2 - The number will show as a binary value
8 - The number will show as an octal value
16 - The number will show as an hexadecimal value
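For instance:
const n = 255;
console.log(n.toString(2));  // "11111111"
console.log(n.toString(8));  // "377"
console.log(n.toString(16)); // "ff"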
function num(n){
return Number(n.toString(2));
}
console.log(num(5));
This worked for me: parseInt(Number, original_base).toString(final_base)
Eg: parseInt(32, 10).toString(2) for decimal to binary conversion.
Source: https://www.w3resource.com/javascript-exercises/javascript-math-exercise-3.php
Here is a concise recursive version of a manual decimal-to-binary algorithm: halve the number, collecting the remainder of each division, until the value reaches 0, then print the concatenated binary string.
Example using 25: 25/2 = 12(r1)/2 = 6(r0)/2 = 3(r0)/2 = 1(r1)/2 = 0(r1) => 10011 => reverse => 11001
function convertDecToBin(input){
return Array.from(recursiveImpl(input)).reverse().join(""); //convert string to array to use prototype reverse method as bits read right to left
function recursiveImpl(quotient){
const nextQuotient = Math.floor(quotient / 2); //divide subsequent quotient by 2 and take lower limit integer (if fractional)
const remainder = ""+quotient % 2; //use modulus for remainder and convert to string
return nextQuotient===0?remainder:remainder + recursiveImpl(nextQuotient); //if next quotient is evaluated to 0 then return the base case remainder else the remainder concatenated to value of next recursive call
}
}
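For example:
console.log(convertDecToBin(25)); // "11001"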
To get better understanding, I think you should try to do the math of that conversion by yourself.
(1) 11 / 2 = 5
(1) 5 / 2 = 2
(0) 2 / 2 = 1
(1) 1 / 2 = 0
I made a function based on that logic
function decimalToBinary(inputNum) {
let binary = [];
while (inputNum > 0) {
if (inputNum % 2 === 1) {
binary.splice(0,0,1);
inputNum = (inputNum - 1) / 2;
} else {
binary.splice(0,0,0);
inputNum /= 2;
}
}
binary = binary.join('');
console.log(binary);
}
This is what I did to get the solution:
function addBinary(a,b) {
// function that converts decimal to binary
function dec2bin(dec) {
return (dec >>> 0).toString(2);
}
var sum = a+b; // add the two numbers together
return sum.toString(2); //converts sum to binary
}
addBinary(2, 3);
I first converted the decimal number to binary like it said, and I got the function from w3schools under the JavaScript Bitwise lesson. Then to make it easier on myself, I created the variable "sum" which does the addition and finally, I made the addBinary function return the sum as a binary code, then called it. It passed in CodeWars. I hope this makes sense and it helps you.
Just use Number(x).toString(base). Where base needs to be equals 2.
var num1=13;
Number(num1).toString(2)
result: "1101"
Number(11).toString(2)
result: "1011"
Note that (dec >>> 0).toString(2) returns the binary digits in the standard order, most significant bit first. The function below reverses the string so that it reads least significant bit first; I have validated this solution in Chrome. In that reversed form, the positions from left to right correspond to [1][2][4][8][16][32][64][128]..., so anyone wanting to manually calculate binary for validation can add together the numbers that correspond to a 1.
For example:
10 in binary is 1010; reversed it is 0101, i.e. 0 + 2 + 0 + 8.
13 in binary is 1101; reversed it is 1011, i.e. 1 + 0 + 4 + 8.
255 in binary is 11111111, i.e. 1 + 2 + 4 + 8 + 16 + 32 + 64 + 128.
function dec2bin(dec){
return (dec >>> 0).toString(2).split('').reverse().join('');
}
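For example:
console.log(dec2bin(10)); // "0101" (LSB first; the standard form is "1010")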
This will convert a decimal number to binary:
let num = 1234;
console.log(num.toString(2));
This will give binary to decimal:
let num = "10011010010";
console.log(parseInt(num, 2));

Javascript: parsing negative Number(hexadecimal | binary)

It works with a lot of number formats, but not with negative hexadecimal or binary numbers.
Also, Number(octal) doesn't parse an octal number.
Number("15") === 15; // OK
Number("-15") === -15; // OK
Number("0x10") === 16; // OK
Number("0b10") === 2; // OK
Number("-0x10") === NaN; // FAIL (expect -16)
Number("-0b10") === NaN; // FAIL (expect -2)
Number("0777") === 777; // FAIL (expect 511)
Number("-0777") === -777; // FAIL (expect -511)
Question: how I can parse all valid Javascript numbers correctly?
Edit A
parseInt() doesn't help me because I need to check each possibility myself (if the string starts with 0x I use 16, for instance).
Edit B
If I type 0777 into the Chrome console it turns into 511, and negative values are allowed too. It even works if I write it directly in Javascript code. So basically I expect a parser that works like the Javascript parser. But I think that a negative hexadecimal, for instance, is really 0 - Number(hex) to the parser, and not literally Number(-hex). The octal case, though, makes no sense to me.
Try this:
parseInt(string, base):
parseInt("-0777", 8)
parseInt("-0x10", 16)
You could write a function to handle the negative value.
function parseNumber (num) {
var neg = num.search('-') > -1;
var num = Number(num.replace('-', ''));
return num * (neg ? -1 : 1);
}
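For example:
console.log(parseNumber("-0x10")); // -16
console.log(parseNumber("-0b10")); // -2 (in engines where Number("0b10") is supported)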
It's not parsing octal and the other examples because they're not valid Javascript numbers, at least within the constraints of Number. So the technically correct answer is: use Number!
If you want to parse other formats, then you can use parseInt, but you will have to provide the base.
This gets a little ugly, but you could inspect the values to determine the right radix for parseInt. In particular, the b prefix for binary doesn't seem to be supported by my browser (Chrome) at all, so unlike the OP, Number("0b10") gives me NaN. So you need to remove the b for it to work at all.
var numbers = [
"15", "-15", "0x10", "0b10", "-0x10", "-0b10", "0777", "-0777"
];
function parser(val) {
if (val.indexOf("x") > 0) {
// if we see an x we assume it's hex
return parseInt(val, 16);
} else if (val.indexOf("b") > 0) {
// if we see a b we assume it's binary
return parseInt(val.replace("b",""),2);
} else if (val[0] === "0") {
// if it has a leading 0, assume it's octal
return parseInt(val, 8);
}
// anything else, we assume is decimal
return parseInt(val, 10);
}
for (var i = 0; i < numbers.length; i++) {
console.log(parser(numbers[i]));
}
Note this obviously isn't foolproof (for example, I'm checking for x but not X), but you can make it more robust if you need to.

What does ~~ ("double tilde") do in Javascript?

I was checking out an online game physics library today and came across the ~~ operator. I know a single ~ is a bitwise NOT, would that make ~~ a NOT of a NOT, which would give back the same value, wouldn't it?
It removes everything after the decimal point because the bitwise operators implicitly convert their operands to signed 32-bit integers. This works whether the operands are (floating-point) numbers or strings, and the result is a number.
In other words, it yields:
function(x) {
if(x < 0) return Math.ceil(x);
else return Math.floor(x);
}
only if x is between -(2^31) and 2^31 - 1. Otherwise, overflow will occur and the number will "wrap around".
This may be considered useful to convert a function's string argument to a number, but because of the possibility of overflow, and because it is incorrect for non-integers, I would not use it that way except for "code golf" (i.e. pointlessly trimming bytes off the source code of your program at the expense of readability and robustness). I would use +x or Number(x) instead.
How this is the NOT of the NOT
The number -43.2, for example, is:
-43.2₁₀ = 11111111111111111111111111010101₂
as a signed (two's complement) 32-bit binary number. (JavaScript ignores what is after the decimal point.) Inverting the bits gives:
NOT -43₁₀ = 00000000000000000000000000101010₂ = 42₁₀
Inverting again gives:
NOT 42₁₀ = 11111111111111111111111111010101₂ = -43₁₀
This differs from Math.floor(-43.2) in that negative numbers are rounded toward zero, not away from it. (The floor function, which would equal -44, always rounds down to the next lower integer, regardless of whether the number is positive or negative.)
The first ~ operator forces the operand to an integer (possibly after coercing the value from a string or a boolean), then inverts all 32 bits of that integer. Officially ECMAScript numbers are all floating-point, but some numbers are implemented as 31-bit integers in the SpiderMonkey engine as an optimization.
You can use it to turn a 1-element array into an integer. Floating-point values are converted according to the C rule, i.e. truncation of the fractional part.
The second ~ operator then inverts the bits back, so you know that you will have an integer. This is not the same as coercing a value to boolean in a condition statement, because an empty object {} evaluates to true, whereas ~~{} evaluates to false.
js>~~"yes"
0
js>~~3
3
js>~~"yes"
0
js>~~false
0
js>~~""
0
js>~~true
1
js>~~"3"
3
js>~~{}
0
js>~~{a:2}
0
js>~~[2]
2
js>~~[2,3]
0
js>~~{toString: function() {return 4}}
4
js>~~NaN
0
js>~~[4.5]
4
js>~~5.6
5
js>~~-5.6
-5
In ECMAScript 6, the equivalent of ~~ is Math.trunc:
Returns the integral part of a number by removing any fractional digits. It does not round any numbers.
Math.trunc(13.37) // 13
Math.trunc(42.84) // 42
Math.trunc(0.123) // 0
Math.trunc(-0.123) // -0
Math.trunc("-1.123")// -1
Math.trunc(NaN) // NaN
Math.trunc("foo") // NaN
Math.trunc() // NaN
The polyfill:
function trunc(x) {
return x < 0 ? Math.ceil(x) : Math.floor(x);
}
The ~ seems to do -(N+1). So ~2 == -(2 + 1) == -3. If you do it again on -3 it turns it back: ~-3 == -(-3 + 1) == 2. It probably just converts a string to a number in a round-about way.
See this thread: http://www.sitepoint.com/forums/showthread.php?t=663275
Also, more detailed info is available here: http://dreaminginjavascript.wordpress.com/2008/07/04/28/
Given that ~N is -(N+1), ~~N is then -(-(N+1) + 1) = N, which evidently leads to a neat trick.
Just a bit of a warning. The other answers here got me into some trouble.
The intent is to remove anything after the decimal point of a floating point number, but it has some corner cases that make it a bug hazard. I'd recommend avoiding ~~.
First, ~~ doesn't work on very large numbers.
~~1000000000000 == -727379968
As an alternative, use Math.trunc() (as Gajus mentioned, Math.trunc() returns the integer part of a floating point number but is only available in ECMAScript 6 compliant JavaScript). You can always make your own Math.trunc() for non-ECMAScript-6 environments by doing this:
if(!Math.trunc){
Math.trunc = function(value){
return Math.sign(value) * Math.floor(Math.abs(value));
}
}
I wrote a blog post on this for reference: http://bitlords.blogspot.com/2016/08/the-double-tilde-x-technique-in.html
Converting Strings to Numbers
console.log(~~-1); // -1
console.log(~~0); // 0
console.log(~~1); // 1
console.log(~~"-1"); // -1
console.log(~~"0"); // 0
console.log(~~"1"); // 1
console.log(~~true); // 1
console.log(~~false); // 0
~-1 is 0, which is falsy, so ~someStr.indexOf("a") is 0 (falsy) exactly when "a" is not found:
if (~someStr.indexOf("a")) {
// Found it
} else {
// Not Found
}
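For example:
const s = "hello";
console.log(~s.indexOf("e")); // -2, truthy: found at index 1
console.log(~s.indexOf("z")); //  0, falsy: not found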
~~ can be used as a shorthand for Math.trunc()
~~8.29 // output 8
Math.trunc(8.29) // output 8
Here is an example of how this operator can be used efficiently, where it makes sense to use it:
leftOffset = -(~~$('html').css('padding-left').replace('px', '') + ~~$('body').css('margin-left').replace('px', '')),
Source: see the section "Interacting with points".
Tilde (~) implements the algorithm -(N+1).
For example:
~0 = -(0+1) = -1
~5 = -(5+1) = -6
~-7 = -(-7+1) = 6
Double tilde is -(-(N+1)+1)
For example:
~~5 = -(-(5+1)+1) = 5
~~-3 = -(-(-3+1)+1) = -3
Triple tilde is -(-(-(N+1)+1)+1)
For example:
~~~2 = -(-(-(2+1)+1)+1) = -3
~~~3 = -(-(-(3+1)+1)+1) = -4
Same as Math.abs(Math.trunc(-0.123)) if you want to make sure the - is also removed.
In addition to truncating real numbers, ~~ can also be used as an operator for updating counters in an object. Applied to an undefined object property it resolves to zero, and applied to an existing counter it resolves to that same integer, which you can then increment.
let words=["abc", "a", "b", "b", "bc", "a", "b"];
let wordCounts={};
words.forEach( word => wordCounts[word] = ~~wordCounts[word] + 1 );
console.log("b count == " + wordCounts["b"]); // 3
The following two assignments are equivalent.
wordCounts[word] = (wordCounts[word] ? wordCounts[word] : 0) + 1;
wordCounts[word] = ~~wordCounts[word] + 1;
