int32 to IPv4 in javascript - javascript

This address has 4 octets where each octet is a single byte (or 8 bits).
Because the above IP address has 32 bits, we can represent it as the unsigned 32 bit number: 2149583361
Complete the function that takes an unsigned 32 bit number and returns a string representation of its IPv4 address.
Examples
2149583361 - "128.32.10.1"
32 - "0.0.0.32"
0 - "0.0.0.0"
I tried to implement this way. It works on some tests, but for example, the program crashes on the number 1069972272 (because it starts translating everything from the beginning, not the end). How can I fix this program? I know it can be easier, but I would like to finish my version
function int32ToIp(int32){
let binaryNumber = int32.toString(2);
let buffer = [];
let result = [];
let i = 8;
for (let index = 0; index < 4; index++) {
buffer[index] = binaryNumber.slice(i - 8,i);
i += 8;
if(!buffer[index]) {
buffer[index] = '0';
}
}
for (let i = 0; i < buffer.length; i++) {
result[i] = parseInt(buffer[i], 2);
}
return result.toString().replace(/,/g, '.');
}

You need to left-pad the binary string with leading zeroes so that it is always 32 characters in length otherwise your code to extract the octet values doesn't work.
For example:
function int32ToIp(int32) {
let binaryNumber = int32.toString(2).padStart(32, '0');
let buffer = [];
let result = [];
let i = 8;
for (let index = 0; index < 4; index++) {
buffer[index] = binaryNumber.slice(i - 8, i);
i += 8;
if (!buffer[index]) {
buffer[index] = '0';
}
}
for (let i = 0; i < buffer.length; i++) {
result[i] = parseInt(buffer[i], 2);
}
return result.toString().replace(/,/g, '.');
}
console.log(int32ToIp(2149583361), 'expect', '128.32.10.1');
console.log(int32ToIp(1069972272), 'expect', '63.198.123.48');

Related

How to manipulate more than 21bit Decimal/Integer Number in Javascript

I want to manipulate a 32bit binary number in order to count the amount of "1".
The input for my function is binary number like this 11111111111111111111111111111101.
The problem is when this number is received from my function it generates a complete different binary string example ('10001100001111011110111110110001111011011011100110001000000000000000000000000000000000000000000000000000') or an exponential number. Both situations do not allow me to manipulate and work with a binary number imput.
Here is my code:
var countingOnes = function (n) {
let binx = { n: `${n.toString(2)}` };
console.log(binx);
let counter = 0;
for (let j = 0; j < binx.n.length; j++) {
if (binx.n.charAt(j) === "0") {
counter = counter;
} else {
counter = counter + 1;
}
}
console.log(counter);
};
countingOnes(11111111111111111111111111111101);
Many thanks in advance
I am not sure what your goal is but Bitwise operators may be what you are looking.
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Bitwise_AND_assignment
they allow you to perform bit manipulation to the actual numbers. Is there a specific reason why you need the output as a string?
have a look at this I have not fully tested it but seems to work assuming a 32bit integer...
function count1s(num = 5986) {
//parse to an integer
const integer = parseInt(num);
//loop through and count
let count = 0;
for(let i = 0; i < 32; i++) {
let test = integer >> (32 - i);
if((test % 2) === 1) {
count++;
}
}
//return the results
return count;
}
console.log(count1s());
after a few changes I have applied the following solution:
var countingOnes = function (n) {
let binx = n.toString(2);
let counter = 0;
for (let j = 0; j < binx.length; j++) {
if (binx.charAt(j) === "0") {
counter = counter;
} else {
counter = counter + 1;
}
}
return counter;
};
countingOnes("11111111111111111111111111111101");
Instead of considering the input as a number of 32 caracteres, I have considered the input as a string of 32 caracteres. Easier to manipulate.
Thanks everyone for the contribution and suggestions!

Javascript Vernam Cipher - ensure output between unicode 32 and 126

I've implemented the below as mask for some data:
function jEncryptDecrypt(data,key) {
let jKey = ''
let bytKey = [];
let bytData = [];
for (let i = 1; i < (data.length/key.length) +1; i++) {
jKey = jKey + key;
}
let str = jKey.substring(0, data.length);
for (let i = 0; i < str.length; ++i) {
var code = str.charCodeAt(i);
bytKey = bytKey.concat([code & 0xff, code / 256 >>> 0]);
}
let str2 = data
for (let i = 0; i < str2.length; ++i) {
var code = str2.charCodeAt(i);
bytData = bytData.concat([code & 0xff, code / 256 >>> 0]);
}
for (let i = 0; i < bytData.length; ++i) {
bytData[i] = bytData[i] ^ bytKey[i];
}
str3 = String.fromCharCode(...bytData)
str3 = str3.replace(/\0/g, '');
return str3;
}
For some outputs the bytes in bytData map to escape characters - depending on where I run the code I either get the character (JSFiddle) or I get \uXXXX (third party application). The output could end up being dealt with on different platforms/languages, so ideally I'd like to avoid special characters and just have characters in the 32 to 126 unicode range?
Is this possible? The application I'm implementing this in is pretty restrictive, so I can't use any libraries, just pure JS.
Edit
I've changed the code to the below, which outputs an array of numbers on the encrypt, and accepts them as an input on the decrypt
function jEncryptDecrypt(data,key) {
let jKey = ''
let bytKey = [];
let bytData = [];
//expand key to cover length of input
for (let i = 1; i < (data.length/key.length) +1; i++) {
jKey = jKey + key;
}
//shorten key to same lenght as input
let str = jKey.substring(0, data.length);
//loop over key to create array of numbers from unicode value
for (let i = 0; i < str.length; ++i) {
var code = str.charCodeAt(i);
bytKey = bytKey.concat([code & 0xff, code / 256 >>> 0]);
}
//if data input is array no need to do anything
if (Array.isArray(data)) {
bytData = data;
//otherwise loop over data to create array of numbers from unicode value
} else {
let str2 = data
for (let i = 0; i < str2.length; ++i) {
var code = str2.charCodeAt(i);
bytData = bytData.concat([code & 0xff, code / 256 >>> 0]);
}
}
//XOR each data value with each key value in turn
for (let i = 0; i < bytData.length; ++i) {
bytData[i] = (bytData[i] ^ bytKey[i]);
}
//if input was array return string, otherwise return array
if (Array.isArray(data)) {
str3 = String.fromCharCode(...bytData)
str3 = str3.replace(/\0/g, '');
return str3;
} else {
return bytData;
}
}
It's clunky and hacky but works for my needs. If there is an answer to the original question that would still be much appreciated!

How to calculate what is probability of getting same result 8 times in a row, when flipping coin 1000 times?

I've tried to use this code:
function calc (n, c) {
let a = 0
const omega = Math.pow(2, n)
let search1 = ''
let search2 = ''
for (let i = 0; i < c; i++) {
search1 += '0'
}
for (let i = 0; i < c; i++) {
search2 += '1'
}
for (let i = 0; i < omega; i++) {
if (i.toString(2).includes(search1) || i.toString(2).includes(search2)) {
a++
}
}
const prob = a * 100 / omega
console.log({ a: a, omega: omega, prob: prob.toFixed(2) })
}
calc(1000, 8)
Which works, but is slow when it comes to big numbers. How can I optimize my code to make it faster? Or maybe there exists a Mathematical solution, that doesn't require to code at all? I just want to know the solution for this problem.
First a Monte Carlo simulation answer:
You can find a confidence interval for this simulation by doing some statistical inference on the Bernoulli distribution which I won't do here.
function doesItHappen(l,r){
var lastValue = null;
var lastN = 0;
for(var i = 0; i < l; i++){
var currentValue = Math.random() > 0.5 ? 1 : 0;
if(lastValue === currentValue) {
lastN++;
} else {
lastValue = currentValue;
lastN = 1;
}
if(lastN === r) return true;
}
return false;
}
function rep(n,l,r){
var t = 0;
for(var i = 0; i < n; i++) {
if(doesItHappen(l,r)) t++;
}
return t/n;
}
console.log(rep(100000,1000,8))
Finally the actual Mathematical answer
I couldn't find a solution to this question online so I came up with my own method to calculate this in o(n) time and space complexity, you can even get it down to o(1) space complexity by discarding valueStore objects older than the length of consecutive sequence you want. The key thing is to recognise you have to computer all the combinations prior to the current length, similar to a Fibonacci sequence.
function calculateProbability(l,r) {
var valueStore = [
{ // Initialize it
totalNumberOfCombinations: 2,
numberOfCombinationsWithSequence: 0
}
];
function getValues(index) {
// combinations with the sequence in it
// There are two ways a consecutive sequence of r length can occur, it either occured in the previous combination and we flipped a new heads or tails(doesn't matter)
// Or this flip resulted in a new consecutive sequence of r length occuring (let's say theres k combinations of this)
// Heres the genius, k must end in a sequence of heads or tails so theres 2 possible endings, the beginnings of k cannot contain the sequence of r consecutive flips
// If this previous combination ends in a head then the new sequence is all tails and vice versa
// So k = the combinations of flips without the consective flips before the current sequence
// k = the totalNumberOfCombinations 8 flips ago - numberOfCombinationsWithSequence 8 flips ago
if (index === r - 1) {
// All heads or all tails
var numberOfCombinationsWithSequence = 2;
} else if(index < r) {
var numberOfCombinationsWithSequence = 0;
} else {
var numberOfCombinationsWithSequence = valueStore[index - 1].numberOfCombinationsWithSequence * 2 + (valueStore[index - r].totalNumberOfCombinations - valueStore[index - r].numberOfCombinationsWithSequence)
}
return {
// total possible combinations
// this is just the previous number of combinations but times 2 since we flip again
totalNumberOfCombinations: valueStore[index - 1].totalNumberOfCombinations * 2,
numberOfCombinationsWithSequence: numberOfCombinationsWithSequence
}
}
for(var i = 1; i < l; i++) {
var values = getValues(i);
valueStore.push(values);
}
return valueStore[valueStore.length - 1].numberOfCombinationsWithSequence / valueStore[valueStore.length - 1].totalNumberOfCombinations;
}
console.log(calculateProbability(1000,8));
The 100% accurate answer is 0.9817098435878764 or 98.17%
how about a simulation?
function simulate(throws, streak, runs) {
let win = "".padStart(streak, "1")
let win2 = "".padStart(streak, "0")
let hits = 0
for (let n = 0; n < runs; n++) {
let res = "";
for (let i = 0; i < throws; i++) {
let val = Math.round(Math.random())
res += val
}
if (res.includes(win) || res.includes(win2)) {
hits++
}
}
console.log({
hits,
runs,
prob: ((hits / runs) * 100).toFixed(2)
})
}
simulate(1000, 8, 10000)

Why am I getting random undefined chars in my decryption output in my Vigenere Cipher algorithm?

I am working on my own Vigenere Cipher in JavaScript. I enjoy it. Anyway, the encryption and decryption is the same except decrypt() is '-' keyStr instead of '+' towards the bottom. The encryption works perfectly. But, for some reason, when decrypting, some of the chars come out as undefined randomly. I know the algorithm works for C++, Python, Java, and Swift. What is the error here?
I have tried printing the char indices in the alphabet array and the index values in decrypt() come out odd and I can't figure out why.
function ascii(x) {
return x.charCodeAt(0);
}
function decrypt() {
var alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
var msgStr = "";
var keyTemp = "";
var keyStr = "";
var output = "";
var input = document.getElementById("inMsg").value;
var key = document.getElementById("key").value;
input = input.toUpperCase();
key = key.toUpperCase();
for(let i = 0; i < input.length; i++) {
for(let x = 0; x < alpha.length; x++) {
if (input[i] == alpha[x]) {
msgStr += alpha[x];
}
}
}
for(let i = 0; i < msgStr.length; i++) {
keyTemp += key[i % key.length]
}
for(let i = 0; i < keyTemp.length; i++) {
for(let x = 0; x < alpha.length; x++) {
if (keyTemp[i] == alpha[x]) {
keyStr += alpha[x];
}
}
}
for(let i = 0; i < msgStr.length; i++) {
let x = (ascii(msgStr[i]) - ascii(keyStr[i])) % 26;
output += alpha[x];
}
document.getElementById("outMsg").value = output;
}
The problem you are having is being caused by this line:
let x = (ascii(msgStr[i]) - ascii(keyStr[i])) % 26;
because
ascii(msgStr[i]) - ascii(keyStr[i])
can be negative.
The % operator isn't really a modulus operator in javascript its a remainder operator and it works a little differently.
From the link above, you should be able to do something more like this to get it to work:
let x = ((ascii(msgStr[i]) - ascii(keyStr[i])) % 26) + 26) % 26

Convert string to array of integers and vice versa in JavaScript

I have an array, each cell of which can keep 4 bytes (2**32). This array presents memory of my VM I write in JS. Also I have string. Only one place I can keep this string - memory which I describe above.
I decided to present strings in memory as C-strings (with special symbol NUL as the end of string). My current implementation looks ugly and I asking you advice, is there any way to improve this approach? Maybe there are any other way to do it?
Part of code, which converts string to array:
// demoMemory presents memory model
var DEMO_VOLUME = 16;
var demoMemory = new Array(DEMO_VOLUME);
for (var i = 0; i < DEMO_VOLUME; i++) demoMemory[i] = 0;
// convert string to hexidecimal string
var string = "Hello, World!", hexString = "";
for (var i = 0; i < string.length; i++) {
hexString += string.charCodeAt(i).toString(16);
}
// convert hexidecimal string to array of strings
// each element of this array presents 4 symbols
var hexStringArray = hexString.match(/.{1,8}/g);
// add NUL (0x00) symbols to complete strings
while (hexStringArray[hexStringArray.length - 1].length != 8) {
hexStringArray[hexStringArray.length - 1] += "00";
}
// convert to integer array
for (var i = 0; i < hexStringArray.length; i++) {
demoMemory[i] = parseInt(hexStringArray[i], 16);
}
...and back to string:
// decode back to string
var resultString = "", decSymbolCode = 0;
for (var i = 0; i < demoMemory.length; i++) {
hexString = demoMemory[i].toString(16);
var hexSymbolCodeArray = hexString.match(/.{1,2}/g);
for (var j = 0; j < hexSymbolCodeArray.length; j++) {
decSymbolCode = parseInt(hexSymbolCodeArray[j], 16);
resultString += String.fromCharCode(decSymbolCode);
}
}
This code is inappreciable because I'm using JS Strings to build hexadecimal strings. I think it is possible to do with bitwise operations and masks, but I don't know, how. Maybe I'm wrong.
Here is a code that converts string to array of 32bit numbers and vice versa using masks and bitwise operations:
var demoMemory = [];
function stringToArray(str) {
var i,
length = str.length,
arr = [];
for(i=0; i<length; i+=4) {
arr.push(
(((str.charCodeAt(i) || 0) << 24)
|((str.charCodeAt(i+1) || 0) << 16)
|((str.charCodeAt(i+2) || 0) << 8)
|((str.charCodeAt(i+3) || 0)))
);
}
if(length % 4 === 0) {
arr.push(0);
}
return arr;
}
function arrayToString(arr) {
var i, j, chrCode,
length = arr.length,
str = [];
label:
for(i=0; i<length; i++) {
for(j=24; j>=0; j-=8) {
chrCode = (arr[i] >> j) & 0xFF;
if(chrCode) {
str.push(String.fromCharCode(chrCode));
} else {
break label;
}
}
}
return str.join('');
}
console.log(demoMemory = stringToArray('Hello, World!')); // => [1214606444, 1865162839, 1869769828, 553648128]
console.log(arrayToString(demoMemory)); // "Hello, World!"
Working example you can find here: http://jsbin.com/aselug/2/edit

Categories