How to convert base13 string to base64 - javascript

I have to make a URL shortener for query strings. I have spent a few days trying to compress array data into base64 strings. I'm thinking the best approach may be to interpret something like "[[1,2,9,3],[1,0,2],[39,4]]" as base 13, with the digits 0-9 and the [ ] , symbols.
How the current algorithm works:
Convert the stringified array into an array of base-13 digits, where each element represents one character; convert this array to a base-10 number; convert that number to a base-64 string.
The problem is that converting the base-13 array to a base-10 number produces huge values like 5.304781188371057e+86, which can't be represented exactly in JavaScript.
I am open to alternative solutions, of course, but please do not suggest something like creating a database of URLs; that won't work because I have up to 51!*51! unique URLs. It's better to make a compact, encodable and decodable query string and decode it as soon as the website is accessed.
//convert stringified array to array of base13 (each element = each digit of the base13 number)
function stringToArray(string)
{
    let charSet = "[],1234567890";
    let array = [];
    for(let i = 0; i < string.length; i++)
    {
        array.push(charSet.indexOf(string[i]));
    }
    return array;
}
//convert base13 array to one large decimal number
function arrayToDecimal(array, base)
{
    var decimal = 0;
    for(let i = 0; i < array.length; i++)
    {
        decimal += array[i] * Math.pow(base, i)
    }
    return decimal;
}
//convert decimal number back to array
function decimalToArray(decimal, base)
{
    var quotient = decimal;
    var remainder = [];
    while(quotient > base)
    {
        remainder.push(quotient % base)
        quotient = Math.floor(quotient / base);
    }
    remainder.push(quotient % base)
    return remainder;
}
const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
// binary to string lookup table
const b2s = alphabet.split('');
// string to binary lookup table
// 123 == 'z'.charCodeAt(0) + 1
const s2b = new Array(123);
for(let i = 0; i < alphabet.length; i++)
{
    s2b[alphabet.charCodeAt(i)] = i;
}
// number to base64
const ntob = (number) =>
{
    if(number < 0) return `-${ntob(-number)}`;
    let lo = number >>> 0;
    let hi = (number / 4294967296) >>> 0;
    let right = '';
    while(hi > 0)
    {
        right = b2s[0x3f & lo] + right;
        lo >>>= 6;
        lo |= (0x3f & hi) << 26;
        hi >>>= 6;
    }
    let left = '';
    do {
        left = b2s[0x3f & lo] + left;
        lo >>>= 6;
    } while(lo > 0);
    return left + right;
};
// base64 to number
const bton = (base64) =>
{
    let number = 0;
    const sign = base64.charAt(0) === '-' ? 1 : 0;
    for(let i = sign; i < base64.length; i++)
    {
        number = number * 64 + s2b[base64.charCodeAt(i)];
    }
    return sign ? -number : number;
};
console.log(decimalToArray(bton(ntob(arrayToDecimal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 13))), 13))
//encodes and decodes correctly, output: [1,1,1,1,1,1,1,1,1,1,1,1,1]
console.log(arrayToDecimal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 13))
//encoding doesn't work, arrayToDecimal converts to 5.304781188371057e+86

An interesting problem... The first thing you will need to assess is whether the base-conversion compression you're seeking is worthwhile, i.e., how many base-64 characters are required to represent n characters of base 13? This involves solving...
13 ** n = 64 ** x
Solving for x, we get...
x = n * log(13) / log(64)
That is, for every n digits of base 13, x digits of base 64 are required. A sampling of a few values of n returns...
n = 6, x = 3.70
n = 7, x = 4.31
n = 8, x = 4.93
n = 9, x = 5.55
n = 10, x = 6.17
n = 11, x = 6.78
n = 12, x = 7.40
n = 13, x = 8.01
n = 14, x = 8.63
n = 15, x = 9.25
n = 16, x = 9.86
So how to interpret this? If you have 10 digits of base 13, you're going to need 7 digits (6.17 rounded up) of base 64. The best ratio is when x is equal to, or just under, a whole number: 8 digits of base 13 require 5 digits of base 64, achieving a best case of 5/8, or a 62.5% compression ratio.
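For reference, here is a quick snippet (my own, not part of the original answer) that reproduces the sampling above:
for (let n = 6; n <= 16; n++) {
    const x = n * Math.log(13) / Math.log(64);
    console.log(`n = ${n}, x = ${x.toFixed(2)}`);
}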
Assuming that's good enough to meet your requirement, then the following function converts the "base13" string to base 64.
const base13Chars = "0123456789[],";
const base64Chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_';
// See https://en.wikipedia.org/wiki/Query_string for URL-parameter allowable characters.
function base13toBase64(x13) {
    const base13 = x13.split("").map( c => base13Chars.indexOf(c) );
    // Pad the array to an even multiple of 8
    for (let i = base13.length; i % 8 !== 0; i++) {
        base13[i] = 0;
    }
    let x64 = "";
    for (let i = 0; i < base13.length; i += 8) {
        // Calculate the numeric value of the next 8 base-13 digits.
        let n = 0;
        for (let j = 0; j < 8; j++) {
            n = n * 13 + base13[i + j];
        }
        // Now express n in base 64 (5 digits, least significant first).
        for (let j = 0; j < 5; j++) {
            x64 = x64 + base64Chars.substr(n % 64, 1);
            n = Math.floor(n / 64);
        }
    }
    return x64;
}
Running the above...
base13toBase64( "[[1,2,9,3],[1,0,2],[39,4]]" ) returns "ilYKerYlgEJ4PxAAjaJi"
Note that the original value is a length of 26 characters, and the base64 value is 20 characters, so the compression ratio is 77%, not quite the optimal 62.5%. This is because of the padding to bring the original array to 32 characters, an even multiple of 8. The longer the string to encode, though, the closer the ratio will be to 62.5%.
Then, on the server side you'll need the constants above plus the following function to "uncompress" the base64 to the base13 stringified URL...
function base64toBase13(x64) {
    const base64 = x64.split("").map( c => base64Chars.indexOf(c) );
    let x13 = "";
    for (let i = 0; i < base64.length; i += 5) {
        // Calculate the numeric value of the next 5 base-64 digits
        // (stored least significant first by base13toBase64).
        let n = 0;
        for (let j = 5 - 1; 0 <= j; j--) {
            n = n * 64 + base64[i + j];
        }
        // Now express n as 8 base-13 digits.
        let x = "";
        for (let j = 0; j < 8; j++) {
            x = base13Chars.substr(n % 13, 1) + x;
            n = Math.floor(n / 13);
        }
        x13 = x13 + x;
    }
    // Remove the trailing 0's introduced by the padding in
    // base13toBase64 that made the array an even multiple of 8.
    while (x13.substr(-1, 1) === "0") {
        x13 = x13.substr(0, x13.length - 1);
    }
    return x13;
}
Running the above...
base64toBase13 ( "ilYKerYlgEJ4PxAAjaJi" ) returns "[[1,2,9,3],[1,0,2],[39,4]]"
Hope this helps...

The best compression is when you can leave stuff out.
Assuming your data structure is Array<Array<int>>, as suggested by the one sample, we can leave out pretty much everything that doesn't contribute to the data itself.
I'm not compressing the string but the data itself, using one base64 character per 5 bits needed to represent a number. As for the structure, we only store the number of sub-arrays and their respective lengths, so roughly one additional character per array in your data.
It boils down to:
function encode(data) {
    const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
    let str = "";
    function encode(nr, hasMoreDigits) {
        if (nr > 31) {
            // I need more bits/characters to encode this number.
            // Encode the more significant bits with the 0b100000 flag.
            encode(nr >>> 5, 32);
        }
        // 0b011111 payload | 0b100000 flag
        const index = nr & 31 | hasMoreDigits;
        str += alphabet[index];
    }
    encode(data.length);
    data.forEach(arr => {
        encode(arr.length);
        arr.forEach(v => encode(v >>> 0 /* int32 -> uint32 */));
    });
    return str;
}
function decode(str) {
    const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
    let i = 0;
    function parse() {
        let nr = 0,
            hasMoreDigits;
        do {
            const index = alphabet.indexOf(str.charAt(i++));
            nr = nr << 5 | index & 31;  // 0b011111 payload
            hasMoreDigits = index & 32; // 0b100000 flag
        } while (hasMoreDigits);
        return nr; // int32 due to the bit operations above
    }
    let data = Array(parse());
    for (let j = 0; j < data.length; ++j) {
        let arr = data[j] = Array(parse());
        for (let k = 0; k < arr.length; ++k) {
            arr[k] = parse();
        }
    }
    return data;
}
let data = [
    [1, 2, 9, 3],
    [1, 0, 2],
    [39, 4]
];
let text = encode(data);
let data2 = decode(text);
console.log("input:", data);
console.log("encoded:", text, "length:", text.length);
console.log("output:", data2);
console.log("equal:", JSON.stringify(data) === JSON.stringify(data2));
About the encoding of the numbers: ideally you would encode each number as binary with a static size, but that means 32 bits per int, which would be 6 characters per number; hence the multi-chunk, variable-length encoding.
We split each number into chunks of n bits, ignore the leading zeroes, and encode the rest. Ideally we can encode small numbers with very few characters; the downside is that we lose 1 bit per chunk, which hurts if n is too small and the average numbers are big. It's a tradeoff, which is why I left this configurable.
The current format is 6 bits per base64 character: 1 bit for the structure (a "more digits follow" flag) and 5 bits as payload, i.e. the pattern (1.....)*0.....
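As a rough sketch of what making the chunk size configurable could look like (the CHUNK_BITS constant and the encodeNumber helper are my own names, not from the snippet above; with a 64-character alphabet the chunk size must stay at 5 bits or below):
const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
const CHUNK_BITS = 5;                        // 5 reproduces the format used above
const PAYLOAD_MASK = (1 << CHUNK_BITS) - 1;  // 0b011111 for 5 bits
const MORE_FLAG = 1 << CHUNK_BITS;           // 0b100000 for 5 bits
function encodeNumber(nr, hasMoreDigits = 0) {
    let str = "";
    if (nr > PAYLOAD_MASK) {
        // more significant bits first, flagged as "more digits follow"
        str += encodeNumber(nr >>> CHUNK_BITS, MORE_FLAG);
    }
    return str + alphabet[(nr & PAYLOAD_MASK) | hasMoreDigits];
}
console.log(encodeNumber(39)); // "hH" -- two characters, matching the encoder above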

I would suggest you directly encode the Base13 string into Base64.
Although that might not result in better compression than your solution, it removes the heavy multiplications you are performing. Moreover, how do you guarantee that no collisions happen when converting through arrayToDecimal?
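If you do want to keep the single-big-number approach, one way to sidestep the precision (and therefore collision) problem entirely is BigInt. A rough sketch, assuming BigInt is available in your target environments (the base-14 offset avoids losing a leading "[" as a leading zero):
const charSet = "[],1234567890"; // 13 characters, as in the question
function stringToBigInt(str) {
    let n = 0n;
    for (const ch of str) {
        // +1 maps each character to a digit 1..13, so no digit is ever 0
        n = n * 14n + BigInt(charSet.indexOf(ch) + 1);
    }
    return n;
}
function bigIntToString(n) {
    let out = "";
    while (n > 0n) {
        out = charSet[Number(n % 14n) - 1] + out;
        n /= 14n;
    }
    return out;
}
const s = "[[1,2,9,3],[1,0,2],[39,4]]";
console.log(bigIntToString(stringToBigInt(s)) === s); // true
The resulting BigInt can then be rendered with any alphabet (for example a 64-character one) by repeated division, without ever hitting Number's 2^53 limit.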

Related

Find consecutive numbers sequence in a string

I have a string of numbers, and in it, there is always a random sequence, like:
891011 -> 8, 9, 10, 11
123124 -> 123, 124
How can I find the sequence? My current code is this:
var digits = [8, 9, 1, 0, 1, 1];
var number;
for (var j = 0; j < digits.length; j++) {
number = digits.slice(0, j + 1).join("");
if (
Number(number) === Number(digits.slice(j, number.length).join("")) &&
digits
.join("")
.match(new RegExp(".{1," + number.length + "}", "g"))
.every(function(num, index) {
return index === digits.length - 1 || num < digits[index + 1];
})
) {
break;
}
}
console.log(number)
You can increment the number of digits of the starting number and see if you can generate a matching output with consecutive numbers. Try it like below:
const findSequence = (input) => {
    // the starting number cannot be more than half of the length
    let maxLengthOfStartingNumber = input.length / 2;
    for (let i = 1; i <= maxLengthOfStartingNumber; i++) {
        // the starting number can have several digits
        let startingNumber = parseInt(input.substring(0, i));
        // holder for the consecutive numbers we generate
        let generatedNumbers = [startingNumber];
        // current string built by concatenating the generated numbers
        let currentString = startingNumber.toString();
        // generate integers until we reach the length of the actual input
        while (currentString.length < input.length) {
            const nextNumber = generatedNumbers[generatedNumbers.length - 1] + 1;
            generatedNumbers.push(nextNumber);
            currentString += nextNumber;
        }
        // check whether the string of consecutive numbers matches the input
        if (currentString === input) {
            return generatedNumbers;
        }
    }
    // the input is invalid; return null
    return null;
}
const testInputs = ["891011", "123124", "9991000", "123454"];
testInputs.forEach(testInput => {
    console.log(findSequence(testInput));
});

conversion of Byte array to 320 bits and then bit extraction 16 bits then 10 bits

I am hoping some clever people here can help me. I am new to JavaScript and trying to figure out how to code the following; I get stuck when the payload is not in standard 8-bit, one-byte groups. I receive a 320-bit (40-byte) array. The first few values are fine to extract, as they are 16 bits, then 8 bits, then 32 bits of info, but the next value is only 10 bits and I get stuck there. Should I be converting the 40 bytes into 320 bits and then shifting right by the number of bits per variable? I am not sure how to do this. This is my payload format, and in brackets is the number of bits for each variable. Any help would be appreciated.
https://www.iotsoundsensor.com/wp-content/uploads/2021/11/310_payload_parser_manual.html
function Decoder(bytes, port) {
//8 bit single byte values
var messageinfo = bytes[2]>>[2];
var battery = bytes[2]>>[0];
var latitude = bytes[4]>>[4];
var longatude = bytes[4]>>[4];
//now have 10 bit values to decode
var LAfast = bytes[15]>>>[0];
var LAslow = bytes[16]>>>[2];
return {
messageinfo: messageinfo,
battery: battery/10,
LAfast : LAfast/10,
LAslow : LAslow/10,
latitude: latitude,
longatude: longatude
}
}
I've re-written the Decoder() function based on the JavaScript code at iot-source (which is really the bible for decoding these messages).
I've created a getField() function for decoding fields at a given offset, length, and resolution and also a getBits() function to retrieve an array of bits for later decoding (used for the LAxxx fields).
I've used the example from the page above as input to Decoder():
function hexToByteArray(hex) {
    return Array.from({ length: hex.length / 2 }, (v, n) => parseInt(hex.slice(n * 2, n * 2 + 2), 16));
}
function getField(offset, length, bytes, resolution = 1) {
    return bytesToNum(bytes.slice(offset, offset + length)) * resolution;
}
function bytesToNum(bytes) {
    return bytes.reduce((acc, b, x) => acc | (b << (x * 8)), 0);
}
// Convert a byte segment to bits
function getBits(offset, length, bytes) {
    return bytes.slice(offset, offset + length).map((byt) => {
        return Array.from({ length: 8 }).map((x, i) => {
            return ((byt * (2 ** i) & 0x80) !== 0 ? 1 : 0);
        });
    }, []).flat();
}
function parseBits(bits) {
    return bits.reduce((acc, b, i) => {
        return acc + 0.1 * (bits[bits.length - 1 - i] ? 2 ** (i) : 0);
    }, 30);
}
function Decoder(bytes, port) {
    let offset = 0;
    let messageinfo = getField(offset, 2, bytes, 1);
    let battery = getField(offset += 2, 1, bytes, 0.1);
    // NB: We should check messageinfo to ensure these are actually present...
    let latitude = getField(offset += 1, 4, bytes, 1e-7);
    let longitude = getField(offset += 4, 4, bytes, 1e-7);
    // Skip parsing timestamp for now...
    offset += 4;
    let bitArray = getBits(offset += 4, 20, bytes);
    let bitIndex = 0;
    let LAfast = parseBits(bitArray.slice(bitIndex, bitIndex += 10));
    let LAslow = parseBits(bitArray.slice(bitIndex, bitIndex += 10));
    let LCfast = parseBits(bitArray.slice(bitIndex, bitIndex += 10));
    let LCslow = parseBits(bitArray.slice(bitIndex, bitIndex += 10));
    return {
        messageinfo,
        battery,
        LAfast,
        LAslow,
        LCfast,
        LCslow,
        latitude,
        longitude
    }
}
let bytes = hexToByteArray('7FFF3F6599341FE38C11036A608C60370DA561584699336CDB55CDB390F44BD4B5B9D5390E34BCE3');
console.log(Decoder(bytes));

Syntax Error: unexpected identifier - Binary algorithm code. Help me improve it

What I would like to do:
Somehow create a variable that represents the binary system more accurately, because I have only included the first 12 numbers; but what if I need many of the higher numbers? It needs to be more generic.
What I have tried to do: push the binary numbers that go into n to a new array called one, then return the length of the array as the answer.
var countBits = function(n) {
Let binary = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048];
var one = [];
for( let i = 0; i < binary.length; i++) {
if( n = [i]) { one.push([i])},
if( n > [i]) { one.push([i])},
else { return "negative"};
return one.length;
};
Syntactic problem
Following my comment, here is the correction:
let instead of Let
remove the , on both if statements
a missing }
I would recommend you use some kind of editor to help tidy and validate your code.
It will save you a lot of time!
var countBits = function(n) {
let binary = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048];
var one = [];
for (let i = 0; i < binary.length; i++) {
if (n == [i]) {
one.push([i])
}
if (n > [i]) {
one.push([i])
} else {
return "negative"
}
}
return one.length;
};
Solving the actual bit-counter problem
Now onto the problem, you would like to:
count the number of set bits on the binary representation of a given number
We will run our loop as long as n is strictly positive. On each loop, we will check if n is odd, in which case we will add 1 to our counter. Then we will divide n by 2.
const countBits = function(n) {
    let count = 0;
    while (n > 0) {
        count += (n % 2 != 0); // add one to the counter if n is odd
        n = Math.floor(n / 2); // divide n by 2
    }
    return count;
}
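For example (a quick check of my own, not part of the original answer):
console.log(countBits(13)); // 13 is 0b1101 -> 3
console.log(countBits(8));  // 8 is 0b1000  -> 1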
You can do the exact same using binary operators: & (bitwise AND) and >> (right-shift). We will be working on n directly and counting down to determine the number of set bits.
Here are our two lines, now using & and >>:
On each loop, we will AND n with 1 bit-wise. Essentially, n & 1 will equal 1 if n is odd, and 0 otherwise.
Then we will shift n by one, i.e. move all bits to the right. n >>= 1 is the same as assigning the result of n >> 1 to n: n = n >> 1.
const countBits = function(n) {
    let count = 0;
    while (n > 0) {
        count += n & 1;
        n >>= 1;
    }
    return count;
}
More on bitwise operators.
One-liner
If you want to test your code out, here's a cheated version to compare your results to:
const countBits = function(dec) {
return (dec >>> 0).toString(2).split('').filter(Number).length;
}
It first converts the number to a string of its binary representation, then splits it into an array of '0' and '1' characters, removes the zeros with a filter, and counts the remaining elements.
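For instance (my own quick check):
console.log(countBits(255)); // "11111111" -> 8
console.log(countBits(10));  // "1010"     -> 2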

Return a unique and sorted string from number

I have an input which is a number of base 10: 1, 2, 3, 52, 53 ...
I would like to convert the number to a string built with the letters a to z. For each number there should be only one letter combination and they should be sorted alphabetically.
0 => A
1 => B
2 => C
26 => AA
27 => AB
52 => BA
53 => BB
...
At the moment I have built this snippet:
var possibleIndexes = "abcdefghijklmnopqrstuvwxyz".split("");
var result = '';
var index10 = 52;
var index26 = (index10).toString(26);
for (var i = 0; i < index26.length ; i++) {
result += possibleIndexes[(parseInt(index26[i], 26)) % 26];
}
console.log(result);
It's not so far from the correct answer but it's still wrong.
What would be the correct form of the function?
I generalized the solution and provided some test data:
function f(xx) {
    var v = "abcdefghijklmnopqrstuvwxyz".split("");
    var result = '';
    var nr = Math.floor(xx / 26);
    var mod = xx % 26;
    for (var j = 1, jj = 1; j <= nr; j = Math.pow(26, jj) + 1, jj++) {
        result += v[(nr - 1) % 26];
    }
    result += v[mod];
    return result;
}
/* USEFUL FOR TESTS */
var arr = [0, 1, 25, 26, 52, 53, 701, 702, 17601, 17602, 457001, 457002];
var expected = ['a', 'b', 'z', 'aa', 'ba', 'bb', 'zz', 'aaa', 'zzz', 'aaaa', 'zzzz', 'aaaaa'];
for (var k = 0; k < arr.length; k++) {
    console.log(arr[k] + " --> " + f(arr[k]) + " --> " + expected[k]);
}
Idea:
Take into account that the number of possible solutions grows exponentially with the length of the output string:
- A-Z +26^1 1-26
- AA-ZZ +26^2 27-702
- AAA-ZZZ +26^3 703-17602
- AAAA-ZZZZ +26^4 17603-457002
...
Details:
mod holds the last character of the output string
j grows exponentially ( 26^0, 26^1, 26^2 ... ) and ensures that result will have the suitable number of characters:
26^1+1 - 26^2 -> add one letter
26^2+1 - 26^3 -> add another letter
...
jj is used only as an exponent ( 1 - 26^1, 2 - 26^2, 3 - 26^3, ... )

Convert numbers to letters beyond the 26 character alphabet

I'm creating some client side functions for a mappable spreadsheet export feature.
I'm using jQuery to manage the sort order of the columns, but each column is ordered like an Excel spreadsheet i.e. a b c d e......x y z aa ab ac ad etc etc
How can I generate a number as a letter? Should I define a fixed array of values? Or is there a dynamic way to generate this?
I think you're looking for something like this
function colName(n) {
    var ordA = 'a'.charCodeAt(0);
    var ordZ = 'z'.charCodeAt(0);
    var len = ordZ - ordA + 1;
    var s = "";
    while (n >= 0) {
        s = String.fromCharCode(n % len + ordA) + s;
        n = Math.floor(n / len) - 1;
    }
    return s;
}
// Example:
for (n = 0; n < 125; n++)
    document.write(n + ":" + colName(n) + "<br>");
This is a very easy way:
function numberToLetters(num) {
    let letters = ''
    while (num >= 0) {
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[num % 26] + letters
        num = Math.floor(num / 26) - 1
    }
    return letters
}
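For example (my own quick check of the function above):
console.log(numberToLetters(0));  // "A"
console.log(numberToLetters(25)); // "Z"
console.log(numberToLetters(26)); // "AA"
console.log(numberToLetters(27)); // "AB"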
function getColumnDescription(i) {
    const m = i % 26;
    const c = String.fromCharCode(65 + m);
    const r = i - m;
    return r > 0
        ? `${getColumnDescription((r - 1) / 26)}${c}`
        : `Column ${c}`
}
Usage:
getColumnDescription(15)
"Column P"
getColumnDescription(26)
"Column AA"
getColumnDescription(4460)
"Column FOO"
If you have your data in a two-dimensional array, e.g.
var data = [
    ['Day', 'score'],
    ['Monday', 99],
];
you can map the rows/columns to spreadsheet cell numbers as follows (building on the code examples above):
function getSpreadSheetCellNumber(row, column) {
    let result = '';
    // Get spreadsheet column letter
    let n = column;
    while (n >= 0) {
        result = String.fromCharCode(n % 26 + 65) + result;
        n = Math.floor(n / 26) - 1;
    }
    // Get spreadsheet row number
    result += `${row + 1}`;
    return result;
};
E.g. the 'Day' value from data[0][0] would go in spreadsheet cell A1.
> getSpreadSheetCellNumber(0, 0)
> "A1"
This also works when you have 26+ columns:
> getSpreadSheetCellNumber(0, 26)
> "AA1"
You can use code like this, assuming that numbers contains the numbers of your columns. So after this code you'll get the string names for your columns:
var letters = ['a', 'b', 'c', ..., 'z'];
var numbers = [1, 2, 3, ...];
var columnNames = [];
for (var i = 0; i < numbers.length; i++) {
    // the first letter only appears from the 27th column onwards (i >= 26)
    var firstLetter = parseInt(i / letters.length) == 0 ? '' : letters[parseInt(i / letters.length) - 1];
    var secondLetter = letters[i % letters.length];
    columnNames.push(firstLetter + secondLetter);
}
Simple recursive solution:
function numberToColumn(n) {
    const res = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[n % 26];
    return n >= 26 ? numberToColumn(Math.floor(n / 26) - 1) + res : res;
}
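A quick check (my own, not part of the original answer):
console.log(numberToColumn(0));   // "A"
console.log(numberToColumn(25));  // "Z"
console.log(numberToColumn(26));  // "AA"
console.log(numberToColumn(701)); // "ZZ"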
Here is an alternative approach that relies on .toString(26). It uses conversion to base-26 and then translates the characters so they are in the a..z range:
const conv = ((base, alpha) => { // Closure for preparing the function
const map = Object.fromEntries(Array.from(alpha, (c, i) => [c, alpha[i + 10]]));
return n => (n + base).toString(26).replace(/o*p/, "").replace(/./g, m => map[m]);
})(parseInt("ooooooooop0", 26), "0123456789abcdefghijklmnopqrstuvwxyz");
// Example:
for (let n = 0; n < 29; n++) console.log(n, conv(n));
console.log("...");
for (let n = 690; n < 705; n++) console.log(n, conv(n));
About the magical number
The magical value "ooooooooop0" is derived as follows:
It is a number expressed in radix 26, in the standard way, i.e. where the ten digits also play a role, and then the first letters of the alphabet.
The greatest "digit" in this radix 26 is "p" (the 16th letter of the Latin alphabet), and "o" is the second greatest.
The magical value is formed by a long enough series of the one-but-greatest digit, followed by the greatest digit and ended by a 0.
As JavaScript integer numbers max out around Number.MAX_SAFE_INTEGER (greater integers numbers would suffer from rounding errors), there is no need to have a longer series of "o" than was selected. We can see that Number.MAX_SAFE_INTEGER.toString(26) has 12 digits, so precision is ensured up to 11 digits in radix 26, meaning we need 9 "o".
This magical number ensures that if we add units to it (in radix 26), we will always have a representation which starts with a series of "o" and then a "p". That is because at some point the last digit will wrap around to 0 again, and the "p" will also wrap around to 0, bringing the preceding "o" to "p". And so we have this invariant that the number always starts with zero or more "o" and then a "p".
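To illustrate the invariant (my own quick check, using the same magic number and alphabet mapping as the snippet above):
const base = parseInt("ooooooooop0", 26);
console.log((base + 0).toString(26));  // "ooooooooop0"  -> trimmed to "0"  -> "a"
console.log((base + 25).toString(26)); // "ooooooooopp"  -> trimmed to "p"  -> "z"
console.log((base + 26).toString(26)); // "oooooooop00"  -> trimmed to "00" -> "aa"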
More generic
The above magic number could be derived via code, and we could make it more generic by providing the target alphabet. The length of that target alphabet then also directly determines the radix (i.e. the number of characters in that string).
Here is the same output generated as above, but with a more generic function:
function createConverter(targetDigits) {
const radix = targetDigits.length,
alpha = "0123456789abcdefghijklmnopqrstuvwxyz",
map = Object.fromEntries(Array.from(alpha,
(src, i) => [src, targetDigits[i]]
)),
base = parseInt((alpha[radix-1]+'0').padStart(
Number.MAX_SAFE_INTEGER.toString(radix).length - 1, alpha[radix-2]
), radix),
trimmer = RegExp("^" + alpha[radix-2] + "*" + alpha[radix-1]);
return n => (n + base).toString(radix)
.replace(trimmer, "")
.replace(/./g, m => map[m]);
}
// Example:
const conv = createConverter("abcdefghijklmnopqrstuvwxyz");
for (let n = 0; n < 29; n++) console.log(n, conv(n));
console.log("...");
for (let n = 690; n < 705; n++) console.log(n, conv(n));
This can now easily be adapted to use a more reduced target alphabet (like without the letters "l" and "o"), giving a radix of 24 instead of 26:
function createConverter(targetDigits) {
const radix = targetDigits.length,
alpha = "0123456789abcdefghijklmnopqrstuvwxyz",
map = Object.fromEntries(Array.from(alpha,
(src, i) => [src, targetDigits[i]]
)),
base = parseInt((alpha[radix-1]+'0').padStart(
Number.MAX_SAFE_INTEGER.toString(radix).length - 1, alpha[radix-2]
), radix),
trimmer = RegExp("^" + alpha[radix-2] + "*" + alpha[radix-1]);
return n => (n + base).toString(radix)
.replace(trimmer, "")
.replace(/./g, m => map[m]);
}
// Example without "l" and "o" in target alphabet:
const conv = createConverter("abcdefghijkmnpqrstuvwxyz");
for (let n = 0; n < 29; n++) console.log(n, conv(n));
console.log("...");
for (let n = 690; n < 705; n++) console.log(n, conv(n));
This covers the range from 1 to 1000. Beyond that I haven't checked.
function colToletters(num) {
    let a = " ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    if (num < 27) return a[num % a.length];
    if (num > 26) {
        num--;
        let letters = ''
        while (num >= 0) {
            letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[num % 26] + letters
            num = Math.floor(num / 26) - 1
        }
        return letters;
    }
}
I could be wrong, but I've checked the other functions posted here and they seem to fail at 26, which should be Z. Remember there are 26 letters in the alphabet, not 25.
