Call stack size within JavaScript

I want to test for large call stacks. Specifically, I want a console warning when the call stack length reaches 1000. This usually means I did something stupid, and can lead to subtle bugs.
Can I compute the call stack length within JavaScript?

Here's a function that will work in all major browsers, although it won't work in ECMAScript 5 strict mode because arguments.callee and caller have been removed in strict mode.
function getCallStackSize() {
    var count = 0, fn = arguments.callee;
    while ( (fn = fn.caller) ) {
        count++;
    }
    return count;
}
Example:
function f() { g(); }
function g() { h(); }
function h() { alert(getCallStackSize()); }
f(); // Alerts 3
UPDATE 1 November 2011
In ES5 strict mode, there is simply no way to navigate the call stack. The only option left is to parse the string returned by new Error().stack, which is non-standard, not universally supported and obviously problematic, and even this may not be possible for ever.
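If you do go down the Error().stack route, a rough sketch looks like this. The format of the stack string differs between engines (V8 prefixes an "Error" header line, Firefox does not), so treat the result as an estimate only:
function approxCallStackSize() {
    var stack = new Error().stack;
    if (!stack) {
        return -1; // engine does not expose a stack string
    }
    // Drop blank lines and V8's leading "Error" line, keeping one line per frame.
    var frames = stack.split('\n').filter(function(line) {
        return /\S/.test(line) && !/^Error/.test(line);
    });
    return frames.length - 1; // exclude approxCallStackSize itself
}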
UPDATE 13 August 2013
This method is also limited by the fact that a function called more than once in a single call stack (e.g. via recursion) will throw getCallStackSize() into an infinite loop (as pointed out by @Randomblue in the comments). An improved version of getCallStackSize() is below: it keeps track of functions it has seen before to avoid going into an infinite loop. However, the returned value is the number of different function objects in the call stack before encountering a repeat, rather than the true size of the complete call stack. This is the best you can do, unfortunately.
var arrayContains = Array.prototype.indexOf ?
    function(arr, val) {
        return arr.indexOf(val) > -1;
    } :
    function(arr, val) {
        for (var i = 0, len = arr.length; i < len; ++i) {
            if (arr[i] === val) {
                return true;
            }
        }
        return false;
    };

function getCallStackSize() {
    var count = 0, fn = arguments.callee, functionsSeen = [fn];
    while ( (fn = fn.caller) && !arrayContains(functionsSeen, fn) ) {
        functionsSeen.push(fn);
        count++;
    }
    return count;
}
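For example, with direct recursion the improved version terminates instead of looping forever, but it under-reports the depth:
function recurse(n) {
    if (n === 0) {
        // Alerts 1: the walk stops at the first repeated function (recurse),
        // even though there are 11 recurse frames on the stack.
        alert(getCallStackSize());
        return;
    }
    recurse(n - 1);
}
recurse(10);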

You can use this module:
https://github.com/stacktracejs/stacktrace.js
Calling printStackTrace returns the stack trace as an array, so you can check its length:
var trace = printStackTrace();
console.log(trace.length);

A different approach is to measure the available stack space in the top-level stack frame and then determine the used space by observing how much less is available deeper in the program. In code:
function getRemainingStackSize()
{
    var i = 0;
    function stackSizeExplorer() {
        i++;
        stackSizeExplorer();
    }
    try {
        stackSizeExplorer();
    } catch (e) {
        return i;
    }
}
var baselineRemStackSize = getRemainingStackSize();
var largestSeenStackSize = 0;
function getStackSize()
{
    var sz = baselineRemStackSize - getRemainingStackSize();
    if (largestSeenStackSize < sz)
        largestSeenStackSize = sz;
    return sz;
}
For example:
function ackermann(m, n)
{
    if (m == 0) {
        console.log("Stack Size: " + getStackSize());
        return n + 1;
    }
    if (n == 0)
        return ackermann(m - 1, 1);
    return ackermann(m - 1, ackermann(m, n-1));
}
function main()
{
    var m, n;
    for (var m = 0; m < 4; m++)
        for (var n = 0; n < 5; n++)
            console.log("A(" + m + ", " + n + ") = " + ackermann(m, n));
    console.log("Deepest recursion: " + largestSeenStackSize + " (" +
        (baselineRemStackSize-largestSeenStackSize) + " left)");
}

main();
There are of course two major downsides to this approach:
(1) determining the used-up stack space is potentially an expensive operation when the VM has a large stack size, and
(2) the numbers reported are not necessarily the number of recursions, but instead are a measurement of the actual space used on the stack (of course, this can also be an advantage). I've seen auto-generated code that contains functions that use the same space on the stack per recursion as 2000 recursions of the stackSizeExplorer function above.
Note: I have only tested the code above with node.js. But I assume it would work with all VMs that use a static stack size.

Higher-order function

I have an exercise about JavaScript. This exercise requires me to use higher-order functions. I have managed to specify some of the functions so far, but when I try to execute the code, the result does not seem to work properly. I have some images to give you an idea, hopefully, you can help me correct this.
The task is: Write the function loop(loops, number, func), which runs the given function the given number of times. Also write the simple functions halve() and square().
This is my code:
function loop(loops, number, func) {
    var loops = function(n) {
        for (var i = 0; i < n; i++) {
            if (i < 0) {
                console.log('Programme ended')
            }
            if (i > 0) {
                return n;
            }
        }
    }
}
var halve = function(n) {
    return n / 2
}
var square = function(n) {
    return n ** 2;
}
console.log(halve(50));
console.log(loop(5, 200, halve));
console.log(loop(3, 5, square));
console.log(loop(-1, 99, halve));
Your current loop function declares an inner function and then exits. That is, nothing actually happens -
function loop(loops, number, func) {
    // declare loops function
    var loops = function(n) {
        // ...
    }
    // exit `loop` function
}
One such fix might be to run the supplied func a number of times in a for loop, like @code_monk suggests. Another option would be to use recursion -
function loop (count, input, func) {
    if (count <= 0)
        return input
    else
        return loop(count - 1, func(input), func)
}

function times10 (num) {
    return num * 10
}

console.log(loop(3, 5, times10))
// 5000
So, first things first: higher-order functions are functions that work on other functions.
The reason why you get undefined is that you are calling a function which doesn't return anything.
function x(parameter) {
    result = parameter + 1;
}
// -> returns undefined every time
console.log(x(5));
// -> undefined

function y(parameter) {
    return parameter + 1;
}
// -> returns a value that can be used later, for example in console.log
console.log(y(5));
// -> 6
Second, you are using n for your for loop when you should probably use loops, so that the code runs as many times as loops indicates rather than the number you pass in (i.e. 200, 5, 99).
By having the console.log inside a loop you may get a lot of undesired "Programme ended" lines in your output, so in my version I kept it out of the loop.
The other two answers given are pretty complete, I believe, but if you want to keep the for loop, here goes:
function loop(loops, number, func) {
    if (loops > 0) {
        for (let i = 0; i < loops; i++) { // let and const are the new ES6 bindings (instead of var)
            number = func(number)
        }
        return number
    }
    else {
        return "Programme ended"
    }
}

function halve(n) { // maybe it's just me but using function declarations feels cleaner
    return n / 2;
}

function square(n) {
    return n ** 2;
}
console.log(halve(50));
console.log(loop(5, 200, halve));
console.log(loop(3, 5, square));
console.log(loop(-1, 99, halve));
Here's one way
const loop = (loops, n, fn) => {
    for (let i = 0; i < loops; i++) {
        console.log( fn(n) );
    }
};

const halve = (n) => {
    return n / 2;
};

const square = (n) => {
    return n ** 2;
};

loop(2, 3, halve);
loop(4, 5, square);

Javascript - For loop vs Linked List vs ES6 Set to find two matching integers

I have prepared 2 JavaScript functions to find matching integer pairs that add up to a sum and return a boolean.
The first function uses a binary search like this:
function find2PairsBySumLog(arr, sum) {
    for (var i = 0; i < arr.length; i++) {
        for (var x = i + 1; x < arr.length; x++) {
            if (arr[i] + arr[x] == sum) {
                return true;
            }
        }
    }
    return false;
}
For the second function I implemented my own singly linked list, in which I add the complement of each value with respect to the sum and then search for incoming values in the Linked List. If a value is found in the Linked List, we know there is a match.
function find2PairsBySumLin(arr, sum) {
    var complementList = new LinkedList();
    for (var i = 0; i < arr.length; i++) {
        if (complementList.find(arr[i])) {
            return true;
        } else {
            complementList.add(sum - arr[i]);
        }
    }
    return false;
}
When I run both functions, I clearly see that the Linked List search executes ~75% faster:
var arr = [9,2,4,1,3,2,2,8,1,1,6,1,2,8,7,8,2,9];
console.time('For loop search');
console.log(find2PairsBySumLog(arr, 18));
console.timeEnd('For loop search');
console.time('Linked List search');
console.log(find2PairsBySumLin(arr, 18));
console.timeEnd('Linked List search');
true
For loop search: 4.590ms
true
Linked List search: 0.709ms
Here is my question: Is the Linked List approach a real linear search? After all, I loop through all the nodes, while my outer loop iterates through the initial array.
Here is my LinkedList search function:
LinkedList.prototype.find = function(data) {
    var headNode = this.head;
    if (headNode === null) {
        return false;
    }
    while (headNode !== null) {
        if (headNode.data === data) {
            return true;
        } else {
            headNode = headNode.next;
        }
    }
    return false;
}
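For reference, the question does not show the LinkedList constructor or its add method; a minimal head-insertion version consistent with the find above might look like this (the node shape with data and next is inferred from find):
function LinkedList() {
    this.head = null;
}

// Insert at the head; O(1) per insertion.
LinkedList.prototype.add = function(data) {
    this.head = { data: data, next: this.head };
};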
UPDATE:
It was a good idea to go back and have another think about the problem based on the comments so far.
Thanks to @nem035's comment about small datasets, I ran another test, but this time with 100,000 integers between 1 and 8. I assigned 9 to the first and last positions and searched for 18 to make sure the entire array would be searched.
I also included the relatively new ES6 Set for comparison, thanks to @Oriol.
By the way, @Oriol and @Deepak, you are right. The first function is not a binary search but rather an O(n*n) search, which has no logarithmic complexity.
It turns out my Linked List implementation was the slowest of all searches. I ran 10 iterations for each function individually. Here is the result:
For loop search: 24.36 ms (avg)
Linked List search: 64328.98 ms (avg)
Set search: 35.63 ms (avg)
Here is the same test for a dataset of 10,000,000 integers:
For loop search: 30.78 ms (avg)
Set search: 1557.98 ms (avg)
Summary:
So it seems the Linked List is really fast for smaller datasets up to ~1,000, while the ES6 Set is great for larger datasets.
Nevertheless the For loop is the clear winner in all tests.
The For loop and Set searches scaled roughly linearly with the amount of data in these tests, while the Linked List search degrades quadratically because every lookup walks an ever-growing list.
Please note: ES6 Set is not supported in older browsers, in case this operation has to be done client-side.
Don't use this. Use a set.
function find2PairsBySum(arr, sum) {
    var set = new Set();
    for (var num of arr) {
        if (set.has(num)) return true;
        set.add(sum - num);
    }
    return false;
}
That's all. Both add and has are guaranteed to be sublinear (probably constant) on average.
You can optimize this substantially by pre-sorting the array and then using a real binary search.
// Find an element in a sorted array.
function includesBinary(arr, elt) {
    if (!arr.length) return false;
    const middle = Math.floor(arr.length / 2);
    switch (Math.sign(elt - arr[middle])) {
        case -1: return includesBinary(arr.slice(0, middle), elt);
        case 0: return true;
        case +1: return includesBinary(arr.slice(middle + 1), elt);
    }
}
// Given an array, pre-sort and return a function to detect pairs adding up to a sum.
function makeFinder(arr) {
    arr = arr.slice().sort((a, b) => a - b);
    return function(sum) {
        for (let i = 0; i < arr.length; i++) {
            const remaining = sum - arr[i];
            if (remaining < 0) return false;
            if (includesBinary(arr, remaining)) return true;
        }
        return false;
    };
}
// Test data: 100 random elements between 0 and 99.
const arr = Array.from(Array(100), _ => Math.floor(Math.random() * 100));
const finder = makeFinder(arr);
console.time('test');
for (let i = 0; i < 1000; i++) finder(100);
console.timeEnd('test');
According to this rough benchmark, one lookup into an array of 100 elements costs a few microseconds.
Rewriting includesBinary to avoid recursion would probably provide a further performance win.
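For instance, a non-recursive sketch that tracks index bounds instead of slicing (which also avoids the array copies the recursive version makes):
function includesBinaryIterative(arr, elt) {
    let lo = 0, hi = arr.length - 1;
    while (lo <= hi) {
        const middle = Math.floor((lo + hi) / 2);
        if (arr[middle] === elt) return true;
        if (arr[middle] < elt) lo = middle + 1;
        else hi = middle - 1;
    }
    return false;
}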
First of all, the find2PairsBySumLog function is not a binary search; it's a kind of brute-force method that checks all pairs of elements in the array, so its worst-case time complexity is O(n*n). The second function is a linear search, which is why it runs faster. For the first function, i.e. find2PairsBySumLog, what you can do is initialize a boolean map and check, for every integer, whether its complement has already been seen, kind of like you are doing in the second function, probably like:
// MAX must exceed both the largest value in arr and sum.
bool isPairsPresent(int arr[], int arr_size, int sum)
{
    int i, temp;
    bool binMap[MAX] = {0};
    for (i = 0; i < arr_size; i++)
    {
        temp = sum - arr[i];
        if (temp >= 0 && binMap[temp] == 1)
            return true;
        binMap[arr[i]] = 1;
    }
    return false;
}
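Since the question is about JavaScript, the same idea translated to JS with a plain object as the boolean map could look like this (find2PairsBySumMap is just an illustrative name):
function find2PairsBySumMap(arr, sum) {
    var seen = {};
    for (var i = 0; i < arr.length; i++) {
        // If the complement of the current value was seen earlier, we have a pair.
        if (seen[sum - arr[i]]) {
            return true;
        }
        seen[arr[i]] = true;
    }
    return false;
}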

JavaScript - Different function-runtime processing the same data-structure - How is that possible?

My task is to write a test function so that the runtime of two different implementations of the same task can be compared. The task itself is to change a dash-separated string to camel case notation, but that's secondary here.
I guess I should show the whole test setup first:
// The array with the test-data. I have shorten it a lot.
// The original-array, used for the test, is much larger.
var test = ["Alpha-North-blue-teal-West-pink-crimson-Delta",
"crimson-Gamma-blue-Delta",
"white-cyan-South-blue-East-East-South-blue",
"teal-black-East-East",
"South-black",
"black-cyan",
"West-white-Beta-Gamma-red-North-Alpha-Beta",
"Gamma-North-West-lime-North-crimson-North",
"blue-red-orange",
"red-West-South"];
// -- Implementation 1 ----------
function dashedToCamelCase( dashed ) {
    var ret;
    var parts;
    if (typeof dashed !== 'string' || !dashed) {
        return '';
    }
    parts = dashed.split('-');
    ret = parts[0].toLowerCase();
    parts = parts.slice(1);
    ret = parts.reduce(function(previous, current) {
        return previous +
            current.slice(0, 1).toUpperCase() +
            current.slice(1).toLowerCase();
    }, ret);
    return ret;
}
// -- Implementation 2 ----------
function dashedToCamelCase2( dashed ) {
    if( typeof dashed != "string" || dashed.length==0 )
        return "";
    return dashed.toLowerCase().replace(/\-([a-z]?)/g, function(match, letter) {
        return letter.toUpperCase();
    });
}
function getRuntime(testData, func, countRunningTests) {
    var i;
    var tmp = 0;
    var sum = 0;
    var min = 0;
    var max = 0;
    var ret = {};
    // Uses testData and func from the enclosing scope.
    var getRuntimeSingleTest = function() {
        var start = Date.now();
        testData.forEach( function( item ) {
            func(item);
        });
        return (Date.now() - start);
    }
    for (i = 1; i <= countRunningTests; i++) {
        tmp = getRuntimeSingleTest();
        sum += tmp;
        if (min === 0 || tmp < min) {
            min = tmp;
        } else if (tmp > max) {
            max = tmp;
        }
    }
    ret.averageRuntime = sum / countRunningTests;
    ret.minimalRuntime = min;
    ret.maximalRuntime = max;
    return ret;
}
function displayResults( results, funcName ) {
    funcName = funcName || '';
    console.log('\n%s', funcName);
    for ( var i in results ) {
        console.log('%s : %s ms', i, results[i]);
    }
}
displayResults(getRuntime(test, dashedToCamelCase, 100), ' - Implementation using reduce() - ');
displayResults(getRuntime(test, dashedToCamelCase2, 100), ' - Implementation using replace() - ');
What I don't understand:
I let the functions process the whole string array many times. The results for the minimum and the maximum runtime differ a lot. By a lot I mean the maximum runtime is six or seven times higher than the minimum runtime.
The average runtime also differs a lot, but not by multiples.
How is that possible?
It's always the same data being used. The results should be at least similar.
The tests were run on a computer with Windows 7 and Internet Explorer 11.
CPU: Intel i5-3230M 2.60 GHz
RAM: 8 GB
UPDATE
As suggested in the accepted answer, I increased the number of test runs.
What I can say now is that the more test runs are used, the more stable the average runtime becomes.
So it's likely as supposed: the variations in the minimum and maximum results are a result of other processes demanding CPU time.
Run time depends on the other processes running on your computer. So if your antivirus starts to do some work, then your browser process must wait.
Better to run these tests many times (e.g. 100,000) for the same function and calculate the average.
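A minimal sketch of that suggestion, assuming performance.now() is available (IE 10+ and all modern browsers) for sub-millisecond resolution; averageRuntime is an illustrative helper, not part of the original test setup:
function averageRuntime(testData, func, runs) {
    var total = 0;
    for (var i = 0; i < runs; i++) {
        var start = performance.now();
        testData.forEach(function(item) {
            func(item);
        });
        total += performance.now() - start;
    }
    return total / runs;
}

console.log(averageRuntime(test, dashedToCamelCase, 100000) + ' ms (avg per run)');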

add a memoization to recursive algorithm

I have written a function for partitioning a number:
var combinations = function (i) {
    var mem = [];
    function inner(n, r, m) {
        for (var k = m; k <= n; k++) {
            if (k == n) {
                r.push(k);
                mem[r] = 1;
                return mem;
            }
            else {
                var copy = r.slice(0);
                copy.push(k);
                inner(n - k, copy, k);
            }
        }
    }
    return inner(i, [], 1);
}
In a second step I would like to add memoization to this algorithm, but I can't think of the right way to implement it, because there is no return statement until the very end (when a return value is specified, e.g. in factorial or Fibonacci, I can add the memoization).
Can anybody point me in the right direction?
[edit]
I need this algorithm to be as fast as possible. This is a competition for a kata at codewars: link
There is a requirement that it must execute in under 6000 ms for inputs up to 330.
That's the best algorithm I can think of, except I don't know how to store the partial results.
Here's much simpler code that works:
function nr_partitions(n) { return p(n, n); }

function p(sum, largest) {
    if (largest == 0) { return 0; }
    if (sum == 0) { return 1; }
    if (sum < 0) { return 0; }
    return p(sum, largest - 1) + p(sum - largest, largest);
}
It uses a well-known recurrence, p(n,k) = p(n,k-1) + p(n-k,k), where p(n,k) denotes the number of partitions of n whose largest part is at most k (e.g. p(3,2) = 2 because we only count 1+1+1 and 1+2, but not 3). For k = n we get the number of all partitions of n.
Adding memoization involves storing a dictionary that maps pairs (sum, largest) to p(sum, largest), for example:
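A sketch of that memoized recurrence (the string key 'sum,largest' is just one convenient way to index the cache):
var pCache = {};

function p(sum, largest) {
    if (largest == 0) { return 0; }
    if (sum == 0) { return 1; }
    if (sum < 0) { return 0; }
    var key = sum + ',' + largest;
    if (key in pCache) { return pCache[key]; }
    return pCache[key] = p(sum, largest - 1) + p(sum - largest, largest);
}

function nr_partitions(n) { return p(n, n); }

console.log(nr_partitions(10)); // 42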
I would go along the lines of:
var cache = {};

var combinations = function (i) {
    if ( cache[i] ) {
        return cache[i];
    }
    var mem = [];
    function inner(n, r, m) {
        for (var k = m; k <= n; k++) {
            if (k == n) {
                r.push(k);
                mem[r] = 1;
                return mem;
            }
            else {
                var copy = r.slice(0);
                copy.push(k);
                inner(n - k, copy, k);
            }
        }
    }
    cache[i] = inner(i, [], 1);
    return cache[i];
}
But you'll have to modify your algorithm to make use of that cache (compute the biggest terms first?).
Depending on your other requirements, you might want to consider using underscore.js which has its own _.memoize function.
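For instance, assuming underscore.js is loaded and caching whole results per i is what you want:
// _.memoize caches results keyed on the first argument by default.
var memoizedCombinations = _.memoize(combinations);
memoizedCombinations(5); // computed
memoizedCombinations(5); // served from the cache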
The secret of memoization is that it exploits how closures work. When you define a function inside another scope, it has access to everything in that scope. When you return that function to somewhere outside the scope, it carries references to everything it can see inside the scope.
So to implement memoization, you need to make a function that returns another function, one that does the memoization check before calling the inner one.
Your code will look something like this:
/**
 * Because we'll be returning "a function that returns a function" below,
 * this needs to be executed immediately so combinations() is just
 * a standalone function.
 */
var combinations = (function() {
    /**
     * mem needs to stay outside the scope of your inner function.
     * If it's in a closure like this, JavaScript will keep its value
     * around as long as combinations still exists.
     */
    var mem = [];
    /**
     * A memoization wrapper should return a memoized function
     */
    return function(i) {
        /**
         * Check if mem[i] is set and return it if it has been
         */
        if (mem[i] !== undefined) {
            console.log('returning memoized value');
            return mem[i];
        }
        function inner(n, r, m) {
            for (var k = m; k <= n; k++) {
                if (k == n) {
                    r.push(k);
                    mem[r] = 1;
                    return mem;
                }
                else {
                    var copy = r.slice(0);
                    copy.push(k);
                    inner(n - k, copy, k);
                }
            }
        }
        /**
         * If the value needs to be computed, we can set it at the same time
         * as we return it instead of putting it in a temporary variable.
         */
        console.log('computed');
        return mem[i] = inner(i, [], 1);
    }
}()); /** <--- That's the rest of the automatic execution */

console.log(combinations(5));
console.log(combinations(5));

Fast nextafter function in JavaScript

I'm trying to iterate through all 32 bit floating point numbers in JavaScript to visually compare some methods for polynomial evaluation for accuracy. To do so, I've implemented the code shown below. Unfortunately, this code is way too slow.
Would there be any way to improve performance?
In C/C++ the equivalent code runs in a bit over a minute on my computer, whereas I haven't had the patience to see how long this code takes.
function nextFloat(f) {
    // Note that this moves away from 0.0
    // It will fail at +/- infinity and result in a NaN
    var bitRepr = floatToBits(f);
    bitRepr++;
    return bitsToFloat(bitRepr);
}

function prevFloat(f) {
    // Note that this moves towards 0.0
    // This will fail at 0.0 and result in a NaN
    var bitRepr = floatToBits(f);
    bitRepr--;
    return bitsToFloat(bitRepr);
}

function floatToBits(f) {
    var buf = new ArrayBuffer(4);
    (new Float32Array(buf))[0] = f;
    return (new Uint32Array(buf))[0];
}

function bitsToFloat(b) {
    var buf = new ArrayBuffer(4);
    (new Uint32Array(buf))[0] = b;
    return (new Float32Array(buf))[0];
}
Another method I might consider is multiplying the number by (1 + epsilon), though I believe that has edge cases I would need to resolve at the bit level anyway.
If your code is synchronous, you don't need to call new all the time. This means you can keep a Uint32Array and a Float32Array that are linked through the same buffer and share them across all functions, for example:
var obj = (function () {
    var int = new Uint32Array(1),
        float = new Float32Array(int.buffer);
    return {
        i2f: function (i) {
            int[0] = i;
            return float[0];
        },
        f2i: function (f) {
            float[0] = f;
            return int[0];
        },
        next: function () {
            int[0] = int[0] + 1;
            return float[0];
        },
        prev: function () {
            int[0] = int[0] - 1;
            return float[0];
        }
    };
}());
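A rough usage sketch, assuming you want to walk upward through the positive floats starting from the smallest positive subnormal (this still takes roughly 2 billion iterations, but avoids any per-step allocation):
obj.f2i(0); // seed the shared buffer with +0
var x = obj.next(); // smallest positive subnormal
var count = 0;
while (isFinite(x)) {
    // visit x here, e.g. evaluate the polynomials at x and compare
    count++;
    x = obj.next();
}
console.log(count + ' positive finite non-zero floats visited');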
Something like this should work, and doesn't require allocating arrays:
function testall(f) {
    var M = Math.pow(2, -126);
    var x, p, s, b;
    for (p = -1; p <= 1; p += 2) {
        for (s = 0; s < 1<<23; s++) {
            // subnormals (including zeros)
            x = p*M*(s/(1<<23));
            f(x);
        }
        for (b = M; b <= 2/M; b *= 2) {
            for (s = 0; s < 1<<23; s++) {
                // normals
                x = p*b*(1+s/(1<<23));
                f(x);
            }
        }
    }
}
This will iterate through all real-valued floats (subnormals and normals). It won't handle Infs (there are only two of those, so I leave them up to you), or NaNs (as far as I know, there is no efficient way to iterate through all the NaN bit patterns in JavaScript).
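As a usage sketch for the original goal of comparing polynomial evaluation methods (evalNaive and evalHorner are stand-in implementations, not from the question):
function evalNaive(x)  { return 1 + 2*x + 3*x*x; }
function evalHorner(x) { return 1 + x*(2 + 3*x); }

var worst = 0;
testall(function (x) {
    var diff = Math.abs(evalNaive(x) - evalHorner(x));
    if (diff > worst) worst = diff; // NaN diffs (from Infinity - Infinity) are simply skipped
});
console.log('Largest absolute difference seen: ' + worst);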
