How do I optimise this JavaScript function for speed?

I just took the Codility tape equilibrium test here.
As you can see from my score, I didn't do well enough on how fast the function executes. Can anybody give me some pointers so I can optimise this code and get closer to 100%?
Here is the code...
function solution(A) {
    var minimumAbsDiff = null;
    for (var currentIndex = 1; currentIndex < A.length; currentIndex++) {
        var bottomHalf = getTotal(0, currentIndex - 1, A);
        var topHalf = getTotal(currentIndex, A.length - 1, A);
        var absDiff = Math.abs(bottomHalf - topHalf);
        if (minimumAbsDiff == null) {
            minimumAbsDiff = absDiff;
        } else {
            if (absDiff < minimumAbsDiff) minimumAbsDiff = absDiff;
        }
    }
    return minimumAbsDiff;
}

function getTotal(start, end, arrayTocheck) {
    var total = 0;
    for (var currentIndex = start; currentIndex <= end; currentIndex++) {
        total = total + arrayTocheck[currentIndex];
    }
    return total;
}

You don't want to optimise speed. You want to lower the algorithmic complexity. Your current algorithm is O(n²), while the task description explicitly states that
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage
(not counting the storage required for input arguments).
So what's the insight that makes that possible? The split sums for consecutive positions P differ by only a single element. If you compare the value |(A[0] + ... + A[P-1]) - (A[P] + ... + A[N-1])| for P and P+1, there is only a constant amount of work to do: move A[P] from the right-hand sum to the left-hand sum.
function solution(A) {
    var left = 0,
        right = A.reduce(function(a, b) { return a + b; }, 0);
    var min = Infinity;
    for (var p = 0; p < A.length - 1; p++) {
        left += A[p];
        right -= A[p];
        min = Math.min(min, Math.abs(left - right));
    }
    return min;
}
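As a quick sanity check (assuming I remember the sample from the task statement correctly, A = [3, 1, 2, 4, 3] with expected answer 1):
// splits give |3-10|=7, |4-9|=5, |6-7|=1, |10-3|=7, so the minimum is 1
console.log(solution([3, 1, 2, 4, 3])); // 1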

Related

Finding total number of maximum numbers in array fails only for big data

I'm doing some HackerRank challenges to improve my problem-solving skills. One of the challenges was about finding the total count of the maximum number in an array of numbers. For example, if we have 3 2 1 3 1 3, it should return 3.
This is what I did:
function birthdayCakeCandles(ar) {
    let total = 0
    let sortedArray = ar.sort((cur, next) => {
        return cur < next
    })
    ar.map(item => {
        if (item === sortedArray[0]) {
            total++;
        }
    })
    return total
}
So I sorted the given array, then mapped over it, checking how many of the numbers are equal to the maximum number in the array and counting the total.
This passes 8/9 test cases; the one it fails on has an array with a length of 100000 (this is the given data for that test case).
I really can't see why it fails this test. Is it possible that this happens because JavaScript is always synchronous and single-threaded?
I tried using a Promise and async/await, but HackerRank considers the first return as the output (which is the Promise itself) and doesn't use the resolved value, so I can't really test this.
Is it something wrong with my logic?
The sorting approach is too slow (O(n log n) time complexity). For algorithmic challenges on HackerRank, it's unlikely that features particular to your language choice, like promises/async, are going to rescue you.
You can do this in one pass using an object to keep track of how many times you've "seen" each number and the array's maximum number, then simply index into the object to get your answer:
function birthdayCakeCandles(ar) {
    let best = -Infinity;
    const seen = {};
    for (let i = 0; i < ar.length; i++) {
        if (ar[i] > best) {
            best = ar[i];
        }
        seen[ar[i]] = ++seen[ar[i]] || 1;
    }
    return seen[best];
}
Time and space complexity: O(n).
Edit:
This answer is even better, with constant space (here it is in JS):
function birthdayCakeCandles(ar) {
    let best = -Infinity;
    let count = 0;
    for (const n of ar) {
        if (n > best) {
            best = n;
            count = 1;
        }
        else if (n === best) {
            count++;
        }
    }
    return count;
}
In your case, the built-in sort function is using resources heavily. Maybe that's the reason it is failing on space/time complexity.
BTW, this problem can be solved easily using a for loop. The idea is:
Pseudocode
var maxNum = -999999; // start from the lowest possible value for the data type
int count = 0;
for (x in arr)
{
    if (x > maxNum)
    {
        maxNum = x;
        count = 1;
    }
    else if (x == maxNum) count++;
}
Here count will be the output.
The full code is
function birthdayCakeCandles(ar) {
    var maxNum = -1;
    var count = 0;
    for (var i = 0; i < ar.length; i++) {
        var x = ar[i];
        if (x < maxNum) continue;
        if (x > maxNum) {
            maxNum = x;
            count = 1;
        }
        else {
            count++;
        }
    }
    return count;
}
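As a quick check with the example from the question (3 2 1 3 1 3 should return 3):
console.log(birthdayCakeCandles([3, 2, 1, 3, 1, 3])); // 3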

JavaScript: Faster roulette selection

I am implementing a selection algorithm that selects an object based on a probability proportional to its score value. This makes higher-scoring objects more likely to be chosen.
My implementation is as follows:
var pool = [];
for (var i = 0; i < 100; i++)
    pool.push({ Id: i, Score: Math.random() * 100 << 0 });

const NUM_RUNS = 100000;
var start = new Date();
for (var i = 0; i < NUM_RUNS; i++)
    rouletteSelection(pool);
var end = new Date();

var runningTime = (end.getTime() - start.getTime()) / 1000;
var avgExecutionTime = (runningTime / NUM_RUNS) * Math.pow(10, 9);
console.log('Running Time: ' + runningTime + ' seconds');
console.log('Avg. Execution Time: ' + avgExecutionTime + ' nanoseconds');

function rouletteSelection(pool) {
    // Sum all scores and normalize by shifting range to minimum of 0
    var sumScore = 0, lowestScore = 0;
    pool.forEach((obj) => {
        sumScore += obj.Score;
        if (obj.Score < lowestScore)
            lowestScore = obj.Score;
    })
    sumScore += Math.abs(lowestScore * pool.length);

    var rouletteSum = 0;
    var random = Math.random() * sumScore << 0;
    for (var i in pool) {
        rouletteSum += pool[i].Score + lowestScore;
        if (random < rouletteSum)
            return pool[i];
    }

    // Failsafe
    console.warn('Could not choose roulette winner, selecting random');
    return pool[Math.random() * pool.length];
};
When run, the performance isn't bad: on my machine, each call to rouletteSelection() takes about 2500-3200 nanoseconds. However, before using it in production, I want to optimize this and shave off as much overhead as I can, as this function could potentially be called tens of millions of times in extreme cases.
An obvious optimization would be to somehow merge everything into a single loop so the object array is only iterated over once. The problem is, in order to normalize the scores (negative scores are shifted to 0), I need to know the lowest score to begin with.
Does anyone have any idea as to how to get around this?
At the risk of stating the obvious: just don't do the normalisation in every call to rouletteSelection. Do it once, after you've constructed the pool.
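A minimal sketch of what that could look like, assuming the pool is built once and its scores don't change between selections (the preparePool helper and the offset/sumScore fields are my own naming, not from the question):

// Do the O(n) normalisation work once, right after building the pool.
function preparePool(pool) {
    var lowestScore = 0;
    var sumScore = 0;
    pool.forEach(function (obj) {
        sumScore += obj.Score;
        if (obj.Score < lowestScore) lowestScore = obj.Score;
    });
    // Shift every score so the minimum is 0, and remember the shifted total.
    pool.offset = -lowestScore;
    pool.sumScore = sumScore + pool.offset * pool.length;
    return pool;
}

// Each call is now a single loop with no preprocessing.
function rouletteSelection(pool) {
    var random = Math.random() * pool.sumScore;
    var rouletteSum = 0;
    for (var i = 0; i < pool.length; i++) {
        rouletteSum += pool[i].Score + pool.offset;
        if (random < rouletteSum) return pool[i];
    }
    // Failsafe for floating-point edge cases
    return pool[pool.length - 1];
}

// Usage:
// var pool = preparePool(buildPool());
// var winner = rouletteSelection(pool);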

v8 JavaScript performance implications of const, let, and var?

Regardless of functional differences, does using the new keywords 'let' and 'const' have any generalized or specific impact on performance relative to 'var'?
After running the program:
function timeit(f, N, S) {
    var start, timeTaken;
    var stats = {min: 1e50, max: 0, N: 0, sum: 0, sqsum: 0};
    var i;
    for (i = 0; i < S; ++i) {
        start = Date.now();
        f(N);
        timeTaken = Date.now() - start;
        stats.min = Math.min(timeTaken, stats.min);
        stats.max = Math.max(timeTaken, stats.max);
        stats.sum += timeTaken;
        stats.sqsum += timeTaken * timeTaken;
        stats.N++;
    }
    var mean = stats.sum / stats.N;
    var sqmean = stats.sqsum / stats.N;
    return {min: stats.min, max: stats.max, mean: mean, spread: Math.sqrt(sqmean - mean * mean)};
}
var variable1 = 10;
var variable2 = 10;
var variable3 = 10;
var variable4 = 10;
var variable5 = 10;
var variable6 = 10;
var variable7 = 10;
var variable8 = 10;
var variable9 = 10;
var variable10 = 10;
function varAccess(N) {
    var i, sum;
    for (i = 0; i < N; ++i) {
        sum += variable1;
        sum += variable2;
        sum += variable3;
        sum += variable4;
        sum += variable5;
        sum += variable6;
        sum += variable7;
        sum += variable8;
        sum += variable9;
        sum += variable10;
    }
    return sum;
}
const constant1 = 10;
const constant2 = 10;
const constant3 = 10;
const constant4 = 10;
const constant5 = 10;
const constant6 = 10;
const constant7 = 10;
const constant8 = 10;
const constant9 = 10;
const constant10 = 10;
function constAccess(N) {
    var i, sum;
    for (i = 0; i < N; ++i) {
        sum += constant1;
        sum += constant2;
        sum += constant3;
        sum += constant4;
        sum += constant5;
        sum += constant6;
        sum += constant7;
        sum += constant8;
        sum += constant9;
        sum += constant10;
    }
    return sum;
}
function control(N) {
    var i, sum;
    for (i = 0; i < N; ++i) {
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
        sum += 10;
    }
    return sum;
}
console.log("ctl = " + JSON.stringify(timeit(control, 10000000, 50)));
console.log("con = " + JSON.stringify(timeit(constAccess, 10000000, 50)));
console.log("var = " + JSON.stringify(timeit(varAccess, 10000000, 50)));
My results were the following:
ctl = {"min":101,"max":117,"mean":108.34,"spread":4.145407097016924}
con = {"min":107,"max":572,"mean":435.7,"spread":169.4998820058587}
var = {"min":103,"max":608,"mean":439.82,"spread":176.44417700791374}
However, the discussion noted here seems to indicate a real potential for performance differences under certain scenarios: https://esdiscuss.org/topic/performance-concern-with-let-const
TL;DR
In theory, an unoptimized version of this loop:
for (let i = 0; i < 500; ++i) {
    doSomethingWith(i);
}
might be slower than an unoptimized version of the same loop with var:
for (var i = 0; i < 500; ++i) {
    doSomethingWith(i);
}
because a different i variable is created for each loop iteration with let, whereas there's only one i with var.
Arguing against that is the fact that var is hoisted, so it's declared outside the loop, whereas the let is only declared within the loop, which may offer an optimization advantage.
In practice, here in 2018, modern JavaScript engines do enough introspection of the loop to know when it can optimize that difference away. (Even before then, odds are your loop was doing enough work that the additional let-related overhead was washed out anyway. But now you don't even have to worry about it.)
Beware synthetic benchmarks as they are extremely easy to get wrong, and trigger JavaScript engine optimizers in ways that real code doesn't (both good and bad ways). However, if you want a synthetic benchmark, here's one:
const now = typeof performance === "object" && performance.now
    ? performance.now.bind(performance)
    : Date.now.bind(Date);

const btn = document.getElementById("btn");
btn.addEventListener("click", function() {
    btn.disabled = true;
    runTest();
});

const maxTests = 100;
const loopLimit = 50000000;
const expectedX = 1249999975000000;

function runTest(index = 1, results = {usingVar: 0, usingLet: 0}) {
    console.log(`Running Test #${index} of ${maxTests}`);
    setTimeout(() => {
        const varTime = usingVar();
        const letTime = usingLet();
        results.usingVar += varTime;
        results.usingLet += letTime;
        console.log(`Test ${index}: var = ${varTime}ms, let = ${letTime}ms`);
        ++index;
        if (index <= maxTests) {
            setTimeout(() => runTest(index, results), 0);
        } else {
            console.log(`Average time with var: ${(results.usingVar / maxTests).toFixed(2)}ms`);
            console.log(`Average time with let: ${(results.usingLet / maxTests).toFixed(2)}ms`);
            btn.disabled = false;
        }
    }, 0);
}

function usingVar() {
    const start = now();
    let x = 0;
    for (var i = 0; i < loopLimit; i++) {
        x += i;
    }
    if (x !== expectedX) {
        throw new Error("Error in test");
    }
    return now() - start;
}

function usingLet() {
    const start = now();
    let x = 0;
    for (let i = 0; i < loopLimit; i++) {
        x += i;
    }
    if (x !== expectedX) {
        throw new Error("Error in test");
    }
    return now() - start;
}
<input id="btn" type="button" value="Start">
It says that there's no significant difference in that synthetic test on either V8/Chrome or SpiderMonkey/Firefox. (Repeated tests in both browsers have one winning, or the other winning, and in both cases within a margin of error.) But again, it's a synthetic benchmark, not your code. Worry about the performance of your code when and if your code has a performance problem.
As a style matter, I prefer let for the scoping benefit and the closure-in-loops benefit if I use the loop variable in a closure.
Details
The important difference between var and let in a for loop is that a different i is created for each iteration; it addresses the classic "closures in loop" problem:
function usingVar() {
    for (var i = 0; i < 3; ++i) {
        setTimeout(function() {
            console.log("var's i: " + i);
        }, 0);
    }
}

function usingLet() {
    for (let i = 0; i < 3; ++i) {
        setTimeout(function() {
            console.log("let's i: " + i);
        }, 0);
    }
}

usingVar();
setTimeout(usingLet, 20);
Creating the new EnvironmentRecord for each loop body (spec link) is work, and work takes time, which is why in theory the let version is slower than the var version.
But the difference only matters if you create a function (closure) within the loop that uses i, as I did in that runnable snippet example above. Otherwise, the distinction can't be observed and can be optimized away.
Here in 2018, it looks like V8 (and SpiderMonkey in Firefox) is doing sufficient introspection that there's no performance cost in a loop that doesn't make use of let's variable-per-iteration semantics. See this test.
In some cases, const may well provide an opportunity for optimization that var wouldn't, especially for global variables.
The problem with a global variable is that it's, well, global; any code anywhere could access it. So if you declare a variable with var that you never intend to change (and never do change in your code), the engine can't assume it's never going to change as the result of code loaded later or similar.
With const, though, you're explicitly telling the engine that the value cannot change¹. So it's free to do any optimization it wants, including emitting a literal instead of a variable reference to code using it, knowing that the values cannot be changed.
¹ Remember that with objects, the value is a reference to the object, not the object itself. So with const o = {}, you could change the state of the object (o.answer = 42), but you can't make o point to a new object (because that would require changing the object reference it contains).
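A tiny illustration of that footnote (not from the original answer):
const o = { answer: 1 };
o.answer = 42;   // fine: the object's state changes, the binding o does not
// o = {};       // would throw: "Assignment to constant variable." (in V8)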
When using let or const in other var-like situations, they're not likely to have different performance. This function should have exactly the same performance whether you use var or let, for instance:
function foo() {
    var i = 0;
    while (Math.random() < 0.5) {
        ++i;
    }
    return i;
}
It's all, of course, unlikely to matter and something to worry about only if and when there's a real problem to solve.
"LET" IS BETTER IN LOOP DECLARATIONS
With a simple test (run 5 times) in the browser, like this:
// WITH VAR
console.time("var-time")
for(var i = 0; i < 500000; i++){}
console.timeEnd("var-time")
The mean time to execute is more than 2.5ms
// WITH LET
console.time("let-time")
for(let i = 0; i < 500000; i++){}
console.timeEnd("let-time")
The mean time to execute is more than 1.5ms
I found that loop time with let is better.
T.J. Crowder's answer is excellent.
Here is an addition: "When would I get the most bang for my buck by editing existing var declarations to const?"
I've found that the biggest performance boost had to do with "exported" functions.
So if files A, B, R, and Z are calling a "utility" function in file U that is commonly used throughout your app, then switching that utility function over to const, and the parent file's reference to it to a const, can eke out some improved performance (see the sketch below). For me it wasn't measurably faster, but the overall memory consumption was reduced by about 1-3% for my grossly monolithic, Frankenstein-ed app. If you're spending bags of cash on the cloud or your bare-metal server, that could be a good reason to spend 30 minutes combing through and updating some of those var declarations to const.
I realize that if you read into how const, var, and let work under the covers you probably already concluded the above... but in case you "glanced" over it :D.
From what I remember of the benchmarking on node v8.12.0 when I was making the update, my app went from idle consumption of ~240MB RAM to ~233MB RAM.
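For concreteness, the kind of change being described might look roughly like this (the file and function names are hypothetical, not from the answer):
// util.js -- previously: var formatPrice = function (n) { return n.toFixed(2); };
const formatPrice = function (n) {
    return n.toFixed(2);
};
module.exports = { formatPrice };

// a.js -- previously: var util = require('./util');
const util = require('./util');
console.log(util.formatPrice(19.999)); // "20.00"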
T.J. Crowder's answer is very good, but:
let is made to make code more readable, not more powerful.
In theory, let will be slower than var.
In practice, the compiler cannot completely resolve (via static analysis) an incomplete program, so sometimes it will miss the optimization.
In any case, using let will require more CPU for introspection; the benchmark must be started when Google V8 starts to parse.
If introspection fails, let will push hard on the V8 garbage collector: it will require more iterations to free/reuse memory, and it will also consume more RAM. The benchmark must take these points into account.
Google Closure will transform let into var...
The effect of the performance gap between var and let can be seen in a real-life complete program, not in a single basic loop.
Anyway, using let where you don't have to makes your code less readable.
I just did some more tests. Initially I concluded that there was a substantial difference in favor of var: my results showed that between const / let / var there was a ratio of 4 / 4 / 1 to 3 / 3 / 1 in execution time.
After an edit on 29/01/2022 (following jmrk's remark to remove global variables in the let and const tests), the results now seem similar: 1 / 1 / 1.
I give the code used below. Just let me mention that I started from the code of AMN and did lots of tweaking and editing.
I did the tests both in the w3schools_tryit editor and in Google_scripts.
My notes:
In GoogleScripts it seems that the 1st test ALWAYS takes longer, no matter which one it is, especially for reps < 5,000,000 and before separating the tests into individual functions.
For reps < 5,000,000, JS engine optimizations are all that matters; results go up and down without safe conclusions.
GoogleScripts consistently takes ~1.5x longer, which I think is expected.
There was a BIG difference when all tests were separated into individual functions: execution speed at least doubled and the 1st test's delay almost vanished!
Please don't judge the code; I did try, but I don't pretend to be any expert in JS.
I would be delighted to see your tests and opinions.
function mytests() {
    var start = 0;
    var tm1 = " Const: ", tm2 = " Let: ", tm3 = " Var: ";

    start = Date.now();
    tstLet();
    tm2 += Date.now() - start;

    start = Date.now();
    tstVar();
    tm3 += Date.now() - start;

    start = Date.now();
    tstConst();
    tm1 += (Date.now() - start);

    var result = "TIMERS:" + tm1 + tm2 + tm3;
    console.log(result);
    return result;
}

// with VAR
function tstVar() {
    var lmtUp = 50000000;
    var i = 0;
    var item = 2;
    var sum = 0;
    for (i = 0; i < lmtUp; i++) { sum += item; }
    item = sum / 1000;
}

// with LET
function tstLet() {
    let lmtUp = 50000000;
    let j = 0;
    let item = 2;
    let sum = 0;
    for (j = 0; j < lmtUp; j++) { sum += item; }
    item = sum / 1000;
}

// with CONST
function tstConst() {
    const lmtUp = 50000000;
    var k = 0;
    const item = 2;
    var sum = 0;
    for (k = 0; k < lmtUp; k++) { sum += item; }
    k = sum / 1000;
}
Code with 'let' may be better optimized than 'var' because variables declared with var do not get cleared when the scope expires, but variables declared with let do. So var can use more space, as it keeps separate versions around when used in a loop.

JavaScript - Large Index Values Lead to Infinite Loops - Not Sure Why?

I'm working on a Project Euler problem (#3) - Largest Prime Factor.
The code I have written seems to work for smaller numbers but not for bigger numbers (see 2nd invocation of the function). If I try that, it goes into an infinite loop and I can't figure out why. I've tried JSBin to test and it does the same thing ('potential infinite loop') so I have no idea how to debug this issue.
Would appreciate help with this.
Here's my code:
// Prime Checker
var isPrime = function (num) {
    for (var i = 2; i < num; i++) {
        if (num % i === 0) {
            return false;
        }
    }
    return true;
}; // end function

// Largest Prime Factor
var primeFactor = function (num) {
    var result = 0;
    var temp = 0;
    var primeArr = [];
    for (var i = 2; i <= num; i++) {
        if (num % i === 0) {
            temp = i;
            if (isPrime(temp)) {
                primeArr.push(temp);
            }
        }
    }
    console.log("primeArr: " + primeArr);
    // sort
    primeArr.sort(function(a, b) {
        return b - a;
    });
    result = parseInt(primeArr[0]);
    console.log("result: " + result);
    return result;
}; // end function
primeFactor(13195); //WORKS FINE
primeFactor(600851475143); //CAUSES INFINITE LOOP
As mentioned in one of the comments, when JavaScript is given this much work in a single calculation it can appear to hang as if in an infinite loop (the loop here isn't truly infinite; it just has roughly 600 billion iterations to get through). If you need a solution, there are JavaScript libraries such as big.js that handle large-number maths; however, I don't know if it will be able to help here. The only way to find out is to try. Hope this helps.
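Not from the answer above, but for comparison, a common way to make this finish quickly is to divide each factor out as soon as it is found, so the remaining number shrinks rapidly and no separate primality test is needed (a sketch under that assumption):
// Largest prime factor by trial division, dividing out each factor as it is found.
var largestPrimeFactor = function (num) {
    var largest = 1;
    for (var i = 2; i * i <= num; i++) {
        while (num % i === 0) {
            largest = i;
            num = num / i;
        }
    }
    // Whatever remains (if > 1) is itself prime and is the largest factor.
    return num > 1 ? num : largest;
};

console.log(largestPrimeFactor(13195));        // 29
console.log(largestPrimeFactor(600851475143)); // finishes almost instantly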

dygraph rolling average for data with holes

Dygraphs options provide 'rollPeriod' to support rolling averages and 'stepPlot' to support step plots. When they are set together and some data is missing in between, they give very unexpected results. For example, the attached image link shows the graph for the original data (rollPeriod=1) and for rollPeriod=5. (http://imgur.com/a/9ajh8)
At 40,000, for example, the rolling average should be zero. But dygraphs takes the average of the last 5 data points instead of the last 5 seconds.
Is it possible to get a rolling average that maintains a notion of time rather than data points? Thanks in advance!
PS - Sorry for the image link. SO won't allow me to post images directly due to lack of reputation. :(
As you noticed, dygraphs averages the last five data points, not the last five seconds. This is all it can do, since it doesn't know the cadence of your data. Fortunately, you can fix this by adding explicit missing values:
Datetime,Value
2014-08-01 12:34:55,0
2014-08-01 12:34:56,
2014-08-01 12:34:57,0
2014-08-01 12:34:58,
2014-08-01 12:34:59,0
The zeros are values, the blanks are missing values.
See http://dygraphs.com/data.html for more information, or one of these two demos for examples.
Since this functionality is lacking, I implemented it myself. I am putting the code here for anyone in a similar situation. The code uses the internal function extractSeries_ from the dygraphs library, and Queue.js. Use with extreme caution!
function calcAvg_(minDate, maxDate, dispData) {
    var windowSize = Math.round((maxDate - minDate) / 100);
    if (windowSize <= 1) {
        return dispData;
    }

    var energy = 0;
    var lastS = new Queue();
    var series = dispData;
    var lastAvg = 0;

    // Initially lastS elements are all 0.
    // lastS shall always be maintained at windowSize.
    // Every enqueue after initialization in lastS shall be matched by a dequeue.
    for (j = 0; j < windowSize; j++) {
        lastS.enqueue(0);
    }

    var avg_series = [];
    var prevTime = minDate - windowSize;
    var prevVal = 0;
    avg_series.push([prevTime, prevVal]);

    //console.log("calcAvg_ min: " + minDate + " max: " + maxDate + " win: " + windowSize);
    for (j = 0; j < series.length; j++) {
        var time = series[j][0];
        var value = series[j].slice(1);
        if (time > minDate) {
            var k = 0;
            while (k < windowSize && prevTime + k < time) {
                var tail = lastS.dequeue();
                lastS.enqueue(prevVal);
                lastAvg = lastAvg + (prevVal - tail) / windowSize;
                avg_series.push([prevTime + k, lastAvg]);
                k++;
            }
        }
        prevTime = time;
        prevVal = value;
        if (time > maxDate) {
            break;
        }
    }

    if (j == series.length) {
        //console.log("Fix last value");
        var k = 0;
        while (k < windowSize && prevTime + k < maxDate) {
            var tail = lastS.dequeue();
            lastS.enqueue(prevVal);
            lastAvg = lastAvg + (prevVal - tail) / windowSize;
            avg_series.push([prevTime + k, lastAvg]);
            k++;
        }
    }
    //console.log(avg_series);
    avg_series.push([maxDate, 0]);
    return avg_series;
}

var blockRedraw = false;
myDrawCallback_ = function(gs, initial) {
    if (blockRedraw) return;
    blockRedraw = true;
    var range = gs.xAxisRange();
    var yrange = gs.yAxisRange();
    var series = calcAvg_(range[0], range[1],
        gs.extractSeries_(gs.rawData_, 0, false));
    gs.updateOptions({
        dateWindow: range,
        valueRange: yrange,
        file: series });
    blockRedraw = false;
}
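Presumably the callback is then wired into the graph via the standard drawCallback option, something like this (the container id and CSV file name are placeholders, not from the original post):
var g = new Dygraph(
    document.getElementById("graphdiv"),   // placeholder container element
    "data.csv",                            // placeholder data source
    {
        stepPlot: true,
        drawCallback: myDrawCallback_
    }
);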
