How to test two parameters to be numbers with Jest - javascript

I have a function which receives two integers and returns an array containing the first `number` multiples of `value`, called as countBy(value, number).
e.g.
countBy(1, 10) returns [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
countBy(2, 5) returns [2, 4, 6, 8, 10]
So, what matcher should be used to test if the function receives only numbers (integer) ?
I did a lot of tests but still did not find a solution.
// The question's failing attempt: countBy returns an ARRAY, so `typeof`
// evaluates to 'object' here — this assertion can never pass as written.
it ('Verify if are received and returned only numbers', () => {
expect(typeof countBy(2, 5)).toBe('number');
});
Can anybody shed some light on this?

try this
// Expected result of countBy(1, 10): the first ten multiples of 1.
// (The original snippet repeated 5 and omitted 6 — fixed here.)
const targetArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
test("Verify if are received and returned only numbers", () => {
  targetArray.forEach((target) => {
    // Number.isInteger is stricter than `typeof target === "number"`:
    // it also rejects NaN, Infinity and non-integer floats.
    expect(Number.isInteger(target)).toBe(true);
  });
});

Related

Javascript Get the common elements of three arrays

I am trying to filter the common elements of 3 arrays. But instead of getting the common elements of 3 arrays, it only reads the 2 arrays and not the 3rd array. Here is my code, thank you:
// The question's code: a function exits at the FIRST return statement it
// reaches, so the arr3 check below never runs — only arr1 and arr2 are
// actually intersected. This is the bug the answers discuss.
function commonElementsOfArray(arr1, arr2, arr3) {
return arr1.filter(function (n) {
return arr2.indexOf(n) !== -1;
return arr3.indexOf(n) !== -1; // unreachable dead code
});
}
As mentioned by @Titus, the issue in your code is the double return statements - once the first return is reached the filter callback exits.
However, there is also an issue worth pointing out in your approach to finding common elements regarding Array.indexOf. The problem is that Array.indexOf is an O(n) operation, meaning the parameter will be checked against every element of arr2 and every element of arr3. On face value that sounds like the right approach, but if the arrays are large then this will be a very slow function. For instance, if each array has 1,000 entries (n) then your function will take each element and compare against everything in arr2 and arr3, resulting in thousands of operations per element (O(n^2) time complexity).
One alternative is to create a Map and populate it as you iterate through each array to track the number of times an entry has been seen. Finding values now has O(1) runtime. There is still the cost of iterating through each array which yields O(n) but because of the fast lookup this becomes n * 1 operations or O(n) time complexity.
/**
 * Returns the elements common to ALL of the provided arrays.
 *
 * Each array contributes at most one "sighting" per value (it is
 * de-duplicated through a Set first); an element whose sighting count
 * equals the number of arrays is common to all of them. Without the
 * de-duplication, a value repeated within a single array could reach
 * the required count without appearing in every array
 * (e.g. ([2, 2, 2], [1], [3]) would wrongly yield [2]).
 *
 * @param {...Array} arrays - any number of arrays to intersect
 * @returns {Array} common elements, in first-seen order
 */
function commonElementsOfArray(...arrays) {
  const size = arrays.length;
  const map = new Map();
  arrays.forEach(arr => {
    // de-duplicate per array so repeats cannot inflate the tally
    new Set(arr).forEach(entry => {
      map.set(entry, (map.get(entry) ?? 0) + 1);
    });
  });
  const commonElements = [];
  map.forEach((count, key) => {
    // seen once in every array => common to all of them
    if (count === size) {
      commonElements.push(key);
    }
  });
  return commonElements;
}
console.log(commonElementsOfArray([1, 2, 3], [1, 2, 4], [2, 4, 5]));
How about refactoring the OP's code into a generic intersection functionality which implements a simplified intersection function of two arrays and generates the overall intersection of more than 2 arrays by a reduce task which processes the generic function's (array type) arguments?
Thus the intersection of two arrays would be based on the OP's code filter approach but using includes instead of indexOf. Something like ...
// Intersection of two arrays: every item of `a` that also occurs in `b`.
// Keeps duplicates from `a` and preserves `a`'s ordering.
function getIntersectionOfTwo(a, b) {
  const shared = [];
  for (const item of a) {
    if (b.includes(item)) {
      shared.push(item);
    }
  }
  return shared;
}
A generic getIntersection then just needs to assure the type safety of its arguments and the correct return value for too few arguments, as well as the intersection result for the minimum number of correctly provided arguments ...
// Generic n-ary intersection built on a pairwise reduce step.
// Non-array arguments are discarded; with no array arguments the result
// is undefined, and with a single array that array itself is returned.
function getIntersection(...listOfArrays) {
  // pairwise step: keep the entries of `a` that `b` also contains
  const getIntersectionOfTwo = (a, b) => a.filter((n) => b.includes(n));
  const arrays = listOfArrays.filter(Array.isArray);
  // at least one array must survive the type filter for any work to happen
  const hasEnoughArrays = arrays[1] ?? arrays[0];
  return hasEnoughArrays && arrays.reduce(getIntersectionOfTwo);
}
// Demo calls: the no-argument and all-non-array cases print undefined,
// a single surviving array is echoed as-is, and the remaining calls
// print the pairwise-reduced intersections.
console.log(
'getIntersection() ...',
getIntersection()
);
console.log(
'getIntersection(9, "foo", 0) ...',
getIntersection(9, "foo", 0)
);
console.log(
'getIntersection([2, 7, 0], "bar") ...',
getIntersection([2, 7, 0], "bar")
);
console.log(
'getIntersection([2, 7, 0, 4], [6, 2, 7, 3]) ...',
getIntersection([2, 7, 0, 4], [6, 2, 7, 3])
);
console.log(
'getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9, 1, 2]) ...',
getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9, 1, 2])
);
console.log(
'getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9]) ...',
getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9])
);
/* Stack-snippet console styling — not part of the answer's logic: */
.as-console-wrapper { min-height: 100%!important; top: 0; }
The above example's implementation of getIntersectionOfTwo is of course kept simple for a better understanding of the overall task's refactoring process.
Within a next refactoring step this function could be improved too in order to handle/process vast array data more efficiently. Thus one would use within the filter callback a Map based lookup instead of searching within each filter iteration whether b.includes(n).
// Generic n-ary intersection; non-array arguments are discarded.
// With no arrays the result is undefined; with one, that array itself.
function getIntersection(...listOfArrays) {
  // Pairwise step tuned for large inputs: build an O(1) lookup from the
  // shorter array, then filter the longer one against it (instead of an
  // O(n) `includes` scan per filtered item).
  function getIntersectionOfTwo(intersection, iterableItem) {
    // on equal lengths the accumulated intersection stays the base,
    // matching the stable-sort behavior of the original version
    const accumulatedIsShorter = intersection.length <= iterableItem.length;
    const comparisonBase = accumulatedIsShorter ? intersection : iterableItem;
    const comparisonList = accumulatedIsShorter ? iterableItem : intersection;
    // Map-based lookup table built from the shorter array
    const itemLookup = new Map();
    for (const item of comparisonBase) {
      itemLookup.set(item, true);
    }
    // the intersection is whatever of the longer array the lookup knows
    return comparisonList.filter((item) => itemLookup.has(item));
  }
  // assure only array type arguments
  const arrays = listOfArrays.filter(Array.isArray);
  return (arrays[1] ?? arrays[0]) && arrays.reduce(getIntersectionOfTwo);
}
// Demo: the same scenarios as the first version — output is identical,
// only the pairwise lookup strategy changed.
console.log(
'getIntersection() ...',
getIntersection(),
);
console.log(
'getIntersection(9, "foo", 0) ...',
getIntersection(9, "foo", 0),
);
console.log(
'getIntersection([2, 7, 0], "bar") ...',
getIntersection([2, 7, 0], "bar"),
);
console.log(
'getIntersection([2, 7, 0, 4], [6, 2, 7, 3]) ...',
getIntersection([2, 7, 0, 4], [6, 2, 7, 3]),
);
console.log(
'getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9, 1, 2]) ...',
getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9, 1, 2]),
);
console.log(
'getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9]) ...',
getIntersection([2, 7, 0, 4], [6, 2, 7, 3], [9]),
);
/* Stack-snippet console styling — not part of the answer's logic: */
.as-console-wrapper { min-height: 100%!important; top: 0; }

what is the difference between filtering on accumulator/current in reduce method

When chaining the Array.prototype.reduce with Array.prototype.filter what is the difference (conceptually and under the hood) when filtering on the current value instead of the accumulator value?
// function union creates a union of all values that appear among arrays
// example A
// Example A: the ACCUMULATOR is filtered against the current array and
// the survivors are appended AFTER curr — so each step, curr's order leads.
const union = (...arrays) => {
return arrays.reduce((acc, curr) => {
// accumulated values that curr does not already contain
const newElements = acc.filter(el => !curr.includes(el));
return curr.concat(newElements);
});
};
console.log(union([1, 10, 15, 20], [5, 88, 1, 7], [1, 10, 15, 5]));
// output (7) [1, 10, 15, 5, 88, 7, 20]
// example B
// Example B: CURR is filtered against the accumulator and the genuinely
// new values are appended AFTER acc — first-seen order is preserved.
const union = (...arrays) => {
return arrays.reduce((acc, curr) => {
// values of curr that have not been accumulated yet
const newElements = curr.filter(el => !acc.includes(el));
return acc.concat(newElements);
});
};
console.log(union([1, 10, 15, 20], [5, 88, 1, 7], [1, 10, 15, 5]));
//output (7) [1, 10, 15, 20, 5, 88, 7]
The difference in output would suggest that the order in which the arrays are being evaluated is 'opposite'. As far as I can tell when using arr.filter the values are evaluated from end to beginning with the opposite being true for curr.filter . Besides from that are they any other consequences dependent on if you filter through the accumulator or current value? Could this throw an error in a different context?
The issue isn't about the use of filter inside of reduce, so much as it is about the order in which you're using acc and curr.
When I'm running into seemingly strange inconsistencies like this, the first step I usually take is to create a test case and run through it manually. Here, you've already created a test case for us...
// The asker's three input arrays, reused below to trace both versions.
const testData = [
[1, 10, 15, 20],
[5, 88, 1, 7],
[1, 10, 15, 5],
]
Now we need to run through each version of the function and see what the output is at each stage.
One thing to note (which I didn't know until this evening!) is that if reduce doesn't receive an initialValue as the second argument, it will use the first item in the array as the initialValue. This means we only need to consider 2 executions of each function instead of 3. 😅
Example A
const union = (...arrays) => {
return arrays.reduce((acc, curr) => {
// accumulator entries missing from curr ...
const newElements = acc.filter(el => !curr.includes(el))
// ... appended after curr, so curr's ordering leads each step
return curr.concat(newElements)
})
}
In the first version of the function, the short description of what's happening is that we're looping over the accumulator (acc) and removing all items that already exist in the array that we're currently comparing (curr). Then we add that list to the end of curr.
The fact that we’re pushing newElements onto the end of curr is important. This is why the order is different for the 2 different versions.
First execution
const acc = [1, 10, 15, 20]
const curr = [5, 88, 1, 7]
const newElements = [10, 15, 20] // these elements exist in acc but not in curr
curr.concat(newElements) === [5, 88, 1, 7, 10, 15, 20]
Second execution
const acc = [5, 88, 1, 7, 10, 15, 20] // carried over from first execution
const curr = [1, 10, 15, 5]
const newElements = [88, 7, 20] // these elements exist in acc but not in curr
curr.concat(newElements) === [1, 10, 15, 5, 88, 7, 20]
Example B
const union = (...arrays) => {
return arrays.reduce((acc, curr) => {
// curr entries not yet present in the accumulator ...
const newElements = curr.filter(el => !acc.includes(el))
// ... appended after acc, preserving first-seen order
return acc.concat(newElements)
})
}
In the second version of the function, the short description of what's happening is that we're looping over the array that we’re currently comparing (curr) and removing all items that already exist in the accumulator (acc). Then we add that list to the end of acc.
You can already see at the end of the first execution below that the results are turning out in a much different order.
First execution
const acc = [1, 10, 15, 20]
const curr = [5, 88, 1, 7]
const newElements = [5, 88, 7] // these elements exist in curr but not in acc
acc.concat(newElements) === [1, 10, 15, 20, 5, 88, 7]
Second execution
const acc = [1, 10, 15, 20, 5, 88, 7] // carried over from first execution
const curr = [1, 10, 15, 5]
const newElements = [] // these elements exist in acc but not in curr
acc.concat(newElements) === [1, 10, 15, 20, 5, 88, 7]
Conclusion
The short answer to your question is that the difference between filtering on the accumulator and the current array is that the results are going to be different so long as the inputs are different. 🤷🏻‍♂️
Besides from that are they any other consequences dependent on if you filter through the accumulator or current value? Could this throw an error in a different context?
Fortunately, there’s not any concern about errors. It is notable, however, that the second version of your function is ~10% faster than the first version. I’d guess that this is purely circumstantial. A different test data set may produce different performance results.
In example 1, by the time you concat the two lists, you make sure that the accumulator won't contain any element from current.
In example 2, on the other hand, you make sure that current won't contain any element that is already present in accumulator.
The difference is on the final order in which the elements will appear
I think both examples are inefficient since they both involve O(n^2) time complexity, because the iterations are nested. The second one, as stated by others, might be a little more performant since the nested iterations would be made on a chunk that is presumably shorter than the accumulator.
I'd rather write more or less like this:
// Union of all values across the given arrays, in first-seen order:
// flatten one level, then let a Set drop the duplicates.
const union = (...tuples) => [...new Set(tuples.flat())];
console.log(
  union([1, 10, 15, 20], [5, 88, 1, 7], [1, 10, 15, 5]),
);

Why doesn't the JS interpreter recognize this call to Array.prototype.filter

I am trying to figure out why my call to .prototype.filter is giving me a TypeError: curr.filter is not a function.
// The question's code: without a rest parameter, `arrays` is bound to the
// FIRST argument only — the call below then reduces over that array's
// NUMBERS, so `curr` is a number and `curr.filter` throws the reported
// "curr.filter is not a function" TypeError.
const intersection = (arrays) => {
return arrays.reduce((acc, curr) => {
return curr.filter(el => acc.includes(el));
});
};
console.log(intersection([5, 10, 15, 20], [15, 88, 1, 5, 7], [1, 10, 15, 5, 20]));
To my understanding I am declaring a function const intersection which takes in arrays and then returns the result of calling arrays.reduce which 'reduces' the results of filtering the current value and creating a new array that includes all instances of accumulator acc including the current value curr.
Since filter creates a new array on runtime I figured this would work as is yet it does not. What am I not seeing?
Use array rest parameter to get all parameter as an array. In the given code you are taking just first argument and ignoring the rest.
try this.
// Rest parameter gathers EVERY argument into `arrays`, so all three
// lists take part in the reduction (not just the first one).
const intersection = (...arrays) => {
  console.log("arrays: ", arrays);
  // keep only the entries of the current array already in the accumulator
  const keepShared = (acc, curr) => curr.filter((el) => acc.includes(el));
  return arrays.reduce(keepShared);
};
console.log("Result:" , intersection([5, 10, 15, 20], [15, 88, 1, 5, 7], [1, 10, 15, 5, 20]));

LoDash: How to remove null values and index of null values from two arrays with _.flow?

Problem
I have an array of arrays:
const multiple = [[1, 2, null, 7], [6, 8, 9, 1]]
Now I'd like to remove all null values and the corresponding element from the other array which results in:
[[1, 2, 7], [6, 8, 1]]
I'm able to do that but I'm looking for a solution with _.flow.
Approach
This is my approach that doesn't return an array of arrays and also doesn't remove the element from the other array.
_.flow([
xorWith(_.isNull)
])([1, 2, null, 7], [6, 8, 9, 1])
1. Update
My input will always be [[ // Elements], [ // Elements]]. It wasn't clear at my approach.
const multiple = [[1, 2, null, 7], [6, 8, 9, 1]];
// true when no element of `arr` is null
const withoutNulls = (arr) => _.every(arr, _.negate(_.isNull));
const result = _.flow(
_.zip, // pair the arrays index-wise: [[1,6],[2,8],[null,9],[7,1]]
(tuples) => _.filter(tuples, withoutNulls), // drop pairs containing a null
_.unzip // split the surviving pairs back into two arrays
)(...multiple)
console.log(result);
<script src="https://unpkg.com/lodash@4.17.5/lodash.js"></script>
Does that suit your needs, or you want it exactly with your functions set?
I can't get the basic idea of yours, because the flow function accepts functions and call them in chain, but it is not as in your case.
_.flow((...args) => _.map(args, arr => _.filter(arr, v => !_.isNull(v))), console.log)([1, 2, null, 7], [6, 8, 9, 1])
<script src="https://cdn.jsdelivr.net/npm/lodash@4.17.5/lodash.min.js"></script>

Using indexOf to get index of array in a collection of arrays (Javascript)

What's the best way to find the index of an array in a collection of arrays? Why doesn't indexOf() return the correct index? I'm guessing it's something to do with object equality?
I've seen other solutions loop through the collection and return the index reached when the equality check is met, but I'm still curious as to why indexOf() doesn't do the same thing. Additionally I can't use ES6's find / findIndex due to IE 11 support (as always). I've included my test code below. Many thanks.
var numbers = [ [1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12] ];
// indexOf compares object references with strict equality, so a freshly
// written array literal never matches the arrays stored in `numbers` —
// every call below actually returns -1 (this is the question's bug).
function getIndex (numbersToTest) {
return numbers.indexOf(numbersToTest);
};
function test() {
console.log( getIndex( [1, 2, 3, 4, 5, 6] ) ); // Expect 0
console.log( getIndex( [7, 8, 9, 10, 11, 12] ) ); // Expect 1
console.log( getIndex( [2, 1, 3, 4, 5, 6] ) ); // Expect -1 (not in same order)
}
test();
Object references (including array references) are compared as reference values; one object reference is equal to another only if both references are to the exact same object. Comparison is not performed based on the content of the arrays, in your case. Even though those arrays you pass in have the same values, they're distinct arrays, and so are not equal to any of the arrays in the original list.
Instead, you need to use something like Array#find (to find the entry) or Array#findIndex (to find the entry's index), passing in a callback that compares the array in numbers with numbersToTest to see if they're equivalent arrays. This question's answers talk about various ways to efficiently compare arrays for equivalence.
For example:
var numbers = [ [1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12] ];
// Index of the first entry of `numbers` whose contents match
// numbersToTest element-by-element (-1 when none does). ES5-style
// callbacks are kept deliberately, matching the IE11-era answer.
function getIndex (numbersToTest) {
  function sameNumberArray(entry) {
    // a cheap length check short-circuits the element comparison
    if (entry.length !== numbersToTest.length) {
      return false;
    }
    return entry.every(function (number, index) {
      return numbersToTest[index] === number;
    });
  }
  return numbers.findIndex(sameNumberArray);
};
function test() {
  console.log( getIndex( [1, 2, 3, 4, 5, 6] ) ); // Expect 0
  console.log( getIndex( [7, 8, 9, 10, 11, 12] ) ); // Expect 1
  console.log( getIndex( [2, 1, 3, 4, 5, 6] ) ); // Expect -1 (not in same order)
}
test();
Note that both Array#find and Array#findIndex are newish (ES2015, aka "ES6"), but can be polyfilled for older JavaScript engines.

Categories