I have a generator called generateNumbers in JavaScript and another generator generateLargerNumbers which takes each value generated by generateNumbers and applies a function addOne to it, as such:
// Add one to a number.
function addOne(value) {
  return value + 1;
}

// Yields the base sequence 1, 2, 3.
function* generateNumbers() {
  for (const n of [1, 2, 3]) {
    yield n;
  }
}

// Yields each number from generateNumbers incremented by one.
function* generateLargerNumbers() {
  for (const n of generateNumbers()) {
    yield addOne(n);
  }
}
Is there any terser way to do this without building an array out of the generated values? I'm thinking something like:
function* generateLargerNumbers() {
// Desired terse form: generator objects have no built-in .map (without the
// Iterator-helpers proposal), so calling .map here throws at runtime.
yield* generateNumbers().map(addOne) // obviously doesn't work
}
higher-order generators
You can choose to manipulate the generator functions themselves
// Higher-order combinators that operate on generator *functions*:
// each takes a generator function and returns a new, lazy one.
const Generator = {
  // map: yield f(x) for every x the wrapped generator produces.
  map: (f, g) =>
    function* (...args) {
      for (const item of g(...args)) {
        yield f(item);
      }
    },
  // filter: yield only the values that satisfy the predicate.
  filter: (f, g) =>
    function* (...args) {
      for (const item of g(...args)) {
        if (f(item)) {
          yield item;
        }
      }
    },
};
// some functions !
// some functions !
const square = (x) => x * x;

// True when the lowest bit is clear.
const isEven = (x) => (x & 1) === 0;

// a generator !
// Yields the integers from x (inclusive) up to y (exclusive).
const range = function* (x = 0, y = 1) {
  for (let i = x; i < y; i += 1) {
    yield i;
  }
};
// higher order generator !
// Compose the generator *functions* first, then invoke the result with the
// range bounds (0, 10): logs the squares of the even numbers in [0, 10).
for (const x of Generator.map (square, Generator.filter (isEven, range)) (0,10))
console.log('evens squared', x)
higher-order iterators
Or you can choose to manipulate iterators
// Combinators that operate directly on *iterables*: each immediately
// invokes an inner generator and hands back the resulting lazy iterator.
const Iterator = {
  // Lazily transform each element of an existing iterable.
  map: (f, it) =>
    (function* () {
      for (const elem of it) {
        yield f(elem);
      }
    })(),
  // Lazily keep only the elements satisfying the predicate.
  filter: (f, it) =>
    (function* () {
      for (const elem of it) {
        if (f(elem)) {
          yield elem;
        }
      }
    })(),
};
// some functions !
// some functions !
const square = (n) => n * n;

// Even test via the low bit.
const isEven = (n) => (n & 1) === 0;

// a generator !
// Produces x, x+1, …, y-1 lazily.
const range = function* (x = 0, y = 1) {
  let i = x;
  while (i < y) {
    yield i;
    i += 1;
  }
};
// higher-order iterators !
// Here the *iterator* produced by range(0, 10) is wrapped directly, so the
// bounds are supplied up front: logs the squares of the evens in [0, 10).
for (const x of Iterator.map (square, Iterator.filter (isEven, range (0, 10))))
console.log('evens squared', x)
recommendation
In most cases, I think it's more practical to manipulate the iterator because of its well-defined (albeit kludgy) interface. It allows you to do something like
Iterator.map (square, Iterator.filter (isEven, [10,11,12,13]))
Whereas the other approach is
Generator.map (square, Generator.filter (isEven, Array.from)) ([10,11,12,13])
Both have a use-case, but I find the former much nicer than the latter
persistent iterators
JavaScript's stateful iterators annoy me – each subsequent call to .next alters the internal state irreversibly.
But there's nothing stopping you from making your own iterators, though, and then creating an adapter to plug into JavaScript's stack-safe generator mechanism.
If this interests you, you might like some of the other accompanying examples found here: Loop to a filesystem structure in my object to get all the files
The gain isn't only that we can reuse a persistent iterator; it's also that with this implementation, subsequent reads are even faster than the first because of memoisation – score: JavaScript 0, Persistent Iterators 2
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// Memoise a thunk: f runs at most once, later calls return the cached
// result. FIX: the original tested `memo === undefined` on every call, so
// a thunk that legitimately returns undefined was re-evaluated each time;
// a separate done-flag avoids that while still allowing a seeded value.
const Memo = (f, memo) => {
  let done = memo !== undefined;
  return () => {
    if (!done) {
      memo = f();
      done = true;
    }
    return memo;
  };
};
// -------------------------------------------------------------------
// A lazy cons cell: the value now, plus a memoised thunk for the rest.
const Yield = (value, next = Return) =>
  ({ done: false, value, next: Memo (next) })
// Terminal cell carrying the iterator's final return value.
const Return = value =>
  ({ done: true, value })
// -------------------------------------------------------------------
// Wrap a persistent-iterator node, applying f to each value; the tail is
// rebuilt lazily (and memoised via Yield/Memo) so mapping stays incremental.
const MappedIterator = (f, it = Return ()) =>
it.done
? Return ()
: Yield (f (it.value), () => MappedIterator (f, it.next ()))
// Keep only values passing the predicate f; non-matching values are
// consumed eagerly until the next match (or the end) is found.
const FilteredIterator = (f, it = Return ()) =>
it.done
? Return ()
: f (it.value)
? Yield (it.value, () => FilteredIterator (f, it.next ()))
: FilteredIterator (f, it.next ())
// -------------------------------------------------------------------
// Adapter: walk a chain of persistent Yield/Return cells and expose them
// through JavaScript's native generator protocol.
const Generator = function* (it = Return ()) {
  while (it.done === false) {
    yield it.value;
    it = it.next();
  }
  return it.value;
};
// -------------------------------------------------------------------
// Persistent (immutable) range: each node remembers how to build its tail,
// so the structure can be re-read from any node without mutation.
const Range = (x = 0, y = 1) =>
x < y
? Yield (x, () => Range (x + 1, y))
: Return ()
const square = x =>
x * x
// Bit test for evenness; note the explicit parentheses around (x & 1).
const isEven = x =>
(x & 1) === 0
// -------------------------------------------------------------------
// Adapt the persistent iterator to JS's native protocol and drive it:
// logs the squares of the even numbers in [0, 10).
for (const x of Generator (MappedIterator (square, FilteredIterator (isEven, Range (0,10)))))
console.log ('evens squared', x)
There isn't a built-in way to map over Generator objects, but you could roll your own function:
// The common prototype of all generator *functions*; generator objects
// inherit from its .prototype, so a method defined there is available on
// every generator object.
const Generator = Object.getPrototypeOf(function* () {});
// NOTE(review): extending a built-in prototype is globally visible —
// fine for a demo, risky in shared code.
Generator.prototype.map = function* (mapper, thisArg) {
for (const val of this) {
yield mapper.call(thisArg, val);
}
};
Now you can do:
// With the prototype extension above in place, mapping is a one-liner.
function generateLargerNumbers() {
return generateNumbers().map(addOne);
}
// The prototype shared by all generator functions; its .prototype is what
// generator objects inherit from, so a method added there is visible on
// every generator object.
const Generator = Object.getPrototypeOf(function* () {});

// Lazily apply `mapper` to each value this generator object produces.
Generator.prototype.map = function* (mapper, thisArg) {
  for (const item of this) {
    yield mapper.call(thisArg, item);
  }
};

// Increment helper.
function addOne(value) {
  return value + 1;
}

// Base sequence 1, 2, 3.
function* generateNumbers() {
  yield 1;
  yield 2;
  yield 3;
}

// Thanks to the prototype extension, mapping is a one-liner.
function generateLargerNumbers() {
  return generateNumbers().map(addOne);
}

console.log(...generateLargerNumbers());
How about composing an iterator object, instead of using the nested generators?
// Base sequence 1, 2, 3.
function* generateNumbers() {
  yield 1;
  yield 2;
  yield 3;
}

// Compose an iterator object around generateNumbers, adding one to each
// value. Fixes to the original: removed a stray backtick (syntax error),
// only bump the value while the source is not done (the final result is
// { done: true, value: undefined }, and undefined + 1 is NaN), and made
// the object iterable so it works with for...of and spread.
function generateGreaterNumbers() {
  return {
    gen: generateNumbers(),
    next() {
      const r = this.gen.next();
      if (!r.done) {
        r.value += 1;
      }
      return r;
    },
    [Symbol.iterator]() {
      return this;
    },
  };
}
If you need to actually pass values to your generator then you can't do it with for...of, you have to pass each value through
// Wrap a generator function so every produced value — including the final
// return value — is passed through mapper(value, index), while values sent
// in via next() are forwarded *unmapped* to the inner generator.
const mapGenerator = (generatorFunc, mapper) =>
function*(...args) {
let gen = generatorFunc(...args),
i = 0,
value;
while (true) {
const it = gen.next(value);
if (it.done) return mapper(it.value, i);
// Re-yield the mapped value and capture whatever the caller sends back.
value = yield mapper(it.value, i);
i++;
}
};
// Demo generator: logs each value it receives from next(), returns 4.
function* generator() {
console.log('generator received', yield 1);
console.log('generator received', yield 2);
console.log('generator received', yield 3);
return 4;
}
// Map a generator function's output (yields *and* the return value)
// through mapper(value, index), forwarding sent-in values untouched.
const mapGenerator = (generatorFunc, mapper) =>
  function* (...args) {
    const inner = generatorFunc(...args);
    let index = 0;
    let feedback;
    while (true) {
      const step = inner.next(feedback);
      if (step.done) {
        return mapper(step.value, index);
      }
      feedback = yield mapper(step.value, index);
      index += 1;
    }
  };
const otherGenerator = mapGenerator(generator, x => x + 1)
const it = otherGenerator();
// Each value sent via next() is forwarded unmapped into the wrapped
// generator (logged as 'generator received a/b/c'); each value coming out
// has been mapped (+1), so this logs 2 3 4 5.
console.log(
it.next().value,
it.next('a').value,
it.next('b').value,
it.next('c').value
);
Related
I am trying to re-implement redux compose function, instead of using reduce i use a for loop, here is my code:
// The question's broken compose: overflows the stack when called.
function compose(...funcs) {
if (funcs.length === 0) {
return (arg) => arg;
}
if (funcs.length === 1) {
return funcs[0];
}
let result;
for (let i = funcs.length - 1; i > -1; i--) {
// BUG: these arrows close over the *variable* `result`, not its current
// value. After the loop finishes, `result` holds the outermost closure,
// so `result(...args)` inside it calls itself -> infinite recursion.
result = result
? (...args) => funcs[i](result(...args))
: (...args) => funcs[i](...args);
}
return result;
}
// test
function fn1(x) {
return x + 1;
}
function fn2(x) {
return x * 10;
}
function fn3(x) {
return x - 1;
}
// Expected 109, but actually throws RangeError — see the self-referential
// closure built in compose's loop above.
console.log(compose(fn3, fn2, fn1)(10)); // 109
It is expected to log 109 since (10 + 1) * 10 - 1 is 109, however it gives me such error:
RangeError: Maximum call stack size
Looks like I am doing some recursion, but all I did was use a for loop, so I'm not sure where the problem in my code is.
I think the issue is like the below example:
a = () => 2;
// Reassigning `a` makes the new closure call the *current* binding of `a`
// — i.e. itself — rather than the earlier function.
a = () => 3 * a();
console.log(a);
// this prints () => 3 * a() in console
// so when you call a(), it will call 3 * a(), which will again call 3 * a() and so on
// leading to infinite recursion
My solution is slightly different using bind function based on this reference link: https://stackoverflow.com/a/6772648/4688321.
I think bind creates a new copy of the function result and binds it to a new object. Not using bind leads to recursion because then the code becomes like the above example, result calls result.
// Right-to-left composition that avoids the self-referential-closure bug
// by snapshotting the accumulated function into a fresh binding each turn.
function compose(...funcs) {
  if (funcs.length === 0) {
    return (arg) => arg;
  }
  if (funcs.length === 1) {
    return funcs[0];
  }
  let result;
  for (let i = funcs.length - 1; i >= 0; i--) {
    if (i === funcs.length - 1) {
      result = (...args) => funcs[i](...args);
    } else {
      // Capture the current accumulated function; the new closure refers
      // to this snapshot, never to the still-changing `result` variable.
      const inner = result.bind({});
      result = (...args) => funcs[i](inner(...args));
    }
  }
  return result;
}
// test
// Exercising the bind-based compose: logs fn1, fn2, fn3 in call order,
// then the composed result.
function fn1(x) {
console.log("fn1");
return x + 1;
}
function fn2(x) {
console.log("fn2");
return x * 10;
}
function fn3(x) {
console.log("fn3");
return x - 1;
}
//console.log(compose(fn3, fn2, fn1));
let ret = compose(fn3, fn2, fn1);
console.log(ret(10)); // 109
Rather than trying to combine functions at the time of compose, it seems much easier to combine them at the time the resulting function is called:
// Compose lazily: instead of pre-building nested closures, walk the
// function list right-to-left each time the composed function is called.
function compose(...funcs) {
  if (funcs.length === 0) {
    return (arg) => arg;
  }
  return function (...args) {
    const last = funcs.length - 1;
    let acc = funcs[last](...args);
    for (let i = last - 1; i >= 0; i--) {
      acc = funcs[i](acc);
    }
    return acc;
  };
}
// test: (10 + 1) * 10 - 1 === 109
function fn1(x) {
return x + 1;
}
function fn2(x) {
return x * 10;
}
function fn3(x) {
return x - 1;
}
console.log(compose(fn3, fn2, fn1)(10)); // 109
However, again, reduce make for a much cleaner implementation:
// Right-to-left composition of unary functions via a fold.
const compose = (...fns) => (arg) =>
  fns.reduceRight((acc, fn) => fn(acc), arg);
or if you want to allow the rightmost function to receive multiple variables, then
// Variadic-seed variant: the rightmost function may take several
// arguments; intermediate results are threaded through a 1-tuple.
const compose = (...fns) => (...args) =>
  fns.reduceRight((boxed, fn) => [fn(...boxed)], args)[0];
You can regard trampolines as compiler optimizations reified in the program. So what is stopping us from adapting more general optimization techniques in exactly the same manner.
Here is a sketch of tail recursion modulo cons:
// Trampoline for "tail recursion modulo cons": f returns an array whose
// last element may be a `recur` marker; the marker is popped and replaced
// by the next step's output, so the list grows without stack growth.
const loop = f => {
let step = f();
while (step && step[step.length - 1] && step[step.length - 1].type === recur) {
let step_ = step.pop();
step.push(...f(...step_.args));
}
return step;
};
// Tag a set of arguments as "call f again with these"; the function
// `recur` itself doubles as the unique type marker.
const recur = (...args) =>
({type: recur, args});
// Push x onto xs and return xs (mutating helper, comma-operator style).
const push = (xs, x) => (xs.push(x), xs);
// Stack-safe map built on the modulo-cons trampoline.
const map = f => xs =>
loop((i = 0) =>
i === xs.length
? []
: push([f(xs[i])], recur(i + 1)));
const xs =
map(x => x * 2) (Array(1e6).fill(0).map((x, i) => i))
.slice(0,5);
console.log(xs); // [0, 2, 4, 6, 8]
This kind of optimization depends on the associativity property of an expression. Multiplication is associative too and hence there is tail recursion modulo multiplication. However, I have to cheat to implement it in Javascript:
// Trampoline for "tail recursion modulo multiplication": each step yields
// [partialFn, next]; the partial applications are queued in `acc` and
// folded over the base-case value once the recursion bottoms out.
const loop = f => {
let step = f();
const acc = [];
while (step && step[1] && step[1].type === recur) {
acc.push(step[0]);
step = f(...step[1].args);
}
return acc.reduce((acc, f) => f(acc), step);
};
const recur = (...args) =>
({type: recur, args});
// Multiplication split into a deferred unary function plus the next step —
// the "cheat" the author complains about below.
const mul = x => step => [y => x * y, step];
const pow = (x_, n_) =>
loop((x = x_, n = n_) =>
n === 0 ? 1
: n === 1 ? x
: mul(x) (recur(x, n - 1)));
console.log(
pow(2, 1e6)); // Infinity, no stack overflow
As you can see I cannot use a regular mul, which isn't particularly satisfying. Is this connected with JavaScript being a strict (eagerly evaluated) language? Is there a better way to achieve tail recursion modulo multiplication in JS without having to introduce awkward binary operators?
Instead of using loop/recur (which I consider an ugly and unnecessary hack), consider using folds:
// Fold over the naturals: apply `succ` to `zero` exactly n times.
// Implemented iteratively, so it is stack-safe (a built-in trampoline).
const recNat = (zero, succ) => (n) => {
  let acc = zero;
  for (let k = n; k > 0; k -= 1) {
    acc = succ(acc);
  }
  return acc;
};

const mul = (x) => (y) => x * y;

// pow(x)(n): multiply 1 by x, n times, via the fold.
const pow = (x) => recNat(1, mul(x));

console.log([0,1,2,3,4,5,6,1e6].map(pow(2))); // [1,2,4,8,16,32,64,Infinity]
Almost every recursive function can be defined using folds (a.k.a. structural recursion, a.k.a. induction). For example, even the Ackermann function can be defined using folds:
const recNat = (zero, succ) => n => {
let result = zero;
while (n > 0) {
result = succ(result);
n = n - 1;
}
return result;
};
const add = x => y => x + y;
// Ackermann via two nested folds: the outer fold builds ack(m) from
// ack(m-1); the inner fold iterates ack(m-1) a total of n+1 times.
const ack = recNat(add(1),
ackPredM => recNat(ackPredM(1),
ackMPredN => ackPredM(ackMPredN)));
// WARNING: ack(4)(1) performs ~65533 deeply nested folds; per the text
// below this takes on the order of tens of seconds.
console.time("ack(4)(1)");
console.log(ack(4)(1)); // 65533
console.timeEnd("ack(4)(1)");
The above code snippet takes about 18 seconds to compute the answer on my laptop.
Now, you might ask why I implemented recNat using iteration instead of natural recursion:
// Naive structurally-recursive fold over the naturals — same contract as
// the iterative version, but consumes stack proportional to n.
const recNat = (zero, succ) => {
  function recNatZS(n) {
    return n <= 0 ? zero : succ(recNatZS(n - 1));
  }
  return recNatZS;
};
I used iteration for the same reason you used iteration to implement loop. Trampolining. By implementing a different trampoline for every data type you're going to fold, you can write functional code without having to worry about stack overflows.
Bottom line: Use folds instead of explicit recursion. They are a lot more powerful than you think.
Here is a coroutine that avoids nested patterns like (chain(m) (chain(...)) for monadic computations:
// Church-encoded Maybe: a value is a function taking `none` and `some`
// continuations and invoking the applicable one.
const some = x => none => some => some(x);
const none = none => some => none;
// Eliminator: option(defaultValue)(onValue)(tx).
const option = none => some => tx => tx(none) (some);
const id = x => x;
const of = some;
// Monadic bind for the encoded Maybe: unwraps m, then runs fm on the value.
const chain = fm => m => none => some => m(none) (x => fm(x) (none) (some));
// do-notation via generators: each yielded monadic value is chained, and
// the resumed generator receives the unwrapped result.
const doM = (chain, of) => gf => {
const it = gf();
const loop = ({done, value}) =>
done
? of(value)
: chain(x => loop(it.next(x))) (value);
return loop(it.next());
};
const tx = some(4),
ty = some(5),
tz = none;
// tz is none, so the whole computation short-circuits to none.
const data = doM(chain, of) (function*() {
const x = yield tx,
y = yield ty,
z = yield tz;
return x + y + z;
});
console.log(
option(0) (id) (data)); // 0
But I'm not able to implement an equivalent coroutine for applicative computations:
const some = x => none => some => some(x);
const none = none => some => none;
const option = none => some => tx => tx(none) (some);
const id = x => x;
const of = some;
// Functor map for the encoded Maybe.
const map = f => t => none => some => t(none) (x => some(f(x)));
// Applicative apply: unwrap the function, then the value.
const ap = tf => t => none => some => tf(none) (f => t(none) (x => some(f(x))));
// BROKEN BY DESIGN (this is the question): ap(of(g)) is equivalent to
// map(g) by the applicative laws, so unlike chain it never unwraps the
// value `loop` returns — the result gains one Maybe layer per yield.
const doA = (ap, of) => gf => {
const it = gf();
const loop = ({done, value}, initial) =>
done
? value
: ap(of(x => loop(it.next(x)))) (value);
return loop(it.next());
};
const tx = some(4),
ty = some(5),
tz = none;
const data = doA(ap, of) (function*() {
const x = yield tx,
y = yield ty,
z = yield tz;
return x + y + z;
});
console.log(
option(0) (id) (data)); // none => some => ...
This should work, but it doesn't. Where does the additional functorial wrapping come from? I guess I am a bit lost in recursion here.
By the way I know that this only works for deterministic functors/monads.
I'm not able to implement an equivalent coroutine for applicative computations
Yes, because generator functions are monadic, not just applicative. The operand of a yield expression can depend on the result of the previous yield expression - that's characteristic for a monad.
Where does the additional functorial wrapping come from? I guess I am a bit lost here.
You are doing ap(of(…))(…) - this is equivalent to map(…)(…) according to the Applicative laws. Compared to the chain call in the first snippet, this does not do any unwrapping of the result, so you get a nested maybe type (which in your implementation, is encoded as a function).
So this is a readable way (the code doesn't matter, what matters is the style):
arr.map().filter() // looping 2 times
And loops are considered as a faster way:
for(/* whatever */) {
// looping once, and doing all we need in the same loop
}
So my question is: is there a way, maybe from the functional programming world, to combine the readability of the former with the performance of the latter?
P.S. There is a trend to downvote such questions. If you want to, please write the reason as well.
Of course there is.
1st alternative: Transducer
// Transducer: decorate a reducer so each value is mapped first.
const mapReduce = map => reduce => (acc, x) =>
  reduce(acc, map(x));

// Transducer: decorate a reducer so only matching values are forwarded.
const filterReduce = filter => reduce => (acc, x) =>
  filter(x)
    ? reduce(acc, x)
    : acc;

// Compose the given transducers onto `concat` and run one single pass.
const transduce = (...ts) => xs =>
  xs.reduce(ts.reduce(comp, id) (concat), []);

const comp = (f, g) =>
  x => f(g(x));

const id = x => x;

const concat = (xs, ys) =>
  xs.concat(ys);

const sqr = n => n * n;

// FIX: the original `n & 1 === 1` parses as `n & (1 === 1)` because ===
// binds tighter than &; it only worked by accident (true coerces to 1).
// Parenthesizing expresses the intended low-bit test and returns a boolean.
const isOdd = n => (n & 1) === 1;

const log = console.log;

// the upper code is usually library code
// so you don't have to deal with its complexity but only with its API
const tx = filterReduce(isOdd),
      ty = mapReduce(sqr);

const r = transduce(tx, ty) ([1,2,3,4,5]); // filter/map in same iteration
log(r);
2nd alternative: Bare recursion with a tail call optimization effect
// Trampoline: keep re-invoking f while it returns a tailRec marker,
// giving tail-call-like behaviour without stack growth.
const loop = f => {
  let acc = f();
  while (acc && acc.type === tailRec)
    acc = f(...acc.args);
  return acc;
};

// Tag a set of arguments as "call f again with these"; tailRec itself
// serves as the unique type marker.
const tailRec = (...args) =>
  ({type: tailRec, args});

const comp = (f, g) => x =>
  f(g(x));

const sqr = n => n * n;

// FIX: the original `n & 1 === 1` parses as `n & (1 === 1)` because ===
// binds tighter than &; it only worked by accident (true coerces to 1).
// Parenthesizing expresses the intended low-bit test and returns a boolean.
const isOdd = n => (n & 1) === 1;

const log = console.log;

// the upper code is usually library code
// so you don't have to deal with its complexity but only with its API
const r = loop((xs = [1,2,3,4,5], acc = [], i = 0) => {
  if (i === xs.length)
    return acc;
  else
    return tailRec( // filter/map in same iteration
      xs,
      isOdd(xs[i]) ? acc.concat(sqr(xs[i])) : acc,
      i + 1);
});
log(r);
I'd say transducer are for normal, simpler iterations whereas recursion is suitable for more complex ones, for example when you need short circuiting (prematurely exiting).
Personally I don't think having some for-loops in your code makes it unreadable, but that's opinion based I suppose.
There are many ways to make your code more readable. If you're going to use this functionality often then you could create a method to add to Array.prototype - this way you can write the for-loop once and call it when you need it without having to see what you consider ugly code. Below is an example:
//This method will now be available to all Arrays instances:
// FIX: defined via Object.defineProperty so the helper is non-enumerable —
// a plain assignment makes the method show up in every `for...in` loop
// over every array in the program.
Object.defineProperty(Array.prototype, 'prettyLoop', {
  value: function () {
    console.log('All I do is execute a basic for-loop');
    for (let i = 0; i < this.length; i++) {
      console.log(this[i]);
    }
  },
  writable: true,
  configurable: true,
});

// Call the method from your script
["a", 1, null, "x", 1989, false, {}].prettyLoop();
I'm following an article about Transducers in JavaScript, and in particular I have defined the following functions
// Collect values into an array (the canonical "build a list" reducer).
const reducer = (acc, val) => acc.concat([val]);

// Left fold over any iterable with an explicit seed.
const reduceWith = (reducer, seed, iterable) => {
  let accumulation = seed;
  for (const value of iterable) {
    accumulation = reducer(accumulation, value);
  }
  return accumulation;
}

// Transducer: decorate a reducer so each value is transformed first.
const map =
  fn =>
    reducer =>
      (acc, val) => reducer(acc, fn(val));

const sumOf = (acc, val) => acc + val;

const power =
  (base, exponent) => Math.pow(base, exponent);

const squares = map(x => power(x, 2));
const one2ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];

// FIX: declare with const — the original assigned an undeclared name,
// creating an implicit global (and a ReferenceError in strict/module code).
const res1 = reduceWith(squares(sumOf), 0, one2ten);

const divtwo = map(x => x / 2);
Now I want to define a composition operator
// Binary composition: more(f, g) applies g first, then f.
const more = (f, g) =>
  function (...args) {
    return f(g(...args));
  };
and I see that it is working in the following cases
// Composing the *transducers* first and then applying the reducer works:
res2 = reduceWith(more(squares,divtwo)(sumOf), 0, one2ten);
res3 = reduceWith(more(divtwo,squares)(sumOf), 0, one2ten);
which are equivalent to
// Manual nesting equivalent of the composed forms above:
res2 = reduceWith(squares(divtwo(sumOf)), 0, one2ten);
res3 = reduceWith(divtwo(squares(sumOf)), 0, one2ten);
The whole script is online.
I don't understand why I can't concatenate also the last function (sumOf) with the composition operator (more). Ideally I'd like to write
// Desired (but broken with the 2-ary `more`): sumOf is a reducer, not a
// transducer, so it cannot be composed like the others.
res2 = reduceWith(more(squares,divtwo,sumOf), 0, one2ten);
res3 = reduceWith(more(divtwo,squares,sumOf), 0, one2ten);
but it doesn't work.
Edit
It is clear that my initial attempt was wrong, but even if I define the composition as
// Standard right-to-left composition of unary functions.
const compose = (...fns) => x => fns.reduceRight((v, fn) => fn(v), x);
I still can't replace compose(divtwo,squares)(sumOf) with compose(divtwo,squares,sumOf)
Finally I've found a way to implement the composition that seems to work fine
// Right-fold application: more(f, g, h) === f(g(h)). The final argument
// is passed as a plain value; every earlier one is applied as a function.
const more = (head, ...rest) => {
  switch (rest.length) {
    case 0:
      return head;
    case 1:
      return head(rest[0]);
    default:
      return head(more(...rest));
  }
};
Better solution
Here it is another solution with a reducer and no recursion
// Right-to-left composition; the reduceRight seed comes from x, so calling
// the result with *no* arguments lets the last function act as the seed.
const compose = (...fns) =>
  (...x) =>
    fns.reduceRight((acc, fn) => fn(acc), ...x);

// Fold the argument list right-to-left with the final entry as the seed
// value: more(f, g, h) === f(g(h)).
const more = (...args) => compose(...args)();
usage:
// Now the reducer can be passed in the same argument list as the
// transducers:
res2 = reduceWith(more(squares,divtwo,sumOf), 0, one2ten);
res3 = reduceWith(more(divtwo,squares,sumOf), 0, one2ten);
full script online
Your more operates on only 2 functions. The problem is that in more(squares,divtwo)(sumOf) you execute the returned function, while in more(squares,divtwo, sumOf) you return a function which expects another call (for example const f = more(squares,divtwo, sumOf); f(args)).
In order to have a variable number of composable functions you can define a different more for functions composition. Regular way of composing any number of functions is compose or pipe functions (the difference is arguments order: pipe takes functions left-to-right in execution order, compose - the opposite).
Regular way of defining pipe or compose:
// pipe applies the functions left-to-right; compose right-to-left.
const pipe = (...fns) => (x) => fns.reduce((acc, fn) => fn(acc), x);
const compose = (...fns) => (x) => fns.reduceRight((acc, fn) => fn(acc), x);
You can change x to (...args) to match your more definition.
Now you can execute any number of functions one by one:
// pipe: left-to-right application; compose: right-to-left.
const pipe = (...fns) => (x) => fns.reduce((v, fn) => fn(v), x);
const compose = (...fns) => (x) => fns.reduceRight((v, fn) => fn(v), x);

const inc = (x) => x + 1;
const triple = (x) => x * 3;

// log x, then return x for further processing
const log = (x) => {
  console.log(x);
  return x;
};

// left to right application
const pipe_ex = pipe(inc, log, triple, log)(10);

// right to left application
const compose_ex = compose(log, inc, log, triple)(10);
I still can't replace compose(divtwo,squares)(sumOf) with compose(divtwo,squares,sumOf)
Yes, they are not equivalent. And you shouldn't try anyway! Notice that divtwo and squares are transducers, while sumOf is a reducer. They have different types. Don't build a more function that mixes them up.
If you insist on using a dynamic number of transducers, put them in an array:
// FIX: reduceRight's callback receives (accumulator, currentValue) in that
// order. The accumulator here is the reducer built so far and the current
// element is the next transducer to wrap around it, so the application must
// be current(accumulator). The original `(t, r) => t(r)` applied the
// arguments backwards (it would compute sumOf(squares) on the first step).
[divtwo, squares].reduceRight((r, t) => t(r), sumOf)