Using Task monads and Reader monads in javascript (DynamoDB and Facebook API) - javascript

Here we are trying to make a lot of calls in a functional way with JavaScript. The problem is that we end up with Reader.of(Task.of(Reader.of(Task.of))), so we need to map(map(map(map))) over the values we need to operate on.
The computation needs to go to a DynamoDB table and get a specific property, then go to DynamoDB again and get another property, after that go to the Facebook API to get a collection, and finally store that info in a DynamoDB table. Each of these calls uses either the AWS JavaScript SDK or the Facebook Graph plugin in Node. So we need to pass the AWS dependency on run and then fork, pass AWS again and fork again, then pass FBGraph on run and fork again — this is tedious when building a computation.
Anyway here is the code:
require('pointfree-fantasy').expose(global)
import Reader from 'fantasy-readers'
import Task from 'data.task'
import _ from 'ramda'
// Identity logger — prints a value and passes it through unchanged.
const log = x => {console.log(x); return x}
// Reader transformer over Task: computations that both read an
// environment (AWS / FB clients) and run asynchronously.
const ReaderTask = Reader.ReaderT(Task)
// scan :: String -> Reader Env a
// Reads the `aws` client out of the environment and starts a table scan.
const scan = x => Reader.ask.map(env => env.aws.scanAsync({tableName: x}))
// batchGetItem :: a -> Reader Env b
const batchGetItem = x => Reader.ask.map(env => env.aws.batchGetItemAsync(x))
// batchWriteItem :: a -> Reader Env b
const batchWriteItem = x => Reader.ask.map(env => env.aws.batchWriteItemAsync(x))
// scanCampaigns :: () -> Reader Env a — scans the CampaignStats table.
const scanCampaigns = () => scan('CampaignStats')
// Facebook Graph batch call, also sourced from the environment (`env.fb`).
const FBBatchGetItem = x => Reader.ask.map(env => env.fb.batchAsync(x))
// NOTE(review): `compose`, `chain` and `map` are injected onto `global`
// by pointfree-fantasy's expose() above — they are not defined locally.
const getCampaigns = compose(scanCampaigns, Reader.of)
// Groups records by their Facebook user id.
const groupUsers = _.groupBy(x => x.user_fbid)
const batchGetAccessToken = chain(batchGetItem, ReaderTask.of)
const getCampaignsInsights = chain(FBBatchGetItem, ReaderTask.of)
const saveInsights = chain(batchWriteItem, ReaderTask.of)
const updateCampaignStats = chain(batchWriteItem, ReaderTask.of)
// Splits a list into chunks of `num` and lifts each chunk into the given
// Task type. NOTE(review): the `Task` parameter shadows the imported
// Task — confirm this shadowing is intentional.
const taskOfEvery = (Task, num) => compose(map(Task.of),_.splitEvery(num))
// NOTE(review): Task#fork runs the task for its effects; its *return
// value* (typically undefined/a cancel handle in data.task, not the
// task's result) is what gets passed to `fn` — verify this is intended.
const runTaskWithFn = (fn, task) => fn(task.fork(err => 'err', x => x))
// Keeps only records whose `active` flag is the string 'true'.
const filterActive = _.filter(x => x.active === 'true')
// Extracts the Items array from a DynamoDB scan/query response.
const getItems = x => x.Items
// Response -> active records grouped by user_fbid.
const groupAndFilter = compose(groupUsers, filterActive, getItems)
// filterByLastFetch :: ([a], string) => [a]
// Keeps only the records whose `last_fetch` is strictly less than `y`.
// FIX: the callback parameter previously shadowed the outer `x`
// (the array), which was confusing — renamed for clarity.
const filterByLastFetch = (xs, y) => xs.filter(item => item.last_fetch < y)
// Public API of this module.
export {getCampaigns, batchGetAccessToken, getCampaignsInsights, saveInsights,
groupUsers,filterByLastFetch, updateCampaignStats, taskOfEvery,
runTaskWithFn, filterActive, groupAndFilter,
getItems}
The objective is to pass the AWS plugin and FBGraph plugin only once to the computation, and build an elegant composition like:
const computation = compose(saveIntoDynamo3, fetchFromFacebook,fetchFromDynamo2,fetchFromDynamo)
and then:
computation().run({aws: AWSService, FB: FBGraph})

Related

Returning a promise node js

I have a value seed which returns a promise which I need to pass in to the hdkey function which will use that output.
seed returns a Promise, and the code throws an exception because I need the unwrapped response.
I've looked at some other posts about it, and I'm wondering about the fact that the value I'm trying to set (root) gets used by the subsequent commands.
Do I just keeping doing .then()?
var bip39 = require('bip39');
var hdkey = require('hdkey');
var createHash = require('create-hash');
//var btcLib = require('bitcoinjs-lib');
var bs58check = require('bs58check');
//const mnemonic = bip39.generateMnemonic(); //generates string
const mnemonic = "gentle mutual speak consider mandate kingdom cash explain soul exile cabin squeeze";
// NOTE(review): mnemonicToSeed returns a Promise; the `.then` below has
// an empty arrow body, which is a syntax error, and `seed` would be the
// Promise itself rather than the resolved seed buffer.
const seed = bip39.mnemonicToSeed(mnemonic)
.then(a => ); //creates seed buffer
// These statements run before the Promise settles.
console.log('Seed: ' + seed);
console.log('mnemonic: ' + mnemonic);
// fromMasterSeed expects a Buffer, not a Promise — this is where it throws.
const root = hdkey.fromMasterSeed(seed);
const masterPrivateKey = root.privateKey.toString('hex');
You need to await the Promise to get the seed buffer:
const bip39 = require('bip39')
const hdkey = require('hdkey');
const mnemonic = "gentle mutual speak consider mandate kingdom cash explain soul exile cabin squeeze";
// Runs once the seed buffer is available: derive the HD root and print
// the master private key as hex.
const deriveKeys = (seed) => {
  console.log('Seed: ', seed);
  console.log('mnemonic: ', mnemonic);
  const root = hdkey.fromMasterSeed(seed);
  const masterPrivateKey = root.privateKey.toString('hex');
  console.log(masterPrivateKey);
};
// mnemonicToSeed is asynchronous — the buffer only exists inside the
// resolution handler; failures fall through to the catch below.
bip39.mnemonicToSeed(mnemonic)
  .then(deriveKeys)
  .catch((err) => {
    console.error('error to seed', err)
  });
Or use the bip39.mnemonicToSeedSync method to avoid the Promise interface — but note that it blocks synchronously, so performance is worse.
const bip39 = require('bip39')
const hdkey = require('hdkey');
const mnemonic = "gentle mutual speak consider mandate kingdom cash explain soul exile cabin squeeze";
// Synchronous variant: no Promise, but the PBKDF2 derivation blocks.
const seedBuffer = bip39.mnemonicToSeedSync(mnemonic)
console.log('Seed: ', seedBuffer);
console.log('mnemonic: ', mnemonic);
// Derive the BIP32 root node and read out the master private key as hex.
const rootNode = hdkey.fromMasterSeed(seedBuffer)
const masterKeyHex = rootNode.privateKey.toString('hex')
console.log(masterKeyHex);

How to create a type definition for local variable inside lambda?

So I decided to add proprietary method to an instance of a class but that killed my type information. Here is what I'm talking about:
// Pick a folder, locate the .RMX file plus any .Z* zone files, then open
// the .RMX file and attach a helper that creates positioned DataReaders.
window.onload = async () => {
  let picker = new Windows.Storage.Pickers.FolderPicker()
  picker.fileTypeFilter.append("*")
  let files = await (await picker.pickSingleFolderAsync()).getFilesAsync(),
    file = files.filter(value => value.fileType == ".RMX").pop(),
    zonefiles = files.filter(value => value.fileType.startsWith(".Z"))
  console.log(file.name)
  console.log(zonefiles)
  const openedfile = await file.openAsync(Windows.Storage.FileAccessMode.Read)
  // Creates a little-endian DataReader positioned at byte offset `at`.
  openedfile.createreaderat = at => {
    // BUG FIX: the original used `this.getInputStreamAt(at)`, but `this`
    // inside an arrow function is lexical (the module/window scope), not
    // the opened stream — reference the stream object directly.
    const reader = new Windows.Storage.Streams.DataReader(openedfile.getInputStreamAt(at))
    reader.byteOrder = Windows.Storage.Streams.ByteOrder.littleEndian
    return reader
  }
  let datareader = openedfile.createreaderat(0) //gets type of any
}
I'm using visual studio 2017 community edition and I've created an UWP JS app.
Any ideas how will the d.ts file would look like so that it mitigates the issue and gives my datareader appropriate type?
I'm assuming something like that should work but it's giving me syntax errors:
// NOTE(review): this is the asker's attempted annotation, kept verbatim
// to show intent — `...:` is not a valid property key, so it does not
// compile.
window.onload = async () => {
const openedfile: { ...: Windows.Storage.Streams.RandomAccessStream,
createreaderat: (at: number) => Windows.Storage.Streams.DataReader}
}
Try this...
openedfile.createreaderat = at => <Windows.Storage.Streams.DataReader>({
const reader = new Windows.Storage.Streams.DataReader(this.getInputStreamAt(at));
reader.byteOrder = Windows.Storage.Streams.ByteOrder.littleEndian;
return reader;
});

How to create a pool for worker threads

I have been learning about the experimental worker threads module in Node.js. I've read the official documentation, as well as most available articles, which are still quite sparse.
I have created a simple example that spawns ten (10) Worker threads in order to generate 10,000 SHA256 digests and then digitally sign them.
Using ten (10) Workers takes around two (2) seconds to generate all 10,000. Without workers, it takes approximately fifteen (15) seconds.
In the official documentation, it states that creating a pool of Workers is recommended versus spawning Workers on demand.
I've tried to find articles on how I'd go about doing this, but I haven't had any luck thus far.
How would I create a pool of Worker threads? Would the worker.js file somehow be modified so that I could create the Workers in advance and then send a message to the workers, which would cause them to execute their code? Would the pool be specific to the use case or is it possible to create a generic pool that could load a file or something and handle any use case?
Thank you.
MAIN
const { performance } = require('perf_hooks')
const { Worker } = require('worker_threads')
// Spawn one Worker and adapt its event API to a Promise: resolves with
// the first message, rejects on 'error' or on a non-zero exit code.
const spawn = function spawnWorker(workerData) {
  return new Promise((resolve, reject) => {
    const worker = new Worker('./worker.js', { workerData })
    worker.on('message', resolve)
    worker.on('error', reject)
    worker.on('exit', (code) => {
      if (code !== 0) {
        reject(new Error(`Worker stopped with exit code ${code}`))
      }
    })
  })
}
// Fans the range 1..9999 out to workers in 1000-item chunks, waits for
// all of them, then reports total time and the flattened result length.
const generate = async function generateData() {
  const t0 = performance.now()
  // Build one {start, end} descriptor per 1000-item chunk.
  const initArray = []
  // BUG FIX: `step` was assigned without a declaration, creating an
  // implicit global (and a ReferenceError in strict mode / ES modules).
  for (let step = 1; step < 10000; step += 1000) {
    initArray.push({
      start: step,
      end: step + 999
    })
  }
  // Spawn every worker up front so the chunks run in parallel.
  const workersArray = initArray
    .map(x => spawn(x))
  const result = await Promise.all(workersArray)
  // Flatten each worker's `data` array into a single list.
  let finalArray = []
  for (let x of result) {
    finalArray = finalArray.concat(x.data)
  }
  const t1 = performance.now()
  console.log(`Total time: ${t1 - t0} ms`)
  console.log('Length:', finalArray.length)
}
generate()
  .then(x => {
    console.log('EXITING!')
    process.exit(0)
  })
  // ROBUSTNESS FIX: the original chain had no rejection handler, so a
  // failed worker surfaced as an unhandled Promise rejection.
  .catch(err => {
    console.error(err)
    process.exit(1)
  })
WORKERS
const { performance } = require('perf_hooks')
const { workerData, parentPort, threadId} = require('worker_threads')
const crypto = require('crypto')
const keys = require('./keys')
// createHash :: string -> string
// SHA-256 digest of the UTF-8 bytes of `data`, as lowercase hex.
const hash = function createHash(data) {
  return crypto
    .createHash('sha256')
    .update(data, 'utf8')
    .digest('hex')
}
// signData :: (privateKey, string) -> string
// RSA-SHA256 signature of `data` under `key`, base64-encoded.
const sign = function signData(key, data) {
  const signer = crypto.createSign('RSA-SHA256')
  signer.update(data)
  return signer.sign(key, 'base64')
}
// Worker body: hash and sign every integer in the assigned range
// [workerData.start, workerData.end], then post the results (plus
// timing) back to the parent thread.
const t0 = performance.now()
const data = []
// BUG FIX: `i` was assigned without a declaration, creating an implicit
// global shared across the module (an error in strict mode / ES modules).
for (let i = workerData.start; i <= workerData.end; i++) {
  const digest = hash(i.toString())
  const signature = sign(keys.HTTPPrivateKey, digest)
  data.push({
    id: i,
    digest,
    signature,
  })
}
const t1 = performance.now()
parentPort.postMessage({
  workerData,
  data,
  time: t1 - t0,
  status: 'Done',
})
I would suggest using workerpool. It basically does all the pool management for you and it supports both worker threads and clusters.

Pointfree-Style with template-string in ramda.js

I've problems writing a pointfree-style function in ramda.js and wondered if anybody can help me with that. The getEnv function reads an env-variable and logs to console if it couldn't be found.
Here is my code
// Looks up process.env[name] via R.path.
const env = name => R.path(['env', name], process);
// Returns the env value, or logs when it is missing.
// NOTE(review): `log` is not defined in this snippet — presumably a
// console.log wrapper defined elsewhere; verify before running.
const getEnv = name => R.pipe(
env,
R.when(R.isNil, () => log(`Missing env "${name}"`))
)(name);
console.log(getEnv('myenv'))
I'd want to drop the name parameter of the getEnv function (and if possible also on the env function) but don't know how to do this.
The function getEnv does more than it should. It actually returns the content of the path or logs a validation message.
Split it into two separate functions. In my example below, I call them findPath and validatePath, which work generically for all paths. I've wrapped validatePath into another function called validateEnvPath, which searches directly under "env".
To get rid of env you can do the following: R.flip (R.curry (R.path)). This curries the function and then flips its arguments, so you can tell the function up front which object you want to query.
// Stubbed process object so the snippet runs outside Node.
const process = {env: {myenv: ':)'}}
// R.path with its arguments flipped: path(obj)(keys).
// NOTE(review): R.path is already curried, so the R.curry wrapper is
// redundant.
const path = R.flip(R.curry(R.path))
// Looks a key path up in `process`.
// NOTE(review): ifElse(isNil, always(undefined), identity) is a no-op —
// a nil lookup already comes back as undefined via identity.
const findPathInProcess = R.pipe(
path (process),
R.ifElse(
R.isNil,
R.always(undefined),
R.identity
)
)
// Returns the value at `path`, or an error message when it is absent.
const validatePath = path =>
validationPathResponse (findPathInProcess( path )) (`can't find something under [${path}]`)
// Convenience wrapper that prefixes the path with 'env'.
const validateEnvPath = path =>
validatePath (buildPath (['env']) (path))
// buildPath :: [String] -> String -> [String]
const buildPath = xs => x =>
xs.concat(x)
// Falls back to the error message for falsy lookup results.
const validationPathResponse = response => errorMessage =>
response
? response
: errorMessage
console.log(validatePath(['env', 'myenv']))
console.log(validateEnvPath('myenv'))
console.log(validateEnvPath('yourenv'))
<script src="https://cdnjs.cloudflare.com/ajax/libs/ramda/0.25.0/ramda.min.js"></script>
Consider using Either monad. Sanctuary has it already implemented and plays nice with its brother ramda:
// Builds an Either: Right(value) when the env var exists, otherwise
// Left('Given variable could not be retrieved').
const viewEnv = S.pipe ([
S.flip (R.append) (['env']),
R.lensPath,
R.view,
S.T (process),
S.toEither ('Given variable could not be retrieved')
])
// Pass-through logger.
const log = R.tap (console.log)
const eitherSomeVar = viewEnv ('someVar')
// NOTE(review): `doSomeOtherStuff` is illustrative — it is not defined
// in this snippet.
const eitherWhatever = S.bimap (log) (doSomeOtherStuff)
In addition one could also write the following
// Simpler variant: R.path is curried out of the box, so R.flip alone
// suffices.
const path = R.flip(R.path) // already curried
const findPathInProcess = R.pipe(
path(process),
// R.when(isNil, always(undefined)) leaves nil results as undefined.
R.when(
R.isNil,
R.always(undefined)
)
)

Gremlin get count in javascript AWS Lambda

I am trying to get the count of edges for my graph through an AWS lambda
but getting an error
g.V(...).inE(...).outV(...).count(...).then is not a function
My lambda code is:
const gremlin = require('gremlin');
const DriverRemoteConnection = gremlin.driver.DriverRemoteConnection;
const Graph = gremlin.structure.Graph;
exports.handler = (event, context, callback) => {
// NOTE(review): the remote connection is opened on every invocation and
// never closed.
let dc = new DriverRemoteConnection('ws://<endpoint>:8182/gremlin');
const graph = new Graph();
const g = graph.traversal().withRemote(dc);
// count() returns a Traversal, not a Promise — it must be iterated
// (e.g. via .next()) before .then is available; as written this line
// throws "...count(...).then is not a function".
g.V('19071640').inE('friend').count().then(num => console.log(num));
}
It works if I do
g.V('19071640').inE('friend').outV().then(friends => console.log(friends));
I think that you need to iterate your traversal.
// .next() iterates the traversal and returns a Promise of {value, done},
// which is why .then works here.
g.V('19071640').inE('friend').count().next().then(num => console.log(num));

Categories