Here is my Node.js code:
const cp = require('child_process');

describe('cloud function test suites', () => {
  describe('deleteCampaign test suites', () => {
    const cloudFunctionName = 'deleteCampaign';

    it('should print campaign data', () => {
      const campaign = { id: '1' };
      const encodedCampaign = Buffer.from(JSON.stringify(campaign)).toString(
        'base64',
      );
      const data = JSON.stringify({ data: encodedCampaign });
      const executeResultOutput = cp
        .execSync(
          `gcloud beta functions call ${cloudFunctionName} --data '${data}'`,
        )
        .toString();
      const executionId = executeResultOutput.split(': ')[1];
      const logs = cp
        .execSync(
          `gcloud beta functions logs read ${cloudFunctionName} --execution-id ${executionId}`,
        )
        .toString();
      console.log(logs);
      expect(logs).toContain('campaign: {"id":"1"}');
    });
  });
});
I want to print the logs to stdout, but logs is an empty string.
When I read the logs using the gcloud command line, however, the output is correct:
gcloud beta functions logs read deleteCampaign --execution-id ee5owvtzlekc
LEVEL NAME EXECUTION_ID TIME_UTC LOG
D deleteCampaign ee5owvtzlekc 2018-09-13 12:46:17.734 Function execution started
I deleteCampaign ee5owvtzlekc 2018-09-13 12:46:17.738 campaign: {"id":"1"}
D deleteCampaign ee5owvtzlekc 2018-09-13 12:46:17.742 Function execution took 9 ms, finished with status: 'ok'
I'm using Jest and Node.js to write tests for my cloud functions. Why is logs an empty string?
The string you are trying to read is empty because the logs take a bit more time to be generated. Even though the Google Cloud Function has finished executing, you have to wait a few seconds for the logs to be ready.
Reading your code, you are not allowing for that delay, hence the empty string.
The first thing I noticed while reading your code was this part:
const executionId = executeResultOutput.split(': ')[1];
I understand that you want to extract the Google Cloud Function's execution ID. I had problems here because the resulting string was not limited to the execution ID; it also included a newline character and the word "result". I made sure to extract just the execution ID with the following code:
const executionId = executeResultOutput.split(':')[1]; // We get the GCP execution ID.
const executionId2 = executionId.split("\n")[0].toString(); // Keep only the part before the newline.
If you have already found a way to get the execution ID without problems, ignore that code.
Below you can find the code that has worked for me:
let cloudFunctionLog = '';

function getLogs() {
  console.log('Trying to get logs...');
  const logs = cp
    .execSync(`gcloud beta functions logs read ${cloudFunctionName} --execution-id ${executionId2}`)
    .toString(); // convert the Buffer so the empty-string check below works
  return logs;
}

do {
  cloudFunctionLog = getLogs();
  if (!cloudFunctionLog) {
    console.log('Logs are not ready yet...');
  } else {
    console.log(`${cloudFunctionLog}`);
  }
} while (!cloudFunctionLog); // Repeat while the string comes back empty.
When the logs are no longer empty, they'll show up in your console.
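As a refinement (my own sketch, not part of the original answer), you can pause between polls so gcloud is not invoked in a tight loop. Since execSync already blocks, a synchronous sleep keeps the test simple; Atomics.wait works as a plain delay on Node's main thread:

// Hedged sketch: add a synchronous sleep between polls (assumed refinement).
function sleepSync(ms) {
  Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, ms);
}

let cloudFunctionLog = '';
do {
  cloudFunctionLog = getLogs();
  if (!cloudFunctionLog) {
    console.log('Logs are not ready yet...');
    sleepSync(2000); // wait two seconds before asking gcloud again
  }
} while (!cloudFunctionLog);
console.log(`${cloudFunctionLog}`);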
As described in the Firebase docs, it is required to
"resolve functions that perform asynchronous processing (also known as "background functions") by returning a JavaScript promise"
(https://firebase.google.com/docs/functions/terminate-functions?hl=en).
Otherwise it might happen that
"the Cloud Functions instance running your function does not shut down before your function successfully reaches its terminating condition or state"
(https://firebase.google.com/docs/functions/terminate-functions?hl=en).
In this case I am trying to adapt demo code for PDF generation written by Volodymyr Golosay on https://medium.com/firebase-developers/how-to-generate-and-store-a-pdf-with-firebase-7faebb74ccbf.
The demo uses 'https.onRequest' as the trigger and fulfills the termination requirement with 'response.send(result)'. In my adaptation I need to use a 'document.onCreate' trigger and therefore need a different way to terminate.
In other functions I can fulfill this requirement by using async/await, but here I am struggling to get a stable function with good performance. The function shown below logs "finished with status: 'ok'" after 675 ms, but around 2 minutes later it logs again that the PDF file is saved now (see the screenshot of the logger).
What should I do to terminate the function properly?
// Adapting the demo code by Volodymyr Golosay published on https://medium.com/firebase-developers/how-to-generate-and-store-a-pdf-with-firebase-7faebb74ccbf
// Library installed -> npm install pdfmake
const functions = require("firebase-functions");
const admin = require("firebase-admin");
admin.initializeApp();
const db = admin.firestore();
const Printer = require('pdfmake');
const fonts = require('pdfmake/build/vfs_fonts.js');

const fontDescriptors = {
  Roboto: {
    normal: Buffer.from(fonts.pdfMake.vfs['Roboto-Regular.ttf'], 'base64'),
    bold: Buffer.from(fonts.pdfMake.vfs['Roboto-Medium.ttf'], 'base64'),
    italics: Buffer.from(fonts.pdfMake.vfs['Roboto-Italic.ttf'], 'base64'),
    bolditalics: Buffer.from(fonts.pdfMake.vfs['Roboto-Italic.ttf'], 'base64'),
  }
};

exports.generateDemoPdf = functions
  // Triggered by 'document.onCreate', while the demo uses 'https.onRequest'
  .firestore
  .document('collection/{docId}')
  .onCreate(async (snap, context) => {
    const printer = new Printer(fontDescriptors);
    const chunks = [];

    // Define the content of the PDF file
    const docDefinition = {
      content: [{
        text: 'PDF text is here.',
        fontSize: 19
      }]
    };

    const pdfDoc = printer.createPdfKitDocument(docDefinition);
    pdfDoc.on('data', (chunk) => {
      chunks.push(chunk);
    });
    pdfDoc.on('end', async () => {
      const result = Buffer.concat(chunks);
      // Upload the generated file to Cloud Storage
      const docId = "123456789";
      const bucket = admin.storage().bucket();
      const fileRef = bucket.file(`${docId}.pdf`, {
        metadata: {
          contentType: 'application/pdf'
        }
      });
      await fileRef.save(result);
      console.log('result is saved');
      // NEEDS PROPER TERMINATION HERE?? NEEDS TO RETURN A PROMISE?? FIREBASE DOCS: https://firebase.google.com/docs/functions/terminate-functions?hl=en
      // The demo with 'https.onRequest' uses the following line to terminate the function properly:
      // response.send(result);
    });
    pdfDoc.on('error', (err) => {
      return functions.logger.log('An error occurred!');
    });
    pdfDoc.end();
  });
I think everything is fine in your code; it just seems to take 1m 34s to render the file and save it to storage.
A Cloud Function is terminated automatically once all micro and macro tasks are done - right after your last await.
To check how long it takes and whether it terminates right after saving, you can run the Firebase emulator on your local machine. You will see the logs in the terminal while watching the bucket in storage at the same time.
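For example (the exact emulator list is an assumption; adjust it to your project):

# Start the local emulators and watch the function logs in the terminal
firebase emulators:start --only functions,firestore,storage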
I suspect you did terminate properly - that's the nature of promises. Your function "terminated" with a 200 status, returning a promise for the result of the PDF save. When the PDF save actually completes later, the result is logged and the promise resolves. This behavior is why you return the promise.
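For reference, here is a minimal sketch (my own, not code from the thread) of how the handler could wrap the stream events in a promise and return it, so that Cloud Functions waits for the upload before shutting the instance down:

// Hedged sketch: return a promise that resolves only after the upload finishes.
exports.generateDemoPdf = functions.firestore
  .document('collection/{docId}')
  .onCreate((snap, context) => {
    const printer = new Printer(fontDescriptors);
    const pdfDoc = printer.createPdfKitDocument({
      content: [{ text: 'PDF text is here.', fontSize: 19 }],
    });

    const chunks = [];
    return new Promise((resolve, reject) => {
      pdfDoc.on('data', (chunk) => chunks.push(chunk));
      pdfDoc.on('error', reject);
      pdfDoc.on('end', resolve); // all chunks have been collected
      pdfDoc.end();
    }).then(() => {
      // Upload only after the stream has ended, and return the save promise.
      const file = admin.storage().bucket().file(`${context.params.docId}.pdf`);
      return file.save(Buffer.concat(chunks), {
        metadata: { contentType: 'application/pdf' },
      });
    });
  });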
For a project I need to incorporate a backend Python function with JavaScript (the main code for a chatbot). Using child processes, it seems to work (when run with 'node script.js'). However, I need to access the data from the called Python function. Right now, all I am getting is the output. I tried to store it in a global variable, but it shows up as 'undefined'. Is there a way to actually access the data so I can use it outside the stdout.on handler?
This is the JavaScript code for running the Python script:
// Path to the QR scanner Python file
const qrScannerPath = "python/qrCodeScanner.py"
const base64Arg = "base64_2.txt"

// Provide the '.exe' Python file. If Python is available as an environment variable, then simply refer to it as 'python'
const pythonExe = "python"

// Function to convert a UTF-8 array to a string
const utfConverter = function (data) {
  return String.fromCharCode.apply(String, (data))
}

// Lets us handle Python scripts
const spawn = require("child_process").spawn
const scriptExe = spawn(pythonExe, [qrScannerPath, base64Arg])

// If all goes well, the program should execute the Python code
let counterpartyData = {}

scriptExe.stdout.on("data", function (data) {
  console.log("getting the Python script data...")
  let cp = JSON.parse(utfConverter(data))
  counterpartyData = { ...cp } // should store the data in the variable
});

console.log(counterpartyData) // shows 'undefined'

// In case our Python script cannot be run, we'll get an error
scriptExe.stderr.on("data", (data) => {
  console.error("error : " + data.toString())
});

// Logs the error message
scriptExe.on('error', (error) => {
  console.error('error: ', error.message);
});

// Logs a message saying that our code worked perfectly fine
scriptExe.on("exit", (code) => {
  console.log("Process quit with code : " + code)
})
If I run this code with node, the output of 'counterpartyData' is undefined. However, inside the stdout handler it actually prints out the data I want.
Furthermore, I get python import errors when running the app on Heroku :(.
Thank you in advance!!
Happy New Year and joyful greetings <3
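For reference, a minimal sketch of the usual pattern (my own assumption, not code from the original post): console.log(counterpartyData) runs before the 'data' event ever fires, so the parsed result has to be consumed after the child process completes, e.g. by wrapping spawn in a promise:

// Hedged sketch, reusing the question's variables (pythonExe, qrScannerPath, base64Arg):
// collect stdout and resolve a promise once the process exits.
const { spawn } = require("child_process")

function runQrScanner() {
  return new Promise((resolve, reject) => {
    const scriptExe = spawn(pythonExe, [qrScannerPath, base64Arg])
    let raw = ""
    scriptExe.stdout.on("data", (data) => { raw += data.toString() })
    scriptExe.on("error", reject)
    scriptExe.on("close", (code) => {
      if (code === 0) resolve(JSON.parse(raw))
      else reject(new Error("Python exited with code " + code))
    })
  })
}

// The parsed data is only available inside the asynchronous context:
runQrScanner().then((counterpartyData) => console.log(counterpartyData))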
I tried to implement async and await inside a spawned child process, but it didn't work. Please see this.
Expected output
*************
http://www.stevecostellolaw.com/
*************
http://www.stevecostellolaw.com/personal-injury.html
http://www.stevecostellolaw.com/personal-injury.html
*************
http://www.stevecostellolaw.com/#
http://www.stevecostellolaw.com/#
*************
http://www.stevecostellolaw.com/home.html
http://www.stevecostellolaw.com/home.html
*************
http://www.stevecostellolaw.com/about-us.html
http://www.stevecostellolaw.com/about-us.html
*************
http://www.stevecostellolaw.com/
http://www.stevecostellolaw.com/
*************
Because each time the spawned child encounters an await, it should go back to the Python script, print ************* and then print the URL. Ignore the duplicate printing of each URL here.
Output which I am getting:
C:\Users\ASUS\Desktop\searchermc>node app.js
server running on port 3000
DevTools listening on ws://127.0.0.1:52966/devtools/browser/933c20c7-e295-4d84-a4b8-eeb5888ecbbf
[3020:120:0402/105304.190:ERROR:device_event_log_impl.cc(214)] [10:53:04.188] USB: usb_device_handle_win.cc:1056 Failed to read descriptor from node connection: A device attached to the system is not functioning. (0x1F)
[3020:120:0402/105304.190:ERROR:device_event_log_impl.cc(214)] [10:53:04.189] USB: usb_device_handle_win.cc:1056 Failed to read descriptor from node connection: A device attached to the system is not functioning. (0x1F)
*************
http://www.stevecostellolaw.com/
http://www.stevecostellolaw.com/personal-injury.html
http://www.stevecostellolaw.com/personal-injury.html
http://www.stevecostellolaw.com/#
http://www.stevecostellolaw.com/#
http://www.stevecostellolaw.com/home.html
http://www.stevecostellolaw.com/home.html
http://www.stevecostellolaw.com/about-us.html
http://www.stevecostellolaw.com/about-us.html
http://www.stevecostellolaw.com/
http://www.stevecostellolaw.com/
*************
Please see the app.js code below:
// Form submit request
app.post('/formsubmit', function(req, res) {
  csvData = req.files.csvfile.data.toString('utf8');
  filteredArray = cleanArray(csvData.split(/\r?\n/))
  csvData = get_array_string(filteredArray)
  csvData = csvData.trim()
  var keywords = req.body.keywords
  keywords = keywords.trim()

  // Send the request to the Python script
  var spawn = require('child_process').spawn;
  var process = spawn('python', ["./webextraction.py", csvData, keywords, req.body.full_search])
  var outarr = []

  // process.stdout.on('data', (data) => {
  //   console.log(`stdout: ${data}`);
  // });

  process.stdout.on('data', async function(data) {
    console.log("\n ************* ")
    console.log(data.toString().trim())
    await outarr.push(data.toString().trim())
    console.log("\n ************* ")
  });
});
The Python function which sends the URLs when the if condition matches:
# Function for searching keyword start
def search_keyword(href, search_key):
    extension_list = ['mp3', 'jpg', 'exe', 'jpeg', 'png', 'pdf', 'vcf']
    if(href.split('.')[-1] not in extension_list):
        try:
            content = selenium_calling(href)
            soup = BeautifulSoup(content, 'html.parser')
            search_string = re.sub("\s+", " ", soup.body.text)
            search_string = search_string.lower()
            res = [ele for ele in search_key if(ele.lower() in search_string)]
            outstr = getstring(res)
            outstr = outstr.lstrip(", ")
            if(len(res) > 0):
                print(href)
                found_results.append(href)
                href_key_dict[href] = outstr
                return 1
            else:
                notfound_results.append(href)
        except Exception as err:
            pass
I want to do all this because the Python script takes a long time to execute and thus gives a timeout error each time, so I am trying to get intermediate output of the Python script in my Node.js script. You can see the error I am getting in the image below.
I'm not sure I completely understand what you're trying to do, but I'll give it a shot since you seem to have asked this question many times already (which usually isn't a good idea). I believe that there's a lack of clarity in your question - it would help a lot if you could clarify what your end goal is (i.e. how do you want this to behave?)
I think you mentioned two separate problems here. The first is that you expect a new line of '******' to be placed before each separate piece of data returned from your script. This is something that can't be relied on - check out the answer to this question for more detail: Order of process.stdout.on( 'data', ... ) and process.stderr.on( 'data', ... ). The data will be passed to your stdout handler in chunks, not line-by-line, and any amount of data can be provided at a time depending on how much is currently in the pipe.
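As an illustration (my own sketch, not part of the original answer), Node's readline module can re-buffer a child process's stdout into whole lines if line-by-line handling is what you need:

// Hedged sketch: readline emits one 'line' event per complete line of output.
// 'pyProc' stands for the spawned child (the question names it 'process').
const readline = require('readline');
const { spawn } = require('child_process');

const pyProc = spawn('python', ['./webextraction.py']); // arguments trimmed for brevity
const rl = readline.createInterface({ input: pyProc.stdout });

rl.on('line', (line) => {
  console.log('\n ************* ');
  console.log(line.trim());
});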
The part I'm most confused about is your phrasing of "to get intermediate output of the python script in my nodejs script". There's not necessarily any "intermediate" data - you can't rely on data coming in at any particular time with your process's stdout handler; it's going to hand you data at a pace determined by the Python script itself and the process it's running in. With that said, it sounds like your main problem here is the timeout happening on your POST. You aren't ever ending your request - that's why you're getting a timeout. I'm going to assume that you want to wait for the first chunk of data - regardless of how many lines it contains - before sending a response back. In that case, you'll need to add res.send, like this:
// Form submit request
app.post('/formsubmit', function(req, res) {
  csvData = req.files.csvfile.data.toString('utf8');
  filteredArray = cleanArray(csvData.split(/\r?\n/))
  csvData = get_array_string(filteredArray)
  csvData = csvData.trim()
  var keywords = req.body.keywords
  keywords = keywords.trim()

  // Send the request to the Python script
  var spawn = require('child_process').spawn;
  var process = spawn('python', ["./webextraction.py", csvData, keywords, req.body.full_search])
  var outarr = []

  // process.stdout.on('data', (data) => {
  //   console.log(`stdout: ${data}`);
  // });

  // Keep track of whether we've already ended the request
  let responseSent = false;

  process.stdout.on('data', async function(data) {
    console.log("\n ************* ")
    console.log(data.toString().trim())
    outarr.push(data.toString().trim())
    console.log("\n ************* ")

    // If the request hasn't already been ended, send back the current output
    // from the script and end the request
    if (!responseSent) {
      responseSent = true;
      res.send(outarr);
    }
  });
});
I have a function that monitors a node in a Realtime Database; once a new child is written to the node, the function simply needs to create an HTML document in a Google Cloud bucket. The HTML document will have a unique name and will contain some data from the node. It's all fairly straightforward; however, I can't actually create and write to the document. I've tried three methods so far (outlined in the code below), and none of them work.
const {Storage} = require('@google-cloud/storage');
const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();
const fs = require('fs');
const {StringStream} = require('@rauschma/stringio')

const instanceId = 'my-project-12345';
const bucketName = 'my-bucket';

exports.processCertification = functions.database.instance(instanceId).ref('/t/{userId}/{testId}')
  .onCreate((snapshot, context) => {
    const dataJ = snapshot.toJSON();
    var testResult = "Invalid";
    if(dataJ.r == 1) {testResult = "Positive";}
    else if(dataJ.r == 2) {testResult = "Negative";}
    console.log('Processing certificate:', context.params.testId, testResult);

    var storage = new Storage({projectId: instanceId});
    const fileName = context.params.testId + '.html';
    const fileContents = "<html><head></head><body>Result: " + testResult + "</body></html>"
    const options = {resumable:false, metadata:{contentType:'text/html'}};
    const bucket = storage.bucket(bucketName);
    const file = bucket.file(fileName);
    console.log('Saving to:' + bucketName + '/' + fileName);

    if(false) {
      // Test 1. the file.save method
      // Errors with:
      // (node:2) MetadataLookupWarning: received unexpected error = URL is not defined code = UNKNOWN
      file.save(fileContents, options, function(err) {
        if (!err) {console.log("Save created object at " + bucketName + "/" + fileName);}
        else {console.log("Save Failed " + err);}
      });
    } else if(true) {
      // Test 2. the readStream.pipe method
      // No errors, doesn't output error message, doesn't output finish message, no file created
      fs.createReadStream(fileContents)
        .pipe(file.createWriteStream(options))
        .on('error', function(err) {console.log('WriteStream Error');})
        .on('finish', function() {console.log('WriteStream Written');});
    } else {
      // Test 3. the StringStream with readStream.pipe method
      // Errors with:
      // (node:2) MetadataLookupWarning: received unexpected error = URL is not defined code = UNKNOWN
      const writeStream = storage.bucket(bucketName).file(fileName).createWriteStream(options);
      writeStream.on('finish', function(){console.log('WriteStream Written');}).on('error', function(err){console.log('WriteStream Error');});
      const readStream = new StringStream(fileContents);
      readStream.pipe(writeStream);
    }

    console.log('Function Finished');
    return 0;
  });
In all cases the "Processing certificate" and "Saving to" outputs appear, and I also get the "Function Finished" message every time. The errors (or, in one case, the absence of any response) are noted against each of the tests in the code.
My next step will be to create the file locally and then use the upload() method; however, each of these methods seems like it should work, and since the only error message I have is about URL errors, I suspect the upload() method would run into the same problems as well.
I'm using Node.js v8.17.0 and the following packages:
"dependencies": {
"#google-cloud/storage": "^5.0.0",
"#rauschma/stringio": "^1.4.0",
"firebase-admin": "^8.10.0",
"firebase-functions": "^3.6.1"
}
Any advice is most welcome
In each case, you are not working with promises correctly. For database triggers (and all other background triggers), you must return a promise that resolves when all of the asynchronous work in the function is complete. Right now, you're not doing anything at all with promises, while each of the APIs you're calling is asynchronous. Your function just returns 0 immediately without waiting for the upload to complete, and Cloud Functions simply terminates and cleans up before anything can happen.
I suggest choosing one of the methods that returns a promise when the upload is complete (probably file.save()), then returning that promise from the function.
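A minimal sketch of that suggestion (my own, reusing the question's variables; file.save() returns a promise when no callback is passed):

// Hedged sketch: return the save promise so Cloud Functions waits for the upload.
exports.processCertification = functions.database.instance(instanceId).ref('/t/{userId}/{testId}')
  .onCreate((snapshot, context) => {
    // ... build testResult, fileName, fileContents, options, and file as in the question ...
    return file.save(fileContents, options)
      .then(() => console.log('Save created object at ' + bucketName + '/' + fileName));
  });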
I'm trying to use mock-cli to stub process.argv in Mocha tests for a CLI app. I want to test that a message is console.logged when an incorrect argument ("imit") is passed to process.argv (as defined by commands).
I'm trying to adapt the example from the documentation, but I don't think I have set everything up correctly.
It passes when I comment out "stdin: require('../mocks/fakeInputStream'), // Hook up a fake input stream", though I know it's not working correctly.
It fails with TypeError: sourceStream.on is not a function when run as described below.
Can someone see what I'm missing?
/index.js
var commands = ['init'];

function getGitHeadArgs() {
  return process.argv.slice(2, process.argv.length);
}

if (getGitHeadArgs().length) {
  if (!commands.includes(getGitHeadArgs()[0])) {
    console.log("Silly Githead! That's not a githead command");
  }
  eval(getGitHeadArgs()[0])();
} else {
  console.log("You didn't tell githead to do anything!");
}
/testIndex.js
var assert = require('assert');
var index = require('../index.js');
var mockCli = require("mock-cli");

describe("incorrect argument", function() {
  it("imit throws an error if an invalid command is raised", function() {
    var argv = ['node', '../index.js', 'imit']; // Fake argv
    var stdio = {
      stdin: require('../mocks/fakeInputStream'), // Hook up a fake input stream
      stdout: process.stdout, // Display the captured output in the main console
      stderr: process.stderr // Display the captured error output in the main console
    };

    var kill = mockCli(argv, stdio, function onProcessComplete(error, result) {
      var exitCode = result.code; // Process exit code
      var stdout = result.stdout; // UTF-8 string contents of process.stdout
      var stderr = result.stderr; // UTF-8 string contents of process.stderr

      assert.equal(exitCode, 0);
      assert.equal(stdout, "Silly Githead! That's not a githead command\n");
      assert.equal(stderr, '');
    });

    // Execute the CLI task
    require('../index.js');

    // Kill the task if still running after one second
    setTimeout(kill, 1000);
  });
});
Is ../mocks/fakeInputStream a valid path?
Is the object at ../mocks/fakeInputStream a valid instance of ReadableStream?
The source code is available on GitHub.
Make sure you meet the requirements for the captureStdin(sourceStream, callback) function.
The module uses that function to capture your fakeInputStream and pipe it into a captureStream.
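For reference, a minimal fake input stream that would satisfy sourceStream.on(...) could look like this (my own sketch, assuming the test only needs an empty but well-formed Readable):

// mocks/fakeInputStream.js - hedged sketch of a valid ReadableStream mock
const { Readable } = require('stream');

const fakeInputStream = new Readable({
  read() {
    this.push(null); // no input to provide; end the stream immediately
  }
});

module.exports = fakeInputStream;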