Puppeteer doesn't close browser

I'm running Puppeteer on Express/Node/Ubuntu as follows:
var puppeteer = require('puppeteer');
var express = require('express');
var router = express.Router();

/* GET home page. */
router.get('/', function(req, res, next) {
  (async () => {
    const browser = await puppeteer.launch({ headless: true, args: ['--no-sandbox'] });
    const page = await browser.newPage();
    const url = req.query.url;
    await page.goto(url);
    let bodyHTML = await page.evaluate(() => document.body.innerHTML);
    res.send(bodyHTML);
    await browser.close();
  })();
});
Running this script multiple times leaves hundreds of zombie processes:
$ pgrep chrome | wc -l
133
which clogs the server.
How do I fix this?
Would running kill from the Express script solve it?
Is there a better way to get the same result other than Puppeteer and headless Chrome?

Ahhh! This is a simple oversight: what if an error occurs and your await browser.close() never executes, leaving you with zombies?
Using shelljs to kill the processes seems a hacky way of solving this issue.
The better practice is to use try..catch..finally, because you want the browser to be closed regardless of whether the flow succeeds or an error is thrown.
And unlike the other code snippet below, you don't have to close the browser in both the try block and the finally block; the finally block is always executed whether or not an error is thrown.
So your code should look like this:
const puppeteer = require('puppeteer');
const express = require('express');
const router = express.Router();

/* GET home page. */
router.get('/', function(req, res, next) {
  (async () => {
    const browser = await puppeteer.launch({
      headless: true,
      args: ['--no-sandbox'],
    });
    try {
      const page = await browser.newPage();
      const url = req.query.url;
      await page.goto(url);
      const bodyHTML = await page.evaluate(() => document.body.innerHTML);
      res.send(bodyHTML);
    } catch (e) {
      console.log(e);
    } finally {
      await browser.close();
    }
  })();
});
Hope this helps!

Wrap your code in try..catch..finally like this and see if it helps:
const browser = await puppeteer.launch({ headless: true, args: ['--no-sandbox'] });
try {
  const page = await browser.newPage();
  const url = req.query.url;
  await page.goto(url);
  let bodyHTML = await page.evaluate(() => document.body.innerHTML);
  res.send(bodyHTML);
  await browser.close();
} catch (error) {
  console.log(error);
} finally {
  await browser.close();
}

In my experience, closing the browser can take some time after close() is called. In any case, you can check the browser's process property to see whether it is still running and force-kill it.
if (browser && browser.process() != null) browser.process().kill('SIGINT');
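For example, a small helper along those lines, as a sketch (the closeBrowser name and the 5-second grace period are my own choices, not Puppeteer defaults):
async function closeBrowser(browser) {
  if (!browser) return;
  // Give the graceful close a few seconds to finish.
  await Promise.race([
    browser.close(),
    new Promise(resolve => setTimeout(resolve, 5000)), // grace period, an assumption
  ]);
  // If the Chrome process is still alive, force-kill it.
  const proc = browser.process();
  if (proc && proc.exitCode === null) proc.kill('SIGINT');
}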
I'm also posting the full code of my Puppeteer resource manager below; take a look at the bw.on('disconnected', ...) handler.
const puppeteer = require('puppeteer-extra');
const randomUseragent = require('random-useragent');
const StealthPlugin = require('puppeteer-extra-plugin-stealth');

const USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36';

puppeteer.use(StealthPlugin());

function ResourceManager(loadImages) {
  let browser = null;
  const _this = this;
  let retries = 0;
  let isReleased = false;

  this.init = async () => {
    isReleased = false;
    retries = 0;
    browser = await runBrowser();
  };

  this.release = async () => {
    isReleased = true;
    if (browser) await browser.close();
  };

  this.createPage = async (url) => {
    if (!browser) browser = await runBrowser();
    return await createPage(browser, url);
  };

  async function runBrowser() {
    const bw = await puppeteer.launch({
      headless: true,
      devtools: false,
      ignoreHTTPSErrors: true,
      slowMo: 0,
      args: ['--disable-gpu', '--no-sandbox', '--no-zygote', '--disable-setuid-sandbox', '--disable-accelerated-2d-canvas', '--disable-dev-shm-usage', "--proxy-server='direct://'", "--proxy-bypass-list=*"]
    });

    bw.on('disconnected', async () => {
      if (isReleased) return;
      console.log("BROWSER CRASH");
      if (retries <= 3) {
        retries += 1;
        if (browser && browser.process() != null) browser.process().kill('SIGINT');
        await _this.init();
      } else {
        throw new Error("===================== BROWSER crashed more than 3 times");
      }
    });

    return bw;
  }

  async function createPage(browser, url) {
    const userAgent = randomUseragent.getRandom();
    const UA = userAgent || USER_AGENT;
    const page = await browser.newPage();

    await page.setViewport({
      width: 1920 + Math.floor(Math.random() * 100),
      height: 3000 + Math.floor(Math.random() * 100),
      deviceScaleFactor: 1,
      hasTouch: false,
      isLandscape: false,
      isMobile: false,
    });
    await page.setUserAgent(UA);
    await page.setJavaScriptEnabled(true);
    await page.setDefaultNavigationTimeout(0);

    if (!loadImages) {
      await page.setRequestInterception(true);
      page.on('request', (req) => {
        if (req.resourceType() == 'stylesheet' || req.resourceType() == 'font' || req.resourceType() == 'image') {
          req.abort();
        } else {
          req.continue();
        }
      });
    }

    await page.evaluateOnNewDocument(() => {
      // pass webdriver check
      Object.defineProperty(navigator, 'webdriver', {
        get: () => false,
      });
    });

    await page.evaluateOnNewDocument(() => {
      // pass chrome check
      window.chrome = {
        runtime: {},
        // etc.
      };
    });

    await page.evaluateOnNewDocument(() => {
      // pass permissions check
      const originalQuery = window.navigator.permissions.query;
      return window.navigator.permissions.query = (parameters) => (
        parameters.name === 'notifications' ?
          Promise.resolve({ state: Notification.permission }) :
          originalQuery(parameters)
      );
    });

    await page.evaluateOnNewDocument(() => {
      // Overwrite the `plugins` property to use a custom getter.
      // This just needs to have `length > 0` for the current test,
      // but we could mock the plugins too if necessary.
      Object.defineProperty(navigator, 'plugins', {
        get: () => [1, 2, 3, 4, 5],
      });
    });

    await page.evaluateOnNewDocument(() => {
      // Overwrite the `languages` property to use a custom getter.
      Object.defineProperty(navigator, 'languages', {
        get: () => ['en-US', 'en'],
      });
    });

    await page.goto(url, { waitUntil: 'networkidle2', timeout: 0 });
    return page;
  }
}

module.exports = { ResourceManager };
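A minimal usage sketch for this manager inside an Express route (the require path and the loadImages flag are assumptions):
const express = require('express');
const { ResourceManager } = require('./resource-manager'); // path is an assumption

const router = express.Router();
const manager = new ResourceManager(false); // false: skip images, styles, fonts

router.get('/', async (req, res) => {
  try {
    const page = await manager.createPage(req.query.url);
    const bodyHTML = await page.evaluate(() => document.body.innerHTML);
    await page.close(); // close the page, keep the shared browser alive
    res.send(bodyHTML);
  } catch (e) {
    res.status(500).send(String(e));
  }
});

module.exports = router;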

I solved it with https://www.npmjs.com/package/shelljs:
var shell = require('shelljs');
shell.exec('pkill chrome');

Try to close the browser before sending the response:
var puppeteer = require('puppeteer');
var express = require('express');
var router = express.Router();

router.get('/', function(req, res, next) {
  (async () => {
    const browser = await puppeteer.launch({ headless: true });
    const page = await browser.newPage();
    const url = req.query.url;
    await page.goto(url);
    let bodyHTML = await page.evaluate(() => document.body.innerHTML);
    await browser.close();
    res.send(bodyHTML);
  })();
});

I ran into the same issue, and while your shelljs solution did work, it kills all Chrome processes, which might interrupt one that is still processing a request. Here is a better solution that should work:
var puppeteer = require('puppeteer');
var express = require('express');
var router = express.Router();

router.get('/', function (req, res, next) {
  (async () => {
    await puppeteer.launch({ headless: true }).then(async browser => {
      const page = await browser.newPage();
      const url = req.query.url;
      await page.goto(url);
      let bodyHTML = await page.evaluate(() => document.body.innerHTML);
      await browser.close();
      res.send(bodyHTML);
    });
  })();
});

Use
(await browser).close()
That happens because browser contains a Promise that you have to resolve first. I suffered a lot over this; I hope it helps.
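In other words, this applies when you stored the result of puppeteer.launch() without awaiting it. A sketch of the mistake and the fix (inside an async function):
// Mistake: `browser` holds a Promise<Browser>, not a Browser.
const browser = puppeteer.launch({ headless: true });
// browser.close();        // TypeError: browser.close is not a function
(await browser).close();   // resolve the promise first, then close

// Cleaner: await at launch time instead.
// const browser = await puppeteer.launch({ headless: true });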

I use the following basic setup for running Puppeteer:
const puppeteer = require("puppeteer");

let browser;
(async () => {
  browser = await puppeteer.launch();
  const [page] = await browser.pages();
  /* use the page */
})()
  .catch(err => console.error(err))
  .finally(() => browser?.close());
Here, the finally block guarantees the browser will close correctly regardless of whether an error was thrown. Errors are logged (if desired). I like .catch and .finally as chained calls because the mainline Puppeteer code is one level flatter, but this accomplishes the same thing:
const puppeteer = require("puppeteer");

(async () => {
  let browser;
  try {
    browser = await puppeteer.launch();
    const [page] = await browser.pages();
    /* use the page */
  }
  catch (err) {
    console.error(err);
  }
  finally {
    await browser?.close();
  }
})();
There's no reason to call newPage because Puppeteer starts with a page open.
As for Express, you need only place the entire code above, including let browser; and excluding require("puppeteer"), into your route, and you're good to go, although you might want to use an async middleware error handler.
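For concreteness, a sketch of that setup dropped straight into a route (the route path and URL source are assumptions):
app.get("/", (req, res) => {
  let browser;
  (async () => {
    browser = await puppeteer.launch();
    const [page] = await browser.pages();
    await page.goto(req.query.url);
    res.send(await page.evaluate(() => document.body.innerHTML));
  })()
    .catch(err => {
      console.error(err);
      res.sendStatus(500);
    })
    .finally(() => browser?.close());
});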
You ask:
Is there a better way to get the same result other than Puppeteer and headless Chrome?
That depends on what you're doing and what you mean by "better". If your goal is to get document.body.innerHTML and the page content you're interested in is baked into the static HTML, you can dump Puppeteer entirely and just make a request to get the resource, then use Cheerio to extract the desired information.
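As a sketch of that alternative, assuming a static page and Node 18+'s built-in fetch (the getBodyHTML name is mine; the selector mirrors the original goal of grabbing document.body.innerHTML):
const cheerio = require("cheerio");

async function getBodyHTML(url) {
  const response = await fetch(url); // built into Node 18+
  const html = await response.text();
  const $ = cheerio.load(html);
  return $("body").html(); // same result as document.body.innerHTML for static pages
}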
Another consideration is that you may not need to load and close a whole browser per request. If you can use one new page per request, consider the following strategy:
const express = require("express");
const puppeteer = require("puppeteer");
const asyncHandler = fn => (req, res, next) =>
Promise.resolve(fn(req, res, next)).catch(next)
;
const browserReady = puppeteer.launch({
args: ["--no-sandbox", "--disable-setuid-sandbox"]
});
const app = express();
app
.set("port", process.env.PORT || 5000)
.get("/", asyncHandler(async (req, res) => {
const browser = await browserReady;
const page = await browser.newPage();
try {
await page.goto(req.query.url || "http://www.example.com");
return res.send(await page.content());
}
catch (err) {
return res.status(400).send(err.message);
}
finally {
await page.close();
}
}))
.use((err, req, res, next) => res.sendStatus(500))
.listen(app.get("port"), () =>
console.log("listening on port", app.get("port"))
)
;
Finally, make sure never to set any timeouts to 0 (for example, page.setDefaultNavigationTimeout(0);), which introduces the potential for the script to hang forever. If you need a generous timeout, at most set it to a few minutes, long enough not to trigger false positives.
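For example (two minutes here is an arbitrary value, not a recommendation for every site):
// Bounded timeout: slow pages fail loudly instead of hanging the route forever.
page.setDefaultNavigationTimeout(120000); // 2 minutes
await page.goto(url, { waitUntil: "networkidle2" });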
See also:
Parallelism of Puppeteer with Express Router Node JS. How to pass page between routes while maintaining concurrency
Puppeteer unable to run on heroku

Related


How can I launch a single process of puppeteer.launch() and just send pages to it in Node?

The following code runs on every one of my requests, and I'm afraid that it's launching the browser every time and causing server issues on Heroku. I want to launch Puppeteer like a singleton instance, where I launch it only once and after that my requests just trigger browser.newPage(). I'm not experienced enough in JS to resolve this.
(async () => {
  const browser = await puppeteer.launch({ headless: true });
  const page = await browser.newPage();
  page.on('response', interceptedResponse => {
    let status = interceptedResponse.status();
    interceptedResponse.text()
      .then((text) => {
        handleResponse(text);
        browser.close();
      })
      .catch(err => {
        console.error(`interceptedResponse error: ${err}`);
        browser.close();
      });
  });
  await page.goto(url);
})();
You can create a class that handles this for you. It may not be an "official" singleton, but it does what you want.
Check out browser.js:
var puppeteer = require('puppeteer');

class PuppeteerApi {
  browser = null;

  constructor(config) {
    this.config = config;
  }

  setConfig(config) {
    this.config = config;
  }

  async newBrowser() {
    return await puppeteer.launch(this.config);
  }

  async getBrowser() {
    if (!this.browser) {
      this.browser = await this.newBrowser();
    }
    return this.browser;
  }

  async newPage() {
    const browser = await this.getBrowser();
    const page = await browser.newPage();
    return page;
  }

  async handBack(page) {
    // Close the page, or even reuse it?
    await page.close();
    // You could add logic for closing the whole browser instance,
    // depending on what you want.
  }

  async shutdown() {
    await this.browser.close();
  }
}

const config = {
  headless: false
};

const browserApi = new PuppeteerApi(config);
module.exports = browserApi;
// Use it like this. Import it and set the config once:
var browserApi = require('./browser.js');

const config = { headless: true };
browserApi.setConfig(config);

// In a request handler you could do this:
(async () => {
  var page = await browserApi.newPage();
  // do some stuff...
  // In the end, hand the page back, either to close it
  // or maybe to put it in a pool.
  await browserApi.handBack(page);
})();
I do not know how Puppeteer behaves when, for example, 30 pages are open at once. Here is an example that keeps a given number of browser instances and rotates new pages across them:
var puppeteer = require('puppeteer');

class PuppeteerApi {
  browsers = [];
  index = 0;

  constructor(browserLimit, config) {
    this.config = config;
    this.browserLimit = browserLimit;
    if (typeof this.browserLimit !== 'number' || this.browserLimit < 1) {
      throw new Error('browserLimit needs to be at least 1!');
    }
  }

  setConfig(config) {
    this.config = config;
  }

  async newBrowser() {
    return await puppeteer.launch(this.config);
  }

  async getBrowser() {
    if (this.index >= this.browserLimit) {
      this.index = 0;
    }
    if (!this.browsers[this.index]) {
      this.browsers[this.index] = await this.newBrowser();
    }
    // Iterate through the browsers.
    return this.browsers[this.index++];
  }

  async newPage() {
    const browser = await this.getBrowser();
    const page = await browser.newPage();
    return page;
  }

  async handBack(page) {
    await page.close();
  }

  async shutdown() {
    const proms = this.browsers.map(b => b.close());
    await Promise.all(proms);
  }
}

const config = {
  headless: false
};
const limit = 5;

const browserApi = new PuppeteerApi(limit, config);
module.exports = browserApi;
If you prefer a functional style (which is less code), it is easy to adapt. Here is the first example in that style:
var puppeteer = require('puppeteer');

let browser = null;
let config = {
  headless: false
};

const newBrowser = async () => {
  return await puppeteer.launch(config);
};

const setPuppeteerConfig = (_config) => {
  config = _config;
};

const getPage = async () => {
  const browser = await getBrowser();
  return await browser.newPage();
};

const getBrowser = async () => {
  if (!browser) {
    browser = await newBrowser();
  }
  return browser;
};

const handback = async (page) => {
  await page.close();
};

const shutdown = async () => {
  await browser.close();
};

module.exports = { setPuppeteerConfig, shutdown, getPage, handback };

// Usage (in another file):
const { setPuppeteerConfig, shutdown, getPage, handback } = require('./browser');

// Set the config once...
setPuppeteerConfig({ headless: true });

(async () => {
  const page = await getPage();
  // do some stuff...
  await handback(page);
})();
Feel free to leave a comment if anything is not working as intended.

Scraping with puppeteer and returning JSON

I'm trying to create a Node app that takes a URL from the user. The URL is then passed to scrape.js, which uses Puppeteer to scrape certain fields and pass the data back to app.js in JSON format (so that I can then upsert it into a doc). But what I receive is the entire ServerResponse, not the data in JSON format as I'm intending.
I was hoping someone with more experience could shed some light. Here is what I have so far:
// app.js
const scrape = require('./scrape');
const router = express.Router();

router.get('/', (req, res) => {
  const url = req.body.url;
  const item = new Promise((resolve, reject) => {
    scrape
      .scrapeData()
      .then((data) => res.json(data))
      .catch(err => reject('Scraping failed...'));
  });
});
// scrape.js
const puppeteer = require('puppeteer');

const scrapeData = async () => {
  const browser = await puppeteer.launch({ headless: true });
  const page = await browser.newPage();
  await page.setViewport({ width: 360, height: 640 });
  await page.goto(url);

  let scrapedData = await page.evaluate(() => {
    let scrapedDetails = [];
    let elements = document.querySelectorAll('#a-page');
    elements.forEach(element => {
      let detailsJson = {};
      try {
        detailsJson.title = element.querySelector('h1#title').innerText;
        detailsJson.desc = element.querySelector('#description_box').innerText;
      } catch (exception) {}
      scrapedDetails.push(detailsJson);
    });
    return scrapedDetails;
  });
  // console.dir(scrapeData) - logs the data successfully.
};

module.exports.scrapeData = scrapeData;
You have a naming problem. scrape.js exports the scrapeData function; inside that function, you declared a scrapedData variable, which is not the same thing.
Where you put:
// console.dir(scrapedData) - logs the data successfully.
add:
return scrapedData;
That should solve your issue.
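Two more problems remain even after the rename: scrapeData never receives the url it navigates to, and the browser is never closed, which brings back the zombie issue from the top of this page. A sketch of a corrected scrape.js (structure taken from the question):
// scrape.js
const puppeteer = require('puppeteer');

const scrapeData = async (url) => {
  const browser = await puppeteer.launch({ headless: true });
  try {
    const page = await browser.newPage();
    await page.setViewport({ width: 360, height: 640 });
    await page.goto(url);
    const scrapedData = await page.evaluate(() => {
      const scrapedDetails = [];
      document.querySelectorAll('#a-page').forEach(element => {
        const detailsJson = {};
        try {
          detailsJson.title = element.querySelector('h1#title').innerText;
          detailsJson.desc = element.querySelector('#description_box').innerText;
        } catch (exception) {}
        scrapedDetails.push(detailsJson);
      });
      return scrapedDetails;
    });
    return scrapedData; // note the d: the variable, not the function
  } finally {
    await browser.close(); // always close, even if scraping throws
  }
};

module.exports.scrapeData = scrapeData;
The route would then call scrape.scrapeData(url) with the URL it received.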

Using puppeteer on an actual API

I'm sorry for the long question. I'm a newbie at Node, but I have made a CRUD API before with authentication and everything; I just need to understand how to integrate Puppeteer into the API. So let me begin:
This is my project structure:
puppeteer-api
  controllers/
    puppeteer.controller.js
  routes/
    puppeteer.routes.js
  index.js
This is my index.js file:
const puppeteer = require('puppeteer');
const express = require('express');
const booking = require('./routes/puppeteer.routes');

const app = express();
app.use('/booking', booking);

let port = 8080;
app.listen(port, () => {
  console.log('Server is running on http://localhost:8080/');
});
puppeteer.routes.js:
const express = require('express');
const router = express.Router();
const puppeteer_controller = require('../controllers/puppeteer.controller');
router.get('/', puppeteer_controller.get_booking);
module.exports = router;
puppeteer.controller.js:
const puppeteer = require('puppeteer');

exports.get_booking = (req, res, next) => {
  res.json = (async function main() {
    try {
      const browser = await puppeteer.launch({ headless: true });
      const page = await browser.newPage();
      await page.goto('https://www.booking.com/searchresults.es-ar.html?label=gen173nr-1DCAEoggI46AdIM1gEaAyIAQGYASy4ARfIAQzYAQPoAQGIAgGoAgM&lang=es-ar&sid=bc11c3e819d105b3c501d0c7a501c718&sb=1&src=index&src_elem=sb&error_url=https%3A%2F%2Fwww.booking.com%2Findex.es-ar.html%3Flabel%3Dgen173nr-1DCAEoggI46AdIM1gEaAyIAQGYASy4ARfIAQzYAQPoAQGIAgGoAgM%3Bsid%3Dbc11c3e819d105b3c501d0c7a501c718%3Bsb_price_type%3Dtotal%26%3B&ss=El+Bols%C3%B3n%2C+R%C3%ADo+Negro%2C+Argentina&is_ski_area=&checkin_year=&checkin_month=&checkout_year=&checkout_month=&no_rooms=1&group_adults=2&group_children=0&b_h4u_keep_filters=&from_sf=1&ss_raw=el+bols&ac_position=0&ac_langcode=es&ac_click_type=b&dest_id=-985282&dest_type=city&place_id_lat=-41.964452&place_id_lon=-71.532732&search_pageview_id=06d48fb6823e00e9&search_selected=true&search_pageview_id=06d48fb6823e00e9&ac_suggestion_list_length=5&ac_suggestion_theme_list_length=0');
      await page.waitForSelector('.sr_item');
      page.on('console', consoleObj => console.log(consoleObj.text()));
      console.log('Retrieving hotels data');
      const hoteles = page.evaluate(() => {
        let hoteles = [];
        let x = document.getElementsByClassName('sr_item');
        hoteles.push(x);
        let navigation = document.getElementsByClassName('sr_pagination_item');
        for (const nav of navigation) {
          nav.click();
          hoteles.push(document.getElementsByClassName('sr_item'));
        }
        console.log('Finished looping through');
        return hoteles;
      });
    } catch (e) {
      console.log('error', e);
    }
  })();
};
So, what I want is to be able to send a GET request from my app and get a response from my API with a list of hotels from Booking; it's just a personal project. The thing is, when I send the GET request with Postman, I get no response at all, so I'm wondering what I'm doing wrong and what direction to follow. If anyone could point me in the right direction, I would be so grateful.
The (async function main() { ... })() block executes immediately and evaluates to a Promise; assigning that to res.json clobbers the res.json function, so no response is ever sent.
res.json is a function; you should not reassign it.
Instead, move the function somewhere else and call it like below:
async function scraper() {
  try {
    const browser = await puppeteer.launch({ headless: true });
    const page = await browser.newPage();
    await page.goto('https://www.booking.com/searchresults.es-ar.html?label=gen173nr-1DCAEoggI46AdIM1gEaAyIAQGYASy4ARfIAQzYAQPoAQGIAgGoAgM&lang=es-ar&sid=bc11c3e819d105b3c501d0c7a501c718&sb=1&src=index&src_elem=sb&error_url=https%3A%2F%2Fwww.booking.com%2Findex.es-ar.html%3Flabel%3Dgen173nr-1DCAEoggI46AdIM1gEaAyIAQGYASy4ARfIAQzYAQPoAQGIAgGoAgM%3Bsid%3Dbc11c3e819d105b3c501d0c7a501c718%3Bsb_price_type%3Dtotal%26%3B&ss=El+Bols%C3%B3n%2C+R%C3%ADo+Negro%2C+Argentina&is_ski_area=&checkin_year=&checkin_month=&checkout_year=&checkout_month=&no_rooms=1&group_adults=2&group_children=0&b_h4u_keep_filters=&from_sf=1&ss_raw=el+bols&ac_position=0&ac_langcode=es&ac_click_type=b&dest_id=-985282&dest_type=city&place_id_lat=-41.964452&place_id_lon=-71.532732&search_pageview_id=06d48fb6823e00e9&search_selected=true&search_pageview_id=06d48fb6823e00e9&ac_suggestion_list_length=5&ac_suggestion_theme_list_length=0');
    await page.waitForSelector('.sr_item');
    page.on('console', consoleObj => console.log(consoleObj.text()));
    console.log('Retrieving hotels data');
    const hoteles = await page.evaluate(() => {
      let hoteles = [];
      let x = document.getElementsByClassName('sr_item');
      hoteles.push(x);
      let navigation = document.getElementsByClassName('sr_pagination_item');
      for (const nav of navigation) {
        nav.click();
        hoteles.push(document.getElementsByClassName('sr_item'));
      }
      console.log('Finished looping through');
      return hoteles;
    });
    await browser.close(); // don't leave zombie processes behind
    return hoteles;
  } catch (e) {
    console.log('error', e);
  }
}

// Call the scraper
exports.get_booking = async (req, res, next) => {
  const scraperData = await scraper();
  res.json(scraperData);
};
This makes the controller an async function that awaits the scraper and responds with the JSON data.
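One caveat worth adding: page.evaluate can only return serializable values, and getElementsByClassName yields live DOM collections that come back as empty objects. A sketch that extracts plain text instead (class name taken from the question):
const hoteles = await page.evaluate(() => {
  // Map DOM nodes to plain strings so they survive serialization.
  return Array.from(document.getElementsByClassName('sr_item'), item => item.innerText);
});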

How to print an HTML document using Puppeteer?

Recently I started to crawl the web using Puppeteer. Below is code for extracting specific product names from a shopping mall.
const puppeteer = require('puppeteer');

(async () => {
  const width = 1600, height = 1040;
  const option = { headless: false, slowMo: true, args: [`--window-size=${width},${height}`] };
  const browser = await puppeteer.launch(option);
  const page = await browser.newPage();

  const vp = { width: width, height: height };
  await page.setViewport(vp);

  const navigationPromise = page.waitForNavigation();
  await page.goto('https://shopping.naver.com/home/p/index.nhn');
  await navigationPromise;
  await page.waitFor(2000);

  const textBoxId = 'co_srh_input';
  await page.type('.' + textBoxId, '양말', { delay: 100 }); // '양말' means 'socks'
  await page.keyboard.press('Enter');
  await page.waitFor(5000);
  await page.waitForSelector('div.info > a.tit');

  const stores = await page.evaluate(() => {
    const links = Array.from(document.querySelectorAll('div.info > a.tit'));
    return links.map(link => link.innerText).slice(0, 10); // take only the first 10 products
  });
  console.log(stores);

  await browser.close();
})();
I have a question. How can I output the crawled results to an HTML document (without using a database)? Please use sample code to explain it.
I used what I saw on blog.kowalczyk.info:
const puppeteer = require("puppeteer");
const fs = require("fs");

async function run() {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.goto("https://www.google.com/", { waitUntil: "networkidle2" });
  // hacky defensive move but I don't know a better way:
  // wait a bit so that the browser finishes executing JavaScript
  await page.waitFor(1 * 1000);
  const html = await page.content();
  fs.writeFileSync("index.html", html);
  await browser.close();
}

run();
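As a side note, newer Puppeteer versions provide page.waitForNetworkIdle() as a less hacky replacement for the fixed one-second wait (check that your version has it):
await page.goto("https://www.google.com/", { waitUntil: "networkidle2" });
await page.waitForNetworkIdle(); // resolves once the network has been quiet
const html = await page.content();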
fs.writeFile()
You can use the following write_file function, which returns a Promise that resolves with true when fs.writeFile() succeeds and with false when it fails.
Then, you can await the Promise from within your anonymous, asynchronous function and check whether or not the data was written to the file:
'use strict';

const fs = require('fs');
const puppeteer = require('puppeteer');

const write_file = (file, data) => new Promise(resolve => {
  fs.writeFile(file, data, 'utf8', error => {
    if (error) {
      console.error(error);
      resolve(false); // resolve with false so the caller's === false check works
    } else {
      resolve(true);
    }
  });
});

(async () => {
  // ...
  const stores = await page.evaluate(() => {
    return Array.from(document.querySelectorAll('div.info > a.tit'), link => link.innerText).slice(0, 10); // take only the first 10 products
  });

  if (await write_file('example.html', stores.toString()) === false) {
    console.error('Error: Unable to write stores to example.html.');
  }
  // ...
})();
