I am trying to scrape data from the website immobilienscout24.de using Puppeteer. I think it is necessary to keep session data to navigate different pages on the site. Below is my code; sometimes some pages do not load, and my requests are detected as robot requests.
Please look at the code and help me with session management when web scraping using Puppeteer.
const puppeteer = require('puppeteer-extra')
const storage = require('node-persist');
const StealthPlugin = require('puppeteer-extra-plugin-stealth')
puppeteer.use(StealthPlugin())
const cheerio = require('cheerio')
const pretty = require("pretty");
puppeteer.launch({
headless: false,
args: ["--disable-setuid-sandbox"],
'ignoreHTTPSErrors': true,
executablePath: '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
userDataDir: '/Users/username/Library/Application Support/Google/Chrome/Default'
}).then(async browser => {
const page = await browser.newPage()
const baseURL = 'https://www.immobilienscout24.de'
for(var p=1; p <= 10; p++) {
await page.goto("https://www.immobilienscout24.de/Suche/de/neubauwohnung-mieten?pagenumber="+p,{
waitUntil: "load"
})
const client = await page.target().createCDPSession();
const cookies = (await client.send('Network.getAllCookies')).cookies;
await page.setCookie(...cookies);
const localStorage = await page.evaluate(() => Object.assign({}, window.localStorage))
const html = await page.content();
const $ = cheerio.load(html);
const tiles = $('.result-list__listing');
tiles.map( async (i, item) => {
let link = $(item).find('a.result-list-entry__brand-title-container').attr('href');
if (link.includes("expose")) {
link = baseURL+link
}
console.log(link)
});
await page.waitForTimeout(10000)
}
await browser.close()
})
You're making 10 requests at the same time because you're using a traditional loop:
for(var p=1; p <= 10; p++)
So the website probably has something like a rate limit to prevent DDoS attacks; that's why you're detected as a bot.
With ES6, you can make the 10 requests in sequence like this:
for (let p of [...Array(10).keys()]) {
  // execute your request here
}
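If the site still flags you, persisting the session cookies between runs may also help. Here is a minimal sketch using the node-persist package the question already imports (the storage key name is an arbitrary choice):
const storage = require('node-persist');

// Save the current session's cookies so the next run can reuse them.
async function saveCookies(page) {
  await storage.init();
  await storage.setItem('cookies', await page.cookies());
}

// Restore cookies saved by a previous run, if any.
async function loadCookies(page) {
  await storage.init();
  const cookies = await storage.getItem('cookies');
  if (cookies) await page.setCookie(...cookies);
}
Call loadCookies(page) once before the loop and saveCookies(page) after it, so the session survives restarts.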
Hope it helps!
I've provided the code below; can you tell me why I would get this error? I am trying to web-scrape some information from one website to put on a website I am creating (I already have permission to do so). The information I am trying to scrape is the name of the event, the time of the event, the location of the event, and the description of the event... I saw a tutorial on YouTube, but for some reason I get this error running mine.
async function scrapeProduct(url){
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  // goto must be awaited, otherwise the XPath queries below
  // can run before the page has loaded
  await page.goto(url);
  const [el] = await page.$x('//*[@id="calendar-events-day"]/ul/li[1]/h3/a');
  const txt = await el.getProperty('textContent')
  const rawTxt = await txt.jsonValue();
  const [el1] = await page.$x('//*[@id="calendar-events-day"]/ul/li[1]/time[1]/span[2]');
  const txt1 = await el1.getProperty('textContent')
  const rawTxt1 = await txt1.jsonValue();
  console.log({rawTxt, rawTxt1});
  await browser.close();
}
scrapeProduct('https://events.ucf.edu');
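One likely cause of an error here, for what it's worth: without await on page.goto, the XPath query can run before the page has loaded, so el is undefined and el.getProperty throws. A more defensive pattern (a sketch, not the tutorial's exact code) waits for the element explicitly:
// Wait for the page, then for the element, before querying it.
await page.goto(url, { waitUntil: 'domcontentloaded' });
await page.waitForXPath('//*[@id="calendar-events-day"]/ul/li[1]/h3/a');
const [el] = await page.$x('//*[@id="calendar-events-day"]/ul/li[1]/h3/a');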
I'm doing some scraping after receiving HTML from an API. I'd like to do the following:
1. Open the HTML page in Chrome so I can find selectors in the console.
2. Immediately load the same HTML page into a jsdom instance.
3. Drop into the REPL - I can then find the right selectors in the console and test them out in a live jsdom environment to see if they work.
For 1, I have:
async function openHtml(htmlString) {
const browser = await puppeteer.launch({headless: false});
const page = await browser.newPage();
await page.setContent(htmlString);
return;
// await browser.close();
}
The code provided with the API is:
var req = http.request(options, function (res) {
var chunks = [];
res.on("data", function (chunk) {
chunks.push(chunk);
});
res.on("end", function () {
var body = Buffer.concat(chunks);
response = JSON.parse(body); //response.content = html, response.cookies = cookies
const dom = new JSDOM(response.content);
console.log(dom.window.document.querySelector("p").textContent); // "Hello world"
openHtml(response.content);
console.log('hi');
});
});
req.end();
If I run the code at the command line, the browser opens as expected. However, if I set a breakpoint at:
console.log('hi');
It does not. How can I get this working?
openHtml is an async function, so you'll have to await the call and make the calling function async as well.
var req = http.request(options, function (res) {
var chunks = []
res.on('data', function (chunk) {
chunks.push(chunk)
})
res.on('end', async function () {
var body = Buffer.concat(chunks)
response = JSON.parse(body) //response.content = html, response.cookies = cookies
const dom = new JSDOM(response.content)
console.log(dom.window.document.querySelector('p').textContent) // 'Hello world'
await openHtml(response.content)
console.log('hi')
})
})
req.end()
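For step 3, dropping into a REPL with the jsdom instance in scope, Node's built-in repl module can be used. A minimal sketch (the context property names are arbitrary):
const repl = require('repl');

// Start an interactive prompt and expose the jsdom objects on its context,
// so selectors can be tested live, e.g. > document.querySelector('p').textContent
function startRepl(dom) {
  const r = repl.start('> ');
  r.context.dom = dom;
  r.context.document = dom.window.document;
}
Calling startRepl(dom) at the end of the 'end' handler keeps the process alive at an interactive prompt instead of exiting.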
I am very new to Puppeteer (I started today). I have some code that is working the way I want it to, except for an issue that I think makes it extremely inefficient. I have a function that steps through potentially thousands of URLs with incremental IDs to pull the name, position, and stats of each player, and then inserts that data into a neDB database. Here is my code:
const puppeteer = require('puppeteer');
const Datastore = require('nedb');
const database = new Datastore('database.db');
database.loadDatabase();
async function scrapeProduct(url, id){
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.goto(url);
  let attributes = [];
  const [name] = await page.$x('//*[@id="ctl00_ctl00_ctl00_Main_Main_name"]');
  const txt = await name.getProperty('innerText');
  const playerName = await txt.jsonValue();
  attributes.push(playerName);
  // Make sure that there is a legitimate player profile before trying to pull a bunch of 'undefined' information.
  if(playerName){
    const [role] = await page.$x('//*[@id="ctl00_ctl00_ctl00_Main_Main_position"]');
    const roleTxt = await role.getProperty('innerText');
    const playerRole = await roleTxt.jsonValue();
    attributes.push(playerRole);
    // Loop through the 12 attributes and pull their values.
    for(let i = 1; i < 13; i++){
      let vLink = '//*[@id="ctl00_ctl00_ctl00_Main_Main_SectionTabBox"]/div/div/div/div[1]/table/tbody/tr[' + i + ']/td[2]';
      const [e1] = await page.$x(vLink);
      const val = await e1.getProperty('innerText');
      const skillVal = await val.jsonValue();
      attributes.push(skillVal);
    }
    // Create a player profile to be pushed into the database. (I realize this is very wordy and ugly code)
    let player = {
      Name: attributes[0],
      Role: attributes[1],
      Athleticism: attributes[2],
      Speed: attributes[3],
      Durability: attributes[4],
      Work_Ethic: attributes[5],
      Stamina: attributes[6],
      Strength: attributes[7],
      Blocking: attributes[8],
      Tackling: attributes[9],
      Hands: attributes[10],
      Game_Instinct: attributes[11],
      Elusiveness: attributes[12],
      Technique: attributes[13],
      _id: id,
    };
    database.insert(player);
    console.log('player #' + id + " scraped.");
    await browser.close();
  } else {
    console.log("Blank profile");
    await browser.close();
  }
}
// Making sure the first URL is scraped before moving on to the next URL. (I removed the URL because it's unreasonably long and is not important for this part.)
(async () => {
  for(let i = 0; i <= 1000; i++){
    let link = 'https://url.com/Ratings.aspx?rid=' + i + '&section=Ratings';
    await scrapeProduct(link, i);
  }
})();
What I think makes this so inefficient is that every time scrapeProduct() is called, I create a new browser and a new page. Instead, I believe it would be more efficient to create one browser and one page and just change the page's URL with
await page.goto(url)
I believe that in order to accomplish this, I need to move:
const browser = await puppeteer.launch();
const page = await browser.newPage();
outside of my scrapeProduct() function, but I cannot seem to get this to work. Any time I try, I get an error in my function saying that page is not defined. I am very new to Puppeteer (started today); I would appreciate any guidance on how to accomplish this. Thank you very much!
TL;DR
How do I create one Browser instance and one Page instance that a function can use repeatedly, changing only the URL passed to await page.goto(url)?
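The minimal version of what's being asked, as a sketch: create the browser and page once, and pass the page into the function as a parameter so it stays in scope (the URL is the placeholder from the question, and the scraping body is elided):
const puppeteer = require('puppeteer');

// One browser, one page; only the URL changes per call.
async function scrapeProduct(page, url, id) {
  await page.goto(url);
  // ...pull the name, role, and attributes from `page` as before...
}

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  for (let i = 0; i <= 1000; i++) {
    await scrapeProduct(page, 'https://url.com/Ratings.aspx?rid=' + i + '&section=Ratings', i);
  }
  await browser.close();
})();
The answers below expand on this idea with tab pooling and a reusable class.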
About a year ago I tried to make a React Native Pokémon GO helper app. Since there wasn't an API for Pokémon nests and PokéStops, I created a server that scraped thesilphroad.com, and I found the need to implement something like @Arkan said.
I wanted the server to be able to take multiple requests, so I decided to initialize the browser when the server is booted up. When a request is received, the server checks whether MAX_TABS has been reached. If so, it waits; if not, a new tab is opened and the scrape is performed.
Here's the scraper.js
const puppeteer = require('puppeteer')
const fs = require('fs')
const Page = require('./Page')
const exec = require('child_process').exec
const execPromise = require('util').promisify(exec)

module.exports = class scraper {
  constructor(){
    this.browser = null
    this.getPages = null
    this.getTotalPages = null
    this.isRunning = false
    // browser permissions
    this.permissions = ['geolocation']
    this.MAX_TABS = 5
    // when puppeteer launches
    this.useFirstTab = true
  }
  async init(config = {}){
    let headless = config.headless != undefined ? config.headless : true
    this.permissions = this.permissions.concat(config.permissions || [])
    // get local browser location
    let browserPath = await getBrowserPath('firefox') || await getBrowserPath('chrome')
    this.browser = await puppeteer.launch({
      headless: headless,
      executablePath: browserPath,
      defaultViewport: null,
      args: [
        '--start-maximized',
      ]
    })
    // keep `this` bound to the browser when listing pages
    this.getPages = () => this.browser.pages()
    this.getTotalPages = () => {
      return this.getPages().then(pages => pages.length).catch(err => 0)
    }
    this.isRunning = true
  }
  async waitForTab(){
    let time = Date.now()
    let cycles = 1
    await new Promise(resolve => {
      let interval = setInterval(async () => {
        let totalPages = await this.getTotalPages()
        if(totalPages < this.MAX_TABS){
          clearInterval(interval)
          resolve()
        }
        if(Date.now() - time > 100)
          console.log('Waiting...')
        if(Date.now() - time > 20 * 1000){
          console.log('... ...\n'.repeat(cycles) + 'Still waiting...')
          cycles++
          time = Date.now()
        }
      }, 500)
    })
  }
  // open new tab and go to page
  async openPage(url, waitSelector, lat, long){
    await this.waitForTab()
    let pg
    // puppeteer launches with a blank tab, use this
    // if(this.useFirstTab){
    //   let pages = await this.browser.pages()
    //   pg = pages.pop()
    //   this.useFirstTab = false
    // }
    // else
    pg = await this.browser.newPage()
    if(lat && long){
      await this.setPermissions(url)
    }
    let page = new Page()
    await page.init(pg, url, waitSelector, lat, long)
    return page
  }
  async setPermissions(url){
    const context = this.browser.defaultBrowserContext();
    await context.overridePermissions(url, this.permissions)
  }
}

// assumes that the browser is in PATH
async function getBrowserPath(browserName){
  // look up the requested browser instead of a hard-coded binary
  return execPromise('command -v ' + browserName).then(({stdout, stderr}) => {
    if(stdout.includes('not found'))
      return null
    return stdout.trim()
  }).catch(err => null)
}
The scraper imports Page.js, which is just a wrapper for a puppeteer Page object, with the functions I used most made available:
const path = require('path')
const fs = require('fs')
const userAgents = require('./staticData/userAgents.json')
const cookiesPath = path.normalize('./cookies.json')

// a wrapper for a puppeteer page with pre-made functions
module.exports = class Page {
  constructor(useCookies = false){
    this.page = null
    this.useCookies = useCookies
    this.previousSession = this.useCookies && fs.existsSync(cookiesPath)
  }
  async close(){
    await this.page.close()
  }
  async init(page, url, waitSelector, lat, long){
    this.page = page
    let userAgent = userAgents[Math.floor(Math.random() * userAgents.length)]
    await this.page.setUserAgent(userAgent)
    await this.restoredSession()
    if(lat && long)
      await this.page.setGeolocation({
        latitude: lat || 59.95, longitude: long || 30.31667, accuracy: 40
      })
    await this.page.goto(url)
    await this.wait(waitSelector)
  }
  async screenshotElement(selector = 'body', directory = './screenshots', padding = 0, offset = {}){
    const rect = await this.page.evaluate(selector => {
      const el = document.querySelector(selector)
      const {x, y, width, height} = el.getBoundingClientRect()
      return {
        left: x,
        top: y,
        width,
        height,
        id: el.id
      }
    }, selector)
    let ext = 'jpeg'
    let filename = path.normalize(directory + '/' + Date.now())
    return await this.page.screenshot({
      type: ext,
      path: filename + ' - ' + selector.substring(5) + '.' + ext,
      clip: {
        x: rect.left - padding + (offset.left || 0),
        y: rect.top - padding + (offset.top || 0),
        width: rect.width + padding * 2 + (offset.width || 0),
        height: rect.height + padding * 2 + (offset.height || 0)
      },
      encoding: 'base64'
    })
  }
  async restoredSession(){
    if(!this.previousSession)
      return false
    let cookies = require(cookiesPath)
    for(let cookie of cookies){
      await this.page.setCookie(cookie)
    }
    console.log('Loaded previous session')
    return true
  }
  async saveSession(){
    // write cookies to file
    if(!this.useCookies)
      return
    const cookies = await this.page.cookies()
    fs.writeFileSync(cookiesPath, JSON.stringify(cookies, null, 2))
    console.log('Wrote cookies to file')
  }
  // wait for a text input element and type text
  async type(selector, text, options = {delay: 150}){
    await this.wait(selector)
    await this.page.type(selector, text, options)
  }
  // click and wait
  async click(clickSelector, waitSelector = 500){
    await this.page.click(clickSelector)
    await this.wait(waitSelector)
  }
  // hover over element and wait
  async hover(selector, waitSelector = 500){
    await this.page.hover(selector)
    await this.wait(1000)
    await this.wait(waitSelector)
  }
  // waits and suppresses timeout errors
  async wait(selector = 500, waitForNav = false){
    try {
      // waitForNav uses puppeteer's waitForNavigation function,
      // which for me does nothing but time out after 30s
      waitForNav && await this.page.waitForNavigation()
      await this.page.waitFor(selector)
    } catch (err){
      // print everything but timeout errors
      if(err.name != 'TimeoutError'){
        console.log('error name:', err.name)
        console.log(err)
        console.log('- - - '.repeat(4))
      }
      this.close()
    }
  }
}
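A usage sketch for the two classes above (the require path, URL, and selector are assumptions):
const Scraper = require('./scraper');

(async () => {
  const scraper = new Scraper();
  await scraper.init({ headless: true });
  // openPage waits for a free tab slot (MAX_TABS), then returns the Page wrapper
  const page = await scraper.openPage('https://thesilphroad.com', '.map', 59.95, 30.31667);
  const shot = await page.screenshotElement('body');
  await page.close();
})();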
To achieve this, you'll just need to separate the browser from your requests, for example in a class:
class PuppeteerScraper {
  async launch(options = {}) {
    this.browser = await puppeteer.launch(options);
    // you could reuse the page instance if it was defined here
  }
  /**
   * Pass the address and the function that will scrape your data,
   * in order to maintain the page inside this object
   */
  async goto(url, callback) {
    const page = await this.browser.newPage();
    await page.goto(url);
    /** evaluate its content */
    await callback(page);
    await page.close();
  }
  async close() {
    await this.browser.close();
  }
}
and, to implement it:
/**
 * scrape function, takes the page instance as its parameter
 */
async function evaluate_page(page) {
  const titles = await page.$$eval('.col-xs-6 .star-rating ~ h3 a', (items) => {
    const text_titles = [];
    for (const item of items) {
      if (item && item.textContent) {
        text_titles.push(item.textContent);
      }
    }
    return text_titles;
  });
  console.log('titles', titles);
}

(async () => {
  const scraper = new PuppeteerScraper();
  await scraper.launch({ headless: false });
  for (let i = 1; i <= 6; i++) {
    let link = `https://books.toscrape.com/catalogue/page-${i}.html`;
    await scraper.goto(link, evaluate_page);
  }
  await scraper.close();
})();
Although, if you want something more complex, you could take a look at how it's done in the Apify project.
I am trying to iterate over unique YouTube video links to take a screenshot of each.
After debugging, I noticed that for the for loop below, JS runs the two iterations concurrently, one for each index i: the proccessALink() call in the second iteration seems to start before proccessALink() in the first iteration has fully ended.
Why is this happening? I thought using async/await stops this from happening.
The for loop is inside an async function. The code below is just a snippet from the original source code.
for(let i = 0; i<2; i++){
var link = linksArr[i];
var label = labelsArr[i];
await proccessALink(link, label)
}
Function definition for proccessALink():
var proccessALink = async (link, label) => {
  // set download path
  var downloadPath = 'data/train/' + label;
  // parse the url
  var urlToScreenshot = parseUrl(link)
  // Given a URL it will take a screen shot
  if (validUrl.isWebUri(urlToScreenshot)) {
    // console.log('Screenshotting: ' + urlToScreenshot + '&t=' + req.query.t)
    console.log('Screenshotting: ' + link)
    // note: this async IIFE is never awaited, so proccessALink resolves
    // before the screenshot work inside it has finished
    ;(async () => {
      // Logic to login to youtube below
      // await login();
      // go to the url and wait till all the content is loaded.
      await page.goto(link, {
        waitUntil: 'networkidle'
        // waitUntil: 'domcontentloaded'
      })
      // await page.waitForNavigation();
      // Find the video player in the page
      const video = await page.$('.html5-video-player')
      await page.content();
      // Run some command on consoleDev
      await page.evaluate(() => {
        // Hide youtube player controls.
        let dom = document.querySelector('.ytp-chrome-bottom')
        if (dom != null) {
          dom.style.display = 'none'
        }
      })
      await video.screenshot({ path: downloadPath });
    })()
  } else {
    res.send('Invalid url: ' + urlToScreenshot)
  }
}
Remove the IIFE inside proccessALink() and it should resolve the issue of running multiple screenshots at the same time.
const proccessALink = async(link, label) => {
//set download path
const downloadPath = 'data/train/' + label;
//parse the url
const urlToScreenshot = parseUrl(link)
//Give a URL it will take a screen shot
if (validUrl.isWebUri(urlToScreenshot)) {
// console.log('Screenshotting: ' + urlToScreenshot + '&t=' + req.query.t)
console.log('Screenshotting: ' + link);
//Logic to login to youtube below
//await login();
//go to the url and wait till all the content is loaded.
await page.goto(link, {
waitUntil: 'networkidle'
//waitUntil: 'domcontentloaded'
})
//await page.waitForNavigation();
//Find the video player in the page
const video = await page.$('.html5-video-player')
await page.content();
//Run some command on consoleDev
await page.evaluate(() => {
// Hide youtube player controls.
let dom = document.querySelector('.ytp-chrome-bottom')
if (dom != null) {
dom.style.display = 'none'
}
})
await video.screenshot({
path: downloadPath
});
} else {
res.send('Invalid url: ' + urlToScreenshot)
}
}
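For reference, a minimal standalone demonstration of the pitfall (no Puppeteer involved): the async IIFE returns a promise that nothing awaits, so the caller resolves immediately and the loop moves on.
const task = async (n) => {
  (async () => { // not awaited: task() resolves before this finishes
    await new Promise((resolve) => setTimeout(resolve, 100));
    console.log('finished', n);
  })();
};

(async () => {
  for (const n of [1, 2]) await task(n); // both iterations overlap
})();
Awaiting the IIFE, or inlining its body as in the fixed code above, restores sequential execution.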