Web scraping a stream chat in real-time (puppeteer.js) - javascript

I want to get the chat from a stream in real time with web scraping.
Attempting to create a while loop inside the .then() callback of Puppeteer doesn't seem to be effective, and in some implementations it breaks everything altogether.
I am able to get the initial scrape to happen, but in every case the program ends and never follows the while loop I implemented.
Working code WITHOUT while loop
const puppeteer = require ('puppeteer');
//initiating Puppeteer
puppeteer
.launch ()
.then (async browser => {
//opening a new page and navigating to the live stream
const page = await browser.newPage ();
await page.goto ('https://www.younow.com/Ken_Nara24');
await page.waitForSelector ('body');
//manipulating the page's content
let getComments = await page.evaluate (() => {
let comments = document.body.querySelectorAll ('.comment');
let scrapeItems = [];
comments.forEach (item => {
let commentAuthor = item.querySelector ('div.user-card__header.mini-profile-launcher').innerText;
let commentContent = '';
try {
commentContent = item.querySelector ('div.user-card__body.ng-star-inserted').innerText;
} catch (err) {}
scrapeItems.push ({
commentAuthor: commentAuthor,
commentContent: commentContent,
});
});
let items = {
"userComments": scrapeItems,
};
return items;
});
//outputting the scraped data
console.log (getComments);
//closing the browser
await browser.close ();
})
//handling any errors
.catch (function (err) {
console.error (err);
});
All attempts to get that logic looping have been for naught. I cannot find a way or past issue/example that clearly defines how or if such a thing could be done. I have made a few attempts to implement it myself, but nothing has even compiled correctly.
Am I missing something significant here? I just want to essentially listen to a web page and every 3-5 seconds re-scrape it.

If you still need help, you could give this approach a try.
const puppeteer = require("puppeteer");
let pageScraping = false; /* set scraping to false */
const scraper = async () => {
if (pageScraping == true) return; /* check if already scraping page */
let browser, page;
let pageUrl = 'https://www.younow.com/Ken_Nara24';
try {
pageScraping = true; /* set scraping to true */
browser = await puppeteer.launch({ headless: true });
page = await browser.newPage();
await page.goto(pageUrl, { waitUntil: 'domcontentloaded', timeout: 60000 });
/* wait for chat to be visible */
await page.waitForSelector('.chat', { visible: true, timeout: 60000 });
let getComments = await page.evaluate(() => {
let scrapeComments = [];
let comments = document.querySelectorAll('.comment');
comments.forEach(comment => {
let commentContent = '';
let commentAuthor = comment.querySelector('div[class="user-card__header mini-profile-launcher"]').innerText;
commentContent = comment.querySelector('div[class="user-card__body ng-star-inserted"]').innerText;
scrapeComments.push({
'commentAuthor': commentAuthor,
'commentContent': commentContent,
});
});
return { 'userComments': scrapeComments };
});
console.log(getComments); /* log comments (already awaited above) */
} catch (err) {
console.log(err.message);
} finally {
if (browser) { /* check if browser is open before trying to close */
await browser.close();
console.log('closing browser');
}
pageScraping = false; /* set scraping to false again */
setTimeout(scraper, 5000); /* schedule the next scrape in 5 seconds */
}
}
setTimeout(scraper, 5000); /* start scraping */
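If relaunching the browser on every pass turns out to be too heavy, a lighter variant (just a sketch, reusing the URL and selectors from the question, which may of course change on the site's side) is to keep one browser and page open and re-run page.evaluate in a loop:
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch({ headless: true });
  const page = await browser.newPage();
  await page.goto('https://www.younow.com/Ken_Nara24', { waitUntil: 'domcontentloaded' });
  await page.waitForSelector('.chat', { visible: true, timeout: 60000 });

  // re-scrape the already-open page every 5 seconds
  while (true) {
    const comments = await page.evaluate(() =>
      Array.from(document.querySelectorAll('.comment'), comment => ({
        commentAuthor: comment.querySelector('.user-card__header.mini-profile-launcher')?.innerText ?? '',
        commentContent: comment.querySelector('.user-card__body.ng-star-inserted')?.innerText ?? '',
      }))
    );
    console.log({ userComments: comments });
    await new Promise(resolve => setTimeout(resolve, 5000)); // pause before the next pass
  }
})();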

Related

Cheerio selector after page loaded

I want to scrape the URL value of an iframe on this website: https://lk21online.digital/nonton-profile-2021-subtitle-indonesia/
When I search for the iframe in the page source it isn't there, so I think the iframe is loaded by JavaScript after the page loads.
Or is my selector wrong?
Please help me check my selector, or tell me what I need to do in my code.
Sorry for my poor English.
Here is my code:
async function getDetail(res, url) {
try {
const html = await scraping(res, url)
const $ = cheerio.load(html)
const article = $('#site-container #content .gmr-maincontent #primary #main .gmr-box-content #muvipro_player_content_id #player1-tab-content')
let result = []
setTimeout(() => {
article.each(function () {
const title = $(this).find('.item-article h2').text()
const watch = $(this).find('iframe').attr('src')
result.push({
title,
watch,
})
})
res.json({ result })
}, 5000)
}
catch (err) {
console.log(err)
}
}
This is the video iframe.
You can't use cheerio for this. Cheerio is not dynamic; it just parses whatever HTML comes back from the request.
Looking at your web page, most of the content is loaded asynchronously, so the initial HTML will be pretty empty.
In addition, the video source is lazily loaded when it enters the browser viewport. So you have to use an actual headless browser to accomplish the task. Here's an example:
// iframeUrl.js
const puppeteer = require("puppeteer");
(async () => {
const browser = await puppeteer.launch();
const page = await browser.newPage();
// Goto page
await page.goto("https://lk21online.digital/nonton-profile-2021-subtitle-indonesia/");
// Scroll down
await page.evaluate(() => window.scrollBy(0, 1000));
// Wait a bit
await new Promise((resolve) => setTimeout(resolve, 5000));
// Get the src of the iframe
const iframeUrl = await page.evaluate(`$("#player1-tab-content iframe").attr("src")`);
console.log(iframeUrl);
await browser.close();
process.exit(0);
})();
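The string passed to page.evaluate above relies on jQuery ($) being available on the page. If it ever isn't, a plain-DOM variant (a sketch assuming the same #player1-tab-content selector) avoids that dependency:
// Read the iframe's src without depending on jQuery
const iframeUrl = await page.$eval('#player1-tab-content iframe', el => el.src);
console.log(iframeUrl);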

Initializing a Puppeteer Browser Outside of Scraping Function

I am very new to puppeteer (I started today). I have some code that is working the way that I want it to except for an issue that I think is making it extremely inefficient. I have a function that links me through potentially thousands of urls that have incremental IDs to pull the name, position, and stats of each player and then inserts that data into a neDB database. Here is my code:
const puppeteer = require('puppeteer');
const Datastore = require('nedb');
const database = new Datastore('database.db');
database.loadDatabase();
async function scrapeProduct(url, id){
const browser = await puppeteer.launch();
const page = await browser.newPage();
await page.goto(url);
let attributes = [];
const [name] = await page.$x('//*[@id="ctl00_ctl00_ctl00_Main_Main_name"]');
const txt = await name.getProperty('innerText');
const playerName = await txt.jsonValue();
attributes.push(playerName);
//Make sure that there is a legitimate player profile before trying to pull a bunch of 'undefined' information.
if(playerName){
const [role] = await page.$x('//*[@id="ctl00_ctl00_ctl00_Main_Main_position"]');
const roleTxt = await role.getProperty('innerText');
const playerRole = await roleTxt.jsonValue();
attributes.push(playerRole);
//Loop through the 12 attributes and pull their values.
for(let i = 1; i < 13; i++){
let vLink = '//*[@id="ctl00_ctl00_ctl00_Main_Main_SectionTabBox"]/div/div/div/div[1]/table/tbody/tr['+i+']/td[2]';
const [e1] = await page.$x(vLink);
const val = await e1.getProperty('innerText');
const skillVal = await val.jsonValue();
attributes.push(skillVal);
}
//Create a player profile to be pushed into the database. (I realize this is very wordy and ugly code)
let player = {
Name: attributes[0],
Role: attributes[1],
Athleticism: attributes[2],
Speed: attributes[3],
Durability: attributes[4],
Work_Ethic: attributes[5],
Stamina: attributes[6],
Strength: attributes[7],
Blocking: attributes[8],
Tackling: attributes[9],
Hands: attributes[10],
Game_Instinct: attributes[11],
Elusiveness: attributes[12],
Technique: attributes[13],
_id: id,
};
database.insert(player);
console.log('player #' + id + " scraped.");
await browser.close();
} else {
console.log("Blank profile");
await browser.close();
}
}
//Making sure the first URL is scraped before moving on to the next URL. (I removed the URL because it's unreasonably long and is not important for this part.)
(async () => {
for(let i = 0; i <= 1000; i++){
let link = 'https://url.com/Ratings.aspx?rid='+i+'&section=Ratings';
await scrapeProduct(link, i);
}
})();
What I think is making this so inefficient is the fact that every time scrapeProduct() is called, I create a new browser and a new page. Instead, I believe it would be more efficient to create 1 browser and 1 page and just change the page's URL with
await page.goto(url)
I believe that in order to do what I'm trying to accomplish here, I need to move:
const browser = await puppeteer.launch();
const page = await browser.newPage();
outside of my scrapeProduct() function, but I cannot seem to get this to work. Any time I try, I get an error in my function saying that page is not defined. I am very new to puppeteer (started today), so I would appreciate any guidance on how to accomplish this. Thank you very much!
TL;DR
How do I create 1 Browser instance and 1 Page instance that a function can use repeatedly, changing only the await page.goto(url) call?
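In outline, the reuse being asked about looks like this (a minimal sketch; the XPath scraping and database code from above are elided):
const puppeteer = require('puppeteer');

async function scrapeProduct(page, url, id) {
  await page.goto(url);
  // ...same XPath scraping and database.insert logic as above,
  // using the page that was passed in instead of launching a new browser...
}

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage(); // created once, reused for every URL
  for (let i = 0; i <= 1000; i++) {
    await scrapeProduct(page, 'https://url.com/Ratings.aspx?rid=' + i + '&section=Ratings', i);
  }
  await browser.close();
})();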
About a year ago I tried to make a React Native Pokémon Go helper app. Since there wasn't an API for Pokémon nests and PokéStops, I created a server that scraped thesilphroad.com, and I found the need to implement something like what @Arkan said.
I wanted the server to be able to take multiple requests, so I decided to initialize the browser when the server boots up. When a request is received, the server checks whether MAX_TABS has been reached. If it has, it waits; if not, a new tab is opened and the scrape is performed.
Here's the scraper.js
const puppeteer = require ('puppeteer')
const fs = require('fs')
const Page = require('./Page')
const exec = require('child_process').exec
const execSync = require('util').promisify(exec)
module.exports = class scraper {
constructor(){
this.browser = null
this.getPages = null
this.getTotalPages = null
this.isRunning = false
//browser permissions
this.permissions = ['geolocation']
this.MAX_TABS = 5
//when puppeteer launches
this.useFirstTab = true
}
async init(config={}){
let headless = config.headless != undefined ? config.headless : true
this.permissions = this.permissions.concat(config.permissions || [])
//get local chromium location
let browserPath = await getBrowserPath('firefox') || await getBrowserPath('chrome')
this.browser = await puppeteer.launch({
headless:headless,
executablePath:browserPath,
defaultViewport:null,
args:[
'--start-maximized',
]
})
this.getPages = () => this.browser.pages() //call pages() with the browser as its receiver
this.getTotalPages = ()=>{
return this.getPages().then(pages=>pages.length).catch(err=>0)
}
this.isRunning = true
}
async waitForTab(){
let time = Date.now()
let cycles = 1
await new Promise(resolve=>{
let interval = setInterval(async()=>{
let totalPages = await this.getTotalPages()
if(totalPages < this.MAX_TABS){
clearInterval(interval)
resolve()
}
if(Date.now() - time > 100)
console.log('Waiting...')
if(Date.now() - time > 20*1000){
console.log('... ...\n'.repeat(cycles)+'Still waiting...')
cycles++
time = Date.now()
}
},500)
})
}
//open new tab and go to page
async openPage(url,waitSelector,lat,long){
await this.waitForTab()
let pg
//puppeteer launches with a blank tab, use this
// if(this.useFirstTab){
// let pages = await this.browser.pages()
// pg = pages.pop()
// this.useFirstTab = false
// }
// else
pg = await this.browser.newPage()
if(lat && long){
await this.setPermissions(url)
}
let page = await new Page()
await page.init(pg,url,waitSelector,lat,long)
return page
}
async setPermissions(url){
const context = this.browser.defaultBrowserContext();
await context.overridePermissions(url,this.permissions)
}
}
// assumes that the browser is in path
async function getBrowserPath(browserName){
return execSync(`command -v ${browserName}`).then(({stdout,stderr})=>{
if(stdout.includes('not found'))
return null
return stdout
}).catch(err=>null)
}
The scraper imports Page.js, which is just a wrapper around a Puppeteer Page object that exposes the functions I used most.
const path = require('path')
const fs = require('fs')
const userAgents = require('./staticData/userAgents.json')
const cookiesPath = path.normalize('./cookies.json')
// a wrapper for a puppeteer page with pre-made functions
module.exports = class Page{
constructor(useCookies=false){
this.page = null
this.useCookies = useCookies
this.previousSession = this.useCookies && fs.existsSync(cookiesPath)
}
async close (){
await this.page.close()
}
async init(page,url,waitSelector,lat,long){
this.page = page
let userAgent = userAgents[Math.floor(Math.random()*userAgents.length)]
await this.page.setUserAgent(userAgent)
await this.restoredSession()
if(lat && long)
await this.page.setGeolocation({
latitude: lat || 59.95, longitude:long || 30.31667, accuracy:40
})
await this.page.goto(url)
await this.wait(waitSelector)
}
async screenshotElement(selector='body',directory='./screenshots',padding=0,offset={}) {
const rect = await this.page.evaluate(selector => {
const el = document.querySelector(selector)
const {x, y, width, height} = el.getBoundingClientRect()
return {
left: x,
top: y,
width,
height,
id: el.id
}
}, selector)
let ext = 'jpeg'
let filename = path.normalize(directory+'/'+Date.now())
return await this.page.screenshot({
type:ext,
path:filename+' - '+selector.substring(5)+'.'+ext,
clip: {
x: rect.left - padding+(offset.left || 0),
y: rect.top - padding+(offset.right || 0),
width: rect.width + padding * 2+(offset.width||0),
height: rect.height + padding * 2+ (offset.height||0)
},
encoding:'base64'
})
}
async restoredSession(){
if(!this.previousSession)
return false
let cookies = require(cookiesPath)
for(let cookie of cookies){
await this.page.setCookie(cookie)
}
console.log('Loaded previous session')
return true
}
async saveSession(){
//write cookie to file
if(!this.useCookies)
return
const cookies = await this.page.cookies()
fs.writeFileSync(cookiesPath,JSON.stringify(cookies,null,2))
console.log('Wrote cookies to file')
}
//wait for text input element and type text
async type(selector,text,options={delay:150}){
await this.wait(selector)
await this.page.type(selector,text,options)
}
//click and waits
async click(clickSelector,waitSelector=500){
await this.page.click(clickSelector)
await this.wait(waitSelector)
}
//hovers over element and waits
async hover(selector,waitSelector=500){
await this.page.hover(selector)
await this.wait(1000)
await this.wait(waitSelector)
}
//waits and suppresses timeout errors
async wait(selector=500, waitForNav=false){
try{
//waitForNav uses Puppeteer's waitForNavigation function,
//which for me does nothing but time out after 30s
waitForNav && await this.page.waitForNavigation()
await this.page.waitFor(selector)
} catch (err){
//print everything but timeout errors
if(err.name != 'TimeoutError'){
console.log('error name:',err.name)
console.log(err)
console.log('- - - '.repeat(4))
}
this.close()
}
}
}
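Putting the two classes together, usage would look roughly like this (a hypothetical sketch; the file paths, wait selector, and coordinates are placeholders):
const Scraper = require('./scraper');

(async () => {
  const scraper = new Scraper();
  await scraper.init({ headless: true });
  // openPage() waits for a free slot if MAX_TABS tabs are already open
  const page = await scraper.openPage('https://thesilphroad.com', '.map', 59.95, 30.31667);
  // ...work with the wrapped page here, e.g. page.screenshotElement('.map')...
  await page.close();
  await scraper.browser.close();
})();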
To achieve this, you'll just need to separate the browser from your requests, like in a class, for example:
class PuppeteerScraper {
async launch(options = {}) {
this.browser = await puppeteer.launch(options);
// you could reuse the page instance if it was defined here
}
/**
* Pass the address and the function that will scrape your data,
* in order to maintain the page inside this object
*/
async goto(url, callback) {
const page = await this.browser.newPage();
await page.goto(url);
/**evaluate its content */
await callback(page);
await page.close();
}
async close() {
await this.browser.close();
}
}
and, to implement it:
/**
* scrape function, takes the page instance as its parameters
*/
async function evaluate_page(page) {
const titles = await page.$$eval('.col-xs-6 .star-rating ~ h3 a', (itens) => {
const text_titles = [];
for (const item of itens) {
if (item && item.textContent) {
text_titles.push(item.textContent);
}
}
return text_titles;
});
console.log('titles', titles);
}
(async () => {
const scraper = new PuppeteerScraper();
await scraper.launch({ headless: false });
for (let i = 1; i <= 6; i++) {
let link = `https://books.toscrape.com/catalogue/page-${i}.html`;
await scraper.goto(link, evaluate_page);
}
scraper.close();
})();
Although, if you want something more complex, you could take a look at how it's done in the Apify project.
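As the comment in launch() hints, the page could also be created once and reused instead of opening and closing a tab for every URL. A sketch of that variant (the class name is only for illustration):
class SingleTabScraper {
  // assumes const puppeteer = require('puppeteer') as above
  async launch(options = {}) {
    this.browser = await puppeteer.launch(options);
    this.page = await this.browser.newPage(); // one tab, reused by every goto()
  }
  async goto(url, callback) {
    await this.page.goto(url);
    await callback(this.page);
  }
  async close() {
    await this.browser.close();
  }
}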

Puppeteer only scrapes around 200 pages and doesn't continue

For some reason that I don't understand, my Node app stops scraping after a few minutes without any errors; it just stops mid-scrape. By the way, it's an infinite-scroll website...
This is the code:
const puppeteer = require('puppeteer');
const fs = require('fs');
(async() => {
// start the browser
const browser = await puppeteer.launch({ args: ['--no-sandbox'] });
// open a new page
const page = await browser.newPage();
const pageURL = 'http://www.yad4.co.il/dogs//////////////#1';
try {
// try to go to URL
await page.goto(pageURL);
console.log(`opened the page: ${pageURL}`);
await page.setViewport({
width: 1200,
height: 800
});
await autoScroll(page);
} catch (error) {
console.log(`failed to open the page: ${pageURL} with the error: ${error}`);
}
// Find all links to dogs
const postsSelector = '.yd-search-page .container .row .col-md-9 .yd-gallery .search-handler-yd .col-xs-12 #dogs_more .col-md-4 .yd-dog-img .yd-mask a';
await page.waitForSelector(postsSelector);
const postUrls = await page.$$eval(postsSelector, postLinks => postLinks.map(link => link.href));
// Visit each page one by one
for (let postUrl of postUrls) {
// open the page
try {
await page.goto(postUrl);
console.log('opened the page: ', postUrl);
} catch (error) {
console.log(error);
console.log('failed to open the page: ', postUrl);
}
// get the name of the dog
const dogSelector = '.adopt.yd-amuta .container .yd-dog-cont .col-xs-12 .adopt-head .row .col-sm-6 .adopt-breadcrumb-title h2 span';
// await page.waitForSelector(dogSelector);
const dogName = await page.$eval(dogSelector, dogSelector => dogSelector.innerHTML);
// Writing the news inside a json file
fs.appendFile("dogtest4.json", JSON.stringify({dogName},), function(err) {
if (err) throw err;
console.log("Saved!");
});
}
// all done, close the browser
await browser.close();
async function autoScroll(page){
await page.evaluate(async () => {
await new Promise((resolve, reject) => {
var totalHeight = 0;
var distance = 100;
var timer = setInterval(() => {
var scrollHeight = document.body.scrollHeight;
window.scrollBy(0, distance);
totalHeight += distance;
if(totalHeight >= scrollHeight){
clearInterval(timer);
resolve();
}
}, 100);
});
});
}
process.exit()
})();
So it gives me results, but inconsistently; sometimes it gives me 115 pages, sometimes 300 pages, and sometimes barely 90 pages, and I don't understand why.
Please help me.
Thank you.
I can't comment, but I suppose it could be something to do with a memory limit being reached, which slows things down.
You can try adding await in front of fs.appendFile(...) as explained here; it might work for you.
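For that await to actually do anything, the promise-based fs API has to be used; a sketch of what the write inside the for...of loop could look like:
const fs = require('fs').promises; // instead of require('fs')

// inside the loop, replacing the callback version:
await fs.appendFile('dogtest4.json', JSON.stringify({ dogName }));
console.log('Saved!');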

How to split code into multiple files when a module like this is required by all of them?

In this case I'm using the Puppeteer headless Chrome browser and MongoDB. Here's my code:
var browser = await puppeteer.launch()
var page = await browser.newPage()
var db = await MongoClient.connect("mongodb://localhost:27017/bot")
var scraped_users = db.collection("scraped_users")
I want to split my code into relevant files like "chrome.js", "twitter.js", "database.js", and "utilities.js". The problem is that I need those 4 variables I declared above in pretty much every file, and it seems silly to pass them into every function. I also can't re-declare them in every file, because that would open a new Chrome browser and establish a new database connection.
Let's say you have a file common.js:
/*let isInitialized, browser, page, db, scraped_users
async function init() {
if (!isInitialized) {
browser = await puppeteer.launch()
page = await browser.newPage()
db = await MongoClient.connect("mongodb://localhost:27017/bot")
scraped_users = db.collection("scraped_users")
isInitialized = true
}
return { browser, page, db, scraped_users }
}
module.exports = init*/
let common = {}; //store some global's.
//this should only be called once, eg. when App starts
common.init = async function () {
this.browser = await puppeteer.launch()
this.page = await this.browser.newPage()
this.db = await MongoClient.connect("mongodb://localhost:27017/bot")
this.scraped_users = this.db.collection("scraped_users")
}
module.exports = common;
then use it like:
/*const init = require('./init.js')
init().then(({browser, page, db, scraped_users}) => {
console.log(browser, page, db, scraped_users)
})*/
const common = require('./common');
const doSomething = require('./do-something');
async function run() {
await common.init(); //this only needs to be done once
console.log(common.browser); //etc
await doSomething();
}
//now inside other units, eg. do-something.js
const common = require('./common');
async function doSomething() {
console.log(common.browser);
}
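run() still has to be called somewhere, for example:
run().catch(console.error);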

Retrieving JavaScript Rendered HTML with Puppeteer

I am attempting to scrape the HTML from this NCBI.gov page. I need to include the #see-all URL fragment so that I am guaranteed to get the search page instead of retrieving the HTML from an incorrect gene page, https://www.ncbi.nlm.nih.gov/gene/119016.
URL fragments are not passed to the server; they are instead used by the page's JavaScript client-side to (in this case) create entirely different HTML, which is what you get when you go to the page in a browser and use "View page source". That is the HTML I want to retrieve. R's readLines() ignores the part of the URL after the #.
I tried using PhantomJS first, but it just returned the error described here, ReferenceError: Can't find variable: Map, which seems to result from PhantomJS not supporting some feature that NCBI uses, thus eliminating this route to a solution.
I had more success with Puppeteer, using the following JavaScript run with Node.js:
const puppeteer = require('puppeteer');
(async() => {
const browser = await puppeteer.launch();
const page = await browser.newPage();
await page.goto(
'https://www.ncbi.nlm.nih.gov/gene/?term=AGAP8#see-all');
var HTML = await page.content()
const fs = require('fs');
var ws = fs.createWriteStream(
'TempInterfaceWithChrome.js'
);
ws.write(HTML);
ws.end();
var ws2 = fs.createWriteStream(
'finishedFlag'
);
ws2.end();
browser.close();
})();
However, this returned what appeared to be the pre-rendered HTML. How do I (programmatically) get the final HTML that I see in the browser?
You can try to change this:
await page.goto(
'https://www.ncbi.nlm.nih.gov/gene/?term=AGAP8#see-all');
into this:
await page.goto(
'https://www.ncbi.nlm.nih.gov/gene/?term=AGAP8#see-all', {waitUntil: 'networkidle0'});
Or, you can create a function listenFor() to listen to a custom event on page load:
function listenFor(type) {
return page.evaluateOnNewDocument(type => {
document.addEventListener(type, e => {
window.onCustomEvent({type, detail: e.detail});
});
}, type);
}
await listenFor('custom-event-ready'); // Listen for "custom-event-ready" custom event on page load.
Later edit:
This also might come in handy:
await page.waitForSelector('h3'); // replace h3 with your selector
Maybe try waiting for navigation:
await page.waitForNavigation();
and after that:
let html = await page.content();
I had success using the following to get html content that was generated after the page has been loaded.
const browser = await puppeteer.launch();
try {
const page = await browser.newPage();
await page.goto(url);
await page.waitFor(2000);
let html_content = await page.evaluate(el => el.innerHTML, await page.$('.element-class-name'));
console.log(html_content);
} catch (err) {
console.log(err);
}
Hope this helps.
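Note that page.waitFor() used above has since been deprecated and removed in newer Puppeteer versions; a plain delay does the same thing:
await new Promise(resolve => setTimeout(resolve, 2000)); // instead of page.waitFor(2000)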
Waiting for network idle was not enough in my case, so I used the DOMContentLoaded event:
await page.goto(url, {waitUntil: 'domcontentloaded', timeout: 60000} );
const data = await page.content();
Indeed you need innerHTML:
fs.writeFileSync( "test.html", await (await page.$("html")).evaluate( (content => content.innerHTML ) ) );
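In context, that one-liner would look something like this (a sketch inside an async function, assuming fs is required and the page has already navigated):
const fs = require('fs');
// serialize the fully rendered document and write it to disk
const html = await (await page.$('html')).evaluate(el => el.innerHTML);
fs.writeFileSync('test.html', html);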
If you want to actually await a custom event, you can do it this way.
const page = await browser.newPage();
/**
* Attach an event listener to page to capture a custom event on page load/navigation.
* @param {string} type Event name.
* @return {!Promise}
*/
function addListener(type) {
return page.evaluateOnNewDocument(type => {
// here we are in the browser context
document.addEventListener(type, e => {
window.onCustomEvent({ type, detail: e.detail });
});
}, type);
}
const evt = await new Promise(async resolve => {
// Define a window.onCustomEvent function on the page.
await page.exposeFunction('onCustomEvent', e => {
// here we are in the node context
resolve(e); // resolve the outer Promise here so we can await it outside
});
await addListener('app-ready'); // setup listener for "app-ready" custom event on page load
await page.goto('http://example.com'); // N.B! Do not use { waitUntil: 'networkidle0' } as that may cause a race condition
});
console.log(`${evt.type} fired`, evt.detail || '');
Built upon the example at https://github.com/GoogleChrome/puppeteer/blob/master/examples/custom-event.js
