getting dynamic data using python - javascript

I'm new to Python and got interested in writing scripts. I'm currently building a crawler that goes to a page and extracts copy from tags. Right now I can only list tags; I'm having trouble getting the text out of them and I'm not sure why exactly. I'm also using BeautifulSoup and PyQt4 to get dynamic data (this might need a new question).
So based on the code below, I should be getting the "Images" copy from the Google homepage, or at least the span tag itself. Instead I'm getting None returned.
I tried reading the docs for BeautifulSoup and they were a little overwhelming. I'm still reading them, but I think I keep going down a rabbit hole. I can print all anchor tags or all divs, but targeting a specific one is where I'm struggling.
import urllib
import re
from bs4 import BeautifulSoup, Comment
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtWebKit import *

class Render(QWebPage):
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.loadFinished.connect(self._loadFinished)
        self.mainFrame().load(QUrl(url))
        self.app.exec_()

    def _loadFinished(self, result):
        self.frame = self.mainFrame()
        self.app.quit()

url = 'http://google.com'
source = urllib.urlopen(url).read()
soup = BeautifulSoup(source, 'html.parser')
js_test = soup.find("a", class_="gb_P")
print js_test
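For what it's worth, a hedged sketch of the missing step: run the URL through the Render class above so the JavaScript executes, then parse the rendered HTML with BeautifulSoup and call get_text() on the match. The gb_P class is taken from the snippet above and may not match current Google markup, so treat the selector as a placeholder.

# Sketch only: render the page with PyQt4 first, then parse the rendered HTML.
# Assumes the Render class defined above; "gb_P" comes from the question and
# may not exist in the live Google markup.
r = Render(url)
rendered_html = unicode(r.frame.toHtml())
rendered_soup = BeautifulSoup(rendered_html, 'html.parser')
link = rendered_soup.find("a", class_="gb_P")
if link is not None:
    print link.get_text()  # the visible copy, e.g. "Images"
else:
    print "selector not found in rendered HTML"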

Related

Scraping javascript page with PyQt5 and QWebEngineView

I'm trying to render a JavaScript-driven webpage into populated HTML for scraping. Researching different solutions (Selenium, reverse-engineering the page, etc.) led me to this technique, but I can't get it working. BTW, I am new to Python, basically at the cut/paste/experiment stage. I got past installation and indentation issues, but I'm stuck now.
In the test code below, print(sample_html) works and returns the original HTML of the target page, but print(render(sample_html)) always returns the word 'None'.
Interestingly, if you run this on amazon.com, they detect that it is not a real browser and return HTML with a warning about automated access. However, the other test pages provide true HTML that should render, except it doesn't.
How do I troubleshoot the result always returning "None"?
def render(source_html):
    """Fully render HTML, JavaScript and all."""
    import sys
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtWebEngineWidgets import QWebEngineView

    class Render(QWebEngineView):
        def __init__(self, html):
            self.html = None
            self.app = QApplication(sys.argv)
            QWebEngineView.__init__(self)
            self.loadFinished.connect(self._loadFinished)
            self.setHtml(html)
            self.app.exec_()

        def _loadFinished(self, result):
            # This is an async call, you need to wait for this
            # to be called before closing the app
            self.page().toHtml(self.callable)

        def callable(self, data):
            self.html = data
            # Data has been stored, it's safe to quit the app
            self.app.quit()

    return Render(source_html).html


import requests

# url = 'http://webscraping.com'
# url = 'http://www.amazon.com'
url = 'https://www.ncbi.nlm.nih.gov/nuccore/CP002059.1'
sample_html = requests.get(url).text
print(sample_html)
print(render(sample_html))
EDIT: Thanks for the responses, which were incorporated into the code. But now it returns an error, and the script hangs until I kill the Python launcher, which then causes a segfault.
This is the revised code:
def render(source_url):
    """Fully render HTML, JavaScript and all."""
    import sys
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtCore import QUrl
    from PyQt5.QtWebEngineWidgets import QWebEngineView

    class Render(QWebEngineView):
        def __init__(self, url):
            self.html = None
            self.app = QApplication(sys.argv)
            QWebEngineView.__init__(self)
            self.loadFinished.connect(self._loadFinished)
            # self.setHtml(html)
            self.load(QUrl(url))
            self.app.exec_()

        def _loadFinished(self, result):
            # This is an async call, you need to wait for this
            # to be called before closing the app
            self.page().toHtml(self._callable)

        def _callable(self, data):
            self.html = data
            # Data has been stored, it's safe to quit the app
            self.app.quit()

    return Render(source_url).html


# url = 'http://webscraping.com'
# url = 'http://www.amazon.com'
url = "https://www.ncbi.nlm.nih.gov/nuccore/CP002059.1"
print(render(url))
Which throws these errors:
$ python3 -tt fees-pkg-v2.py
Traceback (most recent call last):
  File "fees-pkg-v2.py", line 30, in _callable
    self.html = data
AttributeError: 'method' object has no attribute 'html'
None    (hangs here until the Python launcher is force-quit)
Segmentation fault: 11
$
I have already started reading up on Python classes to fully understand what I'm doing (always a good thing). I'm thinking something in my environment could be the problem (OS X Yosemite, Python 3.4.3, Qt 5.4.1, sip-4.16.6). Any other suggestions?
The problem was the environment. I had manually installed Python 3.4.3, Qt5.4.1, and sip-4.16.6 and must have mucked something up. After installing Anaconda, the script started working. Thanks again.
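For completeness, a minimal usage sketch once the environment is sorted out: call the revised render() on a URL and hand the result to BeautifulSoup. This assumes bs4 is installed; the sanity check below is a placeholder, not something taken from the original posts.

# Sketch only: parse the fully rendered HTML returned by render() above.
# Assumes the revised render(source_url) function and bs4 are available.
from bs4 import BeautifulSoup

rendered = render("https://www.ncbi.nlm.nih.gov/nuccore/CP002059.1")
if rendered is not None:
    soup = BeautifulSoup(rendered, "html.parser")
    print(soup.title.string)  # quick sanity check on the rendered document
else:
    print("render() returned None - the toHtml callback never fired")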

Form scraping using scrapy with Javascript embedded

I am trying to scrape this website. My spider is functional, but the website has JavaScript embedded in the form to get the result, and I can't get past it.
I've read about Selenium and how I have to include a browser to get through it, but I'm still unclear on how to scrape after the dynamically generated HTML is loaded, while still passing form arguments. See the sketch after my spider code below for the direction I'm asking about.
Here is the code for my spider; any help, referrals or code snippets are welcome. I've navigated and read many threads to no avail.
from scrapy.spiders import BaseSpider
from scrapy.http import FormRequest
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from tax1.items import Tax1Item
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from selenium import webdriver
import time


class tax1Spider(BaseSpider):
    name = "tax1Spider"
    allowed_domains = ["http://taxesejour.impots.gouv.fr"]
    start_urls = ["http://taxesejour.impots.gouv.fr/DTS_WEB/UK/"]

    def parse(self, response):
        yield FormRequest.from_response(response,
                                        formname='PAGE_DELIBV2',
                                        formdata={'A8': '05 - Hautes-alpes',
                                                  'A10': 'AIGUILLES'},
                                        callback=self.parse1)

    rules = (
        Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//div[@class="lh0 dzSpan dzA15"]',)),
             callback="parse1", follow=True))
    # lh0 dzSpan dzA15

    def __init__(self):
        CrawlSpider.__init__(self)
        # use any browser you wish
        self.browser = webdriver.Firefox()

    def __del__(self):
        self.browser.close()

    def parse1(self, response):
        # hxs = HtmlXPathSelector(response)
        self.browser.get(response.url)
        time.sleep(3)
        Selector(text=self.browser.page_source)
        items = []
        # item = Tax1Item()
        item['message'] = hxs.select('//td[@id="tzA18"]').extract()
        print item['message']
        return item
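One hedged direction for the form step: drive the form directly with Selenium instead of FormRequest, then parse the browser's page source. This is a sketch only; the field names A8/A10 and the result cell tzA18 are copied from the spider above and are assumptions about the live form, and Select only applies if those fields really are select elements.

# Sketch only: submit the form with Selenium, then read the dynamically
# generated result. Field names (A8, A10) and the result id (tzA18) are
# taken from the spider above and may not match the live page.
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from scrapy.selector import Selector
import time

browser = webdriver.Firefox()
browser.get("http://taxesejour.impots.gouv.fr/DTS_WEB/UK/")

Select(browser.find_element_by_name("A8")).select_by_visible_text("05 - Hautes-alpes")
Select(browser.find_element_by_name("A10")).select_by_visible_text("AIGUILLES")
browser.find_element_by_name("A10").submit()  # submits the enclosing form

time.sleep(3)  # crude wait for the JavaScript to populate the result
sel = Selector(text=browser.page_source)
print(sel.xpath('//td[@id="tzA18"]//text()').extract())
browser.quit()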

How to retrieve the exact HTML as in a browser

I'm using a Python script to render web pages and retrieve their HTML. It works fine with most pages, but with some of them the HTML retrieved is incomplete, and I don't quite understand why. This is the script I'm using to scrape this page; for some reason, the link to every product is not in the HTML:
Link: http://www.pullandbear.com/es/es/mujer/vestidos-c29016.html
Python script:
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtWebKit import *
from PyQt4 import QtNetwork
from PyQt4 import QtCore

url = sys.argv[1]
path = sys.argv[2]


class Render(QWebPage):
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.loadFinished.connect(self._loadFinished)
        self.request = QtNetwork.QNetworkRequest()
        self.request.setUrl(QtCore.QUrl(url))
        self.request.setRawHeader("Accept-Language", QtCore.QByteArray("es ,*"))
        self.mainFrame().load(self.request)
        self.app.exec_()

    def _loadFinished(self, result):
        self.frame = self.mainFrame()
        self.app.quit()


r = Render(url)
result = r.frame.toHtml()
html_file = open(path, "w")
html_file.write("%s" % result.encode("utf-8"))
html_file.close()
sys.exit(app.exec_())
This code was taken from here: https://impythonist.wordpress.com/2015/01/06/ultimate-guide-for-scraping-javascript-rendered-web-pages/
Am I missing something? What are the limitations of this framework?
Thanks in advance,
If you want headless browsing, you can combine PhantomJS with Selenium; the following gets all the source:
url = "http://www.pullandbear.com/es/es/mujer/vestidos-c29016.html"
from selenium import webdriver
dr = webdriver.PhantomJS()
dr.get(url)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
element = WebDriverWait(dr, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, "grid_itemContainer"))
)
Just using Selenium without the WebDriverWait did not always return the full source; adding the wait until the anchor tags with the grid_itemContainer class are present makes sure the HTML has been generated. The XPath below returns all your links:
print([a.get_attribute('href') for a in dr.find_elements_by_xpath("//a[@class='grid_itemContainer']")])
[u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-detalle-crochet-pechera-c29016p100064004.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-bordado-escote-pico-c29016p100123006.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-manga-larga-espalda-abierta-c29016p100147503.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-hombros-descubiertos-beads-c29016p100182001.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-jacquard-capa-c29016p100255505.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-vaquero-eyelets-c29016p100336010.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-liso-oversized-c29016p100289013.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-liso-oversized-c29016p100289013.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-camisero-oversized-c29016p100036616.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-pico-c29016p100166506.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-estampado-rayas-c29016p100234507.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-manga-corta-liso-c29016p100262008.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-largo-cuello-halter-liso-c29016p100036162.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-capa-jacquard-%C3%A9tnico-c29016p100259002.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-largo-cuello-halter-rayas-c29016p100036161.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-capa-jacquard-tri%C3%A1ngulo-c29016p100255506.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-marinero-escote-bardot-c29016p100259003.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-rayas-escote-espalda-c29016p100262007.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cruzado-c29016p100216013.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-flores-canes%C3%BA-bordado-c29016p100203011.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-bordados-c29016p100037160.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-flores-volante-c29016p100216014.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-lencero-c29016p100104515.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuadros-detalle-encaje-c29016p100216016.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-drapeado-abertura-bajo-c29016p100129011.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-drapeado-abertura-bajo-c29016p100129011.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-vaquero-bolsillo-plastr%C3%B3n-c29016p100036822.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-rayas-bajo-desigual-c29016p100123010.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-camisero-vaquero-c29016p100036575.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-midi-estampado-rayas-c29016p100189011.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-midi-rayas-manga-3-4-c29016p100149507.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-midi-canal%C3%A9-ajustado-c29016p100149508.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-estampado-bolsillos-c29016p100212503.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-corte-evas%C3%A9-bolsillos-c29016p100189012.html', 
u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-vaquero-camisero-cuadros-c29016p100036624.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/pichi-vaquero-c29016p100073526.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-estampado-geom%C3%A9trico-cuello-halter-c29016p100037021.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-perkins-manga-larga-c29016p100036882.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-perkins-manga-larga-c29016p100036882.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-perkins-manga-larga-c29016p100036882.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-perkins-manga-larga-c29016p100036882.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-jacquard-evas%C3%A9-c29016p100037207.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cr%C3%AApe-evas%C3%A9-estampado-flores-manga-3-4-c29016p100036932.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cr%C3%AApe-evas%C3%A9-estampado-flores-manga-3-4-c29016p100037280.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-perkins-parche-c29016p100037464.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cr%C3%AApe-evas%C3%A9-liso-manga-3-4-c29016p100036930.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cr%C3%AApe-evas%C3%A9-liso-manga-3-4-c29016p100036930.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-alto-liso-c29016p100037156.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-alto-estampado-flores-c29016p100036921.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-cuello-alto-estampado-corbatero-c29016p100037155.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-largo-manga-sisa-c29016p100170011.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-largo-manga-sisa-rayas-c29016p100170012.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-manga-acampanada-c29016p100149506.html', u'http://www.pullandbear.com/es/es/mujer/vestidos/vestido-punto-espalda-abierta-c29016p100195504.html']
If you want to write the source:
with open("out.html", "w") as f:
f.write(dr.page_source)
I think you can use http://ghost-py.readthedocs.org/en/latest/ for this case. It loads the web page like a real browser and runs JavaScript.
You can also try PhantomJS, for example, but it is written in NodeJS.

Scraping a site using Selenium and BeautifulSoup

So I'm trying to scrape a site that loads something dynamically with JS. My goal is to build a quick python script to load a site, see if there's a certain word, and then email me if it's there.
I'm relatively new to coding, so if there's a better way, I'd be happy to hear.
I'm currently working on loading the page with Selenium and then scraping the generated page with BeautifulSoup, and that's where I'm having the issue. How do I get BeautifulSoup to scrape the site I just opened in Selenium?
from __future__ import print_function
from bs4 import BeautifulSoup
from selenium import webdriver
import requests
import urllib, urllib2
import time
url = 'http://www.somesite.com/'
path_to_chromedriver = '/Users/admin/Downloads/chromedriver'
browser = webdriver.Chrome(executable_path = path_to_chromedriver)
site = browser.get(url)
html = urllib.urlopen(site).read()
soup = BeautifulSoup(html, "lxml")
print(soup.prettify())
I get an error that says:
Traceback (most recent call last):
  File "probation color.py", line 16, in <module>
    html = urllib.urlopen(site).read()
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib.py", line 87, in urlopen
    return opener.open(url)
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib.py", line 185, in open
    fullurl = unwrap(toBytes(fullurl))
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib.py", line 1075, in unwrap
    url = url.strip()
AttributeError: 'NoneType' object has no attribute 'strip'
which I don't really understand, nor do I see why it is happening. Is it something internal to urllib? How do I fix it? I think solving that will fix my problem.
The HTML can be found using the "page_source" attribute on the browser. This should work:
browser = webdriver.Chrome(executable_path = path_to_chromedriver)
browser.get(url)
html = browser.page_source
soup = BeautifulSoup(html, "lxml")
print(soup.prettify())
from __future__ import print_function
from bs4 import BeautifulSoup
from selenium import webdriver
import requests
#import urllib, urllib2
import time

url = 'http://www.somesite.com/'
path_to_chromedriver = '/Users/admin/Downloads/chromedriver'
browser = webdriver.Chrome(executable_path=path_to_chromedriver)
browser.get(url)
html = browser.page_source  # you should have used this...
#html = urllib.urlopen(site).read()  # this is the mistake you made
soup = BeautifulSoup(html, "lxml")
print(soup.prettify())
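To close the loop on the stated goal (email yourself when a certain word shows up), here is a minimal sketch using smtplib on top of the soup built above. The keyword, addresses, SMTP host and credentials are placeholders, not taken from the original posts.

# Sketch only: check the rendered page for a keyword and email an alert.
# KEYWORD, the addresses and the SMTP settings are placeholders/assumptions.
import smtplib
from email.mime.text import MIMEText

KEYWORD = "certain word"

if KEYWORD.lower() in soup.get_text().lower():
    msg = MIMEText("Found '%s' on %s" % (KEYWORD, url))
    msg["Subject"] = "Keyword alert"
    msg["From"] = "alerts@example.com"
    msg["To"] = "me@example.com"

    server = smtplib.SMTP("smtp.example.com", 587)  # placeholder SMTP host
    server.starttls()
    server.login("alerts@example.com", "app-password")  # placeholder credentials
    server.sendmail(msg["From"], [msg["To"]], msg.as_string())
    server.quit()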

How do you pass html to Selenium

I have a web crawler and I want to pass the HTML + JavaScript it retrieves into Selenium. Is this possible? To clarify, I do not want to use webdriver.get to retrieve the page with Selenium, since my crawler is faster.
I ended up scraping the webpage with PyQt4 on an Xvfb server, since I was using Amazon EC2, which doesn't come with X11. The code below loads the webpage containing JavaScript and waits 7 seconds before returning the HTML so all the JavaScript will have finished loading.
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtWebKit import *
from xvfbwrapper import Xvfb


class Render(QWebPage):
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.timerScreen = QTimer()
        self.timerScreen.setInterval(7000)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.getHtml)
        self.loadFinished.connect(self.timerScreen.start)
        self.mainFrame().load(QUrl(url))
        self.app.exec_()

    def getHtml(self):
        self.frame = self.mainFrame()
        self.app.quit()


args = {"nolisten": "tcp"}
vdisplay = Xvfb(**args)
vdisplay.start()

url = 'url here'
r = Render(url)
html = r.frame.toHtml()
print html

f = open("./test.html", "wb")
f.write(html.__str__().encode("utf-8"))
f.close()
# stri = str(html).encode("utf-8")
vdisplay.stop()
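If you do still want to hand pre-fetched HTML to Selenium rather than let it fetch the page itself, one hedged workaround is to write the HTML to a temporary file and point the driver at a file:// URL. This is a sketch only; relative links and some scripts may behave differently than on the live site.

# Sketch only: load HTML your crawler already fetched into a Selenium browser
# via a temporary file. Relative URLs and some scripts may not behave exactly
# as they do on the original site.
import tempfile
from selenium import webdriver

html = open("./test.html").read()  # HTML saved by the PyQt4 code above

tmp = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
tmp.write(html)
tmp.close()

driver = webdriver.Firefox()
driver.get("file://" + tmp.name)  # Selenium now executes the page's scripts
print driver.page_source
driver.quit()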
