how to create hyperlink in piechart - javascript

I want to do a pie chart in matplotlib.
This pie chart will be a representation of two variables: male and female.
That's easy to do :)
What I would like to do next, I'm not even sure if it's possible to do with matplotlib, I would like to make these two variables clickable so if I click on male, I would see another page with information about this, same thing with female.
An image map isn't a solution since these variables may change in the future.
Anyone has any idea how to do this? If it's possible with matplotlib or what program would you recommend.
Thank you!

While it's not really in a workably stable state yet, have a look at the html5 canvas backend for matplotlib. It looks interesting, anyway, and will probably be the best way to do this sort of thing (interactive webpage with a matplotlib plot) in the future.
In the meantime, as #Mark suggested, it's not too hard to dynamically generate an imagemap for the wedges of a pie plot.
Here's a rough example, that I'm sure you could adapt to whatever web framework you're using.
import matplotlib.pyplot as plt
def main():
    """Draw a sample pie chart and write an HTML page with an image map for it."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    veggie_labels = ['Beans', 'Squash', 'Corn']
    wedges, plt_labels = ax.pie([20, 40, 60], labels=veggie_labels)
    # Equal aspect so the pie is drawn as a circle.
    ax.axis('equal')
    make_image_map(fig, wedges, veggie_labels, 'temp.html')
def make_image_map(fig, wedges, labels, html_filename):
    """Write a static HTML page embedding the figure as a clickable image map.

    Saves *fig* to ``temp.png`` and emits an ``<img>`` plus a ``<map>`` whose
    polygonal ``<area>`` entries trace each pie wedge; clicking a wedge opens
    a Google image search for its label.

    Parameters
    ----------
    fig : matplotlib Figure containing the pie chart.
    wedges : sequence of Wedge patches as returned by ``ax.pie()``.
    labels : sequence of str, one label per wedge (used for hrefs/alt text).
    html_filename : path of the HTML file to create.
    """
    #-- Save the figure as an image and get image size ------------------------
    # Be sure to explicitly set the dpi when saving so the pixel coordinates
    # computed below match the saved image.
    im_filename = 'temp.png'
    fig.savefig(im_filename, dpi=fig.dpi)
    # Get figure size in pixels...
    _, _, fig_width, fig_height = fig.bbox.bounds
    #-- Get the coordinates of each wedge as a string of x1,y1,x2,y2... -------
    coords = []
    for wedge in wedges:
        # NOTE(review): Wedge.get_verts() is a legacy API; in current
        # matplotlib use wedge.get_path().vertices with the patch transform
        # instead -- confirm against the matplotlib version in use.
        xy = wedge.get_verts()
        # Transform to pixel coords.
        xy = fig.get_transform().transform(xy)
        # Format into a coord string, flipping y because HTML image maps put
        # the origin <0,0> in the top-left corner (matplotlib: bottom-left).
        xy = ', '.join(['%0.2f,%0.2f' % (x, fig_height - y) for x, y in xy])
        coords.append(xy)
    #-- Build web page --------------------------------------------------------
    header = """
<html>
<body>
<img src="{0}" alt="Pie Chart" usemap="#pie_map" width="{1}" height="{2}" />
""".format(im_filename, fig_width, fig_height)
    # Build the image map (renamed from "map" to avoid shadowing the builtin).
    area_map = '<map name="pie_map">\n'
    for label, xy in zip(labels, coords):
        href = 'http://images.google.com/images?q={0}'.format(label)
        area = '<area shape="poly" coords="{0}" href="{1}" alt="{2}" />'
        area = area.format(xy, href, label)
        area_map += ' ' + area + '\n'
    area_map += '</map>\n'
    footer = """
</body>
</html>"""
    # Write to a file... ``open`` replaces the Python-2-only ``file`` builtin,
    # which raises NameError on Python 3.
    with open(html_filename, 'w') as outfile:
        outfile.write(header + area_map + footer)
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
Edit: I just realized that you might not be referring to embedding the plot into a web page... (I assumed that you were from the "display another page" bit in your question.) If you want more of a desktop app, without having to mess with a "full" gui toolkit, you can do something like this:
import matplotlib.pyplot as plt
def main():
    """Show an interactive pie chart whose wedges open a browser when clicked."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    crop_labels = ['Beans', 'Squash', 'Corn']
    wedges, plt_labels = ax.pie([20, 40, 60], labels=crop_labels)
    # Equal aspect so the pie is drawn as a circle.
    ax.axis('equal')
    make_picker(fig, wedges)
    plt.show()
def make_picker(fig, wedges):
    """Wire up pick events so clicking a wedge opens a Google image search."""
    import webbrowser

    def on_pick(event):
        # The picked artist is the wedge itself; its label drives the query.
        clicked = event.artist
        query = clicked.get_label()
        webbrowser.open('http://images.google.com/images?q={0}'.format(query))

    # Mark every wedge as pickable so it fires pick events.
    for wedge in wedges:
        wedge.set_picker(True)
    fig.canvas.mpl_connect('pick_event', on_pick)
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
Which opens a browser window for a google image search of whatever the wedge is labeled as...

You can do this with an imagemap or HTML element overlay controlled by JavaScript/jQuery.
Essentially, send your chart data to the page along with the chart image, and use JS to create the elements with the links according to the specification of the data.
It's a bit harder than the bar graphs I've done this to before, but should work fine.

Related

ClickInfo plugin problem on multiple mpld3 charts on same HTML page

I have two matplotlib scatter plot charts (rendered using mpld3.fig_to_html) on the same HTML page. I'm trying to make each point on the chart clickable (to open another window showing more info) using this ClickInfo plugin.
The generated HTML for these charts shows different id's; however, the id of one chart gets mixed up with the other when each point is clicked, passing the wrong info to the page it opens. There's not much info on how I can restrict the search for elements using mpld3.get_element(this.props.id, this.fig);.
If I only have one chart on the same page, this plugin works perfectly.
Would be a great help to have a solution to make this plugin work for this use case.
class ClickInfo(plugins.PluginBase):
    """mpld3 plugin: clicking the i-th plotted point opens ``urls[i]``
    in a new browser window/tab.

    The JS side looks up the figure element by the id stored in
    ``self.dict_`` and binds a ``mousedown`` handler to its elements.
    """

    JAVASCRIPT = """
mpld3.register_plugin("clickinfo", ClickInfo);
ClickInfo.prototype = Object.create(mpld3.Plugin.prototype);
ClickInfo.prototype.constructor = ClickInfo;
ClickInfo.prototype.requiredProps = ["id","urls"];
function ClickInfo(fig, props){
mpld3.Plugin.call(this, fig, props);
};
ClickInfo.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id, this.fig);
urls = this.props.urls;
obj.elements().on("mousedown",function(d, i){window.open(urls[i], '_blank')});
}
"""

    def __init__(self, points, urls):
        """*points*: the matplotlib artist(s) to attach to; *urls*: one
        target link per point, in plotting order."""
        self.points = points
        self.urls = urls
        # Line2D markers are addressed under an extra "pts" suffix in the
        # ids mpld3 generates; other artist types are not.
        if isinstance(points, matplotlib.lines.Line2D):
            suffix = "pts"
        else:
            suffix = None
        # NOTE: removed a leftover debug print here -- it also called
        # get_id with a positional suffix and no prefix, so the id it
        # printed did not even match the id stored below.
        self.dict_ = {"type": "clickinfo",
                      "id": mpld3.utils.get_id(points, suffix=suffix, prefix='el'),
                      "urls": urls}
Just found out that the version I have (0.5.2) of the mpld3 package has a targets parameter for PointHTMLTooltip that accepts a list of urls, when clicked opens the url on a new window/tab and works perfectly. No need to use ClickInfo.
mpld3.plugins.PointHTMLTooltip(self, points, labels=None, targets=None, hoffset=0, voffset=10, css=None)

How to scrape multiple pages with an unchanging URL - Python 3

I recently got in touch with web scraping and tried to web scrape various pages. For now, I am trying to scrape the following site - http://www.pizzahut.com.cn/StoreList
So far I've used selenium to get the longitude and latitude scraped. However, my code right now only extracts the first page. I know there is a dynamic web scraping that executes javascript and loads different pages, but had hard time trying to find a right solution. I was wondering if there's a way to access the other 49 pages or so, because when I click next page the URL does not change because it is set, so I cannot just iterate over a different URL each time
Following is my code so far:
import os
import requests
import csv
import sys
import time
from bs4 import BeautifulSoup
# Fetch the store list and pull each store's coordinates out of the
# hidden <input> value, which packs "lng,lat|..." fields separated by '|'.
page = requests.get('http://www.pizzahut.com.cn/StoreList')
soup = BeautifulSoup(page.text, 'html.parser')
for store in soup.find_all('div', class_='re_RNew'):
    name = store.find('p', class_='re_NameNew').string
    raw_value = store.find('input').get('value')
    coord_pair = raw_value.split('|')[0].split(',')
    longitude = coord_pair[0]
    latitude = coord_pair[1]
    print(longitude, latitude)
Thank you so much for helping out. Much appreciated
Steps to get the data:
Open the developer tools in your browser (for Google Chrome it's Ctrl+Shift+I). Now, go to the XHR tab which is located inside the Network tab.
After doing that, click on the next page button. You'll see the following file.
Click on that file. In the General block, you'll see these 2 things that we need.
Scrolling down, in the Form Data tab, you can see the 3 variables as
Here, you can see that changing the value of pageIndex will give all the pages required.
Now, that we've got all the required data, we can write a POST method for the URL http://www.pizzahut.com.cn/StoreList/Index using the above data.
Code:
I'll show you the code to scrape first 2 pages, you can scrape any number of pages you want by changing the range().
# The site paginates via POSTed form data: pageIndex selects the page,
# pageSize the results per page.  Widen range() to fetch more pages.
for page_no in range(1, 3):
    payload = {
        'pageIndex': page_no,
        'pageSize': 10,
        'keyword': '输入餐厅地址或餐厅名称',
    }
    response = requests.post('http://www.pizzahut.com.cn/StoreList/Index',
                             data=payload)
    soup = BeautifulSoup(response.text, 'html.parser')
    print('PAGE', page_no)
    for store in soup.find_all('div', class_='re_RNew'):
        name = store.find('p', class_='re_NameNew').string
        raw_value = store.find('input').get('value')
        coord_pair = raw_value.split('|')[0].split(',')
        longitude = coord_pair[0]
        latitude = coord_pair[1]
        print(longitude, latitude)
Output:
PAGE 1
31.085877 121.399176
31.271117 121.587577
31.098122 121.413396
31.331458 121.440183
31.094581 121.503654
31.270737000 121.481178000
31.138214 121.386943
30.915685 121.482079
31.279029 121.529255
31.168283 121.283322
PAGE 2
31.388674 121.35918
31.231706 121.472644
31.094857 121.219961
31.228564 121.516609
31.235717 121.478692
31.288498 121.521882
31.155139 121.428885
31.235249 121.474639
30.728829 121.341429
31.260372 121.343066
Note: You can change the results per page by changing the value of pageSize (currently it's 10).

Work-around for code folding in Hugo blogdown websites

In Rmarkdown documents there is a code folding option--code_folding: hide. This option is not available for Hugo websites generated with the blogdown package (see this recently opened feature request). Has anyone come up with a work-around for this? Even some JavaScript and HTML code that can be added on a post-by-post basis?
As an example, blogdown generates a default Hugo site with the Hugo-lithium theme when you run the following command in an new directory:
blogdown::new_site().
One of the posts generated for this default is below. I'd be interested in adding code folding to this example, that is, a hide/show button on the webpage above each R code block.
title: "Hello R Markdown"
author: "Frida Gomam"
date: 2015-07-23T21:13:14-05:00
categories: ["R"]
tags: ["R Markdown", "plot", "regression"]
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(collapse = TRUE)
```
# R Markdown
This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see <http://rmarkdown.rstudio.com>.
You can embed an R code chunk like this:
```{r cars}
summary(cars)
fit <- lm(dist ~ speed, data = cars)
fit
```
# Including Plots
You can also embed plots. See Figure \@ref(fig:pie) for example:
```{r pie, fig.cap='A fancy pie chart.', tidy=FALSE}
par(mar = c(0, 1, 0, 1))
pie(
c(280, 60, 20),
c('Sky', 'Sunny side of pyramid', 'Shady side of pyramid'),
col = c('#0292D8', '#F7EA39', '#C4B632'),
init.angle = -50, border = NA
)

open plotly in qwebview in interactive mode

I'm using the plotly library in offline mode with Python, and what I'm trying to do is create some plots, save them as local HTML files, and later load them into a QWebView.
This is the code for a boxplot with a dummy variable:
from PyQt5.QtWebKitWidgets import QWebView
# QUrl lives in QtCore; the original snippet used it without importing it,
# which raises NameError at view.load() below.
from PyQt5.QtCore import QUrl
import plotly
import plotly.graph_objs as go

# Build a box plot from a dummy sample.
x1 = [10, 3, 4, 5, 20, 4, 3]
trace1 = go.Box(
    x=x1)
layout = go.Layout(
    showlegend=True
)
data = [trace1]
fig = go.Figure(data=data, layout=layout)

# Write the plot to a standalone HTML file without opening a browser.
fn = '/home/matteo/plot.html'
plotly.offline.plot(fig, filename=fn,
                    auto_open=False)

# NOTE(review): a QWebView normally needs a running QApplication
# (QApplication(sys.argv) ... app.exec_()) -- confirm the surrounding
# application provides one, otherwise nothing will be displayed.
view = QWebView()
view.load(QUrl.fromLocalFile(fn))
view.show()
I'm facing 2 main problems:
if I let the code as it is, the QWebView won't show anything like in the image:
if I open the html file with the standard browser (Firefox for example), I can see and interact with the plot, and that's fine. But if I save the html page from the browser in a local directory and try to load the saved file into the QWebView I can see the plot, but cannot interact with it (maybe for some Javascript missing?!):
Anybody has some ideas how to embed an interactive offline made chart into a QWebView?
Ok, I think I have found what the problem is.
It seems that QWebView has some difficulty loading the local file because it is too heavy (about 2 MB for a simple plot).
So I used the option to not include the JavaScript when saving the local file, and to load the JavaScript separately afterwards, as described here.
In other words, create the initial html tags, include the result of the figure generated by plotly without the whole javascript code, and include the link of the javascript.
In this way the file is super light and QWebView does not have issue to open it.
# Create the initial HTML: one <head> that pulls plotly.js from the CDN.
# (The original concatenated two separate <head> blocks and never opened
# <html>/<body>, even though it closed them, producing malformed markup.)
raw_html = (
    '<html><head><meta charset="utf-8" />'
    '<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>'
    '</head><body>'
)
# Ask plotly for just the plot <div>, without embedding plotly.js.
# NOTE(review): when given a filename, plot() returns the path it wrote,
# not markup; output_type='div' makes it return the embeddable fragment.
raw_html += plotly.offline.plot(fig, include_plotlyjs=False, output_type='div')
# close the body and html tags
raw_html += '</body></html>'

Plotly (offline) for Python click event

Is it possible to add click events to a Plotly scatter plot (offline mode in Python)?
As an example, I want to change the shape of a set of scatter points upon being clicked.
What I tried so far
My understanding from reading other questions from the site (with no clear answer) is that I may have to produce the html and then edit it after the fact by putting in javascript code? So I could write a javascript function, save it off to my_js.js and then link to it from the html?
I've been doing some work with offline plots in plotly and had the same challenge.
Here's a kludge I've come up with which may prove to be inspiration for others.
Some limitations:
Assumes that you have the offline output in a single html file, for a single plot.
Assumes that your on events are named the same as the event handlers.
Requires Beautiful Soup 4.
Assumes you've got lxml installed.
Developed with Plotly 2.2.2
Code Snippet:
import bs4
def add_custom_plotly_events(
        filename,
        events=None,
        prettify_html=True):
    """Inject custom JS event handlers into a plotly offline HTML file.

    Rewrites *filename* in place so the handlers in *events* are registered
    on the figure (via ``myPlot.on(...)``) in a ``.then()`` chained onto the
    ``Plotly.newPlot`` call.

    Parameters
    ----------
    filename : path of the single-plot offline HTML file to rewrite.
    events : dict mapping a plotly event name to the JS source of its
        handler.  Each handler function must be named exactly like its
        event.  Defaults to console-logging click and hover handlers.
    prettify_html : if True, pretty-print the rewritten HTML.

    Raises
    ------
    ValueError : if the newPlot call cannot be located, occurs more than
        once, or the file has already been processed.
    """
    import re  # the original referenced re.compile without importing re

    # Build the default inside the function to avoid a shared mutable
    # default argument.
    if events is None:
        events = {
            "plotly_click": "function plotly_click(data) { console.log(data); }",
            "plotly_hover": "function plotly_hover(data) { console.log(data); }"
        }

    # the value we're looking for in the javascript
    find_string = "Plotly.newPlot"
    # stop if we find this value (means the file was already patched)
    stop_string = "then(function(myPlot)"

    def locate_newplot_script_tag(soup):
        # Find the script text node containing the newPlot call.
        # (Removed an unused ``soup.find_all('script')`` local.)
        script_tag = soup.find_all(string=re.compile(find_string))
        if len(script_tag) == 0:
            raise ValueError("Couldn't locate the newPlot javascript in {}".format(filename))
        elif len(script_tag) > 1:
            raise ValueError("Located multiple newPlot javascript in {}".format(filename))
        if script_tag[0].find(stop_string) > -1:
            raise ValueError("Already updated javascript, it contains:", stop_string)
        return script_tag[0]

    def split_javascript_lines(new_plot_script_tag):
        # Statements are split on ';' so we can patch a single one.
        return new_plot_script_tag.string.split(";")

    def find_newplot_creation_line(javascript_lines):
        for index, line in enumerate(javascript_lines):
            if line.find(find_string) > -1:
                return index, line
        raise ValueError("Missing new plot creation in javascript, couldn't find:", find_string)

    def join_javascript_lines(javascript_lines):
        # join the lines with the javascript statement terminator ;
        return ";".join(javascript_lines)

    def register_on_events(events):
        # One myPlot.on('<event>', <handler>) per event; relies on handler
        # function names matching the event names.
        on_events_registration = []
        for function_name in events:
            on_events_registration.append("myPlot.on('{}', {})".format(
                function_name, function_name
            ))
        return on_events_registration

    # load the file
    with open(filename) as inf:
        txt = inf.read()
    soup = bs4.BeautifulSoup(txt, "lxml")

    new_plot_script_tag = locate_newplot_script_tag(soup)
    javascript_lines = split_javascript_lines(new_plot_script_tag)
    line_index, line_text = find_newplot_creation_line(javascript_lines)
    on_events_registration = register_on_events(events)

    # Chain a .then() onto the newPlot promise that registers the handlers.
    # Parenthesize the whole concatenation before stripping newlines: the
    # original applied .replace() only to the trailing " })" literal due to
    # operator precedence, so embedded newlines were never removed.
    line_text = (line_text + ".then(function(myPlot) { "
                 + join_javascript_lines(on_events_registration)
                 + " })").replace('\n', ' ').replace('\r', '')

    # now add the function bodies we've registered in the on handlers
    for function_name in events:
        javascript_lines.append(events[function_name])
    # update the specific line
    javascript_lines[line_index] = line_text
    # update the text of the script tag
    new_plot_script_tag.string.replace_with(join_javascript_lines(javascript_lines))

    # save the file again
    with open(filename, "w") as outf:
        # tbh the pretty output is still fairly ugly
        if prettify_html:
            for line in soup.prettify(formatter=None):
                outf.write(str(line))
        else:
            outf.write(str(soup))
According to Click events in python offline mode? on Plotly's community site this is not supported, at least as of December 2015.
That post does contain some hints as to how to implement this functionality yourself, if you're feeling adventurous.

Categories