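"""scraping.py: scrape Mars news, a featured image, facts, and hemisphere data.

Uses Splinter to drive a headless Chrome browser and BeautifulSoup to parse
the resulting HTML. scrape_all() bundles every result into a single
dictionary for downstream use.
"""
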
# Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import datetime as dt


def scrape_all():
    # Initiate headless driver for deployment
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=True)

    news_title, news_paragraph = mars_news(browser)

    # Run all scraping functions and store the results in a dictionary
    data = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image": featured_image(browser),
        "facts": mars_facts(),
        "last_modified": dt.datetime.now(),
        "hemispheres": hemispheres(browser)
    }

    # Stop the webdriver and return the data
    browser.quit()
    return data
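
# The dictionary returned by scrape_all() has this shape (values shown are
# illustrative placeholders, not real scrape results):
# {
#     "news_title": "<article headline>",
#     "news_paragraph": "<article teaser text>",
#     "featured_image": "https://spaceimages-mars.com/<relative image path>",
#     "facts": "<table class=...>...</table>",
#     "last_modified": datetime.datetime(...),
#     "hemispheres": [{"image_url": "...", "image_title": "..."}, ...]
# }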


def mars_news(browser):
    # Visit the Mars NASA news site
    url = 'https://redplanetscience.com/'
    browser.visit(url)

    # Optional delay for loading the page
    browser.is_element_present_by_css('div.list_text', wait_time=1)

    html = browser.html
    news_soup = soup(html, 'html.parser')

    # Add try/except for error handling
    try:
        slide_elem = news_soup.select_one('div.list_text')
        # Use the parent element to find the article title
        news_title = slide_elem.find('div', class_='content_title').get_text()
        # Use the parent element to find the paragraph text
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        return None, None

    return news_title, news_p
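
# Note: the (None, None) fallback lets scrape_all() keep running if the page
# layout changes and the expected tags are missing, rather than raising an
# unhandled AttributeError.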


# ### Featured Images

def featured_image(browser):
    # Visit URL
    url = 'https://spaceimages-mars.com/'
    browser.visit(url)

    # Find and click the full image button
    full_image_elem = browser.find_by_tag('button')[1]
    full_image_elem.click()

    # Parse the resulting html with soup
    html = browser.html
    img_soup = soup(html, 'html.parser')

    try:
        # Find the relative image url
        img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        return None

    # Use the base URL to create an absolute URL
    img_url = f'https://spaceimages-mars.com/{img_url_rel}'
    return img_url


# ### Mars Facts

def mars_facts():
    try:
        # Use 'read_html' to scrape the facts table into a dataframe
        df = pd.read_html('https://data-class-mars-facts.s3.amazonaws.com/Mars_Facts/index.html')[0]
    except BaseException:
        return None

    # Assign columns and set the index of the dataframe
    df.columns = ['Description', 'Mars', 'Earth']
    df.set_index('Description', inplace=True)

    # Convert the dataframe into HTML format, adding Bootstrap classes
    return df.to_html(classes="table table-striped")
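
# pandas appends the extra classes after its default "dataframe" class,
# producing markup like <table border="1" class="dataframe table table-striped">,
# which Bootstrap renders as a striped table once the HTML is embedded in a page.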


# Function for collecting hemisphere images and titles

def hemispheres(browser):
    # Use browser to visit the URL
    url = 'https://marshemispheres.com/'
    browser.visit(url)

    # Create a list to hold the images and titles
    hemisphere_image_urls = []

    # Retrieve the image url and title for each of the four hemispheres
    for i in range(4):
        hemisphere = {}

        # Click the thumbnail link for the i-th hemisphere
        browser.find_by_css('a.product-item h3')[i].click()

        # Grab the full-resolution sample image link and the page title
        # (browser.links.find_by_text replaces the deprecated find_link_by_text)
        image_url = browser.links.find_by_text('Sample')['href']
        image_title = browser.find_by_css('h2.title').text

        hemisphere['image_url'] = image_url
        hemisphere['image_title'] = image_title
        hemisphere_image_urls.append(hemisphere)

        # Return to the index page for the next iteration
        browser.back()

    # Return the list that holds the dictionary of each image url and title
    return hemisphere_image_urls


if __name__ == "__main__":
    # If running as a script, print the scraped data
    print(scrape_all())