scraping.py
# Imports
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
import datetime as dt
from webdriver_manager.chrome import ChromeDriverManager


def scrape_all():
    """Run all scraping functions and return the results in a dictionary."""
    # Initiate headless driver for deployment
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=True)
    news_title, news_paragraph = mars_news(browser)
    # Run all scraping functions and store the results in a dictionary
    data = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image": featured_image(browser),
        "facts": mars_facts(),
        "last_modified": dt.datetime.now(),
        "hemispheres": hemispheres(browser)
    }
    # Stop the webdriver and return the data
    browser.quit()
    return data
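
# For illustration only: the dictionary returned by scrape_all() is shaped
# roughly like the sketch below. The values here are placeholders, not real
# scraped data; "facts" is an HTML <table> string and "last_modified" is a
# datetime object.
#
#     {
#         "news_title": "Example Headline",
#         "news_paragraph": "Example teaser text...",
#         "featured_image": "https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/<image-path>",
#         "facts": "<table border=\"1\" class=\"dataframe ...\">...</table>",
#         "last_modified": datetime.datetime(2021, 1, 1, 0, 0),
#         "hemispheres": [{"img_url": "...", "title": "..."}, ...]
#     }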


# Scrape Mars News
def mars_news(browser):
    """Scrape the latest news title and teaser paragraph from the Mars news site."""
    # Visit the Mars NASA news site
    url = 'https://data-class-mars.s3.amazonaws.com/Mars/index.html'
    browser.visit(url)
    # Optional delay for loading the page
    browser.is_element_present_by_css('div.list_text', wait_time=1)
    # Convert the browser html to a soup object
    html = browser.html
    news_soup = soup(html, 'html.parser')
    # Add try/except for error handling
    try:
        slide_elem = news_soup.select_one('div.list_text')
        # Use the parent element to find the news title
        news_title = slide_elem.find('div', class_='content_title').get_text()
        # Use the parent element to find the paragraph text
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        return None, None
    return news_title, news_p
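
# For illustration only: on success mars_news() returns a tuple of two
# strings, e.g. ("Example Headline", "Example teaser paragraph...");
# both values here are placeholders. If the expected elements are missing,
# it returns (None, None).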


# Scrape the JPL Space Images Featured Image
def featured_image(browser):
    """Scrape the full-size featured image URL from the JPL space images site."""
    # Visit URL
    url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
    browser.visit(url)
    # Find and click the full image button
    full_image_elem = browser.find_by_tag('button')[1]
    full_image_elem.click()
    # Parse the resulting html with soup
    html = browser.html
    img_soup = soup(html, 'html.parser')
    # Add try/except for error handling
    try:
        # Find the relative image url
        img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        return None
    # Use the base url to create an absolute url
    img_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{img_url_rel}'
    return img_url
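
# For illustration only: if the page served a relative src such as
# 'image/featured/mars1.jpg' (a placeholder path), the absolute URL would be
# 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/image/featured/mars1.jpg'.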


# Scrape Mars Facts
def mars_facts():
    """Scrape the Mars facts table and return it as an HTML table string."""
    # Add try/except for error handling
    try:
        # Use 'read_html' to scrape the facts table into a dataframe
        df = pd.read_html('https://data-class-mars-facts.s3.amazonaws.com/Mars_Facts/index.html')[0]
    except Exception:
        return None
    # Assign columns and set index of dataframe
    df.columns = ['Description', 'Mars', 'Earth']
    df.set_index('Description', inplace=True)
    # Convert dataframe into HTML format, adding Bootstrap table classes
    return df.to_html(classes="table table-striped")
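
# For illustration only: pandas renders the dataframe as an HTML string that
# begins roughly like the sketch below (the row contents are placeholders):
#
#     <table border="1" class="dataframe table table-striped">
#       <thead>...</thead>
#       <tbody>...</tbody>
#     </table>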


# Deliverable 2: Scrape Hemisphere Data
def hemispheres(browser):
    """Scrape the full-resolution image URL and title for each Mars hemisphere."""
    # Visit url
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    # Collect the four hemisphere images and titles
    hemisphere_image_urls = []
    for i in range(4):
        hemisphere = {}
        # Click the link for the i-th hemisphere
        browser.find_by_css('a.product-item h3')[i].click()
        # Find the 'Sample' link that points to the full-resolution image
        element = browser.links.find_by_text('Sample').first
        img_url = element['href']
        # Get the hemisphere title
        title = browser.find_by_css("h2.title").text
        hemisphere["img_url"] = img_url
        hemisphere["title"] = title
        hemisphere_image_urls.append(hemisphere)
        # Navigate back to the results page for the next hemisphere
        browser.back()
    return hemisphere_image_urls
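
# For illustration only: hemispheres() returns a list of four dictionaries
# shaped like the sketch below (the URL and title are placeholder values):
#
#     [
#         {"img_url": "https://astrogeology.usgs.gov/.../hemisphere_full.jpg",
#          "title": "Cerberus Hemisphere Enhanced"},
#         ...
#     ]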


if __name__ == "__main__":
    # If running as a script, print the scraped data
    print(scrape_all())