import os
import random
import time
import requests
import json

# cchardet, when installed, speeds up bs4's character-encoding detection;
# importing it here just makes the optional dependency explicit
import cchardet  # noqa: F401
from fake_useragent import UserAgent
from bs4 import BeautifulSoup, SoupStrainer
from constants import TED_URL, HEADERS, SCRAPED_TALK_PAGES_PATH, SCRAPED_CATALOG_PAGES_PATH
from utilities import create_logger, save_html_to_file

# speed up parsing by letting BeautifulSoup build only the page sections we need
catalog_parse_only = SoupStrainer('div', id='browse-results')
talk_page_parse_only = SoupStrainer('main', id='maincontent')
talk_data_parse_only = SoupStrainer('script', id='__NEXT_DATA__')
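
# Illustrative only: a SoupStrainer makes BeautifulSoup build just the matching
# subtree, so a parse like the sketch below skips everything outside
# <div id="browse-results"> instead of building the whole document tree:
#     BeautifulSoup(html, 'lxml', parse_only=catalog_parse_only)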

logger = create_logger()
ua = UserAgent()


class WebScrappy:
    def __init__(self):
        self.last_page = WebScrappy.get_pages_count()
        self.start_scraping()

    @staticmethod
    def get_pages_count():
        """
        Request the talks catalog page and extract the last page number from its pagination
        :return: last page number
        :rtype: int
        """
        response = requests.get(TED_URL + '/talks', headers=HEADERS)
        if response.status_code != 200:
            logger.error(f'Scraping pagination number resulted in {response.status_code} code')
            logger.error(f'Printing page content: {response.content}')
        catalog_page = BeautifulSoup(response.content, 'lxml', parse_only=catalog_parse_only)
        # the last page number is the element right after the pagination gap span
        gap_span = catalog_page.find('span', class_='pagination__item pagination__gap')
        last_page_num = gap_span.find_next_sibling().get_text()
        return int(last_page_num)
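
    # Sketch of the pagination markup this relies on (simplified and illustrative;
    # only the gap span's class name is taken from the code above). The element
    # following the '…' gap is assumed to hold the last page number:
    #     <span class="pagination__item pagination__gap">…</span>
    #     <a class="pagination__item" href="?page=99">99</a>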

    @staticmethod
    def get_catalog_page(page_number):
        """
        Download a catalog page, cache its html to disk and parse it
        :param page_number: catalog page number
        :return: parsed catalog page
        :rtype: BeautifulSoup
        """
        # wait a random interval between requests to avoid hammering the server
        time.sleep(random.randint(10, 20))
        # rotate the User-Agent on every request
        HEADERS['User-Agent'] = ua.random
        url = f'{TED_URL}/talks?page={page_number}&sort=oldest'
        response = requests.get(url, headers=HEADERS)
        if response.status_code != 200:
            logger.error(f'Scraping catalog page {page_number} resulted in {response.status_code} code')
            logger.error(f'Printing page content: {response.content}')
        save_html_to_file(
            response.content,
            os.path.join(SCRAPED_CATALOG_PAGES_PATH, f'catalog_page_{page_number}.html')
        )
        catalog_page = BeautifulSoup(response.content, 'lxml', parse_only=catalog_parse_only)
        return catalog_page

    @staticmethod
    def get_talk_page(url):
        """
        Download a talk's page, cache its html to disk and return the raw content
        :param url: url of a talk's page
        :return: talk page html content
        """
        time.sleep(random.randint(15, 20))
        HEADERS['User-Agent'] = ua.random
        response = requests.get(url, headers=HEADERS)
        if response.status_code != 200:
            logger.error(f'Scraping talk page {url} resulted in {response.status_code} code')
            logger.error(f'Printing page content: {response.content}')
        # name the cached file after the talk's url slug
        filename = url.split('/')[-1]
        save_html_to_file(response.content, os.path.join(SCRAPED_TALK_PAGES_PATH, f'{filename}.html'))
        return response.content

    @staticmethod
    def parse_talk_transcript(transcript_data):
        """
        Join the transcript cues into a single text
        :param transcript_data: response data containing the transcript
        :return: transcript of a talk
        :rtype: str
        """
        transcript_data = transcript_data.get('translation')
        # some talks have no transcript
        if not transcript_data:
            return ''
        paragraphs_list = transcript_data['paragraphs']
        text_list = []
        for paragraph in paragraphs_list:
            cues = paragraph.get('cues')
            paragraph_text = [cue.get('text').replace('\n', ' ') for cue in cues]
            text_list.append(' '.join(paragraph_text))
        transcript = ' '.join(text_list)
        return transcript
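
    # Assumed shape of transcriptData, inferred from the fields accessed above
    # (key names are the ones the code reads; values are illustrative):
    #     {'translation': {'paragraphs': [{'cues': [{'text': 'Hello\nworld'}]}]}}
    # which parse_talk_transcript would flatten to 'Hello world'.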

    @staticmethod
    def parse_talk_page_info(page_content):
        """
        Extract all information about a talk from its page html content
        :param page_content: talk page html content
        :return: talk information from its page on TED
        :rtype: dict
        """
        # parse the embedded __NEXT_DATA__ script, which holds almost all talk data as json
        talk_page_data = BeautifulSoup(page_content, 'lxml', parse_only=talk_data_parse_only)
        # parse the visible page content (needed for the likes count)
        talk_page_content = BeautifulSoup(page_content, 'lxml', parse_only=talk_page_parse_only)
        talk_page_data = json.loads(talk_page_data.script.get_text())
        page_right_side = talk_page_content.find('aside')
        video_data = talk_page_data['props']['pageProps']['videoData']
        # playerData is itself a json string nested inside videoData
        player_data = json.loads(video_data['playerData'])
        event = player_data['event']
        talk_data = {
            '_id': video_data['id'],
            'title': video_data['title'],
            'duration': video_data['duration'],
            'views': video_data['viewedCount'],
            # slice off the decorative characters wrapped around the likes label
            'likes': page_right_side.find_previous_sibling('div').select_one('i.icon-heart + span').get_text()[2:-1],
            'summary': video_data['description'],
            'event': event,
            'recorded_date': video_data['recordedOn'],
            'published_date': video_data['publishedAt'],
            'topics': [
                {'id': topic['id'], 'name': topic['name']} for topic in video_data['topics']['nodes']
            ],
            'speakers': [
                {
                    'name': ' '.join([speaker['firstname'], speaker['lastname']]).strip(),
                    # TED-Ed talks get a fixed occupation label instead of a speaker description
                    'occupation': 'Educator' if event == 'TED-Ed' else speaker['description']
                } for speaker in video_data['speakers']['nodes']
            ],
            'subtitle_languages': [
                {
                    'name': language['languageName'],
                    'code': language['languageCode']
                } for language in player_data['languages']
            ],
            'youtube_video_code': player_data.get('external', {}).get('code'),
            'related_videos': [video['id'] for video in video_data['relatedVideos']],
            'transcript': WebScrappy.parse_talk_transcript(talk_page_data['props']['pageProps']['transcriptData'])
        }
        return talk_data
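
    # Assumed layout of the __NEXT_DATA__ json, reduced to the keys read above
    # (playerData is a nested json *string* that is decoded separately):
    #     {'props': {'pageProps': {'videoData': {..., 'playerData': '{...}'},
    #                              'transcriptData': {...}}}}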

    @staticmethod
    def scrape_catalog_page_info(catalog_page):
        """
        Scrape every talk linked from a catalog page
        :param catalog_page: parsed catalog page
        :return: list of talks with their info
        :rtype: list
        """
        data = []
        # find the div of every talk in the catalog
        talk_divs = catalog_page.find_all('div', class_='media media--sm-v')
        for div in talk_divs:
            # the first direct child holds the talk's image and link
            talk_image, _ = div.find_all(recursive=False)
            filename = talk_image.a['href'].split('/')[-1]
            file_path = os.path.join(SCRAPED_TALK_PAGES_PATH, f'{filename}.html')
            # skip talks whose pages have already been scraped and cached
            if os.path.exists(file_path):
                continue
            # build the absolute url of the TED talk page
            url = TED_URL + talk_image.a['href']
            page_content = WebScrappy.get_talk_page(url)
            talk_page_info = WebScrappy.parse_talk_page_info(page_content)
            data.append({**talk_page_info, 'page_url': url})
            logger.debug(f'Finished scraping talk - {talk_page_info.get("title")}')
        return data

    def start_scraping(self):
        print('Starting to web scrape')
        # iterate over all catalog pages
        for page_number in range(1, self.last_page + 1):
            file_path = os.path.join(SCRAPED_CATALOG_PAGES_PATH, f'catalog_page_{page_number}.html')
            # reuse the cached catalog page if it has already been scraped
            if os.path.exists(file_path):
                with open(file_path, 'rb') as f:
                    catalog_page = BeautifulSoup(f.read(), 'lxml', parse_only=catalog_parse_only)
            else:
                catalog_page = WebScrappy.get_catalog_page(page_number)
            catalog_page_talks_info = WebScrappy.scrape_catalog_page_info(catalog_page)
            logger.debug(f'Finished scraping page {page_number}/{self.last_page}')
        print('Finished scraping! :)')


if __name__ == '__main__':
    # create folders for storing intermediate scraping data
    os.makedirs(os.path.join('data', 'scraped_catalog_pages'), exist_ok=True)
    os.makedirs(os.path.join('data', 'scraped_talk_pages'), exist_ok=True)
    # start web scraping
    scrappy = WebScrappy()
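
# Expected cache layout after a run, assuming SCRAPED_CATALOG_PAGES_PATH and
# SCRAPED_TALK_PAGES_PATH in constants.py point at the folders created above
# (file names follow the code; the talk slug is illustrative):
#     data/scraped_catalog_pages/catalog_page_1.html
#     data/scraped_talk_pages/<talk-slug>.html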