# scraper.py — 112 lines (78 loc) · 3.82 KB
import random
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
import pandas as pd
# from tqdm import tqdm
from stqdm import stqdm as tqdm
from directories import DATA_DIR
class Scraper:
    """Scrape search results and post contents from contractortalk.com."""

    def __init__(self):
        # Base URL every relative link from the site is resolved against.
        self.base_url = "https://www.contractortalk.com/"

    def _get_page_source(self, url):
        """Fetch *url* and return it parsed as a BeautifulSoup document.

        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout if the server does not answer in time.
        """
        # timeout keeps one hung request from stalling the whole scrape;
        # no-cache asks intermediaries for fresh results.
        response = requests.get(url, headers={'Cache-Control': 'no-cache'}, timeout=30)
        response.raise_for_status()
        return BeautifulSoup(response.content, features="html.parser")

    def scrape_search_results(self, search_terms):
        """Search on Contractor Talk and Scrape all result page links.

        search_terms: iterable of query words; they are joined with '+'.
        Returns the path of the CSV file the post links were written to.
        """
        search_terms = "+".join(search_terms)
        # The site embeds a (seemingly arbitrary) numeric search id in the URL.
        search_id = random.randint(111111, 999999)
        search_url = self._format_search_url(search_terms, search_id)
        page_links, total_page_count, search_url = self._scrape_search_result_page_links(search_url)
        results = pd.DataFrame(columns=["post link"], data=page_links)
        desc = f"Scraping {total_page_count} pages of results"
        for _ in tqdm(range(total_page_count), desc=desc):
            if search_url is None:
                break  # ran out of pages early
            page_links, _, search_url = self._scrape_search_result_page_links(search_url)
            df = pd.DataFrame(columns=["post link"], data=page_links)
            results = pd.concat([results, df], ignore_index=True)
        filepath = DATA_DIR / f"{search_terms}.csv"
        # index=False so re-reading/re-writing this file does not accumulate
        # "Unnamed: 0" index columns.
        results.to_csv(filepath, index=False)
        return filepath

    def scrape_posts_from_result_links(self, filepath):
        """Fetch title/text for every post link in the CSV at *filepath*.

        The CSV (as written by scrape_search_results) is updated in place
        with "title", "text" and "url" columns.
        """
        df = pd.read_csv(filepath)
        tqdm.pandas(desc=f"Scraping {df.shape[0]} posts")
        df[["title", "text", "url"]] = df["post link"].progress_apply(self._get_post_contents)
        df.to_csv(filepath, index=False)

    def _get_post_contents(self, post_link):
        """Return a Series of (title, text, url) for one post page."""
        post_url = urljoin(self.base_url, post_link)
        soup = self._get_page_source(post_url)
        # The post's container div carries an "itemid" equal to the thread URL
        # with the trailing path segment dropped.
        itemid = post_url.rsplit("/", 1)[0] + "/"
        post_div = soup.find("div", attrs={"itemid": itemid})
        title = post_div.find("h1", attrs={"class": "MessageCard__thread-title"}).text.strip()
        text = post_div.find("article", attrs={"qid": "post-text"}).text.strip()
        return pd.Series([title, text, post_url])

    def _format_search_url(self, search_terms, search_id, page=1):
        """Build the search-results URL for *page* of the given query."""
        search_url = urljoin(self.base_url, f"search/{search_id}/")
        # Page 1 URLs omit the "page" parameter entirely.
        page_param = f"page={page}&" if page > 1 else ""
        return urljoin(search_url, f"?{page_param}q={search_terms}&o=relevance")

    def _scrape_search_result_page_links(self, search_url):
        """Scrape one results page.

        Returns (links on this page, total page count, next page URL or None).
        """
        soup = self._get_page_source(search_url)
        posts = soup.find_all("a", attrs={"qid": "search-results-title"})
        page_links = [post["href"] for post in posts]
        # Should we continue?  The next-page button may be missing altogether
        # when the results fit on a single page.
        next_page_btn = soup.find("a", attrs={"qid": "page-nav-next-button"})
        if next_page_btn is not None and next_page_btn["aria-disabled"] == "false":
            next_page_link = urljoin(self.base_url, next_page_btn["href"])
        else:
            next_page_link = None
        # How many pages are there?  Fall back to 1 when no pager is rendered.
        page_btns = soup.find_all("a", attrs={"qid": "page-nav-other-page"})
        total_page_count = int(page_btns[-1].text.strip()) if page_btns else 1
        return page_links, total_page_count, next_page_link
if __name__ == "__main__":
    scraper = Scraper()
    # Step 1 (already done for this query — uncomment to redo the search):
    # filepath = scraper.scrape_search_results(["hardie", "siding"])
    # Step 2: fill in title/text for every collected post link.
    # Resolve the CSV via DATA_DIR (consistent with scrape_search_results)
    # instead of a hard-coded relative path, so the script works regardless
    # of the current working directory.
    filepath = DATA_DIR / "hardie+siding.csv"
    scraper.scrape_posts_from_result_links(filepath)