Ammar Azman committed
Commit: 3aee80f
Parent(s): 56b5ebb

Upload scraper.py

scraper.py (ADDED, +88 -0)
from xtractor.utils import (
    extractor,
    read_the_json,
    dumps_the_json,
    jsonl_converter,
    multi_threading,
)
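extractor comes from the repo's own xtractor.utils module, which is not part of this commit. Judging by how its return value is used below (find / find_all calls), it presumably fetches a URL and returns a BeautifulSoup tree. A minimal stand-in under that assumption (the timeout is illustrative, not from the source):

# Hypothetical stand-in for xtractor.utils.extractor -- the real helper
# is not shown in this commit. Assumes it returns a BeautifulSoup tree.
import requests
from bs4 import BeautifulSoup

def extractor_sketch(link: str) -> BeautifulSoup:
    # Fetch the page and parse it; raise on HTTP errors.
    resp = requests.get(link, timeout=30)
    resp.raise_for_status()
    return BeautifulSoup(resp.text, "html.parser")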
def fetch_links(link="https://www.shinjiru.com.my/blog/"):
    """Collect blog-post URLs from one listing page."""
    soup = extractor(link)
    div_tags = soup.find_all("div", class_="row")
    # div_tags = div_tags.find_all("div", class_="col-sm-6 item")

    temp = []
    for div_tag in div_tags:
        a_tags = div_tag.find_all("a", href=True)
        for a_tag in a_tags:
            # Keep links into /blog/, skipping pagination and category pages.
            if (
                "blog" in a_tag["href"]
                and "page" not in a_tag["href"]
                and "category" not in a_tag["href"]
            ):
                temp.append(a_tag["href"])

    # Deduplicate; order is not preserved.
    all_links = list(set(temp))
    return all_links
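A hypothetical one-off usage (what the filter returns depends on the live markup of the site):

# Grab the post URLs from a single listing page and peek at a few.
post_links = fetch_links("https://www.shinjiru.com.my/blog/page/1")
print(len(post_links), post_links[:3])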
def get_all_links():
    """Walk every listing page and merge their post URLs."""
    temp = []
    # The page count (38) is hard-coded; adjust it if the blog grows.
    for i in range(38):
        base_link = f"https://www.shinjiru.com.my/blog/page/{i + 1}"
        all_links = fetch_links(link=base_link)
        temp.append(all_links)

    # Flatten the list of per-page lists, then deduplicate across pages.
    full_links = [i for x in temp for i in x]
    full_links = list(set(full_links))
    return full_links
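multi_threading is imported above but never used, and its signature is not shown in this commit, so the page sweep runs serially. A sketch of the same sweep parallelized with the standard library instead (the worker count is an arbitrary choice):

# Hypothetical parallel variant using concurrent.futures, since the
# signature of the imported multi_threading helper is not shown here.
from concurrent.futures import ThreadPoolExecutor

def get_all_links_threaded(pages: int = 38, workers: int = 8):
    urls = [f"https://www.shinjiru.com.my/blog/page/{i + 1}" for i in range(pages)]
    with ThreadPoolExecutor(max_workers=workers) as pool:
        results = pool.map(fetch_links, urls)
    # Flatten and deduplicate, as in get_all_links().
    return list({link for page_links in results for link in page_links})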
def get_articles(link):
    """Return (title, article) for one post, or None if parsing fails."""
    soup = extractor(link)
    # div_tag = soup.find("div", class_="row")
    try:
        div_tag = soup.find("div", class_="col-lg")

        title = div_tag.find("h1").text
        p_tags = div_tag.find_all("p")

        # Join every paragraph into a single article body.
        all_p_text = [x.text for x in p_tags]
        article = " ".join(all_p_text)

        return title, article
    except AttributeError:
        # find() returned None, so the expected markup is missing.
        return None
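Note that when get_articles returns None, the tuple unpacking in the main loop below raises TypeError, which the loop's except then swallows. An explicit check is a cleaner alternative (sketch):

# Sketch: handle the None case explicitly instead of relying on the
# caller's except to swallow the failed tuple unpacking.
result = get_articles(link)
if result is not None:
    title, article = result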
if __name__ == "__main__":
    # First pass (apparently run once, then commented out): crawl the
    # listing pages and save the post URLs to disk.
    # full_links = get_all_links()
    # print(len(full_links))
    # data = {"links": full_links}
    # dumps_the_json(data, json_file_name="./shinjiru/all_links_shinjiru.json")

    # Second pass: reload the saved URLs and scrape each article.
    links = read_the_json("./shinjiru/all_links_shinjiru.json")
    links = links["links"]

    title_ = []
    body_ = []
    for link in links:
        try:
            title, article = get_articles(link)
            title_.append(title)
            body_.append(article)
        except Exception:
            # get_articles may return None; unpacking it raises TypeError,
            # which (like any other per-article failure) is skipped here.
            pass

    data = {"title": title_, "body": body_}

    dumps_the_json(data, json_file_name="./shinjiru/shinjiru_article.json")
    print("DUMPS!")
    jsonl_converter(
        json_file_path="./shinjiru/shinjiru_article.json",
        json_l_file_path="./shinjiru/shinjiru_article.jsonl",
        col_1_name="title",
        col_2_name="body",
    )
    print("CONVERTED!")
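jsonl_converter is also part of the repo's xtractor.utils module. Given the column names passed in, the output is presumably one JSON object per article. A standard-library sketch of that conversion, assuming that record shape:

# Hypothetical stdlib equivalent of jsonl_converter, assuming it pairs the
# two columns row-by-row into one JSON object per line.
import json

def to_jsonl_sketch(json_path: str, jsonl_path: str) -> None:
    with open(json_path, encoding="utf-8") as f:
        data = json.load(f)
    with open(jsonl_path, "w", encoding="utf-8") as f:
        for title, body in zip(data["title"], data["body"]):
            f.write(json.dumps({"title": title, "body": body}, ensure_ascii=False) + "\n")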