:recycle: [Refactor] Move hardcoded consts to network_configs

Changed files:
- documents/webpage_content_extractor.py +18 -19
- networks/google_searcher.py +2 -3
- networks/html_fetcher.py +2 -5
- networks/network_configs.py +20 -0
documents/webpage_content_extractor.py (CHANGED)

```diff
@@ -5,6 +5,7 @@ from bs4 import BeautifulSoup, Comment, NavigableString, Tag
 from tiktoken import get_encoding as tiktoken_get_encoding
 from utils.logger import logger
 from markdownify import markdownify
+from networks.network_configs import IGNORE_CLASSES
 
 # from trafilatura import extract as extract_text_from_html
 # from inscriptis import get_text as extract_text_from_html
@@ -26,17 +27,7 @@ class WebpageContentExtractor:
 
         ignore_tags = ["script", "style", "button"]
 
-        ignore_classes = [
-            "sidebar",
-            "footer",
-            "related",
-            "comment",
-            "topbar",
-            "menu",
-            "offcanvas",
-            "navbar",
-        ]
-        ignore_classes_pattern = f'{"|".join(ignore_classes)}'
+        ignore_classes_pattern = f'{"|".join(IGNORE_CLASSES)}'
         removed_element_counts = 0
         for element in soup.find_all():
             class_str = ""
@@ -61,10 +52,12 @@ class WebpageContentExtractor:
                 or (re.search(ignore_classes_pattern, class_str, flags=re.IGNORECASE))
                 or (re.search(ignore_classes_pattern, id_str, flags=re.IGNORECASE))
             ):
-
-
-
-
+                try:
+                    logger.note(f"Removing:\n{element}")
+                    logger.warn(class_str)
+                except:
+                    # logger.note(f"Removing unknown element")
+                    pass
                 element.decompose()
                 removed_element_counts += 1
 
@@ -76,9 +69,14 @@ class WebpageContentExtractor:
         return html_str
 
     def extract(self, html_path):
-        logger.note(f"Extracting content from: {html_path}")
-        with open(html_path, "r", encoding="utf-8") as rf:
-            html_str = rf.read()
+        logger.note(f"Extracting content from: {html_path}")
+
+        if not Path(html_path).exists():
+            logger.warn(f"File not found: {html_path}")
+            return ""
+
+        with open(html_path, "r", encoding="utf-8") as rf:
+            html_str = rf.read()
 
         html_str = self.filter_html_str(html_str)
 
@@ -108,8 +106,9 @@ if __name__ == "__main__":
         / "files"
         / "urls"
         # / "stackoverflow.com_questions_295135_turn-a-string-into-a-valid-filename.html"
-        / "www.liaoxuefeng.com_wiki_1016959663602400_1017495723838528.html"
+        # / "www.liaoxuefeng.com_wiki_1016959663602400_1017495723838528.html"
         # / "docs.python.org_zh-cn_3_tutorial_interpreter.html"
+        / "zh.wikipedia.org_zh-hans_%E7%94%B0%E4%B8%AD%E6%9F%A0%E6%AA%AC.html"
     )
     extractor = WebpageContentExtractor()
     main_content = extractor.extract(html_path)
```
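As a note on how `IGNORE_CLASSES` is consumed: the class names are joined into one unanchored regex alternation, and any element whose `class` or `id` matches is decomposed. Below is a minimal standalone sketch of that loop; it assumes bs4 >= 4.9 for the `decomposed` property, and uses that guard instead of the original's try/except around logging (the real `filter_html_str` also drops `ignore_tags` and handles more cases):

```python
import re

from bs4 import BeautifulSoup

IGNORE_CLASSES = ["sidebar", "footer", "related", "comment", "topbar", "offcanvas", "navbar", "post_side"]

html_str = """
<div class="main-content"><p>Keep me</p></div>
<div class="sidebar-widget"><p>Drop me</p></div>
<nav id="top-navbar"><a href="#">Drop me too</a></nav>
"""

soup = BeautifulSoup(html_str, "html.parser")
ignore_classes_pattern = f'{"|".join(IGNORE_CLASSES)}'  # "sidebar|footer|...|post_side"
removed_element_counts = 0
for element in soup.find_all():
    if element.decomposed:  # child of an already-removed element; skip it
        continue
    class_str = " ".join(element.get("class") or [])  # bs4 returns class as a list
    id_str = element.get("id") or ""
    if re.search(ignore_classes_pattern, class_str, flags=re.IGNORECASE) or re.search(
        ignore_classes_pattern, id_str, flags=re.IGNORECASE
    ):
        element.decompose()  # drop the element and everything inside it
        removed_element_counts += 1

print(soup)  # only the "main-content" div survives
print(f"Removed elements: {removed_element_counts}")
```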
networks/google_searcher.py (CHANGED)

```diff
@@ -3,6 +3,7 @@ from pathlib import Path
 from utils.enver import enver
 from utils.logger import logger
 from networks.filepath_converter import QueryToFilepathConverter
+from networks.network_configs import REQUESTS_HEADERS
 
 
 class GoogleSearcher:
@@ -16,9 +17,7 @@ class GoogleSearcher:
     def send_request(self, result_num=10, safe=False):
         self.request_response = requests.get(
             url=self.url,
-            headers={
-                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62",
-            },
+            headers=REQUESTS_HEADERS,
             params={
                 "q": self.query,
                 "num": result_num,
```
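With the header block centralized, `send_request` reduces to the shared constant plus the query params. A rough standalone equivalent of the request it sends, where the search URL and query are stand-in assumptions (`GoogleSearcher` builds `self.url` elsewhere, and the proxy plumbing is omitted):

```python
import requests

from networks.network_configs import REQUESTS_HEADERS

# Hypothetical values standing in for self.query / result_num
query = "turn a string into a valid filename"
response = requests.get(
    url="https://www.google.com/search",  # assumed; not shown in this diff
    headers=REQUESTS_HEADERS,
    params={"q": query, "num": 10},
)
print(response.status_code, len(response.text))
```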
networks/html_fetcher.py (CHANGED)

```diff
@@ -4,8 +4,7 @@ from pathlib import Path
 from utils.enver import enver
 from utils.logger import logger
 from networks.filepath_converter import UrlToFilepathConverter
-
-IGNORE_HOSTS = ["weibo.com"]
+from networks.network_configs import IGNORE_HOSTS, REQUESTS_HEADERS
 
 
 class HTMLFetcher:
@@ -24,9 +23,7 @@ class HTMLFetcher:
     def send_request(self):
         self.request_response = requests.get(
             url=self.url,
-            headers={
-                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62",
-            },
+            headers=REQUESTS_HEADERS,
             proxies=self.enver.requests_proxies,
         )
 
```
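`IGNORE_HOSTS` moves out of this module as well. The diff does not show where the fetcher checks it, so the guard below is only a plausible sketch; the helper name and the substring match are assumptions:

```python
from urllib.parse import urlparse

from networks.network_configs import IGNORE_HOSTS

def is_ignored_host(url: str) -> bool:
    # Hypothetical helper: skip URLs whose host matches an ignored entry
    host = urlparse(url).netloc.lower()
    return any(ignored in host for ignored in IGNORE_HOSTS)

print(is_ignored_host("https://weibo.com/u/12345"))   # True
print(is_ignored_host("https://docs.python.org/3/"))  # False
```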
networks/network_configs.py (ADDED)

```diff
@@ -0,0 +1,20 @@
+IGNORE_CLASSES = [
+    "sidebar",
+    "footer",
+    "related",
+    "comment",
+    "topbar",
+    # "menu",
+    "offcanvas",
+    "navbar",
+    "post_side",
+]
+
+IGNORE_HOSTS = [
+    "weibo.com",
+    "hymson.com",
+]
+
+REQUESTS_HEADERS = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62",
+}
```
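One property of this list worth keeping in mind: `filter_html_str` joins it into an unanchored alternation, so matching is substring-based rather than exact. A quick check of that behavior:

```python
import re

from networks.network_configs import IGNORE_CLASSES

pattern = "|".join(IGNORE_CLASSES)  # "sidebar|footer|related|...|post_side"

# Unanchored search: any class/id *containing* an entry matches,
# which catches variants like "left-sidebar" but can also over-match.
print(bool(re.search(pattern, "left-sidebar-widget", flags=re.IGNORECASE)))  # True
print(bool(re.search(pattern, "main-content", flags=re.IGNORECASE)))         # False
```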