eduagarcia committed on
Commit
fcfff69
1 Parent(s): 0c023b0

First commit test

Files changed (7)
  1. .gitignore +2 -0
  2. README.MD +36 -0
  3. cc_news_pt.py +128 -0
  4. commoncrawl.py +255 -0
  5. commoncrawl_extractor.py +500 -0
  6. custom_commoncrawl_crawler.py +453 -0
  7. download.sh +6 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ *.ipynb
2
+ */
README.MD ADDED
@@ -0,0 +1,36 @@
1
+ ### Dataset Summary
2
+
3
+ CC-News-PT is a curation of news articles from CommonCrawl News in the Portuguese language.
4
+ CommonCrawl News is a dataset containing news articles from news sites all over the world.
5
+ The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/.
6
+ This version of the dataset is the Portuguese subset from [CloverSearch/cc-news-mutlilingual](https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual).
7
+
8
+ ### Data Fields
9
+
10
+ - `title`: a `string` feature.
11
+ - `text`: a `string` feature.
12
+ - `authors`: a `string` feature.
13
+ - `domain`: a `string` feature.
14
+ - `date`: a `string` feature.
15
+ - `description`: a `string` feature.
16
+ - `url`: a `string` feature.
17
+ - `image_url`: a `string` feature.
18
+ - `date_download`: a `string` feature.
19
+
20
+ ### How to use this dataset
21
+
22
+ ```python
23
+ from datasets import load_dataset
24
+ dataset = load_dataset("eduagarcia/cc_news_pt", split="train")
25
+ ```
26
+
27
+ ### Cite
28
+
29
+ ```
30
+ @misc{Acerola2023,
31
+ author = {Garcia, E.A.S.},
32
+ title = {Acerola Corpus: Towards Better Portuguese Language Models},
33
+ year = {2023},
34
+ doi = {10.57967/hf/0814}
35
+ }
36
+ ```
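
A hedged usage sketch beyond the loading snippet above, assuming the dataset resolves under the repo id `eduagarcia/cc_news_pt` and that this loading script supports streaming; it simply prints a few of the documented fields:

```python
from datasets import load_dataset

# Sketch only: stream a handful of records and inspect the fields listed in the README.
dataset = load_dataset("eduagarcia/cc_news_pt", split="train", streaming=True)
for i, article in enumerate(dataset):
    print(article["domain"], "|", article["date"], "|", article["title"][:80])
    if i == 2:
        break
```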
cc_news_pt.py ADDED
@@ -0,0 +1,128 @@
1
+ import json
2
+ import os
3
+ from fnmatch import fnmatch
4
+
5
+ import datasets
6
+ from typing import Dict, List, Optional, Union, Callable
7
+ import textwrap
8
+ import gzip
9
+
10
+
11
+ logger = datasets.logging.get_logger(__name__)
12
+
13
+ _CC_NEWS_PT_KWARGS = dict(
14
+ name = "cc_news_pt",
15
+ description=textwrap.dedent(
16
+ """\
17
+ CC-News-PT is a curation of news articles from CommonCrawl News in the Portuguese language.
18
+ CommonCrawl News is a dataset containing news articles from news sites all over the world.
19
+ The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/.
20
+ This version of the dataset is the Portuguese subset from
21
+ https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual"""
22
+ ),
23
+ data_urls=[
24
+ 'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2016/pt.jsonl.gz',
25
+ #'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2017/pt.jsonl.gz',
26
+ #'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2018/pt.jsonl.gz',
27
+ #'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2019/pt.jsonl.gz',
28
+ #'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2020/pt.jsonl.gz',
29
+ #'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2021/pt.jsonl.gz',
30
+ ],
31
+ citation=textwrap.dedent(
32
+ """\
33
+ @misc{Acerola2023,
34
+ author = {Garcia, E.A.S.},
35
+ title = {Acerola Corpus: Towards Better Portuguese Language Models},
36
+ year = {2023},
37
+ doi = {10.57967/hf/0814}
38
+ }"""
39
+ ),
40
+ url="https://huggingface.co/datasets/eduagarcia/cc_news_pt",
41
+ )
42
+
43
+
44
+ class AcerolaConfig(datasets.BuilderConfig):
45
+ """BuilderConfig for Acerola."""
46
+
47
+ def __init__(
48
+ self,
49
+ data_urls: Dict[str, str],
50
+ citation: str,
51
+ url: str,
52
+ file_type: Optional[str] = None, # file type (csv, tsv, jsonl)
53
+ **kwargs
54
+ ):
55
+ """BuilderConfig for Acerola.
56
+ Args:
57
+ **kwargs: keyword arguments forwarded to super.
58
+ """
59
+ super(AcerolaConfig, self).__init__(version=datasets.Version("1.0.3", ""), **kwargs)
60
+ self.data_urls = data_urls
61
+ self.citation = citation
62
+ self.url = url
63
+ self.file_type = file_type
64
+
65
+
66
+ def _get_ccnews_features(config: AcerolaConfig):
67
+ return datasets.Features(
68
+ {
69
+ "title": datasets.Value("string"),
70
+ "text": datasets.Value("string"),
71
+ "authors": datasets.Value("string"),
72
+ "domain": datasets.Value("string"),
73
+ "date": datasets.Value("string"),
74
+ "description": datasets.Value("string"),
75
+ "url": datasets.Value("string"),
76
+ "image_url": datasets.Value("string"),
77
+ "date_download": datasets.Value("string")
78
+ }
79
+ )
80
+
81
+ class Acerola(datasets.GeneratorBasedBuilder):
82
+ """CC-News dataset."""
83
+
84
+ BUILDER_CONFIGS = [
85
+ AcerolaConfig(
86
+ **_CC_NEWS_PT_KWARGS
87
+ )
88
+ ]
89
+
90
+ def _info(self) -> datasets.DatasetInfo:
91
+ features = _get_ccnews_features(self.config)
92
+
93
+ return datasets.DatasetInfo(
94
+ description=self.config.description,
95
+ homepage=self.config.url,
96
+ citation=self.config.citation,
97
+ supervised_keys=None,
98
+ features=features
99
+ )
100
+
101
+ def _split_generators(self, dl_manager):
102
+ data_urls = self.config.data_urls.copy()
103
+ filepaths = dl_manager.download(data_urls)
104
+
105
+ return [
106
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}),
107
+ ]
108
+
109
+ def _generate_examples(self, filepaths):
110
+ id_ = 0
111
+ for filepath in filepaths:
112
+ with gzip.open(filepath, "rt", encoding="utf-8") as f:
113
+ for line in f:
114
+ if line:
115
+ article = json.loads(line)
116
+ yield id_, {
117
+ "title": article["title"].strip() if article["title"] is not None else "",
118
+ "text": article["maintext"].strip() if article["maintext"] is not None else "",
119
+ "authors": "; ".join([a.strip() for a in article["authors"]]) if len(article["authors"]) > 0 else "",
120
+ "domain": article["source_domain"].strip() if article["source_domain"] is not None else "",
121
+ "date": article["date_publish"].strip() if article["date_publish"] is not None else "",
122
+ "description": article["description"].strip() if article["description"] is not None else "",
123
+ "url": article["url"].strip() if article["url"] is not None else "",
124
+ "image_url": article["image_url"].strip() if article["image_url"] is not None else "",
125
+ "date_download": article["date_download"].strip() if article["date_download"] is not None else "",
126
+ }
127
+ id_ += 1
128
+
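
The generator above maps the source keys of the CloverSearch dump (`maintext`, `source_domain`, `date_publish`, ...) onto the fields exposed by the dataset. A minimal sketch of that mapping for a single record, assuming a locally downloaded `pt.jsonl.gz` (see `download.sh` below):

```python
import gzip
import json

# Sketch, not part of the commit: reproduce the field mapping of _generate_examples
# for one record of a locally downloaded pt.jsonl.gz file.
def map_record(article: dict) -> dict:
    def clean(key):
        value = article.get(key)
        return value.strip() if isinstance(value, str) else ""
    return {
        "title": clean("title"),
        "text": clean("maintext"),
        "authors": "; ".join(a.strip() for a in (article.get("authors") or [])),
        "domain": clean("source_domain"),
        "date": clean("date_publish"),
        "description": clean("description"),
        "url": clean("url"),
        "image_url": clean("image_url"),
        "date_download": clean("date_download"),
    }

with gzip.open("pt.jsonl.gz", "rt", encoding="utf-8") as f:
    print(map_record(json.loads(next(f))))
```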
commoncrawl.py ADDED
@@ -0,0 +1,255 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ This script downloads WARC files from commoncrawl.org's news crawl and extracts articles from these files. You can
4
+ define filter criteria that need to be met (see YOUR CONFIG section), otherwise an article is discarded. Currently, the
5
+ script stores the extracted articles in JSON files, but this behaviour can be adapted to your needs in the method
6
+ on_valid_article_extracted. To speed up the crawling and extraction process, the script supports multiprocessing. You can
7
+ control the number of processes with the parameter my_number_of_extraction_processes.
8
+
9
+ You can also crawl and extract articles programmatically, i.e., from within
10
+ your own code, by using the class CommonCrawlCrawler or the function
11
+ commoncrawl_crawler.crawl_from_commoncrawl(...) provided in
12
+ newsplease.crawler.commoncrawl_crawler.py. In this case there is also the
13
+ possibility of passing in your own subclass of CommonCrawlExtractor as
14
+ extractor_cls=... . One use case here is that your subclass can customise
15
+ filtering by overriding `.filter_record(...)`.
16
+
17
+ This script uses relative imports to ensure that the latest, local version of news-please is used, instead of the one
18
+ that might have been installed with pip. Hence, you must run this script following this workflow.
19
+ git clone https://github.com/fhamborg/news-please.git
20
+ cd news-please
21
+ python3 -m newsplease.examples.commoncrawl
22
+
23
+ Note that by default the script does not extract main images since they are not contained in
24
+ WARC files. You can enable extraction of main images by setting `my_fetch_images=True`
25
+ """
26
+ import hashlib
27
+ import json
28
+ import logging
29
+ import os
30
+ import sys
31
+ import datetime
32
+ from datetime import date
33
+
34
+ #import newsplease
35
+
36
+ import custom_commoncrawl_crawler
37
+ #from newsplease.crawler import commoncrawl_crawler as custom_commoncrawl_crawler
38
+ #from newsplease.crawler.commoncrawl_extractor import CommonCrawlExtractor
39
+
40
+ __author__ = "Felix Hamborg"
41
+ __copyright__ = "Copyright 2017"
42
+ __credits__ = ["Sebastian Nagel"]
43
+
44
+
45
+ ############ YOUR CONFIG ############
46
+ # download dir for warc files
47
+ my_local_download_dir_warc = './cc_download_warc/'
48
+ # download dir for articles
49
+ my_local_download_dir_article = './cc_download_articles/'
50
+ # hosts (if None or empty list, any host is OK)
51
+ my_filter_valid_hosts = [] # example: ['elrancaguino.cl']
52
+ # start date (if None, any date is OK as start date), as datetime
53
+ my_filter_start_date = None # datetime.datetime(2016, 1, 1)
54
+ # end date (if None, any date is OK as end date), as datetime
55
+ my_filter_end_date = None # datetime.datetime(2016, 12, 31)
56
+ # Only .warc files published within [my_warc_files_start_date, my_warc_files_end_date) will be downloaded.
57
+ # Note that the date a warc file has been published does not imply it contains only news
58
+ # articles from that date. Instead, you must assume that the warc file can contain articles
59
+ # from ANY time before the warc file was published, e.g., a warc file published in August 2020
60
+ # may contain news articles from December 2016.
61
+ my_warc_files_start_date = datetime.datetime(2022, 1, 1) # example: datetime.datetime(2020, 3, 1)
62
+ my_warc_files_end_date = None #datetime.datetime(2023, 1, 1) # example: datetime.datetime(2020, 3, 2)
63
+ # if date filtering is strict and news-please could not detect the date of an article, the article will be discarded
64
+ my_filter_strict_date = True
65
+ # if True, the script checks whether a file has been downloaded already and uses that file instead of downloading
66
+ # again. Note that there is no check whether the file has been downloaded completely or is valid!
67
+ my_reuse_previously_downloaded_files = False
68
+ # continue after error
69
+ my_continue_after_error = True
70
+ # show the progress of downloading the WARC files
71
+ my_show_download_progress = True
72
+ # log_level
73
+ my_log_level = logging.INFO
74
+ # json export style
75
+ my_json_export_style = 2 # 0 (minimize), 1 (pretty), 2 (jsonl)
76
+ # number of extraction processes
77
+ my_number_of_extraction_processes = 10
78
+ # if True, the WARC file will be deleted after all articles have been extracted from it
79
+ my_delete_warc_after_extraction = True
80
+ # if True, will continue extraction from the latest fully downloaded but not fully extracted WARC files and then
81
+ # crawling new WARC files. This assumes that the filter criteria have not been changed since the previous run!
82
+ my_continue_process = True
83
+ # if True, will crawl and extract main image of each article. Note that the WARC files
84
+ # do not contain any images, so that news-please will crawl the current image from
85
+ # the articles online webpage, if this option is enabled.
86
+ my_fetch_images = False
87
+ # if True, just list the WARC files to be processed, but do not actually download and process them
88
+ my_dry_run=False
89
+ #Shuffle list of warc files to download
90
+ shuffle=False
91
+ ############ END YOUR CONFIG #########
92
+
93
+
94
+ # logging
95
+ logging.basicConfig(level=my_log_level)
96
+ __logger = logging.getLogger(__name__)
97
+
98
+
99
+ def __setup__():
100
+ """
101
+ Setup
102
+ :return:
103
+ """
104
+ os.makedirs(my_local_download_dir_article, exist_ok=True)
105
+
106
+
107
+ def __get_pretty_filepath(path, article):
108
+ """
109
+ Pretty might be a euphemism, but this function tries to avoid overly long filenames while keeping some structure.
110
+ :param path:
111
+ :param name:
112
+ :return:
113
+ """
114
+ short_filename = hashlib.sha256(article.filename.encode()).hexdigest()
115
+ sub_dir = article.source_domain
116
+ final_path = os.path.join(path, sub_dir)
117
+ os.makedirs(final_path, exist_ok=True)
118
+ return os.path.join(final_path, short_filename + '.json')
119
+
120
+
121
+ def on_valid_article_extracted(article, extractor, extra_data = {}):
122
+ """
123
+ This function will be invoked for each article that was extracted successfully from the archived data and that
124
+ satisfies the filter criteria.
125
+ :param article:
126
+ :return:
127
+ """
128
+ # do whatever you need to do with the article (e.g., save it to disk, store it in ElasticSearch, etc.)
129
+ data = article.__dict__
130
+ data.update(extra_data)
131
+ if my_json_export_style != 2:
132
+ with open(__get_pretty_filepath(my_local_download_dir_article, article), 'w', encoding='utf-8') as outfile:
133
+ if my_json_export_style == 0:
134
+ json.dump(data, outfile, default=str, separators=(',', ':'), ensure_ascii=False)
135
+ elif my_json_export_style == 1:
136
+ json.dump(data, outfile, default=str, indent=4, sort_keys=True, ensure_ascii=False)
137
+ else:
138
+ warc_filename = os.path.basename(extractor.warc_path).replace('.warc.gz', '')
139
+ year = warc_filename[8:12]
140
+ os.makedirs(os.path.join(my_local_download_dir_article, year), exist_ok=True)
141
+ with open(os.path.join(my_local_download_dir_article, year, warc_filename + '.jsonl'), 'a', encoding='utf-8') as outfile:
142
+ outfile.write(json.dumps(data, default=str, separators=(',', ':'), ensure_ascii=False) + '\n')
143
+ # ...
144
+
145
+
146
+ def callback_on_warc_completed(warc_path, counter_article_passed, counter_article_discarded,
147
+ counter_article_error, counter_article_total, counter_warc_processed):
148
+ """
149
+ This function will be invoked for each WARC file that was processed completely. Parameters represent total values,
150
+ i.e., accumulated over all previously processed WARC files.
151
+ :param warc_path:
152
+ :param counter_article_passed:
153
+ :param counter_article_discarded:
154
+ :param counter_article_error:
155
+ :param counter_article_total:
156
+ :param counter_warc_processed:
157
+ :return:
158
+ """
159
+ pass
160
+
161
+ """
162
+ class CustomExtractor(CommonCrawlExtractor):
163
+
164
+ def filter_record(self, warc_record, article=None):
165
+
166
+ url = warc_record.rec_headers.get_header('WARC-Target-URI')
167
+
168
+ # filter by host
169
+ if self.__filter_valid_hosts:
170
+ # very simple check, check if one of the required host names is contained in the url of the WARC transaction
171
+ # better would be to extract the host name from the WARC transaction Target URI and then check for equality
172
+ # because currently something like g.co?forward_url=facebook.com would yield a positive filter test for
173
+ # facebook.com even though the actual host is g.co
174
+ for valid_host in self.__filter_valid_hosts:
175
+ if valid_host in url:
176
+ break
177
+ else:
178
+ return False, article
179
+
180
+ # filter by url suffix
181
+ valid_suffixes = {'br', 'pt'}
182
+ url_suffixes = tldextract.extract(url).suffix.split('.')
183
+ valid_suffix = False
184
+ for suffix in url_suffixes:
185
+ if suffix in valid_suffixes:
186
+ valid_suffix = True
187
+ break
188
+
189
+ if not valid_suffix:
190
+ return False, article
191
+
192
+ # filter by date
193
+ if self.__filter_start_date or self.__filter_end_date:
194
+ if not article:
195
+ article = self._from_warc(warc_record)
196
+
197
+ publishing_date = self.__get_publishing_date(warc_record, article)
198
+ if not publishing_date:
199
+ if self.__filter_strict_date:
200
+ return False, article
201
+ else: # here we for sure have a date
202
+ # is article published too early?
203
+ if self.__filter_start_date and publishing_date < self.__filter_start_date:
204
+ return False, article
205
+ if self.__filter_end_date and publishing_date > self.__filter_end_date:
206
+ return False, article
207
+
208
+ return True, article
209
+ """
210
+
211
+ def main():
212
+ global my_local_download_dir_warc
213
+ global my_local_download_dir_article
214
+ global my_delete_warc_after_extraction
215
+ global my_number_of_extraction_processes
216
+
217
+ if len(sys.argv) >= 2:
218
+ my_local_download_dir_warc = sys.argv[1]
219
+ if len(sys.argv) >= 3:
220
+ my_local_download_dir_article = sys.argv[2]
221
+ if len(sys.argv) >= 4:
222
+ my_delete_warc_after_extraction = sys.argv[3] == "delete"
223
+ if len(sys.argv) >= 5:
224
+ my_number_of_extraction_processes = int(sys.argv[4])
225
+
226
+ print("my_local_download_dir_warc=" + my_local_download_dir_warc)
227
+ print("my_local_download_dir_article=" + my_local_download_dir_article)
228
+ print("my_delete_warc_after_extraction=" + str(my_delete_warc_after_extraction))
229
+ print("my_number_of_extraction_processes=" + str(my_number_of_extraction_processes))
230
+
231
+ __setup__()
232
+ custom_commoncrawl_crawler.crawl_from_commoncrawl(on_valid_article_extracted,
233
+ callback_on_warc_completed=callback_on_warc_completed,
234
+ valid_hosts=my_filter_valid_hosts,
235
+ start_date=my_filter_start_date,
236
+ end_date=my_filter_end_date,
237
+ warc_files_start_date=my_warc_files_start_date,
238
+ warc_files_end_date=my_warc_files_end_date,
239
+ strict_date=my_filter_strict_date,
240
+ reuse_previously_downloaded_files=my_reuse_previously_downloaded_files,
241
+ local_download_dir_warc=my_local_download_dir_warc,
242
+ continue_after_error=my_continue_after_error,
243
+ show_download_progress=my_show_download_progress,
244
+ number_of_extraction_processes=my_number_of_extraction_processes,
245
+ log_level=my_log_level,
246
+ delete_warc_after_extraction=my_delete_warc_after_extraction,
247
+ continue_process=True,
248
+ fetch_images=my_fetch_images,
249
+ dry_run=my_dry_run,
250
+ shuffle=shuffle,
251
+ local_download_dir_article=my_local_download_dir_article)
252
+
253
+
254
+ if __name__ == "__main__":
255
+ main()
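
With `my_json_export_style = 2`, `on_valid_article_extracted` appends each article to one `.jsonl` file per WARC, grouped by the year encoded in the WARC filename. A sketch of that path derivation (the WARC name below is a made-up example):

```python
import os

# Hypothetical WARC path; the slicing mirrors on_valid_article_extracted above.
warc_path = "crawl-data/CC-NEWS/2022/01/CC-NEWS-20220101023402-00123.warc.gz"
warc_filename = os.path.basename(warc_path).replace(".warc.gz", "")  # CC-NEWS-20220101023402-00123
year = warc_filename[8:12]                                           # "2022"
output = os.path.join("./cc_download_articles/", year, warc_filename + ".jsonl")
print(output)  # ./cc_download_articles/2022/CC-NEWS-20220101023402-00123.jsonl
```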
commoncrawl_extractor.py ADDED
@@ -0,0 +1,500 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Provides functionality to crawl and extract news articles from a single WARC file from commoncrawl.org. Filter criteria, such as publish date
4
+ and host list, can be defined. Currently, the WARC file will be downloaded to the path WORKINGDIR/cc_download_warc, if
5
+ not otherwise specified.
6
+ """
7
+ import logging
8
+ import os
9
+ import sys
10
+ import time
11
+
12
+ from ago import human
13
+ import boto3
14
+ import botocore
15
+ from dateutil import parser
16
+ from hurry.filesize import size
17
+ from scrapy.utils.log import configure_logging
18
+ from six.moves import urllib
19
+ from warcio.archiveiterator import ArchiveIterator
20
+
21
+ from newsplease import NewsPlease, EmptyResponseError
22
+ import custom_commoncrawl_crawler as commoncrawl_crawler
23
+
24
+ from bs4.dammit import EncodingDetector
25
+ import tldextract
26
+
27
+ __author__ = "Felix Hamborg"
28
+ __copyright__ = "Copyright 2017"
29
+ __credits__ = ["Sebastian Nagel"]
30
+
31
+
32
+ def extract_html_from_warc(warc_record, decode_errors="replace"):
33
+ """
34
+ Extracts the raw HTML payload from a WARC record, decoding it with the charset from the HTTP
35
+ headers, the encoding declared in the HTML, or UTF-8 as a fallback.
36
+ :return:
37
+ """
38
+ if hasattr(warc_record, 'html'):
39
+ return warc_record.html
40
+
41
+ raw_stream = warc_record.raw_stream.read()
42
+ encoding = None
43
+ try:
44
+ encoding = (
45
+ warc_record.http_headers.get_header("Content-Type")
46
+ .split(";")[1]
47
+ .split("=")[1]
48
+ )
49
+ except:
50
+ pass
51
+ if not encoding:
52
+ encoding = EncodingDetector.find_declared_encoding(raw_stream, is_html=True)
53
+ if not encoding:
54
+ # assume utf-8
55
+ encoding = "utf-8"
56
+
57
+ html = ""
58
+
59
+ try:
60
+ html = raw_stream.decode(encoding, errors=decode_errors)
61
+ except LookupError:
62
+ # non-existent encoding: fallback to utf-9
63
+ html = raw_stream.decode("utf-8", errors=decode_errors)
64
+
65
+ warc_record.html = html
66
+
67
+ return html
68
+
69
+ class CustomNewsPlease(NewsPlease):
70
+ @staticmethod
71
+ def from_warc(warc_record, decode_errors="replace", fetch_images=True):
72
+ """
73
+ Extracts relevant information from a WARC record. This function does not invoke scrapy but only uses the article
74
+ extractor.
75
+ :return:
76
+ """
77
+ html = extract_html_from_warc(warc_record, decode_errors=decode_errors)
78
+ if not html:
79
+ raise EmptyResponseError()
80
+ url = warc_record.rec_headers.get_header("WARC-Target-URI")
81
+ download_date = warc_record.rec_headers.get_header("WARC-Date")
82
+ article = NewsPlease.from_html(
83
+ html, url=url, download_date=download_date, fetch_images=fetch_images
84
+ )
85
+ return article
86
+
87
+ class CommonCrawlExtractor:
88
+ # remote url where we can download the warc file
89
+ __warc_path = None
90
+ # download dir for warc files
91
+ __local_download_dir_warc = './cc_download_warc/'
92
+ # hosts (if None or empty list, any host is OK)
93
+ __filter_valid_hosts = [] # example: ['elrancaguino.cl']
94
+ # start date (if None, any date is OK as start date), as datetime
95
+ __filter_start_date = None
96
+ # end date (if None, any date is OK as end date)
97
+ __filter_end_date = None
98
+ # if date filtering is strict, e.g., if we could not detect the date of an article, we will discard the article
99
+ __filter_strict_date = True
100
+ # if True, the script checks whether a file has been downloaded already and uses that file instead of downloading
101
+ # again. Note that there is no check whether the file has been downloaded completely or is valid!
102
+ __reuse_previously_downloaded_files = True
103
+ # continue after error
104
+ __continue_after_error = False
105
+ # ignore unicode errors
106
+ __ignore_unicode_errors = False
107
+ # fetch images
108
+ __fetch_images = False
109
+ # log level
110
+ __log_level = logging.INFO
111
+ __delete_warc_after_extraction = True
112
+ __log_pathname_fully_extracted_warcs = None
113
+
114
+ #output
115
+ __local_download_dir_article = None
116
+
117
+ # commoncrawl.org
118
+ __cc_base_url = 'https://data.commoncrawl.org/'
119
+ __cc_bucket = 'commoncrawl'
120
+ __cc_news_crawl_names = None
121
+
122
+ # event handler called when an article was extracted successfully and passed all filter criteria
123
+ __callback_on_article_extracted = None
124
+ # event handler called when a warc file is fully processed
125
+ __callback_on_warc_completed = None
126
+ # if the download progress is shown
127
+ __show_download_progress = False
128
+
129
+ # logging
130
+ logging.basicConfig(level=__log_level)
131
+ __logger = logging.getLogger(__name__)
132
+
133
+ #Filter utils
134
+ __html_lang_indicators = [
135
+ ('http-equiv="content-language" content="pt', 1.0),
136
+ ('property="og:locale" content="pt', 0.8),
137
+ ('meta name="language" content="pt', 0.8),
138
+ ('meta name="language" content="portugu', 0.8),
139
+ ('lang="pt', 0.6),
140
+ ('pt-br', 0.4),
141
+ ('pt-pt', 0.4),
142
+ ('pt_br', 0.4),
143
+ ('pt_pt', 0.4),
144
+ ]
145
+ __html_lang_indicators.extend([(hi[0].replace('"', "'"), hi[1]) for hi in __html_lang_indicators if hi[0].replace('"', "'") != hi[0]])
146
+ __html_lang_indicators = sorted(__html_lang_indicators, key=lambda d: d[1], reverse=True)
147
+
148
+
149
+ def __setup(self):
150
+ """
151
+ Setup
152
+ :return:
153
+ """
154
+ os.makedirs(self.__local_download_dir_warc, exist_ok=True)
155
+
156
+ # make loggers quiet
157
+ configure_logging({"LOG_LEVEL": "ERROR"})
158
+ logging.getLogger('requests').setLevel(logging.CRITICAL)
159
+ logging.getLogger('readability').setLevel(logging.CRITICAL)
160
+ logging.getLogger('PIL').setLevel(logging.CRITICAL)
161
+ logging.getLogger('newspaper').setLevel(logging.CRITICAL)
162
+ logging.getLogger('newsplease').setLevel(logging.CRITICAL)
163
+ logging.getLogger('urllib3').setLevel(logging.CRITICAL)
164
+
165
+ boto3.set_stream_logger('botocore', self.__log_level)
166
+ boto3.set_stream_logger('boto3', self.__log_level)
167
+ boto3.set_stream_logger('s3transfer', self.__log_level)
168
+
169
+ # set own logger
170
+ logging.basicConfig(level=self.__log_level)
171
+ self.__logger = logging.getLogger(__name__)
172
+ self.__logger.setLevel(self.__log_level)
173
+
174
+ def __register_fully_extracted_warc_file(self, warc_path):
175
+ """
176
+ Saves the URL warc_url in the log file for fully extracted WARC URLs
177
+ :param warc_url:
178
+ :return:
179
+ """
180
+ if self.__log_pathname_fully_extracted_warcs is not None:
181
+ with open(self.__log_pathname_fully_extracted_warcs, 'a') as log_file:
182
+ log_file.write(warc_path + '\n')
183
+
184
+ def filter_record(self, warc_record, article=None):
185
+ """
186
+ Returns true if a record passes all tests: hosts, publishing date
187
+ :param warc_record:
188
+ :return: A tuple of (True or False) and an article (might be None)
189
+ """
190
+ data = {}
191
+
192
+ language = warc_record.http_headers.get_header('Content-Language')
193
+ data['content-language'] = language
194
+ if language is not None and 'pt' in language.lower():
195
+ data['filter_pass'] = 'content-language'
196
+ data['filter_pass_meta'] = {'content-language': language}
197
+ return True, article, data
198
+
199
+ url = warc_record.rec_headers.get_header('WARC-Target-URI')
200
+ # filter by url suffix
201
+ #Domain extension of countries with portuguese spoken by the majority of the population as the mother language
202
+ #Brazil, Portugual, Angola, São Tomé and Príncipe
203
+ portuguese_majority_domains = {'br', 'pt', 'ao', 'st'}
204
+ #Domain extension of other countries that speaks portuguese
205
+ #Mozambique, Guinea-Bissau, Equatorial Guinea, East Timor, Macau, Cape Verde
206
+ portuguese_maybe_domains = {'mz', 'gw', 'gq', 'tl', 'mo', 'cv'}
207
+ url_suffixes = tldextract.extract(url).suffix.split('.')
208
+ #valid_suffix = False
209
+ for suffix in url_suffixes:
210
+ if suffix in portuguese_majority_domains:
211
+ #valid_suffix = True
212
+ data['filter_pass'] = 'domain-extension'
213
+ data['filter_pass_meta'] = {'domain-extension': suffix}
214
+ return True, article, data
215
+ if suffix in portuguese_maybe_domains:
216
+ if not article:
217
+ article = self._from_warc(warc_record)
218
+ if article.language == 'pt':
219
+ data['filter_pass'] = 'domain-extension'
220
+ data['filter_pass_meta'] = {'domain-extension': suffix}
221
+ return True, article, data
222
+
223
+ raw_html = extract_html_from_warc(warc_record).lower()
224
+ for indicator, score in self.__html_lang_indicators:
225
+ if indicator in raw_html:
226
+ data['filter_pass'] = 'html-tag'
227
+ data['filter_pass_meta'] = {'html-tag': indicator, 'score': round(score, 1)}
228
+ if score < 0.61: # weak indicator (0.6 threshold with a margin for floating-point error)
229
+ if not article:
230
+ article = self._from_warc(warc_record)
231
+ if article.language != 'pt':
232
+ return False, article, data
233
+ return True, article, data
234
+
235
+ return False, article, data
236
+
237
+ #if not valid_suffix:
238
+ # return False, article
239
+
240
+ # filter by host
241
+ if self.__filter_valid_hosts:
242
+ # very simple check, check if one of the required host names is contained in the url of the WARC transaction
243
+ # better would be to extract the host name from the WARC transaction Target URI and then check for equality
244
+ # because currently something like g.co?forward_url=facebook.com would yield a positive filter test for
245
+ # facebook.com even though the actual host is g.co
246
+ for valid_host in self.__filter_valid_hosts:
247
+ if valid_host in url:
248
+ break
249
+ else:
250
+ return False, article
251
+
252
+ # filter by date
253
+ if self.__filter_start_date or self.__filter_end_date:
254
+ if not article:
255
+ article = self._from_warc(warc_record)
256
+
257
+ publishing_date = self.__get_publishing_date(warc_record, article)
258
+ if not publishing_date:
259
+ if self.__filter_strict_date:
260
+ return False, article
261
+ else: # here we for sure have a date
262
+ # is article published too early?
263
+ if self.__filter_start_date and publishing_date < self.__filter_start_date:
264
+ return False, article
265
+ if self.__filter_end_date and publishing_date > self.__filter_end_date:
266
+ return False, article
267
+
268
+ return True, article
269
+
270
+ def __get_publishing_date(self, warc_record, article):
271
+ """
272
+ Extracts the publishing date from the record
273
+ :param warc_record:
274
+ :return:
275
+ """
276
+ if hasattr(article, 'date_publish'):
277
+ return parser.parse(article.date_publish) if isinstance(article.date_publish, str) else article.date_publish
278
+ else:
279
+ return None
280
+
281
+ def __get_remote_index(self):
282
+ """
283
+ Gets the index of news crawl files from commoncrawl.org and returns an array of names
284
+ :return:
285
+ """
286
+ return commoncrawl_crawler.__get_remote_index()
287
+
288
+ def __on_download_progress_update(self, blocknum, blocksize, totalsize):
289
+ """
290
+ Prints some download progress information
291
+ :param blocknum:
292
+ :param blocksize:
293
+ :param totalsize:
294
+ :return:
295
+ """
296
+ if not self.__show_download_progress:
297
+ return
298
+
299
+ readsofar = blocknum * blocksize
300
+ if totalsize > 0:
301
+ s = "\r%s / %s" % (size(readsofar), size(totalsize))
302
+ sys.stdout.write(s)
303
+ if readsofar >= totalsize: # near the end
304
+ sys.stderr.write("\r")
305
+ else: # total size is unknown
306
+ sys.stdout.write("\rread %s" % (size(readsofar)))
307
+
308
+ def __download(self, path):
309
+ """
310
+ Download and save a file locally.
311
+ :param url: Where to download from
312
+ :return: File path name of the downloaded file
313
+ """
314
+ local_filename = urllib.parse.quote_plus(path)
315
+ local_filepath = os.path.join(self.__local_download_dir_warc, local_filename)
316
+
317
+ if os.path.isfile(local_filepath) and self.__reuse_previously_downloaded_files:
318
+ self.__logger.info("found local file %s, not downloading again due to configuration", local_filepath)
319
+ return local_filepath
320
+ else:
321
+ # cleanup
322
+ try:
323
+ os.remove(local_filepath)
324
+ except OSError:
325
+ pass
326
+
327
+ # download
328
+ if self.__s3_client:
329
+ with open(local_filepath, 'wb') as file_obj:
330
+ self.__s3_client.download_fileobj(self.__cc_bucket, path, file_obj)
331
+ return local_filepath
332
+ else:
333
+ url = self.__cc_base_url + path
334
+ self.__logger.info('downloading %s (local: %s)', url, local_filepath)
335
+ urllib.request.urlretrieve(url, local_filepath, reporthook=self.__on_download_progress_update)
336
+ self.__logger.info('download completed, local file: %s', local_filepath)
337
+ return local_filepath
338
+
339
+ def _from_warc(self, record):
340
+ return CustomNewsPlease.from_warc(record, decode_errors="replace" if self.__ignore_unicode_errors else "strict", fetch_images=self.__fetch_images)
341
+
342
+ def __process_warc_gz_file(self, path_name):
343
+ """
344
+ Iterates all transactions in one WARC file and for each transaction tries to extract an article object.
345
+ Afterwards, each article is checked against the filter criteria and if all are passed, the function
346
+ on_valid_article_extracted is invoked with the article object.
347
+ :param path_name:
348
+ :return:
349
+ """
350
+ counter_article_total = 0
351
+ counter_article_passed = 0
352
+ counter_article_discarded = 0
353
+ counter_article_error = 0
354
+ start_time = time.time()
355
+
356
+ with open(path_name, 'rb') as stream:
357
+ for record in ArchiveIterator(stream):
358
+ try:
359
+ if record.rec_type == 'response':
360
+ counter_article_total += 1
361
+ data = {}
362
+ # if the article passes filter tests, we notify the user
363
+ try:
364
+ filter_pass, article, data = self.filter_record(record)
365
+ except (UnicodeDecodeError, EmptyResponseError):
366
+ filter_pass = False
367
+ if filter_pass:
368
+ try:
369
+ if not article:
370
+ article = self._from_warc(record)
371
+
372
+ except (UnicodeDecodeError, EmptyResponseError):
373
+ filter_pass = False
374
+ if filter_pass:
375
+ counter_article_passed += 1
376
+
377
+ self.__logger.debug('article pass (%s; %s; %s)', article.source_domain, article.date_publish,
378
+ article.title)
379
+ self.__callback_on_article_extracted(article, self, extra_data = data)
380
+ else:
381
+ counter_article_discarded += 1
382
+
383
+ if article:
384
+ self.__logger.debug('article discard (%s; %s; %s)', article.source_domain,
385
+ article.date_publish,
386
+ article.title)
387
+ else:
388
+ self.__logger.debug('article discard (%s)',
389
+ record.rec_headers.get_header('WARC-Target-URI'))
390
+
391
+ if counter_article_total % 10000 == 0:
392
+ elapsed_secs = time.time() - start_time
393
+ secs_per_article = elapsed_secs / counter_article_total
394
+ self.__logger.info('statistics')
395
+ self.__logger.info('pass = %i, discard = %i, error = %i, total = %i',
396
+ counter_article_passed,
397
+ counter_article_discarded, counter_article_error, counter_article_total)
398
+ self.__logger.info('extraction from current WARC file started %s; %f s/article',
399
+ human(start_time), secs_per_article)
400
+ except:
401
+ if self.__continue_after_error:
402
+ self.__logger.error('Unexpected error: %s (%s)', *sys.exc_info()[0:2])
403
+ self.__logger.error(sys.exc_info()[2], exc_info=True)
404
+ counter_article_error += 1
405
+ pass
406
+ else:
407
+ raise
408
+
409
+ # cleanup
410
+ if self.__delete_warc_after_extraction:
411
+ os.remove(path_name)
412
+
413
+ self.__register_fully_extracted_warc_file(self.__warc_path)
414
+ self.__callback_on_warc_completed(self.__warc_path, counter_article_passed, counter_article_discarded,
415
+ counter_article_error, counter_article_total)
416
+
417
+ def __cleanup_output_file(self):
418
+ #Delete incomplete output file if exists
419
+ if self.__local_download_dir_article is not None:
420
+ warc_filename = os.path.basename(self.__warc_path).replace('.warc.gz', '')
421
+ year = warc_filename[8:12]
422
+ output_filepath = os.path.join(self.__local_download_dir_article, year, warc_filename + '.jsonl')
423
+ if os.path.exists(output_filepath):
424
+ self.__logger.info(f"Removing incomplete output file {output_filepath}")
425
+ os.remove(output_filepath)
426
+
427
+ def __run(self):
428
+ """
429
+ Main execution method, which consists of: get an up-to-date list of WARC files, and for each of them: download
430
+ and extract articles. Each article is checked against a filter. Finally, for each valid article the method
431
+ on_valid_article_extracted will be invoked after the extraction of the article has completed.
432
+ :return:
433
+ """
434
+ self.__setup()
435
+
436
+ self.__cleanup_output_file()
437
+ local_path_name = self.__download(self.__warc_path)
438
+ self.__process_warc_gz_file(local_path_name)
439
+
440
+ def extract_from_commoncrawl(self, warc_path, callback_on_article_extracted,
441
+ callback_on_warc_completed=None,
442
+ valid_hosts=None,
443
+ start_date=None, end_date=None,
444
+ strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None,
445
+ continue_after_error=True, ignore_unicode_errors=False,
446
+ show_download_progress=False, log_level=logging.ERROR, delete_warc_after_extraction=True,
447
+ log_pathname_fully_extracted_warcs=None, fetch_images=False, local_download_dir_article=None):
448
+ """
449
+ Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
450
+ successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
451
+ article object.
452
+ :param log_pathname_fully_extracted_warcs:
453
+ :param delete_warc_after_extraction:
454
+ :param warc_path:
455
+ :param callback_on_article_extracted:
456
+ :param callback_on_warc_completed:
457
+ :param valid_hosts:
458
+ :param start_date:
459
+ :param end_date:
460
+ :param strict_date:
461
+ :param reuse_previously_downloaded_files:
462
+ :param local_download_dir_warc:
463
+ :param continue_after_error:
464
+ :param show_download_progress:
465
+ :param log_level:
466
+ :return:
467
+ """
468
+ self.warc_path = warc_path
469
+ self.__warc_path = warc_path
470
+ self.__filter_valid_hosts = valid_hosts
471
+ self.__filter_start_date = start_date
472
+ self.__filter_end_date = end_date
473
+ self.__filter_strict_date = strict_date
474
+ if local_download_dir_warc:
475
+ self.__local_download_dir_warc = local_download_dir_warc
476
+ self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
477
+ self.__continue_after_error = continue_after_error
478
+ self.__ignore_unicode_errors = ignore_unicode_errors
479
+ self.__fetch_images = fetch_images
480
+ self.__callback_on_article_extracted = callback_on_article_extracted
481
+ self.__callback_on_warc_completed = callback_on_warc_completed
482
+ self.__show_download_progress = show_download_progress
483
+ self.__log_level = log_level
484
+ self.__delete_warc_after_extraction = delete_warc_after_extraction
485
+ self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs
486
+ self.__local_download_dir_article = local_download_dir_article
487
+
488
+ self.__s3_client = None
489
+
490
+ try:
491
+ s3_client = boto3.client('s3')
492
+ # Verify access to commoncrawl bucket
493
+ s3_client.head_bucket(Bucket=self.__cc_bucket)
494
+ self.__s3_client = s3_client
495
+ except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e:
496
+ self.__logger.info('Failed to read %s bucket, using monthly WARC file listings', self.__cc_bucket)
497
+ self.__logger.warning(str(e))
498
+
499
+
500
+ self.__run()
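
`filter_record` accepts a record as Portuguese if the `Content-Language` header says so, if the URL's public suffix belongs to a country where Portuguese is the majority language, or if language hints show up in the raw HTML (weak hints are double-checked against news-please's detected language). A sketch of just the domain-suffix shortcut, using the same `tldextract` call as the class:

```python
import tldextract

PT_MAJORITY_SUFFIXES = {"br", "pt", "ao", "st"}  # same set as filter_record()

def passes_domain_shortcut(url: str) -> bool:
    # "com.br" splits into ["com", "br"]; any matching component is enough.
    parts = tldextract.extract(url).suffix.split(".")
    return any(part in PT_MAJORITY_SUFFIXES for part in parts)

print(passes_domain_shortcut("https://noticias.exemplo.com.br/artigo"))  # True
print(passes_domain_shortcut("https://news.example.co.uk/story"))        # False
```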
custom_commoncrawl_crawler.py ADDED
@@ -0,0 +1,453 @@
1
+ #!/usr/bin/env python
2
+ """
3
+ Provides functionality to crawl and extract news articles from commoncrawl.org. Filter criteria, such as publish date
4
+ and host list, can be defined. Currently, all WARC files will be downloaded to the path WORKINGDIR/cc_download_warc, if
5
+ not otherwise specified.
6
+ """
7
+ import logging
8
+ import os
9
+ import time
10
+ from functools import partial
11
+ from multiprocessing import Pool
12
+ import datetime
13
+ import gzip
14
+ from urllib.parse import urlparse
15
+
16
+ import boto3
17
+ import botocore
18
+ from dateutil import parser
19
+ import requests
20
+ from scrapy.utils.log import configure_logging
21
+
22
+ import random
23
+
24
+ from commoncrawl_extractor import CommonCrawlExtractor
25
+
26
+ __author__ = "Felix Hamborg"
27
+ __copyright__ = "Copyright 2017"
28
+ __credits__ = ["Sebastian Nagel"]
29
+
30
+ # commoncrawl.org
31
+ __cc_base_url = 'https://data.commoncrawl.org/'
32
+ __cc_bucket = 'commoncrawl'
33
+
34
+ # log file of fully extracted WARC files
35
+ __log_pathname_fully_extracted_warcs = None
36
+
37
+ # logging
38
+ logging.basicConfig(level=logging.INFO)
39
+ __logger = logging.getLogger(__name__)
40
+
41
+ __number_of_warc_files_on_cc = 0
42
+
43
+ __extern_callback_on_warc_completed = None
44
+ __counter_article_passed = 0
45
+ __counter_article_discarded = 0
46
+ __counter_article_error = 0
47
+ __counter_article_total = 0
48
+ __counter_warc_skipped = 0
49
+ __counter_warc_processed = 0
50
+ __start_time = time.time()
51
+
52
+ __shuffle = False
53
+
54
+ # When Common Crawl started.
55
+ __common_crawl_start_date = datetime.datetime(2016, 8, 26)
56
+
57
+ def __setup(local_download_dir_warc, log_level):
58
+ """
59
+ Setup
60
+ :return:
61
+ """
62
+ os.makedirs(local_download_dir_warc, exist_ok=True)
63
+
64
+ global __log_pathname_fully_extracted_warcs
65
+ __log_pathname_fully_extracted_warcs = os.path.join(local_download_dir_warc, 'fullyextractedwarcs.list')
66
+
67
+ # make loggers quiet
68
+ configure_logging({"LOG_LEVEL": "ERROR"})
69
+ logging.getLogger('requests').setLevel(logging.CRITICAL)
70
+ logging.getLogger('readability').setLevel(logging.CRITICAL)
71
+ logging.getLogger('PIL').setLevel(logging.CRITICAL)
72
+ logging.getLogger('newspaper').setLevel(logging.CRITICAL)
73
+ logging.getLogger('newsplease').setLevel(logging.CRITICAL)
74
+ logging.getLogger('urllib3').setLevel(logging.CRITICAL)
75
+ logging.getLogger('jieba').setLevel(logging.CRITICAL)
76
+
77
+ boto3.set_stream_logger('botocore', log_level)
78
+ boto3.set_stream_logger('boto3', log_level)
79
+
80
+ # set own logger
81
+ logging.basicConfig(level=log_level)
82
+ __logger = logging.getLogger(__name__)
83
+ __logger.setLevel(log_level)
84
+
85
+
86
+ def __get_publishing_date(warc_record, article):
87
+ """
88
+ Extracts the publishing date from the article
89
+ :param warc_record:
90
+ :return:
91
+ """
92
+ if article.publish_date:
93
+ return parser.parse(article.publish_date)
94
+ else:
95
+ return None
96
+
97
+
98
+ def __get_download_url(name):
99
+ """
100
+ Creates a download url given the name
101
+ :param name:
102
+ :return:
103
+ """
104
+ return __cc_base_url + name
105
+
106
+
107
+ def __iterate_by_month(start_date=None, end_date=None, month_step=1):
108
+ if start_date is None:
109
+ # The starting month of Common Crawl.
110
+ start_date = __common_crawl_start_date
111
+ if end_date is None:
112
+ # Until now.
113
+ end_date = datetime.datetime.today()
114
+ current_date = start_date
115
+ yield current_date
116
+ while True:
117
+ carry, new_month = divmod(current_date.month - 1 + month_step, 12)
118
+ new_month += 1
119
+ current_date = current_date.replace(year=current_date.year + carry,
120
+ month=new_month)
121
+ yield current_date
122
+ if current_date > end_date:
123
+ break
124
+
125
+
126
+ def __extract_date_from_warc_filename(path):
127
+ fn = os.path.basename(path)
128
+ # Assume the filename pattern is CC-NEWS-20160911145202-00018.warc.gz
129
+ fn = fn.replace('CC-NEWS-', '')
130
+ dt = fn.split('-')[0]
131
+
132
+ try:
133
+ return datetime.datetime.strptime(dt, '%Y%m%d%H%M%S')
134
+ except:
135
+ # return date clearly outside the range
136
+ return datetime.datetime(1900, 1, 1)
137
+
138
+
139
+ def __date_within_period(date, start_date=None, end_date=None):
140
+ if start_date is None:
141
+ # The starting month of Common Crawl.
142
+ start_date = __common_crawl_start_date
143
+ if end_date is None:
144
+ # Until now.
145
+ end_date = datetime.datetime.today()
146
+ return start_date <= date < end_date
147
+
148
+
149
+ def __get_remote_index(warc_files_start_date=None, warc_files_end_date=None):
150
+ """
151
+ Gets the index of news crawl files from commoncrawl.org and returns an array of names
152
+ :param warc_files_start_date: only list .warc files with greater or equal date in
153
+ their filename
154
+ :param warc_files_end_date: only list .warc files with smaller date in their filename
155
+ :return:
156
+ """
157
+
158
+ s3_client = boto3.client('s3')
159
+ # Verify access to commoncrawl bucket
160
+ try:
161
+ s3_client.head_bucket(Bucket=__cc_bucket)
162
+ except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e:
163
+ __logger.info('Failed to read %s bucket, using monthly WARC file listings', __cc_bucket)
164
+ __logger.warning(str(e))
165
+ s3_client = None
166
+
167
+ objects = []
168
+
169
+ if s3_client:
170
+ def s3_list_objects(bucket, prefix):
171
+ #response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
172
+ paginator = s3_client.get_paginator('list_objects')
173
+ pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
174
+
175
+ data = []
176
+ for page in pages:
177
+ if 'Contents' not in page:
178
+ continue
179
+ for obj in page['Contents']:
180
+ data.append(obj['Key'])
181
+
182
+ return data
183
+ #if 'Contents' not in response:
184
+ # return []
185
+ #return [x['Key'] for x in response['Contents']]
186
+
187
+ if warc_files_start_date or warc_files_end_date:
188
+ # The news files are grouped per year and month in separate folders
189
+ warc_dates = __iterate_by_month(start_date=warc_files_start_date, end_date=warc_files_end_date)
190
+ for date in warc_dates:
191
+ year = date.strftime('%Y')
192
+ month = date.strftime('%m')
193
+ prefix = 'crawl-data/CC-NEWS/%s/%s/' % (year, month)
194
+ __logger.debug('Listing objects on S3 bucket %s and prefix %s', __cc_bucket, prefix)
195
+ objects += s3_list_objects(__cc_bucket, prefix)
196
+ else:
197
+ objects = s3_list_objects(__cc_bucket, 'crawl-data/CC-NEWS/')
198
+
199
+ else:
200
+ # The news files are grouped per year and month in separate folders
201
+ warc_dates = __iterate_by_month(start_date=warc_files_start_date, end_date=warc_files_end_date)
202
+ for date in warc_dates:
203
+ year = date.strftime('%Y')
204
+ month = date.strftime('%m')
205
+ url = '%scrawl-data/CC-NEWS/%s/%s/warc.paths.gz' % (__cc_base_url, year, month)
206
+ __logger.debug('Fetching WARC paths listing %s', url)
207
+ response = requests.get(url)
208
+ if response:
209
+ objects += gzip.decompress(response.content).decode('ascii').strip().split('\n')
210
+ else:
211
+ __logger.info('Failed to fetch WARC file list %s: %s', url, response)
212
+
213
+ if warc_files_start_date or warc_files_end_date:
214
+ # Now filter further on day of month, hour, minute
215
+ objects = [
216
+ p for p in objects if __date_within_period(
217
+ __extract_date_from_warc_filename(p),
218
+ start_date=warc_files_start_date,
219
+ end_date=warc_files_end_date,
220
+ )
221
+ ]
222
+
223
+ __logger.info('Found %i WARC files', len(objects))
224
+
225
+ return objects
226
+
227
+ def __get_url_path(url_or_path):
228
+ if url_or_path.startswith('http:') or url_or_path.startswith('https:'):
229
+ try:
230
+ url = urlparse(url_or_path)
231
+ return url.path.lstrip('/') # trim leading slash
232
+ except:
233
+ pass
234
+ return url_or_path
235
+
236
+ def __get_list_of_fully_extracted_warc_paths():
237
+ """
238
+ Reads in the log file that contains a list of all previously, fully extracted WARC urls
239
+ :return:
240
+ """
241
+ if not os.path.isfile(__log_pathname_fully_extracted_warcs):
242
+ return []
243
+
244
+ with open(__log_pathname_fully_extracted_warcs) as log_file:
245
+ list_warcs = log_file.readlines()
246
+ # remove break lines
247
+ list_warcs = [x.strip() for x in list_warcs]
248
+
249
+ # (back-ward compatibility) if it's a URL keep only the path
250
+ list_warcs = [__get_url_path(x) for x in list_warcs]
251
+
252
+ return list_warcs
253
+
254
+
255
+ def __callback_on_warc_completed(warc_path, counter_article_passed, counter_article_discarded, counter_article_error,
256
+ counter_article_total):
257
+ """
258
+ Internal callback on completion of one WARC file. Calculates some statistics on processing speed.
259
+ :param warc_path:
260
+ :param counter_article_passed:
261
+ :param counter_article_discarded:
262
+ :param counter_article_error:
263
+ :param counter_article_total:
264
+ :return:
265
+ """
266
+ # have to use the global keyword in order to assign a value to a global variable (see https://stackoverflow.com/a/9936482)
267
+ global __counter_article_passed
268
+ global __counter_article_discarded
269
+ global __counter_article_error
270
+ global __counter_article_total
271
+ global __counter_warc_processed
272
+ # global __counter_warc_skipped
273
+
274
+ elapsed_secs = time.time() - __start_time
275
+
276
+ __counter_article_discarded += counter_article_discarded
277
+ __counter_article_error += counter_article_error
278
+ __counter_article_passed += counter_article_passed
279
+ __counter_article_total += counter_article_total
280
+ __counter_warc_processed += 1
281
+
282
+ sec_per_article = elapsed_secs / counter_article_total
283
+ h_per_warc = elapsed_secs / __counter_warc_processed / 3600
284
+ remaining_warcs = __number_of_warc_files_on_cc - (__counter_warc_processed + __counter_warc_skipped)
285
+
286
+ __logger.info("warc processing statistics")
287
+ __logger.info("warc files skipped = %i, processed = %i, remaining = %i, total = %i", __counter_warc_skipped,
288
+ __counter_warc_processed, remaining_warcs, __number_of_warc_files_on_cc)
289
+ __logger.info("global [s/article] = %f", sec_per_article)
290
+ __logger.info("global [h/warc] = %.3f", h_per_warc)
291
+ __logger.info("estimated remaining time [h] = %f", remaining_warcs * h_per_warc)
292
+
293
+ # invoke the external callback
294
+ __extern_callback_on_warc_completed(warc_path, __counter_article_passed, __counter_article_discarded,
295
+ __counter_article_error, __counter_article_total, __counter_warc_processed)
296
+
297
+
298
+ def __start_commoncrawl_extractor(warc_path, callback_on_article_extracted=None,
299
+ callback_on_warc_completed=None, valid_hosts=None,
300
+ start_date=None, end_date=None,
301
+ strict_date=True, reuse_previously_downloaded_files=True,
302
+ local_download_dir_warc=None,
303
+ continue_after_error=True, show_download_progress=False,
304
+ log_level=logging.ERROR,
305
+ delete_warc_after_extraction=True,
306
+ continue_process=True,
307
+ log_pathname_fully_extracted_warcs=None,
308
+ extractor_cls=CommonCrawlExtractor,
309
+ fetch_images=False,
310
+ local_download_dir_article=None):
311
+ """
312
+ Starts a single CommonCrawlExtractor
313
+ :param warc_path: path to the WARC file on s3://commoncrawl/ resp. https://data.commoncrawl.org/
314
+ :param callback_on_article_extracted:
315
+ :param callback_on_warc_completed:
316
+ :param valid_hosts:
317
+ :param start_date:
318
+ :param end_date:
319
+ :param strict_date:
320
+ :param reuse_previously_downloaded_files:
321
+ :param local_download_dir_warc:
322
+ :param continue_after_error:
323
+ :param show_download_progress:
324
+ :param log_level:
325
+ :param extractor_cls: A subclass of CommonCrawlExtractor, which can be used
326
+ to add custom filtering by overriding .filter_record(...)
327
+ :return:
328
+ """
329
+ commoncrawl_extractor = extractor_cls()
330
+ commoncrawl_extractor.extract_from_commoncrawl(warc_path, callback_on_article_extracted,
331
+ callback_on_warc_completed=callback_on_warc_completed,
332
+ valid_hosts=valid_hosts,
333
+ start_date=start_date, end_date=end_date,
334
+ strict_date=strict_date,
335
+ reuse_previously_downloaded_files=reuse_previously_downloaded_files,
336
+ local_download_dir_warc=local_download_dir_warc,
337
+ continue_after_error=continue_after_error,
338
+ show_download_progress=show_download_progress,
339
+ log_level=log_level,
340
+ delete_warc_after_extraction=delete_warc_after_extraction,
341
+ log_pathname_fully_extracted_warcs=__log_pathname_fully_extracted_warcs,
342
+ fetch_images=fetch_images,
343
+ local_download_dir_article=local_download_dir_article)
344
+
345
+
346
+ def crawl_from_commoncrawl(callback_on_article_extracted, callback_on_warc_completed=None, valid_hosts=None,
347
+ start_date=None, end_date=None, warc_files_start_date=None, warc_files_end_date=None, strict_date=True,
348
+ reuse_previously_downloaded_files=True, local_download_dir_warc=None,
349
+ continue_after_error=True, show_download_progress=False,
350
+ number_of_extraction_processes=4, log_level=logging.ERROR,
351
+ delete_warc_after_extraction=True, continue_process=True,
352
+ extractor_cls=CommonCrawlExtractor, fetch_images=False,
353
+ dry_run=False, shuffle=__shuffle, local_download_dir_article=None):
354
+ """
355
+ Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
356
+ successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
357
+ article object.
358
+ :param continue_process:
359
+ :param delete_warc_after_extraction:
360
+ :param number_of_extraction_processes:
361
+ :param callback_on_article_extracted:
362
+ :param valid_hosts:
363
+ :param start_date:
364
+ :param end_date:
365
+ :param warc_files_start_date
366
+ :param warc_files_end_date
367
+ :param strict_date:
368
+ :param reuse_previously_downloaded_files:
369
+ :param local_download_dir_warc:
370
+ :param continue_after_error:
371
+ :param show_download_progress:
372
+ :param log_level:
373
+ :param extractor_cls:
374
+ :param dry_run: if True just list the WARC files to be processed but do not actually process them
375
+ :return:
376
+ """
377
+ __setup(local_download_dir_warc, log_level)
378
+
379
+ global __extern_callback_on_warc_completed
380
+ __extern_callback_on_warc_completed = callback_on_warc_completed
381
+
382
+ cc_news_crawl_names = __get_remote_index(warc_files_start_date, warc_files_end_date)
383
+ global __number_of_warc_files_on_cc
384
+ __number_of_warc_files_on_cc = len(cc_news_crawl_names)
385
+ __logger.info('found %i files at commoncrawl.org', __number_of_warc_files_on_cc)
386
+
387
+ if shuffle:
388
+ random.seed(42)
389
+ random.shuffle(cc_news_crawl_names)
390
+ __logger.info('shuffled the list of WARC files')
391
+
392
+ # multiprocessing (iterate the list of crawl_names, and for each: download and process it)
393
+ __logger.info('creating extraction process pool with %i processes', number_of_extraction_processes)
394
+ warc_paths = []
395
+ fully_extracted_warc_paths = __get_list_of_fully_extracted_warc_paths()
396
+ for warc_path in cc_news_crawl_names:
397
+ if continue_process:
398
+ # check if the current WARC has already been fully extracted (assuming that the filter criteria have not
399
+ # been changed!)
400
+ if warc_path in fully_extracted_warc_paths:
401
+ __logger.info('skipping WARC because fully extracted: %s', warc_path)
402
+ global __counter_warc_skipped
403
+ __counter_warc_skipped += 1
404
+ pass
405
+ else:
406
+ warc_paths.append(warc_path)
407
+
408
+ else:
409
+ # if not continue process, then always add
410
+ warc_paths.append(warc_path)
411
+
412
+ if dry_run:
413
+ for warc_path in warc_paths:
414
+ __logger.info('(Dry run) Selected WARC file for processing: %s', warc_path)
415
+
416
+ # run the crawler in the current, single process if number of extraction processes is set to 1
417
+ elif number_of_extraction_processes > 1:
418
+ with Pool(number_of_extraction_processes) as extraction_process_pool:
419
+ extraction_process_pool.map(partial(__start_commoncrawl_extractor,
420
+ callback_on_article_extracted=callback_on_article_extracted,
421
+ callback_on_warc_completed=__callback_on_warc_completed,
422
+ valid_hosts=valid_hosts,
423
+ start_date=start_date, end_date=end_date,
424
+ strict_date=strict_date,
425
+ reuse_previously_downloaded_files=reuse_previously_downloaded_files,
426
+ local_download_dir_warc=local_download_dir_warc,
427
+ continue_after_error=continue_after_error,
428
+ show_download_progress=show_download_progress,
429
+ log_level=log_level,
430
+ delete_warc_after_extraction=delete_warc_after_extraction,
431
+ log_pathname_fully_extracted_warcs=__log_pathname_fully_extracted_warcs,
432
+ extractor_cls=extractor_cls,
433
+ fetch_images=fetch_images,
434
+ local_download_dir_article=local_download_dir_article),
435
+ warc_paths)
436
+ else:
437
+ for warc_path in warc_paths:
438
+ __start_commoncrawl_extractor(warc_path,
439
+ callback_on_article_extracted=callback_on_article_extracted,
440
+ callback_on_warc_completed=__callback_on_warc_completed,
441
+ valid_hosts=valid_hosts,
442
+ start_date=start_date, end_date=end_date,
443
+ strict_date=strict_date,
444
+ reuse_previously_downloaded_files=reuse_previously_downloaded_files,
445
+ local_download_dir_warc=local_download_dir_warc,
446
+ continue_after_error=continue_after_error,
447
+ show_download_progress=show_download_progress,
448
+ log_level=log_level,
449
+ delete_warc_after_extraction=delete_warc_after_extraction,
450
+ log_pathname_fully_extracted_warcs=__log_pathname_fully_extracted_warcs,
451
+ extractor_cls=extractor_cls,
452
+ fetch_images=fetch_images,
453
+ local_download_dir_article=local_download_dir_article)
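
Before anything is downloaded, WARC files are pre-filtered by the timestamp embedded in their filename (`__extract_date_from_warc_filename` plus `__date_within_period`). A sketch of that selection on an illustrative path:

```python
import datetime
import os

def warc_date(path):
    # Assumes the CC-NEWS naming scheme, e.g. CC-NEWS-20160911145202-00018.warc.gz
    stamp = os.path.basename(path).replace("CC-NEWS-", "").split("-")[0]
    try:
        return datetime.datetime.strptime(stamp, "%Y%m%d%H%M%S")
    except ValueError:
        return datetime.datetime(1900, 1, 1)  # clearly outside any filter window

path = "crawl-data/CC-NEWS/2016/09/CC-NEWS-20160911145202-00018.warc.gz"
start, end = datetime.datetime(2016, 8, 26), datetime.datetime(2017, 1, 1)
print(start <= warc_date(path) < end)  # True
```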
download.sh ADDED
@@ -0,0 +1,6 @@
1
+ wget https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2016/pt.jsonl.gz -O 2016_pt.jsonl.gz
2
+ wget https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2017/pt.jsonl.gz -O 2017_pt.jsonl.gz
3
+ wget https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2018/pt.jsonl.gz -O 2018_pt.jsonl.gz
4
+ wget https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2019/pt.jsonl.gz -O 2019_pt.jsonl.gz
5
+ wget https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2020/pt.jsonl.gz -O 2020_pt.jsonl.gz
6
+ wget https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2021/pt.jsonl.gz -O 2021_pt.jsonl.gz
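
A quick sanity check after running `download.sh`; the filename comes from the `-O` flags above, and the key names are the ones consumed by `cc_news_pt.py`:

```python
import gzip
import json

# Read the first record of one downloaded dump and list its keys.
with gzip.open("2016_pt.jsonl.gz", "rt", encoding="utf-8") as f:
    record = json.loads(next(f))
print(sorted(record))  # expect keys such as "title", "maintext", "source_domain", "date_publish"
```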