Commit: update

Files changed: corpuscrawler-ga.py (+3 −1)
@@ -83,7 +83,7 @@ class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
         if not self.config.name:
             raise ValueError(f"Scrape set must be specified, but got name={self.config.name}")
         scrape_set = self.config.name
-        sset= self.config.name.split('_')[0]
+        sset = self.config.name.split('_')[0]
         dl_path = dl_manager.download(_DATA_URL.format(sset))

         return [
@@ -110,6 +110,8 @@ class CorpusCrawlerIrish(datasets.GeneratorBasedBuilder):
         _id = 1
         for link in links:
             res = self._fetch_page(link, data_dir)
+            if res is None:
+                raise Exception("Failed to read " + link + " from " + data_dir)
             if scfg == "documents":
                 text = ["\n".join(res.get('text', []))]
             else: