# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
# Copyright 2021 Phonetics and Speech Laboratory, Trinity College, Dublin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3

import os
from pathlib import Path

import datasets

from bs4 import BeautifulSoup
import requests

_DESCRIPTION = """\
Foinse was an Irish-language magazine site.
This script uses a list of articles retrieved from the
Wayback Machine to build a corpus.
"""

_DATA_URL = "https://huggingface.co/datasets/jimregan/foinse/raw/main/urls.txt"
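# Plain-text file with one article URL per line; read by _get_links() below.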


class FoinseDataset(datasets.GeneratorBasedBuilder):
    """Scraper dataset for Foinse."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="documents", version=VERSION, description="Plain text portion of the corpus: whole documents"),
        datasets.BuilderConfig(name="paragraphs", version=VERSION, description="Plain text portion of the corpus: paragraphs"),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "title": datasets.Value("string"),
                "url": datasets.Value("string"),
                "author": datasets.Value("string"),
                "date_text": datasets.Value("string"),
                "text": datasets.Value("string"),
                "category": datasets.Value("string"),
                "subcategory": datasets.Value("string"),
                "summary": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_path = dl_manager.download(_DATA_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "data_file": dl_path
                },
            ),
        ]

    def _generate_examples(self, split, data_file):
        """Yields examples as (key, example) tuples."""
        links = _get_links(data_file)
        _id = 1
        for url in links:
            content = get_content(url)

            paras = content.get("text", [])
            if self.config.name == "documents":
                paras = ['\n'.join(paras)]
            for para in paras:
                yield _id, {
                    "title": content.get("title", ""),
                    "url": url,
                    "author": content.get("author", ""),
                    "date_text": content.get("published", ""),
                    "category": content.get("category", ""),
                    "subcategory": content.get("subcategory", ""),
                    "summary": content.get("summary", ""),
                    "text": para
                }
                _id += 1


def get_content(url):
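    """Fetch a single Foinse article and extract its metadata, text, and glossary."""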
    out = {}
    page = requests.get(url)
    if page.status_code != 200:
        return {}
    page_content = page.text

    soup = BeautifulSoup(page_content, "lxml")

    # The article body is in the div with class "item-page"; fall back to the
    # "ja-main" div if it is missing.
    content = soup.find("div", {"class": "item-page"})
    if not content:
        content = soup.find("div", {"id": "ja-main"})
    if not content:
        return {}
    
    breadcrumbs = soup.find("div", {"class": "ja-breadcrums"})
    if breadcrumbs:
        # The breadcrumb trail supplies the article's category.
        here = breadcrumbs.find("a", {"class": "pathway"})
        if not here:
            here = breadcrumbs.find("span", {"class": "pathway"})
        if here:
            out["category"] = here.text.strip()
    
    # Remove junk before extracting text: the "jc" div, page navigation, and inline scripts.
    jc = content.find("div", {"id": "jc"})
    if jc:
        jc.extract()
    pagenav = content.find("ul", {"class": "pagenav"})
    if pagenav:
        pagenav.extract()
    for js in content.find_all("script", {"type": "text/javascript"}):
        js.extract()

    # The <h2> holds the article title; the <h1> heading is kept as the subcategory.
    h2 = content.find("h2")
    if h2:
        title = h2.text.strip()
        if title:
            out["title"] = title
        h2.extract()

    h1 = content.find("h1")
    if h1:
        heading = h1.text.strip()
        if heading:
            out["subcategory"] = heading
        h1.extract()

    published_tag = content.find("dd", {"class": "published"})
    if not published_tag:
        published_tag = content.find("span", {"class": "createdate"})
    if published_tag:
        out["published"] = published_tag.text.strip()

    author_tag = content.find("dd", {"class": "createdby"})
    if not author_tag:
        author_tag = content.find("span", {"class": "createby"})
    if author_tag:
        out["author"] = author_tag.text.strip()
    artinfo = content.find("dl", {"class": "article-info"})
    if not artinfo:
        artinfo = content.find("div", {"class": "article-meta"})
    if artinfo:
        artinfo.extract()

    # Prefer text from <p> tags; the raw line-split below is used instead
    # whenever the two disagree (e.g. text outside paragraph markup).
    paragraphs_tags = content.find_all("p")
    paragraphs = [p.text.replace("\xa0", " ").strip() for p in paragraphs_tags]
    out["text"] = paragraphs
    
    raw_text = content.text
    
    raw_out = []
    for raw_line in raw_text.split("\n"):
        line = raw_line.replace("\xa0", " ").strip()
        if line == "":
            continue
        raw_out.append(line)
    if paragraphs != raw_out:
        out["text"] = raw_out
        
    summary = extract_summary(out["text"])
    if summary:
        out["summary"] = summary
    out["text"] = filter_para_list(out["text"])

    vocab_list = []
    for vocab in content.find_all("a", {"class": "glossarylink"}):
        # Glossary links pair an English gloss (title attribute) with the Irish term (link text).
        item = {}
        item["en"] = (vocab.get("title") or "").strip()
        item["ga"] = vocab.text.strip()
        vocab_list.append(item)
    out["vocab"] = vocab_list
    
    return out


def extract_summary(inlist):
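    """Returns the trailing English summary paragraph if the article has one, else ""."""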
    if len(inlist) > 2:
        if inlist[-2] == "Did you understand this story? Here are the main points:":
            return inlist[-1]
    return ""


def filter_para_list(inlist):
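    """Drops empty paragraphs and truncates the list at the site's boilerplate footer lines."""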
    out = []
    for para in inlist:
        if para == "":
            continue
        elif para.strip() == "Foinse - News as Gaeilge":
            return out
        elif para.strip() == "Did you understand this story? Here are the main points:":
            return out
        else:
            out.append(para)
    return out


def _get_links(scrape):
    """Reads the downloaded URL list, one article URL per line, deduplicated."""
    links = set()
    if not os.path.exists(scrape):
        raise FileNotFoundError(f"File {scrape} does not exist")
    with open(scrape) as f:
        for url in f:
            links.add(url.rstrip())
    return list(links)
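

if __name__ == "__main__":
    # Minimal smoke test, not part of the loader itself: scrape the article
    # URLs given on the command line and print a short summary of what the
    # loader would yield for each. Assumes network access; any URL passed in
    # is purely illustrative.
    import sys

    for test_url in sys.argv[1:]:
        parsed = get_content(test_url)
        print(parsed.get("title", ""), "|", len(parsed.get("text", [])), "paragraphs")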