Datasets:

Modalities:
Text
Languages:
English
Size:
< 1K
Libraries:
Datasets
License:
shamikbose89 committed on
Commit
7dde5ee
·
1 Parent(s): 47a7ae2

Upload lancaster_newsbooks.py

Browse files
Files changed (1) hide show
  1. lancaster_newsbooks.py +101 -0
lancaster_newsbooks.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """The Lampeter Corpus of Early Modern English Tracts is a collection of texts
16
+ on various subject matter published between 1640 and 1740 – a time that is marked
17
+ by the rise of mass publication, the development of a public discourse in many
18
+ areas of everyday life and, last but not least, the standardisation of British English."""
19
+
20
+
21
+ import os
22
+ import glob
23
+ import datasets
24
+ from bs4 import BeautifulSoup
25
+
26
# BibTeX entry for the Oxford Text Archive deposit of the corpus.
_CITATION = """ @misc{20.500.12024/2531,
title = {The Lancaster Newsbooks Corpus},
author = {Thomason, George, d. 1666},
url = {http://hdl.handle.net/20.500.12024/2531},
note = {Oxford Text Archive},
copyright = {Distributed by the University of Oxford under a Creative Commons Attribution-{NonCommercial}-{ShareAlike} 3.0 Unported License.},
year = {2005} }
"""

# Long-form description shown on the dataset card.
_DESCRIPTION = """This corpus consists of two collections of seventeenth-century English "newsbooks". Both were drawn from the Thomason Tracts collection, which is held at the British Library and available in graphical form via Early English Books Online (EEBO). The construction of these keyboarded versions were in both cases funded by the British Academy.
The FIRST collection (1654_newsbooks) consists of every newsbook published in London and still surviving in the Thomason Tracts from the first half of 1654 (to be precise, for the second half of December 1653 to the end of May 1654, with one or two additions from the first week in June, 1654). This was constructed for the project "Looking at text re-use in a corpus of seventeenth-century news reportage", funded by the British Academy, grant reference SG-33825.
The SECOND collection (mercurius_fumigosus) consists of every surviving issue published of the highly idiosyncratic newsbook "Mercurius Fumigosus", written by John Crouch between summer 1654 and early autumn 1655. This was constructed for the project "Decoding the news - Mercurius Fumigosus as a source of news in the interregnum, 1654-1655", funded by the British Academy, grant reference LRG-35423.
This is version 1.0 of the corpus, released April 2007; it supercedes earlier versions circulated informally.
For more information about the corpus, see www.ling.lancs.ac.uk/newsbooks
"""

# Landing page of the corpus at the Oxford Text Archive.
_HOMEPAGE = "https://ota.bodleian.ox.ac.uk/repository/xmlui/handle/20.500.12024/2531"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License"

# "allzip" endpoint: a single archive bundling one zip per collection.
_URL = "https://ota.bodleian.ox.ac.uk/repository/xmlui/handle/20.500.12024/2531/allzip"

logger = datasets.utils.logging.get_logger(__name__)
+
50
class LancasterNewsbooks(datasets.GeneratorBasedBuilder):
    """This corpus consists of two collections of seventeenth-century English "newsbooks" stored as a set of 303 XML files."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: each example carries a string ``id``,
        ``text`` (the concatenated paragraph text) and ``title``."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "title": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the "allzip" bundle, extract each inner per-collection
        zip, and expose every extracted directory to a single TRAIN split.

        The bundle contains one zip per collection; each extracts into a
        directory named after the zip (basename minus the ".zip" suffix).
        """
        data_dir = dl_manager.download_and_extract(_URL)
        subdirs = []
        # sorted() so the split always sees the collections in the same
        # order regardless of filesystem listing order.
        for zip_path in sorted(glob.glob(os.path.join(data_dir, "*.zip"))):
            extracted = dl_manager.extract(zip_path)
            subdirs.append(os.path.join(extracted, os.path.basename(zip_path)[:-4]))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dirs": subdirs,
                },
            ),
        ]

    def _generate_examples(self, data_dirs):
        """Yield ``(key, example)`` pairs parsed from the corpus XML files.

        Files are visited in sorted order so that example order is
        deterministic across runs and platforms (glob order is
        filesystem-dependent).
        """
        for subdir in data_dirs:
            for xml_path in sorted(glob.glob(os.path.join(subdir, "*.xml"))):
                # Explicit encoding so parsing does not depend on the
                # platform locale default; assumes the OTA XML is UTF-8 —
                # TODO(review): confirm against the distributed files.
                with open(xml_path, "r", encoding="utf-8") as fp:
                    soup = BeautifulSoup(fp, features="xml")
                    title = soup.find("title").text
                    # `example_id` instead of `id` to avoid shadowing the builtin.
                    example_id = soup.newsbookDoc.attrs["id"]
                    text_parts = [part.text for part in soup.find_all("p")]
                full_text = " ".join(text_parts)
                yield example_id, {"id": example_id, "title": title, "text": full_text}