File size: 3,447 Bytes
ebb16e0
 
46e758c
 
62c7aee
46e758c
62c7aee
 
46e758c
554bdd4
46e758c
 
 
 
554bdd4
 
 
46e758c
 
 
554bdd4
46e758c
554bdd4
46e758c
62c7aee
46e758c
 
 
 
 
 
 
554bdd4
46e758c
 
 
 
 
62c7aee
46e758c
e07d4c1
 
 
 
 
 
 
 
46e758c
62c7aee
 
 
46e758c
554bdd4
46e758c
62c7aee
46e758c
 
 
554bdd4
46e758c
 
 
 
 
 
 
 
 
554bdd4
46e758c
 
ae79a9c
 
 
 
e07d4c1
ae79a9c
 
 
 
e07d4c1
 
 
 
ae79a9c
46e758c
62c7aee
46e758c
 
62c7aee
 
 
 
 
46e758c
62c7aee
554bdd4
46e758c
62c7aee
 
e07d4c1
 
ae79a9c
e07d4c1
62c7aee
46e758c
 
62c7aee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
554bdd4
46e758c
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
""" Generates dataset from CCEL ThML files """

import re
import os
import json
import xml.etree.ElementTree as ET
from languagemodels.embeddings import embed
from time import perf_counter


def get_refs(root):
    """Collect osisRef attribute values from the direct scripRef children of a node.

    Only immediate children are inspected; scripRef elements without an
    osisRef attribute are ignored.
    """
    return [
        node.attrib["osisRef"]
        for node in root
        if node.tag == "scripRef" and "osisRef" in node.attrib
    ]


def get_text_content(root, strip_ref=False):
    """Return the shallow plain-text content of a node.

    Concatenates the node's own leading text with the text and tail of
    each direct child (grandchildren are not descended into).  When
    ``strip_ref`` is true, the text inside scripRef children is omitted;
    their tails are always kept, since a tail belongs to the parent's
    text flow, not to the reference itself.
    """
    parts = [root.text or ""]

    for node in root:
        # Drop a child's text only when it is a scripture ref and the
        # caller asked for refs to be stripped.
        if node.text and (node.tag != "scripRef" or not strip_ref):
            parts.append(node.text)
        # Text between this child and the next sibling.
        if node.tail:
            parts.append(node.tail)

    return "".join(parts)

def get_field(root, field):
    """Return the whitespace-normalized text of the first ``field`` element.

    Searches the whole subtree for the first element whose tag matches
    ``field`` and collapses every run of whitespace in its text to a
    single space.  Returns None when the element is absent or has no
    text content.

    (Previously used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; the two real failure modes — missing
    element and empty element — are now handled explicitly.)
    """
    node = root.find(f".//{field}")
    if node is None or node.text is None:
        return None
    return re.sub(r"\s+", " ", node.text)

# Process-lifetime output streams: a human-readable progress log and the
# JSONL dataset file.  Opened once at import time and never closed; every
# write below uses flush=True so progress survives a crash.  Explicit
# UTF-8 avoids platform-default encodings choking on non-ASCII ThML text.
log = open("log.txt", "w", encoding="utf-8")
jsonl = open("train.jsonl", "w", encoding="utf-8")
# Running total of embedded paragraphs, updated inside get_paras().
count = 0


def get_paras():
    """Yield one dataset row per identified <p> element across all ThML files.

    Walks every *.xml file under the current directory (skipping
    authInfo.* catalog files), discards documents whose DC.Rights
    statement exists but does not contain "public domain", and for each
    paragraph carrying an "id" attribute yields a dict with the plain
    text, raw ThML, scripture refs, document metadata, and a MiniLM
    sentence embedding.  As a side effect, each row is appended to
    train.jsonl and a progress line is written to log.txt.
    """
    global count

    from pathlib import Path

    for path in Path(".").rglob("*.xml"):
        filename = str(path)

        # authInfo.* files are catalog metadata, not document content.
        if "authInfo." in filename:
            continue

        try:
            tree = ET.parse(filename)
        except ET.ParseError:
            # Narrowed from a bare except: only malformed XML is skipped.
            print("ERROR: Unable to parse:", filename)
            continue

        root = tree.getroot()

        # Skip content with copyright restrictions.
        # The copyright statements aren't consistent.  The ones that
        # contain the text "public domain" (case insensitive) or nothing
        # at all should be safe.
        rights = get_field(root, "DC.Rights")

        if rights and "public domain" not in rights.lower():
            # Log the offending file (was a literal "(unknown)" placeholder).
            print(f"Skipped {filename} due to copyright: {rights}", file=log, flush=True)
            continue

        title = get_field(root, "title")
        published = get_field(root, "firstPublished")
        author = get_field(root, "authorID")

        for p in root.findall(".//p"):
            # Only paragraphs with stable ids can be addressed later.
            if "id" not in p.attrib:
                continue

            xml = ET.tostring(p, encoding="unicode")
            text = get_text_content(p)

            start = perf_counter()
            refs = get_refs(p)
            emb = [float(n) for n in embed([text])[0]]
            count += 1

            row = {
                "id": filename + ":" + p.attrib["id"],
                "text": text,
                "thml": xml,
                "refs": refs,
                "author": author,
                "title": title,
                "rights": rights,
                "published": published,
                "all-MiniLM-L6-v2": emb,
            }

            # Progress line: row count, embedding seconds, text/thml
            # sizes, row id, and the first two embedding dims as a
            # quick sanity check.
            print(
                count,
                f"{perf_counter()-start:.2f}",
                len(row["text"]),
                len(row["thml"]),
                row["id"],
                row["all-MiniLM-L6-v2"][:2],
                file=log,
                flush=True,
            )

            print(json.dumps(row), file=jsonl, flush=True)

            yield row


# Script entry: build a Hugging Face dataset by streaming rows from
# get_paras() and upload it to the Hub.
# NOTE(review): this runs at import time, not behind a __main__ guard,
# and push_to_hub presumably requires huggingface-hub credentials to be
# configured — confirm before reusing this module as a library.
from datasets import Dataset

ds = Dataset.from_generator(get_paras)
ds.push_to_hub("jncraton/ccel-paragraphs")