""" Generates dataset from CCEL ThML files """ import re import os import json import xml.etree.ElementTree as ET from languagemodels.embeddings import embed from time import perf_counter def get_refs(root): refs = [] for child in root: if child.tag == "scripRef": if "osisRef" in child.attrib: refs.append(child.attrib["osisRef"]) return refs def get_text_content(root, strip_ref=False): """Return the plain text content of a node This will remove all notes, footnotes, and references """ # Start with the root text prior to the first node text = root.text or "" for child in root: if child.text: if child.tag != "scripRef" or not strip_ref: text += child.text if child.tail: # Add the root node text between this child and the next child text += child.tail return text def get_field(root, field): try: text = root.find(f".//{field}").text text = re.sub(r"\s+", " ", text) except: text = None return text log = open("log.txt", "w") jsonl = open("train.jsonl", "w") count = 0 def get_paras(): global count paras = [] from pathlib import Path for filename in list(Path(".").rglob("*.xml")): filename = str(filename) if "authInfo." in filename: continue try: tree = ET.parse(filename) except: print("ERROR: Unable to parse:", filename) continue root = tree.getroot() # Skip content with copyright restrictions # The copyright statements aren’t consistent. # The ones that contain the text "public domain" (case insensitive) # or nothing at all should be safe. rights = get_field(root, "DC.Rights") if rights and 'public domain' not in rights.lower(): print(f"Skipped {filename} due to copyright: {rights}", file=log, flush=True) continue title = get_field(root, "title") published = get_field(root, "firstPublished") author = get_field(root, "authorID") for i, p in enumerate(root.findall(".//p")): xml = ET.tostring(p, encoding="unicode") text = get_text_content(p) if "id" in p.attrib: start = perf_counter() refs = get_refs(p) emb = list(float(n) for n in embed([text])[0]) count += 1 row = { "id": filename + ":" + p.attrib["id"], "text": text, "thml": xml, "refs": refs, "author": author, "title": title, "rights": rights, "published": published, "all-MiniLM-L6-v2": emb, } print( count, f"{perf_counter()-start:.2f}", len(row["text"]), len(row["thml"]), row["id"], row["all-MiniLM-L6-v2"][:2], file=log, flush=True, ) print(json.dumps(row), file=jsonl, flush=True) yield row from datasets import Dataset ds = Dataset.from_generator(get_paras) ds.push_to_hub("jncraton/ccel-paragraphs")