# SuperWiki-1.5 / Scripts / dictation_processor.py
import multiprocessing
import pathlib
import traceback
from bs4 import BeautifulSoup
import orjson
import unidecode
from RojaStringRemixer import MultilangWikipediaProcessor
class WiktionaryProcessor(MultilangWikipediaProcessor):
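    """Wiktionary-specific variant of MultilangWikipediaProcessor.

    Wiktionary pages group entries by language (h2) and then by
    sub-category (h3), so sectioning and cleanup differ from Wikipedia.
    """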
    def soup_section(self, input_soup: BeautifulSoup, title: str):
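        # Collect content into nested buffers: `languages` holds one entry per
        # language (h2); each entry is a list of sub-sections whose first item
        # is the heading text and the rest is markdown-converted content.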
# Wiktionary is typically single layered
languages = []
lang_buffer = []
sub_cat_buffer = []
for sect in input_soup.select(".mw-body-content section"):
sect.unwrap()
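        # With the <section> wrappers unwrapped, headings and content become
        # direct children of .mw-body-content; strip metadata nodes before walking them.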
[meta.decompose() for meta in input_soup.select(".mw-body-content meta")]
[meta.decompose() for meta in input_soup.select(".mw-body-content link")]
[meta.decompose() for meta in input_soup.select(".disambig-see-also")]
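        # Bucket each top-level node under the current h2 (language) and
        # h3 (sub-category) headings.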
for html_element in input_soup.select_one(".mw-body-content").children:
            if html_element is None or html_element.decomposed:
continue
if html_element.name == "h1":
print("[?] Expecting lowest header element to be h1")
continue
if html_element.name == "h2":
# lang_buffer.append(sub_cat_buffer)
if sub_cat_buffer:
lang_buffer.append(sub_cat_buffer)
sub_cat_buffer = [self.md.convert_soup(html_element).strip()]
else:
sub_cat_buffer = [self.md.convert_soup(html_element).strip()]
if lang_buffer:
languages.append(lang_buffer)
lang_buffer = []
elif html_element.name == "h3":
lang_buffer.append(sub_cat_buffer)
sub_cat_buffer = [self.md.convert_soup(html_element).strip()]
else:
if html_element.get_text().strip():
sub_cat_buffer.append(self.md.convert_soup(html_element).strip())
if sub_cat_buffer:
lang_buffer.append(sub_cat_buffer)
if lang_buffer:
languages.append(lang_buffer)
final_txt = {}
for root_lang in languages:
lang_data = []
for sub_pair in root_lang[1:]:
concat = " \n".join(sub_pair[1:])
lang_data.append(f"## {sub_pair[0]}\n\n{concat}")
lang_final = f"# {title.title()}: {root_lang[0][0]}\n\n" + "\n\n".join(lang_data)
            txt = (
                unidecode.unidecode(
                    lang_final.replace("“", '"').replace("”", '"').replace("\t", " ")
                )
                .encode("utf-8", errors="replace")
                .decode("unicode_escape", errors="ignore")
            )
final_txt[root_lang[0][0].lower()] = txt
return final_txt
all_selectors = [
"style", # Remove styling
"sup.reference", # Seems to still exist across
"table.nomobile", # Seems to still exist across
"div.sister-wikipedia",
".floatright",
]
def convert_soup(self, input_soup: BeautifulSoup):
        # Unwrap generic interface notices, keeping their contents without the wrapper
[i.unwrap() for i in input_soup.select('[data-mw^="interface"]')]
[i.decompose() for i in input_soup.select(", ".join(self.all_selectors))]
[i.decompose() for i in input_soup.select("[class*=\"NavHead\"]")]
title = input_soup.select_one("title").extract()
for i in input_soup.select(".mw-collapsible"):
hidden = i.select_one("div.hidden-content")
if hidden:
# Expose collapsed content
hidden["class"].remove("hidden-content")
# Cleanup meta
for i in input_soup.select("[data-mw]"):
del i["data-mw"]
for i in input_soup.select("[rel]"):
del i["rel"]
for i in input_soup.select("link[href]"):
del i["href"]
for i in input_soup.select("link[typeof]"):
del i["typeof"]
for i in input_soup.select("[id]"):
del i["id"]
for i in input_soup.select("[about]"):
del i["about"]
for i in input_soup.select("[lang]"):
del i["lang"]
ifbs = [i.extract() for i in input_soup.select("table.infobox")]
ifbs += [i.extract() for i in input_soup.select("table.sidebar.vcard.hlist")]
ifbs += [i.extract() for i in input_soup.select("table.infobox.vcard")]
ifbs = [self.process_infobox(ifb) for ifb in ifbs]
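        # Remove thumbnail <figure> elements; images are not kept in the output.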
        [fig.decompose() for fig in input_soup.select('figure[typeof^="mw:File/Thumb"]')]
return input_soup, ifbs, None, title
def convert(self, wiki_data: bytes):
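        # Each call receives one NDJSON line from the Wiktionary HTML dump.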
data = orjson.loads(wiki_data.rstrip(b"\n"))
try:
templates = [
":".join(template["name"].split(":")[1:])
for template in data.get("templates", [])
]
categories = [
":".join(category["name"].split(":")[1:])
for category in data.get("categories", [])
]
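            # Skip entries that ship without wikitext.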
if not data["article_body"].get("wikitext"):
return None
soup = BeautifulSoup(data["article_body"]["html"], "lxml")
is_stub = self.is_stub(soup)
soup, infobox, figures, title = self.convert_soup(soup)
sections = self.soup_section(soup, data["name"])
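            # Flat text field: the first language's section, minus the leading "# ".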
text = list(sections.values())[0][2:]
return orjson.dumps(
{
"id": data["identifier"],
"title": data["name"],
"url": data["url"],
"text": text,
"stub": is_stub,
"template": templates,
"category": categories,
"license": [lic["name"] for lic in data["license"]],
"wikitext": data["article_body"].get("wikitext"),
"lang": data["in_language"]["identifier"],
"abstract": data.get("abstract", ""),
"sections": sections,
"infobox_html": infobox,
"figures_dict": figures,
}
)
except Exception as e:
print(f"Exception at Soup Conversion: {e} [{data['name']}]\n{traceback.format_exception(e)[-1]}")
return None
mp = WiktionaryProcessor()
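# The English Wiktionary namespace-0 dump is split across 16 NDJSON shards.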
fs = [
pathlib.Path(f"wiktionary/enwiktionary_namespace_0_{i}.ndjson") for i in range(16)
]
def main():
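    # Convert every shard line-by-line across a 32-process pool and write the
    # results to a single JSONL file.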
with open("en-wiktionary.jsonl","wb") as fout:
with multiprocessing.Pool(32) as pool:
for file in fs:
tasks = []
with open(file,"rb") as f:
for line in f:
tasks.append(pool.apply_async(mp.convert,(line,)))
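                        # Drain completed tasks every 5000 lines to bound memory use.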
if len(tasks) % 5000 == 0:
for task in tasks:
r = task.get()
if r is None:
continue
fout.write(r + b"\n")
tasks = []
for task in tasks:
r = task.get()
if r is None:
continue
fout.write(r + b"\n")
if __name__ == "__main__":
    main()