Process the increments, without duplicates
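In short: the incremental packages are now processed before the baselines, newest first (note the [::-1] on the zipped lists), and a shared processed_ids set ensures each PMCID is yielded only once, so the most recent version of every article wins. A minimal sketch of that ordering/dedup pattern, with toy data rather than the real archives:

    # Archives ordered oldest -> newest; iterating newest-first keeps the
    # latest version of each article and skips older copies.
    archives = [
        [("PMC1", "v1"), ("PMC2", "v1")],  # baseline
        [("PMC1", "v2")],                  # incremental 1
        [("PMC1", "v3"), ("PMC3", "v1")],  # incremental 2 (newest)
    ]

    processed_ids = set()
    for archive in reversed(archives):
        for pmcid, version in archive:
            if pmcid in processed_ids:
                continue  # a newer archive already yielded this article
            processed_ids.add(pmcid)
            print(pmcid, version)  # -> PMC1 v3, PMC3 v1, PMC2 v1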
pmc_open_access_xml.py  +82 -22
@@ -538,16 +538,21 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
         oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
         oa_package_list = oa_package_list[["File"]]
         oa_package_list.sort_index(inplace=True)
-
-        # Baselines
-        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
+        processed_ids = set()

-
-
-
-
-
-
+        # Incrementals
+        if incremental_file_lists:
+            for incremental_file_list, incremental_archive in zip(incremental_file_lists[::-1], incremental_archives[::-1]):
+                incrementals = pd.read_csv(incremental_file_list, index_col="AccessionID")
+                incrementals = incrementals.join(oa_package_list).reset_index().set_index("Article File")
+                incrementals.File = incrementals.File.fillna('')
+                incrementals = incrementals.to_dict(orient="index")
+
+                for path, file in incremental_archive:
+                    data = incrementals.pop(path)
+                    pmcid = data["AccessionID"]
+                    if pmcid in processed_ids: #oa_package_list.loc[pmcid, "yet_processed"]:
+                        continue
                     content = file.read()
                     try:
                         text = content.decode("utf-8").strip()
@@ -561,7 +566,6 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):

                     content_d, reference_d, reference_text_d, n_ref = construct_datadict(article_tree)
                     glossary = np.array([[k,v] for k,v in content_d["glossary"].items()])
-                    pmcid = data["AccessionID"]
                     data = {
                         "introduction": content_d["introduction"],
                         "methods": content_d["methods"],
@@ -582,7 +586,6 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                         "footnote": content_d["footnote"],
                         "graphic": content_d["graphic"],
                         "media": content_d["media"],
-
                         # "question": content_d["question"],
                         "unknown_pub": content_d["unknown_pub"],
                         "references": reference_d,
@@ -597,17 +600,74 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                         "citation": data["Article Citation"],
                         "package_file": data["File"],
                     }
-
+                    processed_ids.add(pmcid)
                     yield pmcid, data

-
-
+        # Baselines
+        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
+
+            #try:
+            baselines = pd.read_csv(baseline_file_list, index_col="AccessionID")
+            baselines = baselines.join(oa_package_list).reset_index().set_index("Article File")
+            baselines.File = baselines.File.fillna('')
+            baselines = baselines.to_dict(orient="index")
+
+            for path, file in baseline_archive:
+                data = baselines.pop(path)
+                pmcid = data["AccessionID"]
+                if pmcid in processed_ids:
+                    continue
+                content = file.read()
+                try:
+                    text = content.decode("utf-8").strip()
+                except UnicodeDecodeError as e:
+                    text = content.decode("latin-1").strip()
+                text = clean_raw(text)
+                try:
+                    article_tree = etree.ElementTree(etree.fromstring(text))
+                except etree.XMLSyntaxError: #In some files, xml is broken
+                    continue
+
+                content_d, reference_d, reference_text_d, n_ref = construct_datadict(article_tree)
+                glossary = np.array([[k,v] for k,v in content_d["glossary"].items()])
+                data = {
+                    "introduction": content_d["introduction"],
+                    "methods": content_d["methods"],
+                    "results": content_d["results"],
+                    "discussion": content_d["discussion"],
+                    "conclusion": content_d["conclusion"],
+                    "front": content_d["front"],
+                    "body": content_d["body"],
+                    "back": content_d["back"],
+                    "figure": content_d["figure"],
+                    "table": content_d["table"],
+                    "formula": content_d["formula"],
+                    "box": content_d["box"],
+                    "code": content_d["code"],
+                    "quote": content_d["quote"],
+                    "chem": content_d["chem"],
+                    "supplementary": content_d["supplementary"],
+                    "footnote": content_d["footnote"],
+                    "graphic": content_d["graphic"],
+                    "media": content_d["media"],
+                    # "question": content_d["question"],
+                    "unknown_pub": content_d["unknown_pub"],
+                    "references": reference_d,
+                    "references_text": reference_text_d,
+                    "glossary": {"acronym":glossary[:,0], "definition":glossary[:,1]} if len(glossary)>0 else {"acronym":[], "definition":[]},
+                    "n_references": n_ref,
+                    "pmid": data["PMID"],
+                    "accession_id": pmcid,
+                    "license": data["License"],
+                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
+                    "retracted": data["Retracted"],
+                    "citation": data["Article Citation"],
+                    "package_file": data["File"],
+                }
+                processed_ids.add(pmcid)
+                yield pmcid, data
+
+            #except FileNotFoundError: # non-commercial PMC000xxxxxx baseline does not exist
+            #    continue

-
-        # if incremental_file_lists:
-        #     for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
-        #         incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
-        #         for path, file in incremental_archive:
-        #             ...
-        #
-        #             yield key, data
+
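The file-list CSVs are keyed by PMCID while the tar members are looked up by path, so both loops above join the list against oa_package_list and then re-key it by "Article File"; reset_index keeps the PMCID as an ordinary column, which is why the loop can still read data["AccessionID"] after pop(path). A rough, self-contained illustration of that join, using the column names from the diff but fabricated values:

    import pandas as pd

    # Toy stand-ins for the two tables joined above; the values are made up.
    oa_package_list = pd.DataFrame({
        "Accession ID": ["PMC13900", "PMC13901"],
        "File": ["oa_package/08/e0/PMC13900.tar.gz", "oa_package/b0/ac/PMC13901.tar.gz"],
    }).set_index("Accession ID")[["File"]]

    file_list = pd.DataFrame({
        "AccessionID": ["PMC13900", "PMC13901"],
        "Article File": ["PMC000xxxxxx/PMC13900.xml", "PMC000xxxxxx/PMC13901.xml"],
    }).set_index("AccessionID")

    # Join on the PMCID index, then re-key by the member path inside the
    # archive, so each tar member maps straight to its metadata row.
    meta = file_list.join(oa_package_list).reset_index().set_index("Article File")
    meta.File = meta.File.fillna("")  # articles missing from the package list
    meta = meta.to_dict(orient="index")

    print(meta["PMC000xxxxxx/PMC13900.xml"]["AccessionID"])  # PMC13900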
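The new baseline loop also reuses the defensive decoding and parsing already applied to the incrementals: UTF-8 first, Latin-1 as a fallback (Latin-1 accepts any byte sequence, so it cannot raise), and articles whose XML does not parse are skipped instead of aborting the generator. As a standalone sketch, with clean_raw (the loader's own preprocessing helper) left out:

    from lxml import etree

    def parse_member(content: bytes):
        # Decode fallback as in the diff: UTF-8, then Latin-1.
        try:
            text = content.decode("utf-8").strip()
        except UnicodeDecodeError:
            text = content.decode("latin-1").strip()
        # The generator skips broken XML with `continue`; here we return None.
        try:
            return etree.ElementTree(etree.fromstring(text))
        except etree.XMLSyntaxError:
            return None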