# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This dataset script is based on the pmc/open_access.py loading script.
"""PMC Open Access Subset, parsed into plain-text sections."""

import datetime
import pandas as pd
import numpy as np
from itertools import compress, chain
from collections import defaultdict
import re
from lxml import etree
import json
import html
import unicodedata

import datasets
from datasets.tasks import LanguageModeling


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints
that are made available under license terms that allow reuse. Not all articles in PMC
are available for text mining and other reuse; many have copyright protection. Articles
in the PMC Open Access Subset, however, are made available under Creative Commons or
similar licenses that generally allow more liberal redistribution and reuse than a
traditional copyrighted work. The PMC Open Access Subset is one part of the PMC Article
Datasets.

This version takes the XML release as its source, benefiting from the structured text
to split the articles into sections: introduction, methods, results, discussion and
conclusion, as well as front, body and back matter. The XML markup is then removed and
the content is formatted as plain text.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = """
https://www.ncbi.nlm.nih.gov/pmc/about/copyright/

Within the PMC Open Access Subset, there are three groupings:

Commercial Use Allowed - CC0, CC BY, CC BY-SA, CC BY-ND licenses;
Non-Commercial Use Only - CC BY-NC, CC BY-NC-SA, CC BY-NC-ND licenses; and
Other - no machine-readable Creative Commons license, no license, or a custom license.
"""

_URL_ROOT = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/"
_URL = _URL_ROOT + "oa_bulk/{subset}/xml/"

_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2023-12-18"

# (begin_doc_rgx, the clean_raw() and construct_datadict() helpers, and the builder
# class with its config and _info definitions are elided here.)

    def _split_generators(self, dl_manager):
        # Assumed scaffolding for the start of this method: the subset bulk URL (`url`),
        # the archive basename (`basename`), the baseline package names (`baselines`)
        # and the downloaded master package list (`baseline_package_list`) are prepared
        # from _URL, _SUBSETS and _BASELINE_DATE before the download loop below.
        baseline_file_lists = []
        baseline_archives = []
        incremental_paths = defaultdict(list)

        for baseline in baselines:
            baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
            baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
            baseline_file_list = dl_manager.download(baseline_file_list_url)
            baseline_archive = dl_manager.download(baseline_archive_url)
            baseline_file_lists.append(baseline_file_list)
            baseline_archives.append(baseline_archive)

        # Incrementals: some of their articles are already in the main parts (updates?).
        # Need to find a way to add them to the dataset without duplicating the articles.
        # Also, adding them means that each new day the dataset is loaded, the whole
        # dataset is recreated.
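        # Illustrative only (hypothetical example; the "oa_comm_xml." basename and the
        # existence of this particular daily file are assumptions): for the "commercial"
        # subset, the increment published the day after the baseline would be fetched from
        #   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/xml/oa_comm_xml.incr.2023-12-19.filelist.csv
        #   https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/xml/oa_comm_xml.incr.2023-12-19.tar.gz
        # Daily increments are not guaranteed to exist, which is why the downloads below
        # are wrapped in try/except.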
        date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
        incremental_dates = [
            (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
            for i in range(date_delta.days)
        ]
        incrementals = [f"incr.{date}" for date in incremental_dates]
        for incremental in incrementals:
            incremental_file_list_url = f"{url}{basename}{incremental}.filelist.csv"
            incremental_archive_url = f"{url}{basename}{incremental}.tar.gz"
            try:
                incremental_file_list = dl_manager.download(incremental_file_list_url)
                incremental_archive = dl_manager.download(incremental_archive_url)
            except FileNotFoundError:
                # Some increments might not exist
                continue
            incremental_paths["incremental_file_lists"].append(incremental_file_list)
            incremental_paths["incremental_archives"].append(incremental_archive)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "baseline_package_list": baseline_package_list,
                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
                    "incremental_archives": [
                        dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]
                    ],
                },
            ),
        ]

    def _generate_examples(
        self, baseline_file_lists, baseline_archives, baseline_package_list, incremental_file_lists, incremental_archives
    ):
        # Load the file listing the folders of the individual PMC article packages (with media and graphics)
        oa_package_list = pd.read_csv(baseline_package_list, index_col="Accession ID")
        oa_package_list = oa_package_list[["File"]]
        oa_package_list.sort_index(inplace=True)
        processed_ids = set()

        # Incrementals
        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists[::-1], incremental_archives[::-1]):
                try:
                    incrementals = pd.read_csv(incremental_file_list, index_col="AccessionID")
                except FileNotFoundError:
                    # File not found can happen here in streaming mode
                    continue
                incrementals = incrementals.join(oa_package_list).reset_index().set_index("Article File")
                incrementals.File = incrementals.File.fillna("")
                incrementals = incrementals.to_dict(orient="index")

                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    pmcid = data["AccessionID"]
                    if pmcid in processed_ids:  # oa_package_list.loc[pmcid, "yet_processed"]:
                        continue
                    content = file.read()
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:
                        text = content.decode("latin-1").strip()
                    text = clean_raw(text)
                    try:
                        article_tree = etree.ElementTree(etree.fromstring(text))
                    except etree.XMLSyntaxError:
                        # In some files, the XML is broken
                        continue
                    content_d = construct_datadict(article_tree)
                    data = {
                        "introduction": "\n".join(content_d["introduction"]),
                        "methods": "\n".join(content_d["methods"]),
                        "results": "\n".join(content_d["results"]),
                        "discussion": "\n".join(content_d["discussion"]),
                        "conclusion": "\n".join(content_d["conclusion"]),
                        "front": "\n".join(content_d["front"]),
                        "body": "\n".join(content_d["body"]),
                        "back": "\n".join(content_d["back"]),
                        "pmid": data["PMID"],
                        "accession_id": pmcid,
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                        "package_file": data["File"],
                    }
                    processed_ids.add(pmcid)
                    yield pmcid, data
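        # At this point the incrementals, walked newest-first, have filled processed_ids
        # with the PMCIDs whose most recent version was already yielded, so the baseline
        # pass below skips them. For illustration (hypothetical values), each filelist
        # row joined with oa_package_list becomes an entry keyed by the archive member
        # path, e.g.:
        #   "PMC0xxxxxxx.xml" -> {"AccessionID": "PMC0xxxxxxx", "PMID": ..., "License": "CC BY",
        #                         ..., "File": "oa_package/xx/xx/PMC0xxxxxxx.tar.gz"}
        # which is why metadata can be popped directly from the member path while
        # iterating the tar archive.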
        # Baselines
        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            # try:
            baselines = pd.read_csv(baseline_file_list, index_col="AccessionID")
            baselines = baselines.join(oa_package_list).reset_index().set_index("Article File")
            baselines.File = baselines.File.fillna("")
            baselines = baselines.to_dict(orient="index")

            for path, file in baseline_archive:
                data = baselines.pop(path)
                pmcid = data["AccessionID"]
                if pmcid in processed_ids:
                    continue
                content = file.read()
                try:
                    text = content.decode("utf-8").strip()
                except UnicodeDecodeError:
                    text = content.decode("latin-1").strip()
                text = clean_raw(text)
                try:
                    article_tree = etree.ElementTree(etree.fromstring(text))
                except etree.XMLSyntaxError:
                    # In some files, the XML is broken
                    continue
                content_d = construct_datadict(article_tree)
                data = {
                    "introduction": "\n".join(content_d["introduction"]),
                    "methods": "\n".join(content_d["methods"]),
                    "results": "\n".join(content_d["results"]),
                    "discussion": "\n".join(content_d["discussion"]),
                    "conclusion": "\n".join(content_d["conclusion"]),
                    "front": "\n".join(content_d["front"]),
                    "body": "\n".join(content_d["body"]),
                    "back": "\n".join(content_d["back"]),
                    "pmid": data["PMID"],
                    "accession_id": pmcid,
                    "license": data["License"],
                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                    "retracted": data["Retracted"],
                    "citation": data["Article Citation"],
                    "package_file": data["File"],
                }
                processed_ids.add(pmcid)
                yield pmcid, data
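

if __name__ == "__main__":
    # Minimal usage sketch (assumptions: this file is used as a local loading script and
    # the builder defines a "commercial" configuration matching _SUBSETS; the config
    # name is illustrative and not confirmed by the elided class definition above).
    from datasets import load_dataset

    ds = load_dataset(__file__, "commercial", split="train", streaming=True)
    example = next(iter(ds))
    print(example["accession_id"], example["last_updated"])
    print(example["introduction"][:300])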