# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json

import datasets
# Citation for the PAIR corpus (Duan et al., 2024).
_CITATION = """\
@article{duan2024boosting,
  title={Boosting the Predictive Power of Protein Representations with a Corpus of Text Annotations},
  author={Duan, Haonan and Skreta, Marta and Cotta, Leonardo and Rajaonson, Ella Miray and Dhawan, Nikita and Aspuru-Guzik, Alán and Maddison, Chris J},
  journal={bioRxiv},
  pages={2024--07},
  year={2024},
  publisher={Cold Spring Harbor Laboratory}
}
"""
_DESCRIPTION = """\
PAIR pairs protein sequences with text annotations (protein names, functional
descriptions, and EC numbers), introduced in "Boosting the Predictive Power of
Protein Representations with a Corpus of Text Annotations" (Duan et al., 2024).
"""
_HOMEPAGE = "https://huggingface.co/datasets/mskrt/PAIR"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The HuggingFace Datasets library doesn't host the datasets but only points to
# the original files (see `_split_generators` below).
_URLS = {
    "test": "https://huggingface.co/datasets/mskrt/PAIR/raw/main/test.json",
}
# Feature type for each supported annotation field; "EC" is a list of strings
# because a protein can carry multiple EC numbers.
annotation2type = {
    "names": datasets.Value("string"),
    "function": datasets.Value("string"),
    "EC": datasets.Sequence(datasets.Value("string")),
}
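# The data files are expected to follow the layout sketched below; this is
# inferred from the parsing logic in `_generate_examples`, and the pid/seq
# values shown are purely illustrative:
#
# {
#     "function": [
#         {"pid": "...", "seq": "MKTAY...", "content": ["Catalyzes ..."]},
#         ...
#     ],
#     "names": [...],
#     "EC": [...]
# }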
class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig that carries the requested annotation type."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Parameters
        ----------
        annotation_type : str, optional
            Annotation field to load: "names", "function" (default), or "EC".
        """
        # Pop the custom kwarg before BuilderConfig.__init__ sees it.
        self.annotation_type = kwargs.pop("annotation_type", "function")
        super().__init__(**kwargs)
class PAIRDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for the PAIR protein-annotation corpus."""

    BUILDER_CONFIGS = [
        CustomConfig(
            name="custom_config",
            version="1.0.0",
            description="PAIR: protein sequences paired with text annotations",
        ),
    ]
    BUILDER_CONFIG_CLASS = CustomConfig  # Needed so `annotation_type` reaches CustomConfig.
    def _info(self):
        """Declare the dataset schema for the selected annotation type."""
        # CustomConfig pops `annotation_type` (defaulting to "function"), so
        # read it back from the config rather than from raw config_kwargs,
        # which may not contain the key at all.
        self.annotation_type = self.config.annotation_type
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    self.annotation_type: annotation2type[self.annotation_type],
                    "sequence": datasets.Value("string"),
                    "pid": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Download the data files and declare the available splits.

        Parameters
        ----------
        dl_manager : datasets.DownloadManager
            Manager used to fetch the data files.
        """
        # Only a test split is listed in _URLS; a train split can be added here
        # once a train.json is published alongside it.
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]
    def _generate_examples(self, filepath):
        """Yield (key, example) pairs for the selected annotation type.

        Parameters
        ----------
        filepath : str
            Path to a JSON file keyed by annotation type.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        counter = 0
        for annotation_type, samples in data.items():
            # Only emit examples for the annotation type this config selects.
            if annotation_type != self.annotation_type:
                continue
            for elem in samples:
                # Skip records with no annotation content.
                if elem["content"] != [None]:
                    content = elem["content"][0]
                    yield counter, {
                        "sequence": elem["seq"],
                        "pid": elem["pid"],
                        annotation_type: content,
                    }
                    counter += 1