import os

import datasets

_DESCRIPTION = """\
Dataset of meeting transcriptions and summaries from the AMI corpus.
"""

_CITATION = """\
@InProceedings{abapolov_2023_ami,
  title  = {AMI for summarization task},
  author = {Filipp Abapolov},
  month  = {February},
  year   = {2023}
}
"""

_REPO = "https://huggingface.co/datasets/pheepa/ami-summary/resolve/main"
_URL = f"{_REPO}/data/ami-summary.tar.gz"


class AmiSummary(datasets.GeneratorBasedBuilder):
    """AmiSummary dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name='ami-summary',
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "summary": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "train_ami.txt")}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, "test_ami.txt")}
            )
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, 'r') as f:
            lines = f.read().split('\n')

        # Each example is stored as two consecutive lines:
        # the meeting transcription followed by its summary.
        pairs = []
        for i in range(0, len(lines[:-1]), 2):
            pairs.append((lines[i], lines[i + 1]))

        for i, (text, summary) in enumerate(pairs):
            yield i, {'text': text, 'summary': summary}
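
# Minimal usage sketch (assumption: this script is published as the
# `pheepa/ami-summary` dataset on the Hugging Face Hub; pass the path to
# this file instead if loading the script locally):
#
#     from datasets import load_dataset
#     ds = load_dataset("pheepa/ami-summary")
#     print(ds["train"][0]["text"][:200])
#     print(ds["train"][0]["summary"])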