---
dataset_info:
  features:
  - name: article
    dtype: string
  - name: summary
    dtype: string
  - name: article_len_approx
    dtype: int64
  - name: summary_len_approx
    dtype: int64
  - name: orig_idx
    dtype: int64
  splits:
  - name: train
    num_bytes: 992678294
    num_examples: 3783821
  - name: validation
    num_bytes: 49616640
    num_examples: 188811
  - name: test
    num_bytes: 468746
    num_examples: 1822
  download_size: 567157084
  dataset_size: 1042763680
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
task_categories:
- summarization
language:
- en
pretty_name: Gigatrue
size_categories:
- 1M<n<10M
---
Gigatrue is an abstractive summarisation dataset. It is a cleaned version of https://huggingface.co/datasets/Harvard/gigaword with the following changes:

- Generated number values were added.
- Truecasing was applied using https://github.com/daltonfury42/truecase (see the sketch after this list).
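
A minimal sketch of the truecasing step with the `truecase` package, assuming its default built-in English model; the input sentence is illustrative, not taken from the dataset:

```python
# pip install truecase
import truecase

# Illustrative lowercased sentence (not an actual Gigatrue example).
lowercased = "japan 's nikkei average rose slightly on monday ."

# get_true_case restores capitalisation using the package's
# built-in statistical English model.
print(truecase.get_true_case(lowercased))
```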
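
Below is a minimal loading sketch using the 🤗 `datasets` library. The repository id is a placeholder, since the card does not state the dataset's actual path:

```python
from datasets import load_dataset

# Placeholder repository id; substitute this dataset's actual HF path.
ds = load_dataset("your-org/gigatrue")

print(ds)  # DatasetDict with train / validation / test splits

example = ds["train"][0]
print(example["article"], "->", example["summary"])

# The approximate-length columns support cheap filtering without
# re-tokenising, e.g. keeping only short articles:
short = ds["train"].filter(lambda ex: ex["article_len_approx"] <= 128)
```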