""" | |
A simple CLI to updates descriptive statistics on all datasets. | |
Example use: | |
python update_descriptive_statistics.py --dataset wikisource | |
""" | |

import argparse
import json
import logging
import multiprocessing
from dataclasses import dataclass
from pathlib import Path
from textwrap import dedent
from typing import Self, cast

import pandas as pd
import plotnine as pn
from datasets import Dataset, load_dataset
from transformers import AutoTokenizer

from git_utilities import check_is_ancestor, get_current_revision, get_latest_revision
from tests.readme_parsing import get_tag_content, read_frontmatter_and_body, replace_tag

logger = logging.getLogger(__name__)

repo_path = Path(__file__).parent.parent
tokenizer_name = "AI-Sweden-Models/Llama-3-8B-instruct"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=True)


def human_readable_large_int(value: int) -> str:
    """Formats a large integer with a K/M/B suffix, keeping two decimals."""
    thresholds = [
        (1_000_000_000, "B"),
        (1_000_000, "M"),
        (1_000, "K"),
    ]
    for threshold, label in thresholds:
        if value >= threshold:  # >= so that e.g. 1_000_000 becomes "1.00M", not "1000.00K"
            return f"{value / threshold:.2f}{label}"
    return str(value)
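
# For illustration: human_readable_large_int(1_234_567) returns "1.23M",
# human_readable_large_int(1_000) returns "1.00K", and values below 1_000 are
# returned unchanged, e.g. human_readable_large_int(999) -> "999".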


def calculate_average_document_length(
    dataset: Dataset, text_column: str = "text"
) -> float:
    total_characters = sum(len(t) for t in dataset[text_column])
    return total_characters / len(dataset)
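
# For example, a dataset whose "text" column holds ["abc", "abcde"] has an
# average document length of (3 + 5) / 2 = 4.0 characters.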


def _count_tokens(batch, text_column: str = "text"):
    return {
        "token_count": [
            len(tokens)
            for tokens in tokenizer(batch[text_column], padding=False)["input_ids"]  # type: ignore
        ]
    }


def calculate_number_of_tokens(
    dataset: Dataset,
    text_column: str = "text",
) -> int:
    token_counts = dataset.map(
        _count_tokens,
        batched=True,
        batch_size=1000,
        num_proc=multiprocessing.cpu_count(),
        fn_kwargs={"text_column": text_column},  # pass the column through, so the argument takes effect
    )
    return sum(token_counts["token_count"])
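
# Tokenization is the expensive step here, so it runs as a batched map across
# all available CPU cores; the per-document counts are summed afterwards.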


@dataclass
class DescriptiveStatsOverview:
    number_of_samples: int
    average_document_length: float
    number_of_tokens: int
    language: str = "dan, dansk, Danish"

    @classmethod
    def from_dataset(cls, dataset: Dataset) -> Self:
        return cls(
            number_of_samples=len(dataset),
            average_document_length=calculate_average_document_length(dataset),
            number_of_tokens=calculate_number_of_tokens(dataset),
        )

    def to_markdown(self) -> str:
        return dedent(f"""
        - **Language**: {self.language}
        - **Number of samples**: {human_readable_large_int(self.number_of_samples)}
        - **Number of tokens (Llama 3)**: {human_readable_large_int(self.number_of_tokens)}
        - **Average document length (characters)**: {self.average_document_length:.2f}
        """)

    def add_to_markdown(self, markdown: str | Path) -> str:
        return replace_tag(
            markdown=markdown, package=self.to_markdown(), tag="DESC-STATS"
        )

    def to_disk(self, path: Path) -> None:
        # TODO: instead write this to the yaml header (and revision should not be added here)
        data = dict(self.__dict__)  # copy, so the revision key is not added to the instance
        data["revision"] = get_current_revision()
        with path.with_suffix(".json").open("w") as f:
            json.dump(data, f)

    @classmethod
    def from_disk(cls, path: Path) -> Self:
        with path.open("r") as f:
            data = json.load(f)
        data.pop("revision", None)  # stored alongside the stats, but not a dataclass field
        return cls(**data)
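
# Typical round trip (sketch, assuming `ds` is a loaded split of one dataset):
#
#     stats = DescriptiveStatsOverview.from_dataset(ds)
#     stats.to_disk(dataset_path / "descriptive_stats.json")
#     stats = DescriptiveStatsOverview.from_disk(dataset_path / "descriptive_stats.json")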


sample_template = """
```py
{sample}
```

### Data Fields

An entry in the dataset consists of the following fields:

- `text` (`str`): The content of the document.
- `source` (`str`): The source of the document (see [Source Data](#source-data)).
- `id` (`str`): A unique identifier for each document.
- `added` (`str`): The date the document was added to this collection.
- `created` (`str`): The date range for when the document was originally created.
- `license` (`str`): The license of the document. The licenses vary according to the source.
- `domain` (`str`): The domain of the source.
- `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
- `metadata/*`: Potentially additional metadata.
"""


def add_sample(markdown_path: Path, dataset: Dataset, max_str_len: int = 100):
    logger.info("Adding dataset sample to readme")
    sample = dataset[0]
    for k in sample:
        # truncate long string values so the readme sample stays readable
        if isinstance(sample[k], str) and len(sample[k]) > max_str_len:
            sample[k] = sample[k][:max_str_len] + "[...]"
    json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
    sample_str = sample_template.format(sample=json_sample)
    replace_tag(markdown=markdown_path, package=sample_str, tag="SAMPLE")
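
# Note: replace_tag is expected to swap the content between the SAMPLE tag
# markers in the readme, so the markers must already be present in the file.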


DATASET_PLOTS_template = """
<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
"""


def add_descriptive_statistics_plots(
    markdown_path: Path,
    dataset: Dataset,
):
    logger.info("Adding descriptive statistics plot to readme.")
    lengths = [len(s) for s in dataset["text"]]
    df = pd.DataFrame({"lengths": lengths, "Source": dataset["source"]})
    plot = (
        pn.ggplot(df, pn.aes(x="lengths", y=pn.after_stat("count")))
        + pn.geom_histogram(bins=100)
        + pn.labs(
            x="Document Length (Characters)",
            y="Count",
            title="Distribution of Document Lengths",
        )
        + pn.theme_minimal()
        + pn.facet_wrap("Source", scales="free", ncol=3)
    )
    img_path = markdown_path.parent / "images"
    img_path.mkdir(parents=False, exist_ok=True)
    pn.ggsave(
        plot,
        img_path / "dist_document_length.png",
        dpi=500,
        width=10,
        height=10,
        units="in",
        verbose=False,
    )
    replace_tag(
        markdown=markdown_path, package=DATASET_PLOTS_template, tag="DATASET PLOTS"
    )
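
# The histogram is saved to ./images/dist_document_length.png relative to the
# dataset readme, which is exactly the path referenced by DATASET_PLOTS_template.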


def add_desc_statistics(
    markdown_path: Path,
    dataset: Dataset,
    desc_stats_path: Path,
) -> None:
    logger.info("Adding descriptive statistics to readme.")
    desc_stats = DescriptiveStatsOverview.from_dataset(dataset)
    desc_stats.to_disk(desc_stats_path)
    desc_stats.add_to_markdown(markdown_path)


def update_dataset(
    dataset_path: Path,
    name: str,
    readme_name: None | str = None,
    force: bool = False,
) -> None:
    rev = get_latest_revision(dataset_path)
    desc_stats_path = dataset_path / "descriptive_stats.json"
    if desc_stats_path.exists() and not force:
        with desc_stats_path.open("r") as f:
            last_update = json.load(f).get("revision", None)
        if last_update is None:
            logger.warning(f"revision is not defined in {desc_stats_path}.")
        elif check_is_ancestor(ancestor_rev=last_update, rev=rev):
            logger.info(
                f"Descriptive statistics for '{name}' are already up to date, skipping."
            )
            return

    readme_name = f"{name}.md" if readme_name is None else readme_name
    markdown_path = dataset_path / readme_name

    logger.info(f"Updating dataset: {name}")
    ds = load_dataset(str(repo_path), name, split="train")
    ds = cast(Dataset, ds)
    add_desc_statistics(markdown_path, ds, desc_stats_path)
    add_sample(markdown_path, ds)
    add_descriptive_statistics_plots(markdown_path, ds)
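
# Example: update_dataset(repo_path / "data" / "wikisource", "wikisource")
# recomputes the statistics only when the stored revision is missing or not an
# ancestor of the latest revision of the dataset folder (or when force=True).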


def create_parser():
    parser = argparse.ArgumentParser(
        description="Calculate descriptive statistics of the datasets in the data folder"
    )
    parser.add_argument(
        "--dataset",
        default=None,
        type=str,
        help="Specify a single dataset to compute the statistics for. By default all datasets are updated.",
    )
    parser.add_argument(
        "--logging_level",
        default=20,
        type=int,
        help="Sets the logging level. Defaults to 20 (INFO); other reasonable levels are 10 (DEBUG) and 30 (WARNING).",
    )
    parser.add_argument(
        "--force",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Force the statistics to be recomputed. By default recomputation is skipped if the stored revision is up to date.",
    )
    parser.add_argument(
        "--repo_path",
        default=str(repo_path),
        type=str,
        help="The repository for which to calculate the descriptive statistics.",
    )
    return parser
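
# Example invocations:
#
#     python update_descriptive_statistics.py                       # update all datasets
#     python update_descriptive_statistics.py --dataset wikisource  # update a single dataset
#     python update_descriptive_statistics.py --force               # recompute even if up to date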


def create_main_table(repo_path: Path = repo_path) -> tuple[pd.DataFrame, str, str]:
    frontmatter, _ = read_frontmatter_and_body(repo_path / "README.md")
    datasets = [
        cfg["config_name"]
        for cfg in frontmatter["configs"]
        if cfg["config_name"] != "default"
    ]

    table = {
        "Source": [],
        "Description": [],
        # "Domain": [],  # TODO: add domain
        "N. Tokens": [],
        "License": [],
    }
    readme_references = ""
    license_references = (
        "[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en\n"
        + "[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en\n"
    )
    for dataset in datasets:
        dataset_path = repo_path / "data" / dataset
        readme_path = dataset_path / f"{dataset_path.name}.md"
        frontmatter, body = read_frontmatter_and_body(readme_path)
        desc_stats = DescriptiveStatsOverview.from_disk(
            dataset_path / "descriptive_stats.json"
        )
        short_description = (
            get_tag_content(body, tag="SHORT DESCRIPTION").strip().removesuffix(".")
        )  # drop the trailing period, if any
        license, license_name = frontmatter["license"], frontmatter["license_name"]

        table["Source"] += [f"[{dataset_path.name}]"]
        readme_references += (
            f"[{dataset_path.name}]: data/{dataset_path.name}/{dataset_path.name}.md\n"
        )
        table["License"] += [f"[{license_name}]"]
        if license == "other":
            license_references += f"[{license_name}]: ./data/{dataset_path.name}/{dataset_path.name}.md#license-information\n"
        table["Description"] += [short_description]
        table["N. Tokens"] += [desc_stats.number_of_tokens]

    # total row
    table["Source"] += ["**Total**"]
    # table["Domain"] += [""]
    table["License"] += [""]
    table["Description"] += [""]
    table["N. Tokens"] += [sum(table["N. Tokens"])]

    df = pd.DataFrame.from_dict(table)
    df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)
    return df, readme_references, license_references
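
# create_main_table returns three pieces: the overview table itself and two
# markdown reference blocks (per-dataset readme links and license links) that
# update_main_table places directly below the rendered table.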


def update_main_table(repo_path: Path = repo_path) -> None:
    logger.info("Updating MAIN TABLE")
    main_table, readme_references, license_references = create_main_table(repo_path)
    readme_path = repo_path / "README.md"
    with readme_path.open("r") as f:
        markdown = f.read()
    package = f"{main_table.to_markdown(index=False)}\n\n{readme_references}\n\n{license_references}\n\n"
    markdown = replace_tag(markdown, package=package, tag="MAIN TABLE")
    with readme_path.open("w") as f:
        f.write(markdown)


def main(
    dataset: str | None = None,
    logging_level: int = 20,
    force: bool = False,
    repo_path: Path = repo_path,
) -> None:
    logging.basicConfig(level=logging_level)

    if dataset and dataset != "default":
        dataset_path = repo_path / "data" / dataset
        update_dataset(dataset_path, dataset_path.name, force=force)
        return

    if dataset is None:
        datasets = (repo_path / "data").glob("*")
        for dataset_path in datasets:
            if not dataset_path.is_dir():  # skip stray files in the data folder
                continue
            update_dataset(dataset_path, dataset_path.name, force=force)

    if dataset is None or dataset == "default":
        update_dataset(repo_path, "default", "README.md", force=force)
        update_main_table(repo_path)


if __name__ == "__main__":
    parser = create_parser()
    args = parser.parse_args()
    main(
        args.dataset,
        logging_level=args.logging_level,
        force=args.force,
        repo_path=Path(args.repo_path),
    )