Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License:
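The tags above describe the converted corpus: Danish text shipped as parquet for language modeling, produced by the conversion script below. A minimal sketch of loading such a parquet-backed dataset with the datasets library; the repository id is a placeholder, not taken from this page:

from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset repo.
ds = load_dataset("org/danish-dataset", split="train", streaming=True)
for example in ds.take(3):  # peek at a few documents
    print(example["text"][:80])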
"""download lexdk from alexandrainst/lexdk-open"""
from datetime import datetime
from pathlib import Path
from typing import cast
import pandas as pd
from datasets import Dataset, load_dataset
column_order = [
"text",
"source",
"id",
"added",
"created",
"license",
"domain",
"metadata",
]
def convert_sample(example: dict) -> dict:
# from sample:
# {
# "url": "https://denstoredanske.lex.dk/Kullmanns_M%C3%B8lle",
# "title": "Kullmanns Mølle",
# "clarification": "",
# "authors": ["https://brugere.lex.dk/6929"],
# "date": "2021-01-20T13:23:20+01:00",
# "license": "fri anvendelse",
# "text": "Kullmanns Mølle er en mølle i Gudhjem, opkaldt efter Matts Kullmann, der byggede møllen i 1893 til sin søn, Christian Kullmann, se Gudhjem Mølle.",
# }
date = datetime.fromisoformat(example["date"])
text = f"{example["title"]}\n\npubliceret: {date}\n{example["text"]}"
new_example = dict(
text_new=text,
id=example["url"],
source="lexdk",
domain="Conversation",
license="cc-by-sa-4.0",
added="2025-01-04",
created=f"{date.date()}, {date.date()}",
metadata={"source-pretty": "Lex.dk"},
)
return new_example
def main():
ds = load_dataset("alexandrainst/lexdk-open", split="train")
ds = cast(Dataset, ds)
dates = [datetime.fromisoformat(date).date() for date in ds["date"]]
print(str(min(dates)), ",", str(max(dates))) # 2009-01-28, 2023-09-05
assert len(set(ds["url"])) == len(ds)
ds = ds.map(convert_sample, num_proc=4)
ds = ds.select_columns(column_order[1:] + ["text_new"])
ds = ds.rename_columns({"text_new": "text"})
# ensure order
ds = ds.select_columns(column_order)
df = ds.to_pandas()
df = cast(pd.DataFrame, df)
dedup_df = df.drop_duplicates(keep="first", subset=["text"])
print("N. duplicates: ", df.shape[0] - dedup_df.shape[0]) # 0
ds = ds.select(dedup_df.index)
assert len(set(ds["text"])) == len(ds)
save_path = Path(__file__).parent / "lexdk.parquet"
ds.to_parquet(save_path)
if __name__ == "__main__":
main()
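A quick sanity check on the output is to read the parquet file back and inspect the schema; a minimal sketch, assuming lexdk.parquet was written next to the script as in main() above:

import pandas as pd

df = pd.read_parquet("lexdk.parquet")
print(df.columns.tolist())
# expected: ['text', 'source', 'id', 'added', 'created', 'license', 'domain', 'metadata']
print(df.loc[0, "text"].split("\n")[0])  # the first line of each document is the article title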