Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
from datasets import DatasetDict, load_dataset


def construct_hf_dataset(repo_name: str):
    """Construct a HF DatasetDict class from the HICRIC outcome data."""

    # Load the datasets from JSONL files
    train_jsonl_path = "outcomes/train_backgrounds_suff.jsonl"
    test_jsonl_path = "outcomes/test_backgrounds_suff.jsonl"
    train_dataset = load_dataset("json", data_files=train_jsonl_path, split="train")
    test_dataset = load_dataset("json", data_files=test_jsonl_path, split="train")

    # Create a DatasetDict to combine both splits
    dataset = DatasetDict({"train": train_dataset, "test": test_dataset})

    # Push the combined DatasetDict to the Hugging Face Hub as a private repo
    dataset.push_to_hub(repo_name, private=True)

    return None


if __name__ == "__main__":
    construct_hf_dataset("persius/imr-appeals")