from datasets import Dataset, DatasetDict
from spacy.tokens import DocBin
import spacy


def convert_spacy_docs_to_hf_entry(doc) -> dict:
    """Serialize a spaCy ``Doc`` into a JSON-compatible dict.

    The dict produced by ``Doc.to_json()`` is directly consumable by
    ``datasets.Dataset.from_list``.
    """
    entry: dict = doc.to_json()
    return entry

# Directory containing one serialized DocBin per split (train/dev/test).
data_dir = "/Users/au561649/Github/DaCy/training/dane_plus"

# A blank Danish pipeline supplies the Vocab required to deserialize docs.
nlp = spacy.blank("da")

# Build each split the same way instead of repeating the load/convert code
# three times: read the DocBin, stream its docs, convert each to a dict.
datasets_by_split: dict = {}
for split in ("train", "dev", "test"):
    doc_bin = DocBin().from_disk(f"{data_dir}/{split}.spacy")
    datasets_by_split[split] = Dataset.from_list(
        [convert_spacy_docs_to_hf_entry(doc) for doc in doc_bin.get_docs(nlp.vocab)]
    )

dataset_dict = DatasetDict(datasets_by_split)

# NOTE(review): requires a logged-in Hugging Face account/token; pushes to
# the caller's namespace under the repo name "dane_plus".
dataset_dict.push_to_hub("dane_plus")