Datasets:
dataloader
Browse files
nbnn_language_detection.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
from datasets import DatasetBuilder, DatasetInfo, SplitGenerator
|
2 |
from datasets.features import Features, Value
|
3 |
import json
|
4 |
|
@@ -16,15 +16,17 @@ class MyDataset(DatasetBuilder):
|
|
16 |
)
|
17 |
|
18 |
def _split_generators(self, dl_manager):
|
19 |
-
|
20 |
-
'train': 'train.jsonl',
|
21 |
-
'dev': 'dev.jsonl',
|
22 |
-
'test': 'test.jsonl'
|
23 |
}
|
|
|
|
|
24 |
|
25 |
return [
|
26 |
-
SplitGenerator(name=split, gen_kwargs={'filepath':
|
27 |
-
for split
|
28 |
]
|
29 |
|
30 |
def _generate_examples(self, filepath):
|
|
|
1 |
+
from datasets import DatasetBuilder, DatasetInfo, SplitGenerator
|
2 |
from datasets.features import Features, Value
|
3 |
import json
|
4 |
|
|
|
16 |
)
|
17 |
|
18 |
def _split_generators(self, dl_manager):
    """Download the three split files and build one SplitGenerator per split.

    Args:
        dl_manager: a `datasets` DownloadManager; its `download()` maps the
            URL dict to a dict of local file paths with the same keys.

    Returns:
        A list of `SplitGenerator`s for the train/dev/test splits, each
        passing the local file path to `_generate_examples` via `gen_kwargs`.
    """
    # Remote locations of the split files on the Hugging Face Hub.
    urls = {
        'train': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/train.jsonl',
        'dev': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/dev.jsonl',
        'test': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/test.jsonl',
    }

    # download() preserves the dict structure: same keys, local paths as values.
    downloaded_files = dl_manager.download(urls)

    return [
        SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
        # A dict iterates its keys directly; the explicit .keys() call was redundant.
        for split in urls
    ]
|
31 |
|
32 |
def _generate_examples(self, filepath):
|