parquet-converter committed on
Commit
ddb2cc4
1 Parent(s): acbcd60

Update parquet files

Browse files
Persian-conversational-dataset.py DELETED
@@ -1,100 +0,0 @@
1
# coding=utf-8
# persian-conversational-dataset
"""TODO(empathetic_dialogues): Add a description here."""


import csv
import json

import datasets
from datasets.tasks import QuestionAnsweringExtractive


# Module-level logger, following the datasets library convention.
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
persian-conversational-dataset
"""

# NOTE(review): _URL points at the repo's HTML "blob/" view, not "resolve/";
# it appears unused below (download() is given the bare filenames) — confirm
# before relying on it for direct downloads.
_URL = "https://huggingface.co/datasets/Kamtera/Persian-conversational-dataset/blob/main/"

# JSON shards of the corpus; the first is used as the test split, the rest as train.
_URLS = [
    "dadrah_dataset.json",
    "dadrah_dataset1-1000_10000.json",
    "dadrah_dataset1-10000_100000.json",
    "dadrah_dataset1-100000_276342.json",
]
27
-
28
class persianConversation(datasets.GeneratorBasedBuilder):
    """Builder for the Persian conversational (dadrah) question-answer corpus.

    Each example carries a title, a question, a sequence of answers and a
    sequence of keywords, loaded from the JSON shards listed in ``_URLS``.
    """

    # VERSION = datasets.Version("0.1.0")

    def _info(self):
        """Return the ``DatasetInfo`` describing the feature schema."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(datasets.Value("string")),
                    "keywords": datasets.Sequence(datasets.Value("string")),
                }
            ),
            # No canonical (input, target) pair is declared for as_supervised=True.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the JSON shards and define the splits.

        The first file in ``_URLS`` becomes the test split; the remaining
        three files together form the train split.
        """
        downloaded_files = dl_manager.download(_URLS)
        logger.info("| > downloaded files")
        logger.info(downloaded_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "files": downloaded_files[1:],
                    "split_file": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "files": downloaded_files[0:1],
                    "split_file": "test"
                },
            ),
        ]

    def _generate_examples(self, files, split_file):
        """Yield ``(key, example)`` pairs for every row in every file.

        BUG FIX: the original enumerated each file independently from 0, so
        example keys collided whenever a split spanned more than one file
        (the train split uses three files), which the datasets library
        rejects as duplicate keys. A single running counter keeps keys
        unique across all files of the split.
        """
        logger.info("| > generate examples for " + split_file)
        logger.info(files)
        key = 0  # running key, unique across all files in this split
        for path in files:
            with open(path, 'r', encoding='utf-8') as fmm:
                data = json.load(fmm)
            for row in data:
                # Each row is a list: [title, question, answers, keywords].
                title = row[0]
                question = row[1]
                answers = row[2]
                keywords = row[3]
                yield key, {
                    "title": title,
                    "question": question,
                    "answers": answers,
                    "keywords": keywords,
                }
                key += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- license: apache-2.0
3
- task_categories:
4
- - conversational
5
- - text-generation
6
- language:
7
- - fa
8
- pretty_name: persianConversation
9
- ---
10
- persianConversation
 
 
 
 
 
 
 
 
 
 
 
dadrah_dataset.json DELETED
The diff for this file is too large to render. See raw diff
 
dadrah_dataset1-100000_276342.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1899baa9c9ec851411892b866c11f58437cc624d82d96cb2fcc94ff2358bbbed
3
- size 530182133
 
 
 
 
dadrah_dataset1-1000_10000.json → default/persian-conversational-dataset-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7f5e92f0b08f5a07357f9ac2d697e438b5a2d9dd57f3e4c744f0422eb01e52be
3
- size 29072550
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0d7ed11dc7185835edf23ea2d8370a76a0da73bcd311649a073795285e5da11
3
+ size 755507
dadrah_dataset1-10000_100000.json → default/persian-conversational-dataset-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8bdee94d5c8d8f0d95ee796a86fc1ce070435ad2394028d319072fd2ad78b2eb
3
- size 281292004
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:813c9d35fe7987fb5aef5e2234ed0572e5ed98ddc3a12f91252df616f24a239c
3
+ size 143700175