system (HF staff) committed on
Commit
4a70b3a
1 Parent(s): 81c888a

Update files from the datasets library (from 1.8.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.8.0

Files changed (3)
  1. README.md +21 -3
  2. arcd.py +6 -0
  3. dataset_infos.json +1 -1
README.md CHANGED
@@ -1,4 +1,22 @@
  ---
+ annotations_creators:
+ - crowdsourced
+ language_creators:
+ - crowdsourced
+ languages:
+ - ar-SA
+ licenses:
+ - mit
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - original
+ task_categories:
+ - question-answering
+ task_ids:
+ - extractive-qa
  paperswithcode_id: arcd
  ---

@@ -90,9 +108,9 @@ The data fields are the same among all splits.

  ### Data Splits

- | name |train|validation|
- |----------|----:|---------:|
- |plain_text| 693| 702|
+ | name | train | validation |
+ | ---------- | ----: | ---------: |
+ | plain_text | 693 | 702 |

  ## Dataset Creation

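The updated split table can be checked directly against the loaded dataset. A minimal sketch, assuming datasets>=1.8.0 is installed and the ARCD source files on GitHub are reachable:

from datasets import load_dataset

# ARCD ships a single config, plain_text; load_dataset returns both splits.
arcd = load_dataset("arcd")

# The README table above records 693 train and 702 validation examples.
assert arcd["train"].num_rows == 693
assert arcd["validation"].num_rows == 702
print(arcd)
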
arcd.py CHANGED
@@ -4,6 +4,7 @@
  import json

  import datasets
+ from datasets.tasks import QuestionAnsweringExtractive


  logger = datasets.logging.get_logger(__name__)
@@ -79,6 +80,11 @@ class Arcd(datasets.GeneratorBasedBuilder):
      supervised_keys=None,
      homepage="https://github.com/husseinmozannar/SOQAL/tree/master/data",
      citation=_CITATION,
+     task_templates=[
+         QuestionAnsweringExtractive(
+             question_column="question", context_column="context", answers_column="answers"
+         )
+     ],
  )

  def _split_generators(self, dl_manager):
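The new task_templates entry is what lets users cast ARCD into a uniform extractive-QA schema. A minimal sketch of how the template is meant to be consumed, assuming the prepare_for_task API introduced alongside task templates in datasets 1.8.0; the task name string matches the one recorded in dataset_infos.json below:

from datasets import load_dataset

train = load_dataset("arcd", split="train")

# Cast with the registered extractive-QA template; columns not named by the
# template (here: id, title) are dropped, leaving question/context/answers.
qa_train = train.prepare_for_task("question-answering-extractive")
print(qa_train.column_names)
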
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"plain_text": {"description": " Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles.\n", "citation": "@inproceedings{mozannar-etal-2019-neural,\n title = \"Neural {A}rabic Question Answering\",\n author = \"Mozannar, Hussein and\n Maamary, Elie and\n El Hajal, Karl and\n Hajj, Hazem\",\n booktitle = \"Proceedings of the Fourth Arabic Natural Language Processing Workshop\",\n month = aug,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W19-4612\",\n doi = \"10.18653/v1/W19-4612\",\n pages = \"108--118\",\n abstract = \"This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.\",\n}\n", "homepage": "https://github.com/husseinmozannar/SOQAL/tree/master/data", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "arcd", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 811934, "num_examples": 693, "dataset_name": "arcd"}, "validation": {"name": "validation", "num_bytes": 886528, "num_examples": 702, "dataset_name": "arcd"}}, "download_checksums": {"https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-train.json": {"num_bytes": 939840, "checksum": "6a973fda9f0b066e0547a85a3396e7294fa917e24b6efd7ce430769033a6ce15"}, "https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-test.json": {"num_bytes": 1002559, "checksum": "b4ba4fb4227841bbce71e01b3eaecb33e9f17a08cde1ec91e5bc335da2c75135"}}, "download_size": 1942399, "dataset_size": 1698462, "size_in_bytes": 3640861}}
 
+ {"plain_text": {"description": " Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles.\n", "citation": "@inproceedings{mozannar-etal-2019-neural,\n title = {Neural {A}rabic Question Answering},\n author = {Mozannar, Hussein and Maamary, Elie and El Hajal, Karl and Hajj, Hazem},\n booktitle = {Proceedings of the Fourth Arabic Natural Language Processing Workshop},\n month = {aug},\n year = {2019},\n address = {Florence, Italy},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W19-4612},\n doi = {10.18653/v1/W19-4612},\n pages = {108--118},\n abstract = {This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.}\n}\n", "homepage": "https://github.com/husseinmozannar/SOQAL/tree/master/data", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "question-answering-extractive", "question_column": "question", "context_column": "context", "answers_column": "answers"}], "builder_name": "arcd", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 811064, "num_examples": 693, "dataset_name": "arcd"}, "validation": {"name": "validation", "num_bytes": 885648, "num_examples": 702, "dataset_name": "arcd"}}, "download_checksums": {"https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-train.json": {"num_bytes": 939840, "checksum": "6a973fda9f0b066e0547a85a3396e7294fa917e24b6efd7ce430769033a6ce15"}, "https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-test.json": {"num_bytes": 1002559, "checksum": "b4ba4fb4227841bbce71e01b3eaecb33e9f17a08cde1ec91e5bc335da2c75135"}}, "download_size": 1942399, "post_processing_size": null, "dataset_size": 1696712, "size_in_bytes": 3639111}}