Zaid committed · Commit 8697c9a · verified · 1 Parent(s): d2d59a4

Update masader.py

This commit swaps the data source: instead of parsing the catalogue out of a Google Sheets CSV export with pandas, the loading script now downloads the Masader GitHub repository archive and reads one JSON file per dataset entry.

Files changed (1):
  1. masader.py (+20, -67)
masader.py CHANGED
@@ -1,25 +1,7 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Arabic Poetry Metric dataset."""
-
-
-import os
 import datasets
-import pandas as pd
+from glob import glob
+import json
+import zipfile
 
 _DESCRIPTION = """\
 Masader is the largest public catalogue for Arabic NLP datasets, which consists of more than 200 datasets annotated with 25 attributes.
@@ -42,7 +24,6 @@ class MasaderConfig(datasets.BuilderConfig):
 
     def __init__(self, **kwargs):
         """BuilderConfig for MetRec.
-
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
@@ -106,51 +87,23 @@ class Masader(datasets.GeneratorBasedBuilder):
             supervised_keys=None,
             homepage="https://github.com/arbml/Masader",
             citation=_CITATION,)
 
-    def _split_generators(self, dl_manager):
-        sheet_id = "1YO-Vl4DO-lnp8sQpFlcX1cDtzxFoVkCmU1PVw_ZHJDg"
-        sheet_name = "filtered_clean"
-        url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={sheet_name}"
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"url": url}
-            ),
-        ]
-
-    def _generate_examples(self, url):
-        """Generate examples."""
-        # For labeled examples, extract the label from the path.
-
-        df = pd.read_csv(url, usecols=range(35))
-        df.columns.values[0] = "No."
-        df.columns.values[1] = "Name"
-        subsets = {}
-        entry_list = []
-        i = 0
-        idx = 0
-
-        while i < len(df.values):
-            if i < len(df.values) - 1:
-                next_entry = df.values[i+1]
-            else:
-                next_entry = []
-
-            curr_entry = df.values[i]
-            i += 1
-
-            if str(curr_entry[0]) != "nan":
-                entry_list = curr_entry
-                subsets = []
-
-            if len(next_entry) > 0 and str(next_entry[0]) == "nan":
-                subsets.append({'Name': next_entry[2], 'Dialect': next_entry[8], 'Volume': next_entry[13], 'Unit': next_entry[14]})
-                continue
-
-            idx += 1
-            masader_entry = {col: entry_list[j+1] for j, col in enumerate(df.columns[1:]) if j != 1}
-            masader_entry['Year'] = int(entry_list[6])
-            masader_entry['Subsets'] = subsets
-            yield idx, masader_entry
+    def extract_all(self, dir):
+        zip_files = glob(dir + '/**/**.zip', recursive=True)
+        for file in zip_files:
+            with zipfile.ZipFile(file) as item:
+                item.extractall('/'.join(file.split('/')[:-1]))
+
+    def _split_generators(self, dl_manager):
+        url = ['https://github.com/ARBML/masader/archive/main.zip']
+        downloaded_files = dl_manager.download_and_extract(url)
+        self.extract_all(downloaded_files[0])
+        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': {'inputs': sorted(glob(downloaded_files[0] + '/masader-main/datasets/**.json'))}})]
+
+    def _generate_examples(self, filepaths):
+        for idx, filepath in enumerate(filepaths['inputs']):
+            with open(filepath, 'r') as f:
+                data = json.load(f)
+                yield idx, data
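The old loader parsed a Google Sheets CSV export in which a row with an empty "No." column denoted a subset of the preceding entry, which forced the stateful row-merging loop deleted above. The new loader avoids this by reading one pre-built JSON file per dataset from the GitHub archive. A rough standalone sketch of the same flow using only the standard library (the scratch directory name is illustrative, not part of the script):

# Standalone sketch of the new loading path, outside the datasets builder.
# The URL and the masader-main/datasets layout come from the diff above;
# "masader_tmp" is just an illustrative scratch directory.
import io
import json
import urllib.request
import zipfile
from glob import glob

url = "https://github.com/ARBML/masader/archive/main.zip"
with urllib.request.urlopen(url) as resp:
    zipfile.ZipFile(io.BytesIO(resp.read())).extractall("masader_tmp")

# One JSON file per catalogue entry, mirroring _generate_examples above.
for path in sorted(glob("masader_tmp/masader-main/datasets/*.json")):
    with open(path) as f:
        entry = json.load(f)  # dict of the annotated attributes for one dataset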
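With this script at the root of the dataset repo, consumers load the catalogue the usual way. A minimal usage sketch (the arbml/masader Hub id is an assumption inferred from the homepage URL, and recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# "arbml/masader" is the assumed repo id; adjust to wherever this script is hosted.
masader = load_dataset("arbml/masader", split="train")
print(len(masader))  # one example per catalogued dataset
print(masader[0])    # dict of annotated attributes, including any Subsets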