ingerid committed
Commit
ae7d5d9
1 Parent(s): a5506cd

fix failed

Files changed (1):
1. nb_samtale.py +37 -39
nb_samtale.py CHANGED
@@ -17,11 +17,15 @@
 
 
 from collections import defaultdict
+from email.mime import audio
 from email.policy import default
 from importlib import metadata
+import io
 import json
 import os
 from re import split
+import tarfile
+from typing import List
 
 from huggingface_hub import hf_hub_url
 import datasets
@@ -61,6 +65,16 @@ _DATA_URL= "https://huggingface.co/datasets/Sprakbanken/nb_samtale/resolve/main/
 # "test": ["test_bm_1.tar.gz", "test_nn_1.tar.gz"],
 #}
 
+
+def normalize_transcription(transcription: str, config="annotations"):
+    """Normalize transcriptions according to orthographic standards, or verbatim."""
+    # TODO: Implement normalization
+    if config == "orthographic":
+        return transcription
+    elif config == "verbatim":
+        return transcription
+    return transcription
+
 class NBSamtaleConfig(datasets.BuilderConfig):
     """BuilderConfig for NBSamtale"""
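Note: as committed, normalize_transcription is a pass-through in every branch, so all three config values return the input unchanged. A minimal sketch of the current contract (the sample transcription string is made up):

    for cfg in ["annotations", "orthographic", "verbatim"]:
        # every branch is still the TODO pass-through
        assert normalize_transcription("eee skal vi sjå", config=cfg) == "eee skal vi sjå"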
 
@@ -134,72 +148,56 @@ class NBSamtale(datasets.GeneratorBasedBuilder):
         )
 
 
-    def _split_generators(self, dl_manager) -> datasets.SplitGenerator:
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         """Download data and extract to datasets.Splits"""
         dl_manager.download_config.ignore_url_params = True
         audio_path = {}
-        local_extracted_archive = {}
-        metadata_path = {}
         split_type = {
             "train": datasets.Split.TRAIN,
             "test": datasets.Split.TEST,
             "dev": datasets.Split.VALIDATION,
         }
         for split in split_type:
-            audio_path[split] = dl_manager.download([f"{_DATA_URL}/{split}_{lang}_1.tar.gz" for lang in ["bm", "nn"]])
-            local_extracted_archive[split] = dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
-            metadata_path[split] = dl_manager.download_and_extract(f"{_DATA_URL}/{split}_metadata.jsonl")
+            #audio_path[split] = dl_manager.download([f"data/{split}_{lang}_1.tar.gz" for lang in ["bm", "nn"]])
+            audio_path[split] = dl_manager.download(f"data/{split}_bm_1.tar.gz")
 
         return [
             datasets.SplitGenerator(
-                name=dsplit,
+                name=split_type[split],
                 gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive[split],
-                    "audio_files": dl_manager.iter_archive(audio_path[split]),
-                    "metadata": metadata_path[split],
+                    "local_extracted_archive": dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None,
+                    "audio_files": dl_manager.iter_archive(audio_path[split]),#[dl_manager.iter_archive(archive) for archive in audio_path[split]],
+                    "metadata": dl_manager.download_and_extract(f"data/{split}_metadata.jsonl"),
                 }
-            ) for split, dsplit in split_type.items()
+            ) for split in split_type
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, local_extracted_archive, audio_files, metadata):
         """Loads the data files and extract the features."""
-        data_fields = list(self._info().features.keys())
+        #data_fields = list(self._info().features.keys())
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         meta = {}
         with open(metadata, encoding="utf-8") as mf:
-            for row in mf.read().splitlines():
+            datalines = mf.read().splitlines()
+            for row in datalines:
                 data = json.loads(row)
                 audio_path = data["file_name"]
-                # if data is incomplete, fill with empty values
                 data["transcription"] = normalize_transcription(data["transcription"], config=self.config.name)
-                for field in data_fields:
-                    if field not in data:
-                        data[field] = ""
-
                 meta[audio_path] = data
 
         id_ = 0
-        for archive in audio_files:
-            for path, audiofile in archive:
-                if path in meta:
-                    result = dict(meta[path])
-                    # set the audio feature and the path to the extracted file
-                    path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
-                    result["audio"] = {"path": path, "bytes": audiofile.read()}
-                    result["path"] = path
-                    yield id_, result
-                    id_ += 1
-                else:
-                    print(f"Missing metadata for {path}")
-
 
-def normalize_transcription(transcription: str, config="annotations"):
-    """Normalize transcriptions according to orthographic standards, or verbatim."""
-    # TODO: Implement normalization
-    if config == "orthographic":
-        return transcription
-    elif config == "verbatim":
-        return transcription
-    return transcription
+        #for archive in audio_files:
+        for path, audio_file in audio_files:
+            if not path in meta:
+                print(f"{path} not in metadata")
+            else:
+                result = dict(meta[path])
+                # set the audio feature and the path to the extracted file
+                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+                result["audio"] = {"path": path, "bytes": audio_file.read()}
+                result["path"] = path
+                yield id_, result
+                id_ += 1
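For reference, the audio_files argument here is what DownloadManager.iter_archive returns: an iterator of (path-inside-archive, file-object) pairs, which is why the rewritten loop iterates over it directly instead of over a list of archives. Each line of {split}_metadata.jsonl is one JSON object keyed on file_name; a hypothetical row, showing only the two fields this loader actually reads (real rows may carry more):

    {"file_name": "train_bm_1/sample_0001.wav", "transcription": "eee skal vi sjå"}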
 
 
 
 
 
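With the repo-relative data/ paths above, the loader resolves files from the dataset repository itself. A minimal smoke test, assuming the hub id Sprakbanken/nb_samtale (taken from _DATA_URL) and the "annotations" config name implied by normalize_transcription; streaming=True exercises the iter_archive path:

    from datasets import load_dataset

    ds = load_dataset("Sprakbanken/nb_samtale", "annotations", split="train", streaming=True)
    print(next(iter(ds)))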