soeren committed
Commit fb12e65 · 1 Parent(s): 22fa011

build script

.gitattributes CHANGED
@@ -52,3 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.parquet.gzip filter=lfs diff=lfs merge=lfs -text
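With this rule in place, every *.parquet.gzip file is stored via Git LFS rather than in the regular object store. A quick way to confirm the pattern is picked up (a sketch, assuming git and git-lfs are installed and this runs from the repo root):

import subprocess

# ask git which filter applies to one of the new parquet files
result = subprocess.run(
    ["git", "check-attr", "filter", "data/dataset_audio_train_clipped.parquet.gzip"],
    capture_output=True, text=True, check=True,
)
print(result.stdout)  # expected to end with "filter: lfs"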
.gitignore ADDED
@@ -0,0 +1,7 @@
+ Pipfile
+ Pipfile.lock
+
+ data/dataset_audio_annotated_and_embedding_with_probs.parquet.gzip
+ data/dataset_audio_test.parquet.gzip
+ data/dataset_audio_train.parquet.gzip
+ data/dataset_audio_validation.parquet.gzip
data/clip_metadata.py ADDED
@@ -0,0 +1,10 @@
+ import pandas as pd
+
+ _SPLIT = "validation"
+
+ df = pd.read_parquet("data/dataset_audio_" + _SPLIT + ".parquet.gzip")
+
+ clipped_df = df.filter(["Probability", "Predicted Label", "Annotated Labels", "Probability Vector", "embedding_reduced"],
+                        axis=1)
+
+ clipped_df.to_parquet("data/dataset_audio_" + _SPLIT + "_clipped.parquet.gzip")
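A quick sanity check for the output (a sketch, assuming the clipped file has already been written by the script above):

import pandas as pd

clipped = pd.read_parquet("data/dataset_audio_validation_clipped.parquet.gzip")
# the clipped file should contain exactly the five enriched columns
print(clipped.columns.tolist())
# ['Probability', 'Predicted Label', 'Annotated Labels', 'Probability Vector', 'embedding_reduced']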
data/create_enriched_annotated_speechcommands.py ADDED
@@ -0,0 +1,174 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # # Create embeddings with the transformers library
+ #
+ # We use the Huggingface transformers library to create an embedding for an audio dataset
+
+ # ## tl;dr: the pipeline as callable functions
+
+ import datasets
+ from transformers import AutoFeatureExtractor, AutoModel, ASTForAudioClassification
+ import torch
+ from renumics import spotlight
+ import pandas as pd
+ import umap
+ import numpy as np
+
+ _SPLIT = "train"
+
+
+ def __set_device():
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     if device == "cuda":
+         torch.cuda.empty_cache()
+     return device
+
+
+ def extract_embeddings(model, feature_extractor):
+     """Utility to compute embeddings."""
+     device = model.device
+
+     def pp(batch):
+         audios = [element["array"] for element in batch["audio"]]
+         inputs = feature_extractor(raw_speech=audios, return_tensors="pt", padding=True).to(device)
+         embeddings = model(**inputs).last_hidden_state[:, 0].cpu()
+         return {"embedding": embeddings}
+
+     return pp
+
+
+ def huggingface_embedding(dataset, modelname, batched=True, batch_size=8):
+     # initialize huggingface model
+     feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
+     model = AutoModel.from_pretrained(modelname, output_hidden_states=True)
+
+     # compute embedding
+     device = __set_device()
+     extract_fn = extract_embeddings(model.to(device), feature_extractor)
+     updated_dataset = dataset.map(extract_fn, batched=batched, batch_size=batch_size)
+
+     return updated_dataset
+
+
+ def batch_probabilities(model, feature_extractor):
+     device = model.device
+
+     def processing(batch):
+         audios = [element["array"] for element in batch["audio"]]
+         inputs = feature_extractor(raw_speech=audios, return_tensors="pt", padding=True, sampling_rate=16000).to(device)
+         outputs = model(**inputs)
+         return {"logits": outputs.logits}
+
+     return processing
+
+
+ def annotate_probabilities(dataset, modelname, batched=True, batch_size=8):
+     model = ASTForAudioClassification.from_pretrained(modelname)
+     feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
+
+     device = __set_device()
+     calc_outputs = batch_probabilities(model.to(device), feature_extractor)
+     output_dataset = dataset.map(calc_outputs, batched=batched, batch_size=batch_size)
+
+     return output_dataset
+
+
+ def annotate_batch(model, dataset):
+     device = model.device
+
+     def batch_annotation(batch):
+         logits = [torch.tensor(element) for element in batch["logits"]]
+         probabilities_per_class = [torch.nn.functional.softmax(logit, dim=-1) for logit in logits]
+         predicted_class_ids = [torch.argmax(logit).item() for logit in logits]
+         predicted_labels = [model.config.id2label[predicted_class_id] for predicted_class_id in predicted_class_ids]
+         # the pre-trained model was fine-tuned on a different number of classes
+         # -> id2label only reflects the model-internal label, not the actual dataset label
+         annotated_labels = [labels[element] for element in batch["label"]]
+         probabilities = []
+         for index, prob_per_class in enumerate(probabilities_per_class):
+             probabilities.append(prob_per_class[predicted_class_ids[index]].item())
+         return {"Probability": probabilities, "Predicted Label": predicted_labels,
+                 "Annotated Labels": annotated_labels, "Probability Vector": probabilities_per_class}
+
+     return batch_annotation
+
+
+ def annotate_dataset(dataset, modelname, batched=True, batch_size=8):
+     model = ASTForAudioClassification.from_pretrained(modelname)
+     device = __set_device()
+
+     annotate = annotate_batch(model.to(device), dataset)
+     annotated_dataset = dataset.map(annotate, batched=batched, batch_size=batch_size)
+
+     return annotated_dataset
+
+
+ # ## Step-by-step example on speech-commands
+ #
+ # ### Load speech-commands from the Huggingface hub
+
+ # Load the split configured via _SPLIT (e.g. the validation split to evaluate
+ # the model's performance on unseen data)
+ dataset = datasets.load_dataset('speech_commands', 'v0.01', split=_SPLIT)
+
+ labels = dataset.features["label"].names
+
+ # Let's have a look at all of the labels that we want to predict
+ print(labels)
+
+ # ### Compute probabilities and annotate dataset
+
+ # First, calculate logits for each sample and annotate
+ dataset_annotated = annotate_probabilities(dataset, "MIT/ast-finetuned-speech-commands-v2")
+
+ # Now annotate labels and probabilities
+ dataset_annotated_complete = annotate_dataset(dataset_annotated, "MIT/ast-finetuned-speech-commands-v2")
+
+ # ### Compute embedding with the audio spectrogram transformer from Huggingface
+ dataset_enriched = huggingface_embedding(dataset_annotated_complete, "MIT/ast-finetuned-speech-commands-v2")
+
+ # ### Reduce embeddings for faster visualization
+ embeddings = np.stack(np.array(dataset_enriched['embedding']))
+ reducer = umap.UMAP()
+ reduced_embedding = reducer.fit_transform(embeddings)
+ dataset_enriched = dataset_enriched.add_column("embedding_reduced", list(reduced_embedding))
+
+ print(dataset_enriched.features)
+
+ df = dataset_enriched.to_pandas()
+ df.to_parquet("data/dataset_audio_" + _SPLIT + ".parquet.gzip", compression='gzip')
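The UMAP step is what produces the two-dimensional "embedding_reduced" column that the dataset builder below declares with length=2. A minimal sketch of that contract, using random stand-in data instead of the real AST embeddings:

import numpy as np
import umap

# stand-in for the AST embeddings; shape (n_samples, embedding_dim)
embeddings = np.random.rand(100, 768).astype(np.float32)

reducer = umap.UMAP()  # n_components defaults to 2
reduced = reducer.fit_transform(embeddings)

assert reduced.shape == (100, 2)  # matches the length-2 "embedding_reduced" feature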
data/dataset_audio_test_clipped.parquet.gzip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8b6ae3710263e5e83c9f6784e1945b15a97bde88a6b7d1b45587b0d6278345c
+ size 698777
data/dataset_audio_train_clipped.parquet.gzip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8e208d54feef82c4a7e1f120787e4b1074cf7cad75035e3640714636965e84f
+ size 8525172
data/dataset_audio_validation_clipped.parquet.gzip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:390eaf7ec5fdc8a832651b6255948aeba7291ae1b2afc5de73229aa25c48a73c
+ size 1566541
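These three entries are Git LFS pointer files, not the parquet data itself; each records only the spec version, the SHA-256 object id, and the blob size. A small sketch for reading those fields back (only valid while the file is an un-smudged pointer, i.e. before git-lfs replaces it with the real content):

def read_lfs_pointer(path):
    # each pointer line is "<key> <value>"
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. read_lfs_pointer("data/dataset_audio_test_clipped.parquet.gzip")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:d8b6...', 'size': '698777'}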
speech_commands_enriched.py CHANGED
@@ -22,6 +22,7 @@ import datasets
 
 from pathlib import Path
 
+import pandas as pd
 
 _CITATION = """
 @article{speechcommandsv2,
@@ -51,6 +52,7 @@ Version 0.01 of the data set (configuration `"v0.01"`) was released on August 3r
 In version 0.01 thirty different words were recoded: "Yes", "No", "Up", "Down", "Left",
 "Right", "On", "Off", "Stop", "Go", "Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine",
 "Bed", "Bird", "Cat", "Dog", "Happy", "House", "Marvin", "Sheila", "Tree", "Wow".
+This version is not yet supported.
 
 In version 0.02 more words were added: "Backward", "Forward", "Follow", "Learn", "Visual".
@@ -130,17 +132,17 @@ class SpeechCommandsConfig(datasets.BuilderConfig):
 
 class SpeechCommands(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
-        SpeechCommandsConfig(
-            name="v0.01",
-            description=textwrap.dedent(
-                """\
-                Version 0.01 of the SpeechCommands dataset. Contains 30 words
-                (20 of them are auxiliary) and background noise.
-                """
-            ),
-            labels=LABELS_V1,
-            version=datasets.Version("0.1.0"),
-        ),
+        # SpeechCommandsConfig(
+        #     name="v0.01",
+        #     description=textwrap.dedent(
+        #         """\
+        #         Version 0.01 of the SpeechCommands dataset. Contains 30 words
+        #         (20 of them are auxiliary) and background noise.
+        #         """
+        #     ),
+        #     labels=LABELS_V1,
+        #     version=datasets.Version("0.1.0"),
+        # ),
         SpeechCommandsConfig(
             name="v0.02",
             description=textwrap.dedent(
@@ -165,6 +167,12 @@ class SpeechCommands(datasets.GeneratorBasedBuilder):
                     "is_unknown": datasets.Value("bool"),
                     "speaker_id": datasets.Value("string"),
                     "utterance_id": datasets.Value("int8"),
+                    # enriched features:
+                    "Probability": datasets.Value("float64"),
+                    "Predicted Label": datasets.Value("string"),
+                    "Annotated Labels": datasets.Value("string"),
+                    "Probability Vector": datasets.Sequence(feature=datasets.Value("float64"), length=35),
+                    "embedding_reduced": datasets.Sequence(feature=datasets.Value("float32"), length=2),
                 }
             ),
             homepage=_URL,
@@ -182,35 +190,48 @@ class SpeechCommands(datasets.GeneratorBasedBuilder):
                 "test": _DL_URL.format(name=self.config.name, split="test"),
             }
         )
+
+        metadata_paths = dl_manager.download(
+            {
+                "train": "data/dataset_audio_train_clipped.parquet.gzip",
+                "test": "data/dataset_audio_test_clipped.parquet.gzip",
+                "validation": "data/dataset_audio_validation_clipped.parquet.gzip"
+            }
+        )
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "archive_path": dl_manager.download_and_extract(archive_paths["train"]),
+                    "metadata": pd.read_parquet(metadata_paths["train"]),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "archive_path": dl_manager.download_and_extract(archive_paths["validation"]),
+                    "metadata": pd.read_parquet(metadata_paths["validation"]),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "archive_path": dl_manager.download_and_extract(archive_paths["test"]),
+                    "metadata": pd.read_parquet(metadata_paths["test"]),
                },
            ),
        ]
 
-    def _generate_examples(self, archive_path):
+    def _generate_examples(self, archive_path, metadata):
+
+        # HINT: metadata is already the split-specific metadata
 
         pathlist = Path(archive_path).glob('**/*.wav')
 
-        for path in pathlist:
+        for path, row in zip(pathlist, metadata.iterrows()):
+
+            # row is a tuple containing an index and a pandas Series
 
             pathcomponents = str(path).split("/")
             word = pathcomponents[-2]
@@ -236,8 +257,16 @@ class SpeechCommands(datasets.GeneratorBasedBuilder):
                 "is_unknown": is_unknown,
                 "speaker_id": speaker_id,
                 "utterance_id": utterance_id,
+                # enriched features:
+                "Probability": row[1]["Probability"],
+                "Predicted Label": row[1]["Predicted Label"],
+                "Annotated Labels": row[1]["Annotated Labels"],
+                "Probability Vector": row[1]["Probability Vector"],
+                "embedding_reduced": row[1]["embedding_reduced"]
             }
 
-#for debugging
-#if __name__ == "__main__":
-#    ds = datasets.load_dataset("speech_commands_enriched.py", 'v0.01', split="train", streaming=False)
+# for debugging; comment out afterwards
+if __name__ == "__main__":
+    datasets.builder.has_sufficient_disk_space = lambda needed_bytes, directory='.': True
+    ds = datasets.load_dataset("speech_commands_enriched.py", 'v0.02', split="train",
+                               streaming=False)
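One caveat worth noting: zip(pathlist, metadata.iterrows()) pairs .wav files with metadata rows purely by iteration order and silently truncates to the shorter sequence. A defensive guard (a sketch, not part of this commit, reusing the archive_path/metadata names from _generate_examples):

from pathlib import Path

paths = list(Path(archive_path).glob("**/*.wav"))
# fail fast if the file count cannot line up with the clipped parquet rows
assert len(paths) == len(metadata), (
    f"{len(paths)} .wav files vs {len(metadata)} metadata rows"
)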