Committed by csikasote
Commit 2c48756 · verified · Parent: 8c5f3f6

End of training
README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
 base_model: openai/whisper-medium
 tags:
 - generated_from_trainer
+datasets:
+- toigen
 metrics:
 - wer
 model-index:
 - name: whisper-medium-toigen-female-model
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: toigen
+      type: toigen
+    metrics:
+    - name: Wer
+      type: wer
+      value: 0.44457831325301206
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # whisper-medium-toigen-female-model
 
-This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the toigen dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6705
-- Wer: 0.4209
+- Loss: 0.5737
+- Wer: 0.4446
 
 ## Model description
 
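The updated card reports a word error rate of roughly 0.44 on the toigen evaluation set. For readers who want to try the checkpoint, the sketch below shows a minimal transcription call with the transformers ASR pipeline; the repo id csikasote/whisper-medium-toigen-female-model is inferred from the commit author and model name and may differ, and sample.wav is a placeholder audio file.

```python
# Minimal inference sketch (assumed repo id; adjust to the actual Hub location).
import torch
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-medium-toigen-female-model",  # assumed repo id
    device=0 if torch.cuda.is_available() else -1,
)

# "sample.wav" is a placeholder; a 16 kHz mono recording in the target language works best.
print(asr("sample.wav")["text"])
```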
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+  "epoch": 8.0,
+  "eval_loss": 0.5737143158912659,
+  "eval_runtime": 102.0573,
+  "eval_samples": 221,
+  "eval_samples_per_second": 2.165,
+  "eval_steps_per_second": 0.549,
+  "eval_wer": 0.44457831325301206,
+  "total_flos": 8.1240150638592e+18,
+  "train_loss": 0.8661212004423141,
+  "train_runtime": 2219.5582,
+  "train_samples": 995,
+  "train_samples_per_second": 18.022,
+  "train_steps_per_second": 2.253
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 8.0,
+  "eval_loss": 0.5737143158912659,
+  "eval_runtime": 102.0573,
+  "eval_samples": 221,
+  "eval_samples_per_second": 2.165,
+  "eval_steps_per_second": 0.549,
+  "eval_wer": 0.44457831325301206
+}
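The eval_wer field above is the word error rate (about 44.5%). For reference, the sketch below shows how WER is conventionally computed with the Hugging Face evaluate package; the prediction and reference strings are placeholders, not drawn from the toigen evaluation set.

```python
# Word error rate sketch using the `evaluate` package (placeholder strings).
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["hello world", "good morning everyone"]
references = ["hello word", "good morning everyone"]

# WER = (substitutions + insertions + deletions) / number of reference words
wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.3f}")  # 0.200 here: 1 substitution over 5 reference words
```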
runs/Jan05_18-28-48_srvrocgpu011.uct.ac.za/events.out.tfevents.1736097109.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:244f838b514e1fb7be8e3479bcf8947d5ae5eeca58b1b7c4a552fbe4e5a21dd3
+size 40
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 8.0,
+  "total_flos": 8.1240150638592e+18,
+  "train_loss": 0.8661212004423141,
+  "train_runtime": 2219.5582,
+  "train_samples": 995,
+  "train_samples_per_second": 18.022,
+  "train_steps_per_second": 2.253
+}
trainer_state.json ADDED
@@ -0,0 +1,376 @@
+{
+  "best_metric": 0.5737143158912659,
+  "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-toigen-female-model/checkpoint-400",
+  "epoch": 8.0,
+  "eval_steps": 200,
+  "global_step": 1000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.20080321285140562,
+      "grad_norm": 61.12749481201172,
+      "learning_rate": 4.2000000000000006e-07,
+      "loss": 6.6534,
+      "step": 25
+    },
+    {
+      "epoch": 0.40160642570281124,
+      "grad_norm": 43.05365753173828,
+      "learning_rate": 9.200000000000001e-07,
+      "loss": 5.1469,
+      "step": 50
+    },
+    {
+      "epoch": 0.6024096385542169,
+      "grad_norm": 38.88462448120117,
+      "learning_rate": 1.42e-06,
+      "loss": 3.6082,
+      "step": 75
+    },
+    {
+      "epoch": 0.8032128514056225,
+      "grad_norm": 33.215606689453125,
+      "learning_rate": 1.9200000000000003e-06,
+      "loss": 2.5276,
+      "step": 100
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 24.052446365356445,
+      "learning_rate": 2.42e-06,
+      "loss": 2.0062,
+      "step": 125
+    },
+    {
+      "epoch": 1.2008032128514057,
+      "grad_norm": 31.749963760375977,
+      "learning_rate": 2.92e-06,
+      "loss": 1.5108,
+      "step": 150
+    },
+    {
+      "epoch": 1.4016064257028114,
+      "grad_norm": 26.5882625579834,
+      "learning_rate": 3.4200000000000007e-06,
+      "loss": 1.35,
+      "step": 175
+    },
+    {
+      "epoch": 1.6024096385542168,
+      "grad_norm": 24.725419998168945,
+      "learning_rate": 3.920000000000001e-06,
+      "loss": 1.3083,
+      "step": 200
+    },
+    {
+      "epoch": 1.6024096385542168,
+      "eval_loss": 0.6686530113220215,
+      "eval_runtime": 103.3468,
+      "eval_samples_per_second": 2.138,
+      "eval_steps_per_second": 0.542,
+      "eval_wer": 0.5309236947791165,
+      "step": 200
+    },
+    {
+      "epoch": 1.8032128514056225,
+      "grad_norm": 23.32423973083496,
+      "learning_rate": 4.42e-06,
+      "loss": 1.2009,
+      "step": 225
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 23.683210372924805,
+      "learning_rate": 4.92e-06,
+      "loss": 1.2218,
+      "step": 250
+    },
+    {
+      "epoch": 2.2008032128514055,
+      "grad_norm": 17.880393981933594,
+      "learning_rate": 5.420000000000001e-06,
+      "loss": 0.7736,
+      "step": 275
+    },
+    {
+      "epoch": 2.4016064257028114,
+      "grad_norm": 16.88402557373047,
+      "learning_rate": 5.92e-06,
+      "loss": 0.676,
+      "step": 300
+    },
+    {
+      "epoch": 2.602409638554217,
+      "grad_norm": 19.675437927246094,
+      "learning_rate": 6.42e-06,
+      "loss": 0.744,
+      "step": 325
+    },
+    {
+      "epoch": 2.8032128514056227,
+      "grad_norm": 24.291088104248047,
+      "learning_rate": 6.92e-06,
+      "loss": 0.7339,
+      "step": 350
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 11.682101249694824,
+      "learning_rate": 7.420000000000001e-06,
+      "loss": 0.7098,
+      "step": 375
+    },
+    {
+      "epoch": 3.2008032128514055,
+      "grad_norm": 10.226051330566406,
+      "learning_rate": 7.92e-06,
+      "loss": 0.3381,
+      "step": 400
+    },
+    {
+      "epoch": 3.2008032128514055,
+      "eval_loss": 0.5737143158912659,
+      "eval_runtime": 102.7558,
+      "eval_samples_per_second": 2.151,
+      "eval_steps_per_second": 0.545,
+      "eval_wer": 0.44457831325301206,
+      "step": 400
+    },
+    {
+      "epoch": 3.4016064257028114,
+      "grad_norm": 16.29640769958496,
+      "learning_rate": 8.42e-06,
+      "loss": 0.3465,
+      "step": 425
+    },
+    {
+      "epoch": 3.602409638554217,
+      "grad_norm": 16.958227157592773,
+      "learning_rate": 8.920000000000001e-06,
+      "loss": 0.3707,
+      "step": 450
+    },
+    {
+      "epoch": 3.8032128514056227,
+      "grad_norm": 20.855695724487305,
+      "learning_rate": 9.42e-06,
+      "loss": 0.445,
+      "step": 475
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 10.269837379455566,
+      "learning_rate": 9.920000000000002e-06,
+      "loss": 0.4447,
+      "step": 500
+    },
+    {
+      "epoch": 4.2008032128514055,
+      "grad_norm": 15.100114822387695,
+      "learning_rate": 9.953333333333333e-06,
+      "loss": 0.1814,
+      "step": 525
+    },
+    {
+      "epoch": 4.401606425702811,
+      "grad_norm": 13.603893280029297,
+      "learning_rate": 9.89777777777778e-06,
+      "loss": 0.2124,
+      "step": 550
+    },
+    {
+      "epoch": 4.602409638554217,
+      "grad_norm": 13.522416114807129,
+      "learning_rate": 9.842222222222223e-06,
+      "loss": 0.2537,
+      "step": 575
+    },
+    {
+      "epoch": 4.803212851405623,
+      "grad_norm": 17.89769172668457,
+      "learning_rate": 9.786666666666667e-06,
+      "loss": 0.2639,
+      "step": 600
+    },
+    {
+      "epoch": 4.803212851405623,
+      "eval_loss": 0.6059707999229431,
+      "eval_runtime": 102.8339,
+      "eval_samples_per_second": 2.149,
+      "eval_steps_per_second": 0.545,
+      "eval_wer": 0.42971887550200805,
+      "step": 600
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 6.560233116149902,
+      "learning_rate": 9.731111111111113e-06,
+      "loss": 0.2428,
+      "step": 625
+    },
+    {
+      "epoch": 5.2008032128514055,
+      "grad_norm": 7.097219467163086,
+      "learning_rate": 9.675555555555555e-06,
+      "loss": 0.1016,
+      "step": 650
+    },
+    {
+      "epoch": 5.401606425702811,
+      "grad_norm": 14.023282051086426,
+      "learning_rate": 9.620000000000001e-06,
+      "loss": 0.1447,
+      "step": 675
+    },
+    {
+      "epoch": 5.602409638554217,
+      "grad_norm": 8.226017951965332,
+      "learning_rate": 9.564444444444445e-06,
+      "loss": 0.1111,
+      "step": 700
+    },
+    {
+      "epoch": 5.803212851405623,
+      "grad_norm": 11.734825134277344,
+      "learning_rate": 9.508888888888889e-06,
+      "loss": 0.1351,
+      "step": 725
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 9.586465835571289,
+      "learning_rate": 9.453333333333335e-06,
+      "loss": 0.1444,
+      "step": 750
+    },
+    {
+      "epoch": 6.2008032128514055,
+      "grad_norm": 7.064974784851074,
+      "learning_rate": 9.397777777777779e-06,
+      "loss": 0.0732,
+      "step": 775
+    },
+    {
+      "epoch": 6.401606425702811,
+      "grad_norm": 7.27360725402832,
+      "learning_rate": 9.342222222222223e-06,
+      "loss": 0.0831,
+      "step": 800
+    },
+    {
+      "epoch": 6.401606425702811,
+      "eval_loss": 0.6426535844802856,
+      "eval_runtime": 103.0495,
+      "eval_samples_per_second": 2.145,
+      "eval_steps_per_second": 0.543,
+      "eval_wer": 0.40321285140562246,
+      "step": 800
+    },
+    {
+      "epoch": 6.602409638554217,
+      "grad_norm": 5.486606597900391,
+      "learning_rate": 9.286666666666667e-06,
+      "loss": 0.0764,
+      "step": 825
+    },
+    {
+      "epoch": 6.803212851405623,
+      "grad_norm": 11.913525581359863,
+      "learning_rate": 9.231111111111111e-06,
+      "loss": 0.0753,
+      "step": 850
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 4.659058094024658,
+      "learning_rate": 9.175555555555557e-06,
+      "loss": 0.1132,
+      "step": 875
+    },
+    {
+      "epoch": 7.2008032128514055,
+      "grad_norm": 2.5287861824035645,
+      "learning_rate": 9.12e-06,
+      "loss": 0.0583,
+      "step": 900
+    },
+    {
+      "epoch": 7.401606425702811,
+      "grad_norm": 3.8375866413116455,
+      "learning_rate": 9.064444444444447e-06,
+      "loss": 0.0513,
+      "step": 925
+    },
+    {
+      "epoch": 7.602409638554217,
+      "grad_norm": 10.924978256225586,
+      "learning_rate": 9.008888888888889e-06,
+      "loss": 0.0674,
+      "step": 950
+    },
+    {
+      "epoch": 7.803212851405623,
+      "grad_norm": 7.799922943115234,
+      "learning_rate": 8.953333333333335e-06,
+      "loss": 0.0612,
+      "step": 975
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 0.7825481295585632,
+      "learning_rate": 8.897777777777779e-06,
+      "loss": 0.0776,
+      "step": 1000
+    },
+    {
+      "epoch": 8.0,
+      "eval_loss": 0.6705393195152283,
+      "eval_runtime": 102.2471,
+      "eval_samples_per_second": 2.161,
+      "eval_steps_per_second": 0.548,
+      "eval_wer": 0.42088353413654617,
+      "step": 1000
+    },
+    {
+      "epoch": 8.0,
+      "step": 1000,
+      "total_flos": 8.1240150638592e+18,
+      "train_loss": 0.8661212004423141,
+      "train_runtime": 2219.5582,
+      "train_samples_per_second": 18.022,
+      "train_steps_per_second": 2.253
+    }
+  ],
+  "logging_steps": 25,
+  "max_steps": 5000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 41,
+  "save_steps": 200,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 3,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 3
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 8.1240150638592e+18,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
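The trainer state records the configuration that produced these logs: per-device batch size 4, evaluation and checkpointing every 200 steps, logging every 25 steps, and a 5000-step budget cut short at step 1000 by early stopping with patience 3 on the evaluation loss (best checkpoint at step 400). The sketch below reconstructs training arguments consistent with those values; the learning rate and warmup figures are assumptions inferred from the logged schedule, not taken from the author's script.

```python
# Sketch of Seq2SeqTrainer settings consistent with the logged trainer_state
# (not the author's exact script; learning_rate and warmup_steps are assumptions).
from transformers import Seq2SeqTrainingArguments, EarlyStoppingCallback

training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-medium-toigen-female-model",
    per_device_train_batch_size=4,   # matches "train_batch_size": 4
    learning_rate=1e-5,              # assumed peak LR, consistent with the log
    warmup_steps=500,                # assumed; the logged LR peaks around step 500
    max_steps=5000,                  # matches "max_steps": 5000
    eval_strategy="steps",           # "evaluation_strategy" on older transformers releases
    eval_steps=200,                  # matches "eval_steps": 200
    save_steps=200,                  # matches "save_steps": 200
    logging_steps=25,                # matches "logging_steps": 25
    load_best_model_at_end=True,
    metric_for_best_model="loss",    # best_metric in the state tracks eval_loss (0.5737)
    greater_is_better=False,
    predict_with_generate=True,
)

# Passed to Seq2SeqTrainer together with the Whisper model, the toigen splits and a
# speech data collator (omitted here), this callback stops training once eval_loss
# fails to improve for 3 consecutive evaluations, as in the logged EarlyStoppingCallback.
early_stopping = EarlyStoppingCallback(early_stopping_patience=3)
```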