csikasote committed
Commit 7ad781c
1 Parent(s): 749a73a

End of training

Files changed (5):
  1. README.md +16 -4
  2. all_results.json +15 -0
  3. eval_results.json +9 -0
  4. train_results.json +9 -0
  5. trainer_state.json +311 -0
README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
 base_model: openai/whisper-medium
 tags:
 - generated_from_trainer
+datasets:
+- lozgen
 metrics:
 - wer
 model-index:
 - name: whisper-medium-lozgen-male-model
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: lozgen
+      type: lozgen
+    metrics:
+    - name: Wer
+      type: wer
+      value: 0.4796960341961529
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # whisper-medium-lozgen-male-model
 
-This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the lozgen dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.8436
-- Wer: 0.3887
+- Loss: 0.7984
+- Wer: 0.4797
 
 ## Model description
 
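The updated card stops at the evaluation numbers and does not show how to run the model. A minimal inference sketch with the `transformers` pipeline is given below; the Hub repo id, the audio path, and the chunking setting are assumptions rather than details taken from this commit.

```python
from transformers import pipeline

# Assumed repo id: the card's owner plus the model name; point this at a local
# checkpoint directory instead if the weights are only on disk.
MODEL_ID = "csikasote/whisper-medium-lozgen-male-model"

asr = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_ID,
    chunk_length_s=30,  # Whisper processes audio in 30-second windows
)

# "sample.wav" is a placeholder path to a 16 kHz mono recording.
print(asr("sample.wav")["text"])
```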
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 11.940298507462687,
+    "eval_loss": 0.7984297275543213,
+    "eval_runtime": 124.2218,
+    "eval_samples": 310,
+    "eval_samples_per_second": 2.496,
+    "eval_steps_per_second": 0.628,
+    "eval_wer": 0.4796960341961529,
+    "total_flos": 6.531871408128e+18,
+    "train_loss": 0.8537300661206245,
+    "train_runtime": 1916.7552,
+    "train_samples": 536,
+    "train_samples_per_second": 20.869,
+    "train_steps_per_second": 2.609
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 11.940298507462687,
+    "eval_loss": 0.7984297275543213,
+    "eval_runtime": 124.2218,
+    "eval_samples": 310,
+    "eval_samples_per_second": 2.496,
+    "eval_steps_per_second": 0.628,
+    "eval_wer": 0.4796960341961529
+}
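For readers unfamiliar with the metric, `eval_wer` is the word error rate: word-level substitutions, insertions, and deletions divided by the number of reference words, so 0.4797 means roughly 48 errors per 100 reference words. A toy check with the `evaluate` library is sketched below; the strings are placeholders, not data from the lozgen evaluation split.

```python
import evaluate  # WER metric backed by jiwer

wer = evaluate.load("wer")

# Placeholder transcripts; the 0.4797 above comes from the held-out
# evaluation split, which is not part of this commit.
references = ["this is a reference transcript", "another short utterance"]
predictions = ["this is reference transcript", "another short utterance"]

# One deleted word out of eight reference words -> 0.125
print(wer.compute(predictions=predictions, references=references))
```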
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 11.940298507462687,
+    "total_flos": 6.531871408128e+18,
+    "train_loss": 0.8537300661206245,
+    "train_runtime": 1916.7552,
+    "train_samples": 536,
+    "train_samples_per_second": 20.869,
+    "train_steps_per_second": 2.609
+}
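The throughput figures are easier to interpret alongside the values in `trainer_state.json` below: assuming the Trainer's convention of reporting speed against the planned run (`max_steps`) rather than the 800 steps actually executed, the numbers are self-consistent and imply an effective batch size of 8. A quick arithmetic check:

```python
# Consistency check of the reported speeds, assuming they are computed against
# the planned run (max_steps from trainer_state.json), not the 800 steps that
# early stopping actually allowed.
max_steps = 5000            # trainer_state.json
train_runtime = 1916.7552   # seconds, train_results.json

steps_per_second = max_steps / train_runtime
print(round(steps_per_second, 3))    # 2.609, matches train_steps_per_second

samples_per_second = 20.869          # train_results.json
effective_batch_size = samples_per_second / steps_per_second
print(round(effective_batch_size))   # 8: per-device batch 4 (train_batch_size)
                                     # times, presumably, 2 gradient-accumulation
                                     # steps or 2 devices
```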
trainer_state.json ADDED
@@ -0,0 +1,311 @@
+{
+  "best_metric": 0.7984297275543213,
+  "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-lozgen-male-model/checkpoint-200",
+  "epoch": 11.940298507462687,
+  "eval_steps": 200,
+  "global_step": 800,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.373134328358209,
+      "grad_norm": 67.36864471435547,
+      "learning_rate": 4.2000000000000006e-07,
+      "loss": 5.9515,
+      "step": 25
+    },
+    {
+      "epoch": 0.746268656716418,
+      "grad_norm": 50.873260498046875,
+      "learning_rate": 9.200000000000001e-07,
+      "loss": 4.7813,
+      "step": 50
+    },
+    {
+      "epoch": 1.1194029850746268,
+      "grad_norm": 34.35158157348633,
+      "learning_rate": 1.42e-06,
+      "loss": 3.468,
+      "step": 75
+    },
+    {
+      "epoch": 1.4925373134328357,
+      "grad_norm": 42.986968994140625,
+      "learning_rate": 1.9200000000000003e-06,
+      "loss": 2.385,
+      "step": 100
+    },
+    {
+      "epoch": 1.8656716417910446,
+      "grad_norm": 29.906461715698242,
+      "learning_rate": 2.42e-06,
+      "loss": 2.0003,
+      "step": 125
+    },
+    {
+      "epoch": 2.2388059701492535,
+      "grad_norm": 22.721763610839844,
+      "learning_rate": 2.92e-06,
+      "loss": 1.5359,
+      "step": 150
+    },
+    {
+      "epoch": 2.611940298507463,
+      "grad_norm": 23.2641658782959,
+      "learning_rate": 3.4200000000000007e-06,
+      "loss": 1.2475,
+      "step": 175
+    },
+    {
+      "epoch": 2.9850746268656714,
+      "grad_norm": 26.96329116821289,
+      "learning_rate": 3.920000000000001e-06,
+      "loss": 1.2961,
+      "step": 200
+    },
+    {
+      "epoch": 2.9850746268656714,
+      "eval_loss": 0.7984297275543213,
+      "eval_runtime": 127.2992,
+      "eval_samples_per_second": 2.435,
+      "eval_steps_per_second": 0.613,
+      "eval_wer": 0.4796960341961529,
+      "step": 200
+    },
+    {
+      "epoch": 3.3582089552238807,
+      "grad_norm": 12.527202606201172,
+      "learning_rate": 4.42e-06,
+      "loss": 0.7541,
+      "step": 225
+    },
+    {
+      "epoch": 3.7313432835820897,
+      "grad_norm": 21.142616271972656,
+      "learning_rate": 4.92e-06,
+      "loss": 0.7149,
+      "step": 250
+    },
+    {
+      "epoch": 4.104477611940299,
+      "grad_norm": 9.611239433288574,
+      "learning_rate": 5.420000000000001e-06,
+      "loss": 0.5993,
+      "step": 275
+    },
+    {
+      "epoch": 4.477611940298507,
+      "grad_norm": 12.687724113464355,
+      "learning_rate": 5.92e-06,
+      "loss": 0.3943,
+      "step": 300
+    },
+    {
+      "epoch": 4.850746268656716,
+      "grad_norm": 19.18768310546875,
+      "learning_rate": 6.42e-06,
+      "loss": 0.3778,
+      "step": 325
+    },
+    {
+      "epoch": 5.223880597014926,
+      "grad_norm": 15.153021812438965,
+      "learning_rate": 6.92e-06,
+      "loss": 0.208,
+      "step": 350
+    },
+    {
+      "epoch": 5.597014925373134,
+      "grad_norm": 10.51365852355957,
+      "learning_rate": 7.420000000000001e-06,
+      "loss": 0.1744,
+      "step": 375
+    },
+    {
+      "epoch": 5.970149253731344,
+      "grad_norm": 9.576289176940918,
+      "learning_rate": 7.92e-06,
+      "loss": 0.2221,
+      "step": 400
+    },
+    {
+      "epoch": 5.970149253731344,
+      "eval_loss": 0.8183491230010986,
+      "eval_runtime": 129.9768,
+      "eval_samples_per_second": 2.385,
+      "eval_steps_per_second": 0.6,
+      "eval_wer": 0.4540489194965566,
+      "step": 400
+    },
+    {
+      "epoch": 6.343283582089552,
+      "grad_norm": 20.339366912841797,
+      "learning_rate": 8.42e-06,
+      "loss": 0.12,
+      "step": 425
+    },
+    {
+      "epoch": 6.7164179104477615,
+      "grad_norm": 6.8028669357299805,
+      "learning_rate": 8.920000000000001e-06,
+      "loss": 0.1084,
+      "step": 450
+    },
+    {
+      "epoch": 7.08955223880597,
+      "grad_norm": 16.080068588256836,
+      "learning_rate": 9.42e-06,
+      "loss": 0.1097,
+      "step": 475
+    },
+    {
+      "epoch": 7.462686567164179,
+      "grad_norm": 6.925537109375,
+      "learning_rate": 9.920000000000002e-06,
+      "loss": 0.0899,
+      "step": 500
+    },
+    {
+      "epoch": 7.835820895522388,
+      "grad_norm": 15.699053764343262,
+      "learning_rate": 9.953333333333333e-06,
+      "loss": 0.1049,
+      "step": 525
+    },
+    {
+      "epoch": 8.208955223880597,
+      "grad_norm": 10.956867218017578,
+      "learning_rate": 9.89777777777778e-06,
+      "loss": 0.0883,
+      "step": 550
+    },
+    {
+      "epoch": 8.582089552238806,
+      "grad_norm": 10.130331993103027,
+      "learning_rate": 9.842222222222223e-06,
+      "loss": 0.0952,
+      "step": 575
+    },
+    {
+      "epoch": 8.955223880597014,
+      "grad_norm": 16.053123474121094,
+      "learning_rate": 9.786666666666667e-06,
+      "loss": 0.0963,
+      "step": 600
+    },
+    {
+      "epoch": 8.955223880597014,
+      "eval_loss": 0.8390884399414062,
+      "eval_runtime": 124.2772,
+      "eval_samples_per_second": 2.494,
+      "eval_steps_per_second": 0.628,
+      "eval_wer": 0.3889812396105438,
+      "step": 600
+    },
+    {
+      "epoch": 9.328358208955224,
+      "grad_norm": 5.976753234863281,
+      "learning_rate": 9.731111111111113e-06,
+      "loss": 0.0706,
+      "step": 625
+    },
+    {
+      "epoch": 9.701492537313433,
+      "grad_norm": 4.946258544921875,
+      "learning_rate": 9.675555555555555e-06,
+      "loss": 0.0533,
+      "step": 650
+    },
+    {
+      "epoch": 10.074626865671641,
+      "grad_norm": 2.1235852241516113,
+      "learning_rate": 9.620000000000001e-06,
+      "loss": 0.0535,
+      "step": 675
+    },
+    {
+      "epoch": 10.447761194029852,
+      "grad_norm": 6.762497425079346,
+      "learning_rate": 9.564444444444445e-06,
+      "loss": 0.0426,
+      "step": 700
+    },
+    {
+      "epoch": 10.82089552238806,
+      "grad_norm": 10.996703147888184,
+      "learning_rate": 9.508888888888889e-06,
+      "loss": 0.037,
+      "step": 725
+    },
+    {
+      "epoch": 11.194029850746269,
+      "grad_norm": 11.038252830505371,
+      "learning_rate": 9.453333333333335e-06,
+      "loss": 0.0537,
+      "step": 750
+    },
+    {
+      "epoch": 11.567164179104477,
+      "grad_norm": 5.437051296234131,
+      "learning_rate": 9.397777777777779e-06,
+      "loss": 0.0398,
+      "step": 775
+    },
+    {
+      "epoch": 11.940298507462687,
+      "grad_norm": 39.8909797668457,
+      "learning_rate": 9.342222222222223e-06,
+      "loss": 0.0457,
+      "step": 800
+    },
+    {
+      "epoch": 11.940298507462687,
+      "eval_loss": 0.8435798287391663,
+      "eval_runtime": 129.9146,
+      "eval_samples_per_second": 2.386,
+      "eval_steps_per_second": 0.6,
+      "eval_wer": 0.3887437663262883,
+      "step": 800
+    },
+    {
+      "epoch": 11.940298507462687,
+      "step": 800,
+      "total_flos": 6.531871408128e+18,
+      "train_loss": 0.8537300661206245,
+      "train_runtime": 1916.7552,
+      "train_samples_per_second": 20.869,
+      "train_steps_per_second": 2.609
+    }
+  ],
+  "logging_steps": 25,
+  "max_steps": 5000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 75,
+  "save_steps": 200,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 3,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 3
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 6.531871408128e+18,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
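The training script itself is not part of this commit, but the state above pins down most of the schedule: evaluation and checkpointing every 200 steps, logging every 25, a 5000-step budget, per-device batch size 4, and early stopping with patience 3 on the best (lowest) eval loss, whose counter reaching 3 is what ended the run at step 800. A hedged reconstruction of the corresponding `Seq2SeqTrainingArguments` is sketched below; the output directory and every value not visible in `trainer_state.json` are assumptions.

```python
from transformers import EarlyStoppingCallback, Seq2SeqTrainingArguments

# Only the values commented with "trainer_state.json" are taken from the state
# above; everything else is an assumption about how the run was configured.
args = Seq2SeqTrainingArguments(
    output_dir="whisper-medium-lozgen-male-model",  # assumed
    per_device_train_batch_size=4,      # trainer_state.json: train_batch_size
    max_steps=5000,                     # trainer_state.json: max_steps
    logging_steps=25,                   # trainer_state.json: logging_steps
    eval_strategy="steps",              # named evaluation_strategy in older releases
    eval_steps=200,                     # trainer_state.json: eval_steps
    save_steps=200,                     # trainer_state.json: save_steps
    load_best_model_at_end=True,        # implied by best_model_checkpoint tracking
    metric_for_best_model="eval_loss",  # best_metric equals eval_loss at step 200
    greater_is_better=False,
    predict_with_generate=True,         # assumed, needed to score WER during eval
)

# Early stopping with patience 3 and threshold 0.0, as recorded under
# stateful_callbacks in the state above.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=3,
    early_stopping_threshold=0.0,
)
```

These objects would be handed to a `Seq2SeqTrainer` together with the model, the prepared lozgen splits, a padding data collator, and a WER `compute_metrics` function, none of which appear in this commit.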