pakphum committed (verified)
Commit 9823daf · 1 Parent(s): ad1cce6

End of training

README.md CHANGED
@@ -4,6 +4,7 @@ license: llama3.2
  base_model: meta-llama/Llama-3.2-3B-Instruct
  tags:
  - llama-factory
+ - lora
  - generated_from_trainer
  model-index:
  - name: qlora-llama3b-iterative
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->

  # qlora-llama3b-iterative

- This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on an unknown dataset.
+ This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on the train-iterative dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.0051

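The card above only names the base model and the llama-factory/lora tags, so a minimal usage sketch may help. The snippet below loads the base model in 4-bit and attaches this repository's LoRA adapter with PEFT; the adapter repo id `pakphum/qlora-llama3b-iterative` and the NF4 4-bit settings are assumptions (inferred from the committer name and the QLoRA naming), not values stated in this commit.

```python
# Sketch only: adapter repo id and quantization settings are assumed, not taken from the diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "meta-llama/Llama-3.2-3B-Instruct"
adapter_id = "pakphum/qlora-llama3b-iterative"  # assumed Hub repo id

bnb = BitsAndBytesConfig(                       # 4-bit NF4, the usual QLoRA setup (assumption)
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, quantization_config=bnb, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the trained LoRA weights

messages = [{"role": "user", "content": "Hello!"}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```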
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "epoch": 4.444444444444445,
+ "eval_loss": 0.005105508491396904,
+ "eval_runtime": 8.0865,
+ "eval_samples_per_second": 12.366,
+ "eval_steps_per_second": 12.366,
+ "total_flos": 1.038706354200576e+16,
+ "train_loss": 0.1301481350355316,
+ "train_runtime": 1611.4177,
+ "train_samples_per_second": 2.482,
+ "train_steps_per_second": 0.31
+ }
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "epoch": 4.444444444444445,
+ "eval_loss": 0.005105508491396904,
+ "eval_runtime": 8.0865,
+ "eval_samples_per_second": 12.366,
+ "eval_steps_per_second": 12.366
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 4.444444444444445,
+ "total_flos": 1.038706354200576e+16,
+ "train_loss": 0.1301481350355316,
+ "train_runtime": 1611.4177,
+ "train_samples_per_second": 2.482,
+ "train_steps_per_second": 0.31
+ }
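As a quick sanity check, the throughput figures in train_results.json are mutually consistent; the sketch below back-derives the optimizer-step count, the effective batch size per step, and the approximate size of the train split. Treating "steps" as optimizer steps and reading the ~8 samples per step as an effective batch size are interpretations, not values stated in the file.

```python
# Figures copied from train_results.json above; derived values are rounded estimates.
train_runtime = 1611.4177          # seconds
steps_per_sec = 0.31
samples_per_sec = 2.482
epochs = 4.444444444444445

steps = steps_per_sec * train_runtime             # ~500 optimizer steps
samples = samples_per_sec * train_runtime         # ~4000 examples processed in total
batch_per_step = samples_per_sec / steps_per_sec  # ~8 examples per optimizer step (interpretation)
split_size = samples / epochs                     # ~900 examples in the train split

print(round(steps), round(samples), round(batch_per_step), round(split_size))  # ~500 4000 8 900
```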
trainer_state.json ADDED
@@ -0,0 +1,792 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.444444444444445,
+ "eval_steps": 10,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.08888888888888889,
+ "grad_norm": 6.322239398956299,
+ "learning_rate": 4e-05,
+ "loss": 2.1156,
+ "step": 10
+ },
+ {
+ "epoch": 0.08888888888888889,
+ "eval_loss": 1.5894473791122437,
+ "eval_runtime": 8.0398,
+ "eval_samples_per_second": 12.438,
+ "eval_steps_per_second": 12.438,
+ "step": 10
+ },
+ {
+ "epoch": 0.17777777777777778,
+ "grad_norm": 4.342599391937256,
+ "learning_rate": 8e-05,
+ "loss": 1.1893,
+ "step": 20
+ },
+ {
+ "epoch": 0.17777777777777778,
+ "eval_loss": 0.6867849826812744,
+ "eval_runtime": 8.0305,
+ "eval_samples_per_second": 12.453,
+ "eval_steps_per_second": 12.453,
+ "step": 20
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 2.0583019256591797,
+ "learning_rate": 0.00012,
+ "loss": 0.5218,
+ "step": 30
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "eval_loss": 0.45551854372024536,
+ "eval_runtime": 8.0298,
+ "eval_samples_per_second": 12.454,
+ "eval_steps_per_second": 12.454,
+ "step": 30
+ },
+ {
+ "epoch": 0.35555555555555557,
+ "grad_norm": 1.83012056350708,
+ "learning_rate": 0.00016,
+ "loss": 0.5292,
+ "step": 40
+ },
+ {
+ "epoch": 0.35555555555555557,
+ "eval_loss": 0.3795148730278015,
+ "eval_runtime": 8.0289,
+ "eval_samples_per_second": 12.455,
+ "eval_steps_per_second": 12.455,
+ "step": 40
+ },
+ {
+ "epoch": 0.4444444444444444,
+ "grad_norm": 2.1445064544677734,
+ "learning_rate": 0.0002,
+ "loss": 0.3866,
+ "step": 50
+ },
+ {
+ "epoch": 0.4444444444444444,
+ "eval_loss": 0.30648669600486755,
+ "eval_runtime": 8.0213,
+ "eval_samples_per_second": 12.467,
+ "eval_steps_per_second": 12.467,
+ "step": 50
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 1.3674068450927734,
+ "learning_rate": 0.00019975640502598244,
+ "loss": 0.3232,
+ "step": 60
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "eval_loss": 0.20737296342849731,
+ "eval_runtime": 8.0166,
+ "eval_samples_per_second": 12.474,
+ "eval_steps_per_second": 12.474,
+ "step": 60
+ },
+ {
+ "epoch": 0.6222222222222222,
+ "grad_norm": 1.0751285552978516,
+ "learning_rate": 0.00019902680687415705,
+ "loss": 0.1802,
+ "step": 70
+ },
+ {
+ "epoch": 0.6222222222222222,
+ "eval_loss": 0.15315091609954834,
+ "eval_runtime": 8.025,
+ "eval_samples_per_second": 12.461,
+ "eval_steps_per_second": 12.461,
+ "step": 70
+ },
+ {
+ "epoch": 0.7111111111111111,
+ "grad_norm": 0.568341076374054,
+ "learning_rate": 0.00019781476007338058,
+ "loss": 0.21,
+ "step": 80
+ },
+ {
+ "epoch": 0.7111111111111111,
+ "eval_loss": 0.13480396568775177,
+ "eval_runtime": 8.0237,
+ "eval_samples_per_second": 12.463,
+ "eval_steps_per_second": 12.463,
+ "step": 80
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.9985604286193848,
+ "learning_rate": 0.0001961261695938319,
+ "loss": 0.158,
+ "step": 90
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 0.13721750676631927,
+ "eval_runtime": 8.0237,
+ "eval_samples_per_second": 12.463,
+ "eval_steps_per_second": 12.463,
+ "step": 90
+ },
+ {
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.8096336722373962,
+ "learning_rate": 0.00019396926207859084,
+ "loss": 0.1629,
+ "step": 100
+ },
+ {
+ "epoch": 0.8888888888888888,
+ "eval_loss": 0.12762245535850525,
+ "eval_runtime": 8.0161,
+ "eval_samples_per_second": 12.475,
+ "eval_steps_per_second": 12.475,
+ "step": 100
+ },
+ {
+ "epoch": 0.9777777777777777,
+ "grad_norm": 1.2173279523849487,
+ "learning_rate": 0.0001913545457642601,
+ "loss": 0.0966,
+ "step": 110
+ },
+ {
+ "epoch": 0.9777777777777777,
+ "eval_loss": 0.10031093657016754,
+ "eval_runtime": 8.0285,
+ "eval_samples_per_second": 12.456,
+ "eval_steps_per_second": 12.456,
+ "step": 110
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "grad_norm": 0.7404722571372986,
+ "learning_rate": 0.00018829475928589271,
+ "loss": 0.0643,
+ "step": 120
+ },
+ {
+ "epoch": 1.0666666666666667,
+ "eval_loss": 0.08794313669204712,
+ "eval_runtime": 8.0399,
+ "eval_samples_per_second": 12.438,
+ "eval_steps_per_second": 12.438,
+ "step": 120
+ },
+ {
+ "epoch": 1.1555555555555554,
+ "grad_norm": 0.9548615217208862,
+ "learning_rate": 0.0001848048096156426,
+ "loss": 0.0726,
+ "step": 130
+ },
+ {
+ "epoch": 1.1555555555555554,
+ "eval_loss": 0.08720792084932327,
+ "eval_runtime": 8.2778,
+ "eval_samples_per_second": 12.08,
+ "eval_steps_per_second": 12.08,
+ "step": 130
+ },
+ {
+ "epoch": 1.2444444444444445,
+ "grad_norm": 1.0252933502197266,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.0493,
+ "step": 140
+ },
+ {
+ "epoch": 1.2444444444444445,
+ "eval_loss": 0.09057007730007172,
+ "eval_runtime": 8.2874,
+ "eval_samples_per_second": 12.067,
+ "eval_steps_per_second": 12.067,
+ "step": 140
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 0.5175765156745911,
+ "learning_rate": 0.0001766044443118978,
+ "loss": 0.0746,
+ "step": 150
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "eval_loss": 0.058685798197984695,
+ "eval_runtime": 8.2384,
+ "eval_samples_per_second": 12.138,
+ "eval_steps_per_second": 12.138,
+ "step": 150
+ },
+ {
+ "epoch": 1.4222222222222223,
+ "grad_norm": 0.33382534980773926,
+ "learning_rate": 0.0001719339800338651,
+ "loss": 0.0473,
+ "step": 160
+ },
+ {
+ "epoch": 1.4222222222222223,
+ "eval_loss": 0.0560651533305645,
+ "eval_runtime": 8.1982,
+ "eval_samples_per_second": 12.198,
+ "eval_steps_per_second": 12.198,
+ "step": 160
+ },
+ {
+ "epoch": 1.511111111111111,
+ "grad_norm": 1.0913978815078735,
+ "learning_rate": 0.00016691306063588583,
+ "loss": 0.0644,
+ "step": 170
+ },
+ {
+ "epoch": 1.511111111111111,
+ "eval_loss": 0.05025744438171387,
+ "eval_runtime": 8.239,
+ "eval_samples_per_second": 12.137,
+ "eval_steps_per_second": 12.137,
+ "step": 170
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.8013315796852112,
+ "learning_rate": 0.0001615661475325658,
+ "loss": 0.0366,
+ "step": 180
+ },
+ {
+ "epoch": 1.6,
+ "eval_loss": 0.030684156343340874,
+ "eval_runtime": 8.2772,
+ "eval_samples_per_second": 12.081,
+ "eval_steps_per_second": 12.081,
+ "step": 180
+ },
+ {
+ "epoch": 1.6888888888888889,
+ "grad_norm": 0.2898242175579071,
+ "learning_rate": 0.0001559192903470747,
+ "loss": 0.0247,
+ "step": 190
+ },
+ {
+ "epoch": 1.6888888888888889,
+ "eval_loss": 0.023328043520450592,
+ "eval_runtime": 8.2724,
+ "eval_samples_per_second": 12.088,
+ "eval_steps_per_second": 12.088,
+ "step": 190
+ },
+ {
+ "epoch": 1.7777777777777777,
+ "grad_norm": 0.0667782798409462,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.01,
+ "step": 200
+ },
+ {
+ "epoch": 1.7777777777777777,
+ "eval_loss": 0.021545417606830597,
+ "eval_runtime": 8.2753,
+ "eval_samples_per_second": 12.084,
+ "eval_steps_per_second": 12.084,
+ "step": 200
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "grad_norm": 1.8411855697631836,
+ "learning_rate": 0.00014383711467890774,
+ "loss": 0.0393,
+ "step": 210
+ },
+ {
+ "epoch": 1.8666666666666667,
+ "eval_loss": 0.012232878245413303,
+ "eval_runtime": 8.275,
+ "eval_samples_per_second": 12.085,
+ "eval_steps_per_second": 12.085,
+ "step": 210
+ },
+ {
+ "epoch": 1.9555555555555557,
+ "grad_norm": 2.9970459938049316,
+ "learning_rate": 0.00013746065934159123,
+ "loss": 0.0299,
+ "step": 220
+ },
+ {
+ "epoch": 1.9555555555555557,
+ "eval_loss": 0.01798514649271965,
+ "eval_runtime": 8.2746,
+ "eval_samples_per_second": 12.085,
+ "eval_steps_per_second": 12.085,
+ "step": 220
+ },
+ {
+ "epoch": 2.0444444444444443,
+ "grad_norm": 1.0432640314102173,
+ "learning_rate": 0.00013090169943749476,
+ "loss": 0.0166,
+ "step": 230
+ },
+ {
+ "epoch": 2.0444444444444443,
+ "eval_loss": 0.008207106962800026,
+ "eval_runtime": 8.2792,
+ "eval_samples_per_second": 12.078,
+ "eval_steps_per_second": 12.078,
+ "step": 230
+ },
+ {
+ "epoch": 2.1333333333333333,
+ "grad_norm": 1.9301716089248657,
+ "learning_rate": 0.00012419218955996676,
+ "loss": 0.0319,
+ "step": 240
+ },
+ {
+ "epoch": 2.1333333333333333,
+ "eval_loss": 0.008276881650090218,
+ "eval_runtime": 8.0782,
+ "eval_samples_per_second": 12.379,
+ "eval_steps_per_second": 12.379,
+ "step": 240
+ },
+ {
+ "epoch": 2.2222222222222223,
+ "grad_norm": 0.07122544944286346,
+ "learning_rate": 0.00011736481776669306,
+ "loss": 0.0077,
+ "step": 250
+ },
+ {
+ "epoch": 2.2222222222222223,
+ "eval_loss": 0.007150276098400354,
+ "eval_runtime": 8.0319,
+ "eval_samples_per_second": 12.45,
+ "eval_steps_per_second": 12.45,
+ "step": 250
+ },
+ {
+ "epoch": 2.311111111111111,
+ "grad_norm": 0.16882538795471191,
+ "learning_rate": 0.00011045284632676536,
+ "loss": 0.0141,
+ "step": 260
+ },
+ {
+ "epoch": 2.311111111111111,
+ "eval_loss": 0.003109171986579895,
+ "eval_runtime": 8.0309,
+ "eval_samples_per_second": 12.452,
+ "eval_steps_per_second": 12.452,
+ "step": 260
+ },
+ {
+ "epoch": 2.4,
+ "grad_norm": 0.037282537668943405,
+ "learning_rate": 0.00010348994967025012,
+ "loss": 0.0017,
+ "step": 270
+ },
+ {
+ "epoch": 2.4,
+ "eval_loss": 0.012033730745315552,
+ "eval_runtime": 8.0265,
+ "eval_samples_per_second": 12.459,
+ "eval_steps_per_second": 12.459,
+ "step": 270
+ },
+ {
+ "epoch": 2.488888888888889,
+ "grad_norm": 0.022591086104512215,
+ "learning_rate": 9.651005032974994e-05,
+ "loss": 0.0015,
+ "step": 280
+ },
+ {
+ "epoch": 2.488888888888889,
+ "eval_loss": 0.015280201099812984,
+ "eval_runtime": 8.0184,
+ "eval_samples_per_second": 12.471,
+ "eval_steps_per_second": 12.471,
+ "step": 280
+ },
+ {
+ "epoch": 2.5777777777777775,
+ "grad_norm": 1.2939496040344238,
+ "learning_rate": 8.954715367323468e-05,
+ "loss": 0.0126,
+ "step": 290
+ },
+ {
+ "epoch": 2.5777777777777775,
+ "eval_loss": 0.01406156551092863,
+ "eval_runtime": 8.0171,
+ "eval_samples_per_second": 12.473,
+ "eval_steps_per_second": 12.473,
+ "step": 290
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 0.009937470778822899,
+ "learning_rate": 8.263518223330697e-05,
+ "loss": 0.0043,
+ "step": 300
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "eval_loss": 0.0021963752806186676,
+ "eval_runtime": 8.0193,
+ "eval_samples_per_second": 12.47,
+ "eval_steps_per_second": 12.47,
+ "step": 300
+ },
+ {
+ "epoch": 2.7555555555555555,
+ "grad_norm": 0.022694729268550873,
+ "learning_rate": 7.580781044003324e-05,
+ "loss": 0.0068,
+ "step": 310
+ },
+ {
+ "epoch": 2.7555555555555555,
+ "eval_loss": 0.001871286309324205,
+ "eval_runtime": 8.0241,
+ "eval_samples_per_second": 12.463,
+ "eval_steps_per_second": 12.463,
+ "step": 310
+ },
+ {
+ "epoch": 2.8444444444444446,
+ "grad_norm": 0.11103329062461853,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 0.0018,
+ "step": 320
+ },
+ {
+ "epoch": 2.8444444444444446,
+ "eval_loss": 0.002184124430641532,
+ "eval_runtime": 8.0123,
+ "eval_samples_per_second": 12.481,
+ "eval_steps_per_second": 12.481,
+ "step": 320
+ },
+ {
+ "epoch": 2.9333333333333336,
+ "grad_norm": 0.02097630314528942,
+ "learning_rate": 6.25393406584088e-05,
+ "loss": 0.0026,
+ "step": 330
+ },
+ {
+ "epoch": 2.9333333333333336,
+ "eval_loss": 0.0034216546919196844,
+ "eval_runtime": 8.0164,
+ "eval_samples_per_second": 12.474,
+ "eval_steps_per_second": 12.474,
+ "step": 330
+ },
+ {
+ "epoch": 3.022222222222222,
+ "grad_norm": 0.0426625981926918,
+ "learning_rate": 5.616288532109225e-05,
+ "loss": 0.0017,
+ "step": 340
+ },
+ {
+ "epoch": 3.022222222222222,
+ "eval_loss": 0.007565508596599102,
+ "eval_runtime": 8.0255,
+ "eval_samples_per_second": 12.46,
+ "eval_steps_per_second": 12.46,
+ "step": 340
+ },
+ {
+ "epoch": 3.111111111111111,
+ "grad_norm": 0.0036587081849575043,
+ "learning_rate": 5.000000000000002e-05,
+ "loss": 0.0002,
+ "step": 350
+ },
+ {
+ "epoch": 3.111111111111111,
+ "eval_loss": 0.010203778743743896,
+ "eval_runtime": 8.017,
+ "eval_samples_per_second": 12.473,
+ "eval_steps_per_second": 12.473,
+ "step": 350
+ },
+ {
+ "epoch": 3.2,
+ "grad_norm": 0.008132525719702244,
+ "learning_rate": 4.4080709652925336e-05,
+ "loss": 0.0004,
+ "step": 360
+ },
+ {
+ "epoch": 3.2,
+ "eval_loss": 0.011154056526720524,
+ "eval_runtime": 8.0288,
+ "eval_samples_per_second": 12.455,
+ "eval_steps_per_second": 12.455,
+ "step": 360
+ },
+ {
+ "epoch": 3.2888888888888888,
+ "grad_norm": 0.00727389520034194,
+ "learning_rate": 3.843385246743417e-05,
+ "loss": 0.006,
+ "step": 370
+ },
+ {
+ "epoch": 3.2888888888888888,
+ "eval_loss": 0.00937813799828291,
+ "eval_runtime": 8.0273,
+ "eval_samples_per_second": 12.458,
+ "eval_steps_per_second": 12.458,
+ "step": 370
+ },
+ {
+ "epoch": 3.3777777777777778,
+ "grad_norm": 0.004803340416401625,
+ "learning_rate": 3.308693936411421e-05,
+ "loss": 0.0003,
+ "step": 380
+ },
+ {
+ "epoch": 3.3777777777777778,
+ "eval_loss": 0.007455301936715841,
+ "eval_runtime": 8.0254,
+ "eval_samples_per_second": 12.46,
+ "eval_steps_per_second": 12.46,
+ "step": 380
+ },
+ {
+ "epoch": 3.466666666666667,
+ "grad_norm": 0.005293034482747316,
+ "learning_rate": 2.8066019966134904e-05,
+ "loss": 0.0003,
+ "step": 390
+ },
+ {
+ "epoch": 3.466666666666667,
+ "eval_loss": 0.006887929514050484,
+ "eval_runtime": 8.0268,
+ "eval_samples_per_second": 12.458,
+ "eval_steps_per_second": 12.458,
+ "step": 390
+ },
+ {
+ "epoch": 3.5555555555555554,
+ "grad_norm": 0.01216947752982378,
+ "learning_rate": 2.339555568810221e-05,
+ "loss": 0.0002,
+ "step": 400
+ },
+ {
+ "epoch": 3.5555555555555554,
+ "eval_loss": 0.006745634134858847,
+ "eval_runtime": 8.032,
+ "eval_samples_per_second": 12.45,
+ "eval_steps_per_second": 12.45,
+ "step": 400
+ },
+ {
+ "epoch": 3.6444444444444444,
+ "grad_norm": 0.008572138845920563,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 0.0005,
+ "step": 410
+ },
+ {
+ "epoch": 3.6444444444444444,
+ "eval_loss": 0.006609635427594185,
+ "eval_runtime": 8.0332,
+ "eval_samples_per_second": 12.448,
+ "eval_steps_per_second": 12.448,
+ "step": 410
+ },
+ {
+ "epoch": 3.7333333333333334,
+ "grad_norm": 0.004443774465471506,
+ "learning_rate": 1.5195190384357404e-05,
+ "loss": 0.0003,
+ "step": 420
+ },
+ {
+ "epoch": 3.7333333333333334,
+ "eval_loss": 0.007157918065786362,
+ "eval_runtime": 8.039,
+ "eval_samples_per_second": 12.439,
+ "eval_steps_per_second": 12.439,
+ "step": 420
+ },
+ {
+ "epoch": 3.822222222222222,
+ "grad_norm": 0.0011405730620026588,
+ "learning_rate": 1.1705240714107302e-05,
+ "loss": 0.0037,
+ "step": 430
+ },
+ {
+ "epoch": 3.822222222222222,
+ "eval_loss": 0.006302958354353905,
+ "eval_runtime": 8.0294,
+ "eval_samples_per_second": 12.454,
+ "eval_steps_per_second": 12.454,
+ "step": 430
+ },
+ {
+ "epoch": 3.911111111111111,
+ "grad_norm": 0.004764024633914232,
+ "learning_rate": 8.645454235739903e-06,
+ "loss": 0.004,
+ "step": 440
+ },
+ {
+ "epoch": 3.911111111111111,
+ "eval_loss": 0.005341523326933384,
+ "eval_runtime": 8.0207,
+ "eval_samples_per_second": 12.468,
+ "eval_steps_per_second": 12.468,
+ "step": 440
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.004020992666482925,
+ "learning_rate": 6.030737921409169e-06,
+ "loss": 0.0003,
+ "step": 450
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 0.005209792871028185,
+ "eval_runtime": 8.031,
+ "eval_samples_per_second": 12.452,
+ "eval_steps_per_second": 12.452,
+ "step": 450
+ },
+ {
+ "epoch": 4.088888888888889,
+ "grad_norm": 0.013617518357932568,
+ "learning_rate": 3.873830406168111e-06,
+ "loss": 0.0002,
+ "step": 460
+ },
+ {
+ "epoch": 4.088888888888889,
+ "eval_loss": 0.005074501037597656,
+ "eval_runtime": 8.0352,
+ "eval_samples_per_second": 12.445,
+ "eval_steps_per_second": 12.445,
+ "step": 460
+ },
+ {
+ "epoch": 4.177777777777778,
+ "grad_norm": 0.003857893170788884,
+ "learning_rate": 2.1852399266194314e-06,
+ "loss": 0.0002,
+ "step": 470
+ },
+ {
+ "epoch": 4.177777777777778,
+ "eval_loss": 0.004982742480933666,
+ "eval_runtime": 8.0284,
+ "eval_samples_per_second": 12.456,
+ "eval_steps_per_second": 12.456,
+ "step": 470
+ },
+ {
+ "epoch": 4.266666666666667,
+ "grad_norm": 0.006583555601537228,
+ "learning_rate": 9.731931258429638e-07,
+ "loss": 0.0006,
+ "step": 480
+ },
+ {
+ "epoch": 4.266666666666667,
+ "eval_loss": 0.004874282516539097,
+ "eval_runtime": 8.0132,
+ "eval_samples_per_second": 12.479,
+ "eval_steps_per_second": 12.479,
+ "step": 480
+ },
+ {
+ "epoch": 4.355555555555555,
+ "grad_norm": 0.004742850083857775,
+ "learning_rate": 2.4359497401758024e-07,
+ "loss": 0.0005,
+ "step": 490
+ },
+ {
+ "epoch": 4.355555555555555,
+ "eval_loss": 0.004786411300301552,
+ "eval_runtime": 8.0341,
+ "eval_samples_per_second": 12.447,
+ "eval_steps_per_second": 12.447,
+ "step": 490
+ },
+ {
+ "epoch": 4.444444444444445,
+ "grad_norm": 0.010840805247426033,
+ "learning_rate": 0.0,
+ "loss": 0.0002,
+ "step": 500
+ },
+ {
+ "epoch": 4.444444444444445,
+ "eval_loss": 0.005105508491396904,
+ "eval_runtime": 8.0277,
+ "eval_samples_per_second": 12.457,
+ "eval_steps_per_second": 12.457,
+ "step": 500
+ },
+ {
+ "epoch": 4.444444444444445,
+ "step": 500,
+ "total_flos": 1.038706354200576e+16,
+ "train_loss": 0.1301481350355316,
+ "train_runtime": 1611.4177,
+ "train_samples_per_second": 2.482,
+ "train_steps_per_second": 0.31
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 500,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.038706354200576e+16,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
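The learning_rate column in the log above is consistent with a linear warmup over the first 50 steps to a peak of 2e-4 followed by cosine decay to zero at step 500. The sketch below reproduces a few of the logged values under that assumed schedule; the schedule parameters are inferred from the log, not stated in the state file.

```python
import math

PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 2e-4, 50, 500  # inferred from the logged values

def lr_at(step: int) -> float:
    """Linear warmup to PEAK_LR, then cosine decay to zero (assumed schedule)."""
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

for s in (10, 60, 200, 350, 500):
    print(s, lr_at(s))
# approx. 4e-05, 1.9976e-04, 1.5e-04, 5e-05, 0.0 -- close to the logged learning_rate values
```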
training_eval_loss.png ADDED
training_loss.png ADDED