SimuIation committed on
Commit 76683c3 · verified · 1 Parent(s): d54d208

Upload folder using huggingface_hub

checkpoint-1080/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "_name_or_path": "gpt2",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.2",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
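
The config above is the stock GPT-2 small configuration (12 layers, 12 heads, 768-dim embeddings, 50257-token vocabulary) with a language-modeling head. A minimal loading sketch follows; the local path `checkpoint-1080` is an assumption (a local clone of this repo with the LFS weights fetched), not something stated in the commit itself.

```python
# Minimal sketch: load the GPT-2 LM checkpoint described by this config.
# Assumes a local clone of this repo with LFS weights pulled; the path is illustrative.
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config.from_pretrained("checkpoint-1080")
model = GPT2LMHeadModel.from_pretrained("checkpoint-1080", config=config)
print(model.num_parameters())  # roughly 124M parameters for n_layer=12, n_embd=768
```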
checkpoint-1080/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 50256,
+ "eos_token_id": 50256,
+ "transformers_version": "4.41.2"
+ }
checkpoint-1080/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8d22586f32398abba71287fc9fbd27f68a106fb4da5a21da85e439f4ea68d69
+ size 497774208
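
This entry (like the `.pt`, `.pth` and `.bin` files below) is a Git LFS pointer rather than the binary itself; the actual weights are fetched with `git lfs pull` or via `huggingface_hub`. A small sketch for checking a downloaded file against the pointer's `oid` and `size` follows; the local path is an assumption.

```python
# Minimal sketch: verify a downloaded LFS object against the pointer above.
# Assumes checkpoint-1080/model.safetensors has already been fetched locally.
import hashlib
import os

path = "checkpoint-1080/model.safetensors"
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == "a8d22586f32398abba71287fc9fbd27f68a106fb4da5a21da85e439f4ea68d69"
assert os.path.getsize(path) == 497774208
print("model.safetensors matches the LFS pointer")
```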
checkpoint-1080/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8edcb57babe79aa1e063b9c16387aa7ebed4e6897e63f0b3c4ba6ad1a9a1612f
+ size 995642298
checkpoint-1080/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79779268804a95867176b02165dcf4317af5dbf20937682f497d224c91733bf4
+ size 14244
checkpoint-1080/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:143a5b3812f11a0ce92f4896d57f598a2f196c87d5b52f7101c582a4b86122c2
+ size 1064
checkpoint-1080/trainer_state.json ADDED
@@ -0,0 +1,633 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 30.0,
+ "eval_steps": 500,
+ "global_step": 1080,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 6.0880818367004395,
+ "learning_rate": 3.6e-05,
+ "loss": 3.2849,
+ "step": 36
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.010123239436619719,
+ "eval_bleu": 0.10518697359828132,
+ "eval_f1": 0.010153581748448041,
+ "eval_loss": 2.6219797134399414,
+ "eval_precision": 0.010202794685557478,
+ "eval_recall": 0.010123239436619719,
+ "eval_runtime": 6.2453,
+ "eval_samples_per_second": 22.737,
+ "eval_steps_per_second": 2.882,
+ "step": 36
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 6.915263652801514,
+ "learning_rate": 4.893203883495146e-05,
+ "loss": 2.2637,
+ "step": 72
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.01248899647887324,
+ "eval_bleu": 0.13792868891001447,
+ "eval_f1": 0.010680838430589857,
+ "eval_loss": 2.017970561981201,
+ "eval_precision": 0.009509030569956453,
+ "eval_recall": 0.012488996478873238,
+ "eval_runtime": 7.0653,
+ "eval_samples_per_second": 20.098,
+ "eval_steps_per_second": 2.548,
+ "step": 72
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 7.544444561004639,
+ "learning_rate": 4.718446601941748e-05,
+ "loss": 1.789,
+ "step": 108
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.012654049295774648,
+ "eval_bleu": 0.17566093139607744,
+ "eval_f1": 0.011466852239797607,
+ "eval_loss": 1.7966645956039429,
+ "eval_precision": 0.01049697288064746,
+ "eval_recall": 0.012654049295774647,
+ "eval_runtime": 6.0435,
+ "eval_samples_per_second": 23.496,
+ "eval_steps_per_second": 2.978,
+ "step": 108
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 6.955804347991943,
+ "learning_rate": 4.543689320388349e-05,
+ "loss": 1.5522,
+ "step": 144
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.012378961267605635,
+ "eval_bleu": 0.20413318737157596,
+ "eval_f1": 0.011132478900133054,
+ "eval_loss": 1.6657302379608154,
+ "eval_precision": 0.010120661403149588,
+ "eval_recall": 0.012378961267605633,
+ "eval_runtime": 6.1905,
+ "eval_samples_per_second": 22.938,
+ "eval_steps_per_second": 2.908,
+ "step": 144
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 7.229798793792725,
+ "learning_rate": 4.368932038834951e-05,
+ "loss": 1.3885,
+ "step": 180
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.012378961267605635,
+ "eval_bleu": 0.2305940937780942,
+ "eval_f1": 0.011073979352246164,
+ "eval_loss": 1.5827170610427856,
+ "eval_precision": 0.010022195626080541,
+ "eval_recall": 0.012378961267605633,
+ "eval_runtime": 6.9109,
+ "eval_samples_per_second": 20.547,
+ "eval_steps_per_second": 2.605,
+ "step": 180
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 6.714682102203369,
+ "learning_rate": 4.194174757281554e-05,
+ "loss": 1.2561,
+ "step": 216
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.25012821385731565,
+ "eval_f1": 0.01143102719262594,
+ "eval_loss": 1.5294432640075684,
+ "eval_precision": 0.010707247710344536,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.5697,
+ "eval_samples_per_second": 21.614,
+ "eval_steps_per_second": 2.74,
+ "step": 216
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 6.063908100128174,
+ "learning_rate": 4.019417475728156e-05,
+ "loss": 1.1466,
+ "step": 252
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.2719546447213355,
+ "eval_f1": 0.01116903184495446,
+ "eval_loss": 1.4926397800445557,
+ "eval_precision": 0.010253161244814542,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.4161,
+ "eval_samples_per_second": 22.132,
+ "eval_steps_per_second": 2.805,
+ "step": 252
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 6.155008316040039,
+ "learning_rate": 3.844660194174757e-05,
+ "loss": 1.0616,
+ "step": 288
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.012378961267605635,
+ "eval_bleu": 0.28742380325292655,
+ "eval_f1": 0.01144092667248244,
+ "eval_loss": 1.4650629758834839,
+ "eval_precision": 0.010640060069611993,
+ "eval_recall": 0.012378961267605633,
+ "eval_runtime": 6.3235,
+ "eval_samples_per_second": 22.456,
+ "eval_steps_per_second": 2.847,
+ "step": 288
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 5.874176979064941,
+ "learning_rate": 3.6699029126213596e-05,
+ "loss": 0.9821,
+ "step": 324
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.30478814512361185,
+ "eval_f1": 0.011331946253740536,
+ "eval_loss": 1.4492626190185547,
+ "eval_precision": 0.010531151474565863,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.2565,
+ "eval_samples_per_second": 22.696,
+ "eval_steps_per_second": 2.877,
+ "step": 324
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 5.705406665802002,
+ "learning_rate": 3.4951456310679615e-05,
+ "loss": 0.9071,
+ "step": 360
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.3250108739654091,
+ "eval_f1": 0.011214281037053235,
+ "eval_loss": 1.4369895458221436,
+ "eval_precision": 0.010327542425543311,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.1165,
+ "eval_samples_per_second": 23.216,
+ "eval_steps_per_second": 2.943,
+ "step": 360
+ },
+ {
+ "epoch": 11.0,
+ "grad_norm": 5.345541000366211,
+ "learning_rate": 3.3203883495145634e-05,
+ "loss": 0.8445,
+ "step": 396
+ },
+ {
+ "epoch": 11.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.32825465058506376,
+ "eval_f1": 0.011419929314754076,
+ "eval_loss": 1.4337056875228882,
+ "eval_precision": 0.010683679060359908,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.9862,
+ "eval_samples_per_second": 20.326,
+ "eval_steps_per_second": 2.577,
+ "step": 396
+ },
+ {
+ "epoch": 12.0,
+ "grad_norm": 4.928064346313477,
+ "learning_rate": 3.145631067961165e-05,
+ "loss": 0.7869,
+ "step": 432
+ },
+ {
+ "epoch": 12.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.34315751189373966,
+ "eval_f1": 0.011425905613358798,
+ "eval_loss": 1.431230902671814,
+ "eval_precision": 0.010692912514740159,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.3375,
+ "eval_samples_per_second": 22.406,
+ "eval_steps_per_second": 2.84,
+ "step": 432
+ },
+ {
+ "epoch": 13.0,
+ "grad_norm": 4.787758827209473,
+ "learning_rate": 2.9708737864077673e-05,
+ "loss": 0.7388,
+ "step": 468
+ },
+ {
+ "epoch": 13.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.3506530097105922,
+ "eval_f1": 0.011567695105672023,
+ "eval_loss": 1.432782530784607,
+ "eval_precision": 0.010942929955906473,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.6203,
+ "eval_samples_per_second": 21.449,
+ "eval_steps_per_second": 2.719,
+ "step": 468
+ },
+ {
+ "epoch": 14.0,
+ "grad_norm": 4.721966743469238,
+ "learning_rate": 2.7961165048543692e-05,
+ "loss": 0.6948,
+ "step": 504
+ },
+ {
+ "epoch": 14.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.36203576750790184,
+ "eval_f1": 0.011535961695115268,
+ "eval_loss": 1.4401381015777588,
+ "eval_precision": 0.010887377543860504,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.8054,
+ "eval_samples_per_second": 20.866,
+ "eval_steps_per_second": 2.645,
+ "step": 504
+ },
+ {
+ "epoch": 15.0,
+ "grad_norm": 3.970003128051758,
+ "learning_rate": 2.6213592233009708e-05,
+ "loss": 0.646,
+ "step": 540
+ },
+ {
+ "epoch": 15.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.37067819665088503,
+ "eval_f1": 0.011355838282605724,
+ "eval_loss": 1.4626511335372925,
+ "eval_precision": 0.010738384191565671,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.6898,
+ "eval_samples_per_second": 21.226,
+ "eval_steps_per_second": 2.691,
+ "step": 540
+ },
+ {
+ "epoch": 16.0,
+ "grad_norm": 3.967465877532959,
+ "learning_rate": 2.446601941747573e-05,
+ "loss": 0.6137,
+ "step": 576
+ },
+ {
+ "epoch": 16.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.3721186663899375,
+ "eval_f1": 0.01150499351512691,
+ "eval_loss": 1.460695505142212,
+ "eval_precision": 0.01100817275358665,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.1473,
+ "eval_samples_per_second": 23.1,
+ "eval_steps_per_second": 2.928,
+ "step": 576
+ },
+ {
+ "epoch": 17.0,
+ "grad_norm": 3.9431395530700684,
+ "learning_rate": 2.2718446601941746e-05,
+ "loss": 0.5833,
+ "step": 612
+ },
+ {
+ "epoch": 17.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.3782352067209992,
+ "eval_f1": 0.011355838282605724,
+ "eval_loss": 1.4740684032440186,
+ "eval_precision": 0.010738384191565671,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.2111,
+ "eval_samples_per_second": 22.862,
+ "eval_steps_per_second": 2.898,
+ "step": 612
+ },
+ {
+ "epoch": 18.0,
+ "grad_norm": 4.299834251403809,
+ "learning_rate": 2.097087378640777e-05,
+ "loss": 0.5523,
+ "step": 648
+ },
+ {
+ "epoch": 18.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.38158208167842117,
+ "eval_f1": 0.011454837538514543,
+ "eval_loss": 1.4804751873016357,
+ "eval_precision": 0.010916734980993936,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.2563,
+ "eval_samples_per_second": 22.697,
+ "eval_steps_per_second": 2.877,
+ "step": 648
+ },
+ {
+ "epoch": 19.0,
+ "grad_norm": 3.584228038787842,
+ "learning_rate": 1.9223300970873785e-05,
+ "loss": 0.5282,
+ "step": 684
+ },
+ {
+ "epoch": 19.0,
+ "eval_accuracy": 0.012268926056338027,
+ "eval_bleu": 0.38941442914279584,
+ "eval_f1": 0.011665531630013295,
+ "eval_loss": 1.4897931814193726,
+ "eval_precision": 0.011119728166708294,
+ "eval_recall": 0.012268926056338027,
+ "eval_runtime": 6.1705,
+ "eval_samples_per_second": 23.013,
+ "eval_steps_per_second": 2.917,
+ "step": 684
+ },
+ {
+ "epoch": 20.0,
+ "grad_norm": 3.575045108795166,
+ "learning_rate": 1.7475728155339808e-05,
+ "loss": 0.5047,
+ "step": 720
+ },
+ {
+ "epoch": 20.0,
+ "eval_accuracy": 0.01210387323943662,
+ "eval_bleu": 0.3904044704756756,
+ "eval_f1": 0.011583122303403293,
+ "eval_loss": 1.4997267723083496,
+ "eval_precision": 0.011105397197031464,
+ "eval_recall": 0.01210387323943662,
+ "eval_runtime": 6.3359,
+ "eval_samples_per_second": 22.412,
+ "eval_steps_per_second": 2.841,
+ "step": 720
+ },
+ {
+ "epoch": 21.0,
+ "grad_norm": 3.5939736366271973,
+ "learning_rate": 1.5728155339805823e-05,
+ "loss": 0.4854,
+ "step": 756
+ },
+ {
+ "epoch": 21.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.38824458290957736,
+ "eval_f1": 0.011442367439933718,
+ "eval_loss": 1.5054779052734375,
+ "eval_precision": 0.010894114653359857,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.2963,
+ "eval_samples_per_second": 22.553,
+ "eval_steps_per_second": 2.859,
+ "step": 756
+ },
+ {
+ "epoch": 22.0,
+ "grad_norm": 3.807431697845459,
+ "learning_rate": 1.3980582524271846e-05,
+ "loss": 0.4697,
+ "step": 792
+ },
+ {
+ "epoch": 22.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.3905756115063895,
+ "eval_f1": 0.011442367439933718,
+ "eval_loss": 1.5122706890106201,
+ "eval_precision": 0.010894114653359857,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 7.012,
+ "eval_samples_per_second": 20.251,
+ "eval_steps_per_second": 2.567,
+ "step": 792
+ },
+ {
+ "epoch": 23.0,
+ "grad_norm": 3.8207340240478516,
+ "learning_rate": 1.2233009708737865e-05,
+ "loss": 0.4547,
+ "step": 828
+ },
+ {
+ "epoch": 23.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.39316674027140547,
+ "eval_f1": 0.011517601862123769,
+ "eval_loss": 1.5183604955673218,
+ "eval_precision": 0.011031274351852924,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.8261,
+ "eval_samples_per_second": 20.803,
+ "eval_steps_per_second": 2.637,
+ "step": 828
+ },
+ {
+ "epoch": 24.0,
+ "grad_norm": 3.831852912902832,
+ "learning_rate": 1.0485436893203885e-05,
+ "loss": 0.4413,
+ "step": 864
+ },
+ {
+ "epoch": 24.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.3956605229111985,
+ "eval_f1": 0.011392759914288347,
+ "eval_loss": 1.5295231342315674,
+ "eval_precision": 0.010804571945629947,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.289,
+ "eval_samples_per_second": 22.579,
+ "eval_steps_per_second": 2.862,
+ "step": 864
+ },
+ {
+ "epoch": 25.0,
+ "grad_norm": 3.056539297103882,
+ "learning_rate": 8.737864077669904e-06,
+ "loss": 0.4325,
+ "step": 900
+ },
+ {
+ "epoch": 25.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.39496386454550636,
+ "eval_f1": 0.0114798602280593,
+ "eval_loss": 1.535691738128662,
+ "eval_precision": 0.010962261366716413,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.1059,
+ "eval_samples_per_second": 23.256,
+ "eval_steps_per_second": 2.948,
+ "step": 900
+ },
+ {
+ "epoch": 26.0,
+ "grad_norm": 3.1146154403686523,
+ "learning_rate": 6.990291262135923e-06,
+ "loss": 0.4188,
+ "step": 936
+ },
+ {
+ "epoch": 26.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.3960440421138887,
+ "eval_f1": 0.011517601862123769,
+ "eval_loss": 1.5396318435668945,
+ "eval_precision": 0.011031274351852924,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 7.2756,
+ "eval_samples_per_second": 19.517,
+ "eval_steps_per_second": 2.474,
+ "step": 936
+ },
+ {
+ "epoch": 27.0,
+ "grad_norm": 3.4233100414276123,
+ "learning_rate": 5.242718446601942e-06,
+ "loss": 0.4131,
+ "step": 972
+ },
+ {
+ "epoch": 27.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.4001388307186371,
+ "eval_f1": 0.011492413001125629,
+ "eval_loss": 1.5463387966156006,
+ "eval_precision": 0.010985168630418547,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 7.2564,
+ "eval_samples_per_second": 19.569,
+ "eval_steps_per_second": 2.481,
+ "step": 972
+ },
+ {
+ "epoch": 28.0,
+ "grad_norm": 2.70424485206604,
+ "learning_rate": 3.4951456310679615e-06,
+ "loss": 0.4089,
+ "step": 1008
+ },
+ {
+ "epoch": 28.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.39854032138633055,
+ "eval_f1": 0.011492413001125629,
+ "eval_loss": 1.544076681137085,
+ "eval_precision": 0.010985168630418547,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 7.0435,
+ "eval_samples_per_second": 20.16,
+ "eval_steps_per_second": 2.556,
+ "step": 1008
+ },
+ {
+ "epoch": 29.0,
+ "grad_norm": 3.052654266357422,
+ "learning_rate": 1.7475728155339808e-06,
+ "loss": 0.4026,
+ "step": 1044
+ },
+ {
+ "epoch": 29.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.3987203294842987,
+ "eval_f1": 0.01150499351512691,
+ "eval_loss": 1.5447367429733276,
+ "eval_precision": 0.01100817275358665,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.3248,
+ "eval_samples_per_second": 22.451,
+ "eval_steps_per_second": 2.846,
+ "step": 1044
+ },
+ {
+ "epoch": 30.0,
+ "grad_norm": 3.11586856842041,
+ "learning_rate": 0.0,
+ "loss": 0.3999,
+ "step": 1080
+ },
+ {
+ "epoch": 30.0,
+ "eval_accuracy": 0.012048855633802816,
+ "eval_bleu": 0.398637528263251,
+ "eval_f1": 0.011517601862123769,
+ "eval_loss": 1.5456310510635376,
+ "eval_precision": 0.011031274351852924,
+ "eval_recall": 0.012048855633802815,
+ "eval_runtime": 6.2921,
+ "eval_samples_per_second": 22.568,
+ "eval_steps_per_second": 2.861,
+ "step": 1080
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1080,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 30,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1113104056320000.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
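
trainer_state.json records one training entry and one evaluation entry per epoch (36 optimizer steps per epoch, 1080 steps over 30 epochs). A minimal sketch for pulling the per-epoch evaluation metrics out of `log_history` is shown below; it only assumes a local copy of the file.

```python
# Minimal sketch: read per-epoch eval metrics from trainer_state.json.
# Assumes a local copy of checkpoint-1080/trainer_state.json.
import json

with open("checkpoint-1080/trainer_state.json") as f:
    state = json.load(f)

# Entries containing "eval_loss" are the evaluation logs; the others are training logs.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:4.1f}  "
              f"eval_loss {entry['eval_loss']:.4f}  "
              f"eval_bleu {entry['eval_bleu']:.3f}")
```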
checkpoint-1080/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f557e25922091b17e468d8ac6bc55cff0ff7201c8dce20c9bd3ed8370a6bba5
+ size 5048
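
training_args.bin is the pickled `TrainingArguments` object saved alongside the optimizer, scheduler and RNG state, which is what lets `Trainer` resume from this directory. A hedged sketch of inspecting it is below; loading requires transformers to be importable, and `weights_only=False` is needed on recent PyTorch versions because the file is a full pickle rather than a tensor archive.

```python
# Minimal sketch: inspect the saved TrainingArguments from this checkpoint.
# Assumes a local copy of checkpoint-1080/ and that transformers is installed.
import torch

args = torch.load("checkpoint-1080/training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)

# Resuming would go through a fully configured Trainer, e.g.:
#   trainer.train(resume_from_checkpoint="checkpoint-1080")
# (the Trainer/dataset setup is not part of this repo and is assumed here)
```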
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "_name_or_path": "gpt2",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.2",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 50256,
+ "eos_token_id": 50256,
+ "transformers_version": "4.41.2"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8d22586f32398abba71287fc9fbd27f68a106fb4da5a21da85e439f4ea68d69
+ size 497774208
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "bos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "50256": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "errors": "replace",
+ "model_max_length": 1024,
+ "pad_token": null,
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
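
The repo root carries everything needed for inference: the model weights plus the GPT-2 tokenizer files (`vocab.json`, `merges.txt`, `tokenizer_config.json`, `special_tokens_map.json`). A minimal generation sketch is shown below, using the sampling settings from `task_specific_params` in config.json (`do_sample=true`, `max_length=50`); the local path `.` and the prompt are assumptions, not part of this commit.

```python
# Minimal sketch: generate text from the repo root with the settings in config.json.
# Assumes a local clone of this repo with LFS weights pulled; path and prompt are illustrative.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained(".")
model = GPT2LMHeadModel.from_pretrained(".")

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,               # from task_specific_params.text-generation
    max_length=50,                # from task_specific_params.text-generation
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```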