{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.31391014322150285,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015695507161075144,
      "grad_norm": 0.04355761408805847,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.845781326293945,
      "logits/rejected": 14.576438903808594,
      "logps/chosen": -0.31864267587661743,
      "logps/rejected": -0.24545662105083466,
      "loss": 1.0492,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.47796401381492615,
      "rewards/margins": -0.10977902263402939,
      "rewards/rejected": -0.3681849539279938,
      "step": 10
    },
    {
      "epoch": 0.03139101432215029,
      "grad_norm": 0.04919258877635002,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 15.27595043182373,
      "logits/rejected": 14.872761726379395,
      "logps/chosen": -0.3344747722148895,
      "logps/rejected": -0.24258682131767273,
      "loss": 1.0487,
      "rewards/accuracies": 0.16249999403953552,
      "rewards/chosen": -0.5017121434211731,
      "rewards/margins": -0.1378319263458252,
      "rewards/rejected": -0.3638802468776703,
      "step": 20
    },
    {
      "epoch": 0.047086521483225424,
      "grad_norm": 0.049933061003685,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 15.913165092468262,
      "logits/rejected": 15.607622146606445,
      "logps/chosen": -0.3440183997154236,
      "logps/rejected": -0.2831566333770752,
      "loss": 1.0405,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.516027569770813,
      "rewards/margins": -0.09129264950752258,
      "rewards/rejected": -0.4247349202632904,
      "step": 30
    },
    {
      "epoch": 0.06278202864430057,
      "grad_norm": 0.05693503096699715,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 15.402900695800781,
      "logits/rejected": 14.99272632598877,
      "logps/chosen": -0.3297731578350067,
      "logps/rejected": -0.2746916711330414,
      "loss": 1.0369,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.49465981125831604,
      "rewards/margins": -0.08262218534946442,
      "rewards/rejected": -0.41203755140304565,
      "step": 40
    },
    {
      "epoch": 0.07847753580537571,
      "grad_norm": 0.05467928573489189,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 15.64543342590332,
      "logits/rejected": 15.632547378540039,
      "logps/chosen": -0.30952686071395874,
      "logps/rejected": -0.24847058951854706,
      "loss": 1.0367,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.46429023146629333,
      "rewards/margins": -0.09158438444137573,
      "rewards/rejected": -0.37270587682724,
      "step": 50
    },
    {
      "epoch": 0.07847753580537571,
      "eval_logits/chosen": 15.850138664245605,
      "eval_logits/rejected": 15.368529319763184,
      "eval_logps/chosen": -0.3222965598106384,
      "eval_logps/rejected": -0.26877468824386597,
      "eval_loss": 1.0326261520385742,
      "eval_rewards/accuracies": 0.26923078298568726,
      "eval_rewards/chosen": -0.4834447503089905,
      "eval_rewards/margins": -0.08028276264667511,
      "eval_rewards/rejected": -0.40316200256347656,
      "eval_runtime": 14.5044,
      "eval_samples_per_second": 28.405,
      "eval_steps_per_second": 3.585,
      "step": 50
    },
    {
      "epoch": 0.09417304296645085,
      "grad_norm": 0.06174452602863312,
      "learning_rate": 4.980286753286196e-06,
      "logits/chosen": 15.443066596984863,
      "logits/rejected": 15.192205429077148,
      "logps/chosen": -0.31090402603149414,
      "logps/rejected": -0.26281923055648804,
      "loss": 1.04,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.466356098651886,
      "rewards/margins": -0.07212716341018677,
      "rewards/rejected": -0.39422887563705444,
      "step": 60
    },
    {
      "epoch": 0.109868550127526,
      "grad_norm": 0.06952528655529022,
      "learning_rate": 4.973180832407471e-06,
      "logits/chosen": 16.024200439453125,
      "logits/rejected": 15.82934284210205,
      "logps/chosen": -0.348227322101593,
      "logps/rejected": -0.26220566034317017,
      "loss": 1.043,
      "rewards/accuracies": 0.21250000596046448,
      "rewards/chosen": -0.5223408937454224,
      "rewards/margins": -0.1290324479341507,
      "rewards/rejected": -0.39330852031707764,
      "step": 70
    },
    {
      "epoch": 0.12556405728860115,
      "grad_norm": 0.07572082430124283,
      "learning_rate": 4.964990092676263e-06,
      "logits/chosen": 15.884915351867676,
      "logits/rejected": 15.603845596313477,
      "logps/chosen": -0.34849274158477783,
      "logps/rejected": -0.26585355401039124,
      "loss": 1.0285,
      "rewards/accuracies": 0.1875,
      "rewards/chosen": -0.5227391719818115,
      "rewards/margins": -0.12395882606506348,
      "rewards/rejected": -0.39878037571907043,
      "step": 80
    },
    {
      "epoch": 0.14125956444967627,
      "grad_norm": 0.2423778474330902,
      "learning_rate": 4.9557181268217225e-06,
      "logits/chosen": 15.978216171264648,
      "logits/rejected": 15.76471996307373,
      "logps/chosen": -0.327436238527298,
      "logps/rejected": -0.25457051396369934,
      "loss": 1.03,
      "rewards/accuracies": 0.22499999403953552,
      "rewards/chosen": -0.49115434288978577,
      "rewards/margins": -0.10929858684539795,
      "rewards/rejected": -0.3818557560443878,
      "step": 90
    },
    {
      "epoch": 0.15695507161075142,
      "grad_norm": 0.1594536453485489,
      "learning_rate": 4.9453690018345144e-06,
      "logits/chosen": 16.307537078857422,
      "logits/rejected": 16.138330459594727,
      "logps/chosen": -0.3342314660549164,
      "logps/rejected": -0.27582648396492004,
      "loss": 1.0309,
      "rewards/accuracies": 0.26249998807907104,
      "rewards/chosen": -0.5013472437858582,
      "rewards/margins": -0.0876075029373169,
      "rewards/rejected": -0.41373974084854126,
      "step": 100
    },
    {
      "epoch": 0.15695507161075142,
      "eval_logits/chosen": 16.4310245513916,
      "eval_logits/rejected": 15.98912525177002,
      "eval_logps/chosen": -0.3239763677120209,
      "eval_logps/rejected": -0.28784558176994324,
      "eval_loss": 1.020836353302002,
      "eval_rewards/accuracies": 0.3076923191547394,
      "eval_rewards/chosen": -0.4859645664691925,
      "eval_rewards/margins": -0.054196178913116455,
      "eval_rewards/rejected": -0.43176835775375366,
      "eval_runtime": 14.5049,
      "eval_samples_per_second": 28.404,
      "eval_steps_per_second": 3.585,
      "step": 100
    },
    {
      "epoch": 0.17265057877182657,
      "grad_norm": 0.07431349903345108,
      "learning_rate": 4.933947257182901e-06,
      "logits/chosen": 16.56686782836914,
      "logits/rejected": 16.093189239501953,
      "logps/chosen": -0.34455060958862305,
      "logps/rejected": -0.2834388315677643,
      "loss": 1.0388,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.5168259739875793,
      "rewards/margins": -0.09166768193244934,
      "rewards/rejected": -0.4251582622528076,
      "step": 110
    },
    {
      "epoch": 0.1883460859329017,
      "grad_norm": 0.08802352845668793,
      "learning_rate": 4.921457902821578e-06,
      "logits/chosen": 16.50200843811035,
      "logits/rejected": 16.286388397216797,
      "logps/chosen": -0.30845317244529724,
      "logps/rejected": -0.2677682936191559,
      "loss": 1.0247,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.4626797139644623,
      "rewards/margins": -0.06102731078863144,
      "rewards/rejected": -0.40165242552757263,
      "step": 120
    },
    {
      "epoch": 0.20404159309397685,
      "grad_norm": 0.10464702546596527,
      "learning_rate": 4.907906416994146e-06,
      "logits/chosen": 16.163082122802734,
      "logits/rejected": 16.158031463623047,
      "logps/chosen": -0.3138599991798401,
      "logps/rejected": -0.28097471594810486,
      "loss": 1.0169,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.47078999876976013,
      "rewards/margins": -0.04932791367173195,
      "rewards/rejected": -0.4214620590209961,
      "step": 130
    },
    {
      "epoch": 0.219737100255052,
      "grad_norm": 0.16971275210380554,
      "learning_rate": 4.893298743830168e-06,
      "logits/chosen": 16.28864860534668,
      "logits/rejected": 16.151805877685547,
      "logps/chosen": -0.3283368945121765,
      "logps/rejected": -0.2850198745727539,
      "loss": 0.9964,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": -0.49250537157058716,
      "rewards/margins": -0.06497551500797272,
      "rewards/rejected": -0.42752987146377563,
      "step": 140
    },
    {
      "epoch": 0.23543260741612712,
      "grad_norm": 0.18377964198589325,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 16.890087127685547,
      "logits/rejected": 16.42388153076172,
      "logps/chosen": -0.33256903290748596,
      "logps/rejected": -0.2939595878124237,
      "loss": 1.0073,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": -0.49885353446006775,
      "rewards/margins": -0.057914119213819504,
      "rewards/rejected": -0.44093936681747437,
      "step": 150
    },
    {
      "epoch": 0.23543260741612712,
      "eval_logits/chosen": 16.833438873291016,
      "eval_logits/rejected": 16.328977584838867,
      "eval_logps/chosen": -0.32567569613456726,
      "eval_logps/rejected": -0.35700783133506775,
      "eval_loss": 0.9802881479263306,
      "eval_rewards/accuracies": 0.42307692766189575,
      "eval_rewards/chosen": -0.4885135293006897,
      "eval_rewards/margins": 0.04699822515249252,
      "eval_rewards/rejected": -0.5355117321014404,
      "eval_runtime": 14.5005,
      "eval_samples_per_second": 28.413,
      "eval_steps_per_second": 3.586,
      "step": 150
    },
    {
      "epoch": 0.2511281145772023,
      "grad_norm": 0.12049826234579086,
      "learning_rate": 4.860940925593703e-06,
      "logits/chosen": 16.505878448486328,
      "logits/rejected": 16.178979873657227,
      "logps/chosen": -0.3397011458873749,
      "logps/rejected": -0.35640352964401245,
      "loss": 0.9795,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.5095517039299011,
      "rewards/margins": 0.0250535998493433,
      "rewards/rejected": -0.5346053242683411,
      "step": 160
    },
    {
      "epoch": 0.2668236217382774,
      "grad_norm": 0.09485407918691635,
      "learning_rate": 4.84320497372973e-06,
      "logits/chosen": 16.245588302612305,
      "logits/rejected": 15.922958374023438,
      "logps/chosen": -0.29733315110206604,
      "logps/rejected": -0.3461209237575531,
      "loss": 0.9694,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.44599977135658264,
      "rewards/margins": 0.0731816366314888,
      "rewards/rejected": -0.5191814303398132,
      "step": 170
    },
    {
      "epoch": 0.28251912889935255,
      "grad_norm": 0.155483216047287,
      "learning_rate": 4.824441214720629e-06,
      "logits/chosen": 16.339645385742188,
      "logits/rejected": 16.115110397338867,
      "logps/chosen": -0.3076801002025604,
      "logps/rejected": -0.3655286729335785,
      "loss": 0.9488,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.4615201950073242,
      "rewards/margins": 0.0867728441953659,
      "rewards/rejected": -0.5482929944992065,
      "step": 180
    },
    {
      "epoch": 0.2982146360604277,
      "grad_norm": 0.21345795691013336,
      "learning_rate": 4.804657878971252e-06,
      "logits/chosen": 16.491886138916016,
      "logits/rejected": 16.376684188842773,
      "logps/chosen": -0.32437095046043396,
      "logps/rejected": -0.39715105295181274,
      "loss": 0.9423,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.4865564703941345,
      "rewards/margins": 0.10917013883590698,
      "rewards/rejected": -0.5957266092300415,
      "step": 190
    },
    {
      "epoch": 0.31391014322150285,
      "grad_norm": 0.17633090913295746,
      "learning_rate": 4.783863644106502e-06,
      "logits/chosen": 16.6339168548584,
      "logits/rejected": 16.79404640197754,
      "logps/chosen": -0.33018192648887634,
      "logps/rejected": -0.384196400642395,
      "loss": 0.939,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.4952728748321533,
      "rewards/margins": 0.0810217633843422,
      "rewards/rejected": -0.5762946009635925,
      "step": 200
    },
    {
      "epoch": 0.31391014322150285,
      "eval_logits/chosen": 17.17803192138672,
      "eval_logits/rejected": 16.59328269958496,
      "eval_logps/chosen": -0.33600664138793945,
      "eval_logps/rejected": -0.47861453890800476,
      "eval_loss": 0.9303967356681824,
      "eval_rewards/accuracies": 0.4615384638309479,
      "eval_rewards/chosen": -0.5040098428726196,
      "eval_rewards/margins": 0.21391186118125916,
      "eval_rewards/rejected": -0.717921793460846,
      "eval_runtime": 14.5,
      "eval_samples_per_second": 28.414,
      "eval_steps_per_second": 3.586,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.860426863236874e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}