RyanYr committed
Commit 1cd4cbf
1 Parent(s): c547d19

Training in progress, step 600, checkpoint

last-checkpoint/global_step600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:202625ddfee205574f2d32583f80fb20548624d20b7681b4ef7932d33e5aa9a9
+ size 24090788996
last-checkpoint/global_step600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8935fea688a6b55e42dca1995a7fc5a48e906db5f362ac569a9303b40778ecd1
+ size 24090788996
last-checkpoint/global_step600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6088e294ad13b46cbb72437ed9aee133beb3f3f5b0b797d7c0451eae8985742
+ size 24090788996
last-checkpoint/global_step600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7e14882aa9917dd9764eef6b5843693020fafff554ec0e4af12145bbe17c62b
+ size 24090788996
last-checkpoint/global_step600/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92080d92f7039ba9a28ee0207869fee3670869dc8f88ff1c586400224153afc1
+ size 150693
last-checkpoint/global_step600/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2adaa5dad1fb21d06dd1448a46414244004297e4538dc79e31541160fb413a29
+ size 150693
last-checkpoint/global_step600/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e100a2fb0ad2e0b16513edb6bb5e410375b4900628822bec6b2e89cd0a4b6eab
+ size 150693
last-checkpoint/global_step600/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f100eb6452bbb9cf518d9995cea742dd60e64f07f51aceb70c76caef63912b63
+ size 150693
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step300
+ global_step600
last-checkpoint/model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52167d519851f9ca18383dcf927bbf332d4ca820555c360b991e3d8ba233df5f
  size 4976698672

  version https://git-lfs.github.com/spec/v1
+ oid sha256:1ce3396aca5123c08148c8fadc6a122335c6187a86421db1706f54c5c8c0a005
  size 4976698672
last-checkpoint/model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a0217eb2e5e83b11a4cfe8a533c2fccf7de851ab8afdb1c3e7c5c3be5103c24
  size 4999802720

  version https://git-lfs.github.com/spec/v1
+ oid sha256:71e6c5c35f8fb76f9b2bf40c9eac5a2c7e3f90fae72101073bda94d959ba375a
  size 4999802720
last-checkpoint/model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d42a1488bb2337064ae7f742825a0d66fb30919db4d0219a0ab3f3925d7b2a5
  size 4915916176

  version https://git-lfs.github.com/spec/v1
+ oid sha256:5a383a8e375cb76c79594819514c55c9186dfa92ca905dc52e8fac1292cd3339
  size 4915916176
last-checkpoint/model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a9fa8aeecf4ffdb0a4c7091acfb23ed53525fd9f4aaa1f073244152c0ccf106
  size 1168138808

  version https://git-lfs.github.com/spec/v1
+ oid sha256:ee696721d43dee6d5e484e7bbfba75d7b14cb9a9a2cf93f09718805cfd76ce9e
  size 1168138808
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:70cc56408014c410353d4dd58ae9b03f4be043f5f800324f66fd8e20e99b840e
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:ef002048764051a71fb00f8f978e9ec32b780dc850bdb059af362cc56494234b
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49d1438e98cc9c53a6852464635ce62e9788e61eb3646b73e33813f487c4b6ae
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:37194a6d48612e1a46a2d5d317ead97c70d9fc4569b0118fcd5f84c3dc9daa5a
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4388add9cec90932f8ff0100d27a0574d98e1bad52ff89d44e31967d2b4fbfde
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:17c179483659a784aa1ace2427daff48c556a6bcc3c330e6f3274e4dc95e4b49
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a705d6dfaae4f2c1b4b2be6b25a6eb521ffae6fcba21cc1531e97b60037ed079
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:b56857c9b117629f35af2c3d64f522d33a9d8aa94faa81ec6956380a895118c4
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:70aea97e227d354a2e2f70f4cd68a16a7614edfe04ccb1a78d71b71f9b8e89a4
  size 1064

  version https://git-lfs.github.com/spec/v1
+ oid sha256:541c18f4f88238b345b63d08a5d1b74f7e12cb362c619071564c34e0c5dbc913
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.34604570687044917,
  "eval_steps": 100,
- "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2305,6 +2305,2304 @@
  "eval_samples_per_second": 4.606,
  "eval_steps_per_second": 1.151,
  "step": 300
  }
  ],
  "logging_steps": 2,
 
  {
  "best_metric": null,
  "best_model_checkpoint": null,
+ "epoch": 0.6920914137408983,
  "eval_steps": 100,
+ "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
 
  "eval_samples_per_second": 4.606,
  "eval_steps_per_second": 1.151,
  "step": 300
+ },
+ {
+ "epoch": 0.3483526782495855,
+ "grad_norm": 69.96196416042814,
+ "learning_rate": 1.9253460095791922e-07,
+ "logits/chosen": -1.5020473003387451,
+ "logits/rejected": -1.4953689575195312,
+ "logps/chosen": -106.53646087646484,
+ "logps/rejected": -165.1669158935547,
+ "loss": 0.6546,
+ "rewards/accuracies": 0.59375,
+ "rewards/chosen": -0.15904603898525238,
+ "rewards/margins": 0.06554871797561646,
+ "rewards/rejected": -0.22459478676319122,
+ "step": 302
+ },
+ {
+ "epoch": 0.3506596496287218,
+ "grad_norm": 74.69729400373957,
+ "learning_rate": 1.9238795325112868e-07,
+ "logits/chosen": -1.636529803276062,
+ "logits/rejected": -1.6348826885223389,
+ "logps/chosen": -140.86441040039062,
+ "logps/rejected": -174.48370361328125,
+ "loss": 0.6433,
+ "rewards/accuracies": 0.84375,
+ "rewards/chosen": -0.12615619599819183,
+ "rewards/margins": 0.20733490586280823,
+ "rewards/rejected": -0.3334910571575165,
+ "step": 304
+ },
+ {
+ "epoch": 0.35296662100785814,
+ "grad_norm": 84.17293540044481,
+ "learning_rate": 1.9223993613199713e-07,
+ "logits/chosen": -1.6913816928863525,
+ "logits/rejected": -1.6646835803985596,
+ "logps/chosen": -152.25997924804688,
+ "logps/rejected": -171.05575561523438,
+ "loss": 0.6514,
+ "rewards/accuracies": 0.65625,
+ "rewards/chosen": -0.11823489516973495,
+ "rewards/margins": 0.18948128819465637,
+ "rewards/rejected": -0.3077161908149719,
+ "step": 306
+ },
2354
+ {
2355
+ "epoch": 0.35527359238699446,
2356
+ "grad_norm": 83.6870493511653,
2357
+ "learning_rate": 1.9209055179449537e-07,
2358
+ "logits/chosen": -1.517793893814087,
2359
+ "logits/rejected": -1.6404225826263428,
2360
+ "logps/chosen": -91.36832427978516,
2361
+ "logps/rejected": -134.06529235839844,
2362
+ "loss": 0.6551,
2363
+ "rewards/accuracies": 0.71875,
2364
+ "rewards/chosen": -0.10601670295000076,
2365
+ "rewards/margins": 0.14076808094978333,
2366
+ "rewards/rejected": -0.24678479135036469,
2367
+ "step": 308
2368
+ },
2369
+ {
2370
+ "epoch": 0.3575805637661308,
2371
+ "grad_norm": 64.57674968550867,
2372
+ "learning_rate": 1.9193980245285966e-07,
2373
+ "logits/chosen": -1.4689788818359375,
2374
+ "logits/rejected": -1.3954423666000366,
2375
+ "logps/chosen": -143.7101287841797,
2376
+ "logps/rejected": -169.8336181640625,
2377
+ "loss": 0.6402,
2378
+ "rewards/accuracies": 0.65625,
2379
+ "rewards/chosen": -0.16834121942520142,
2380
+ "rewards/margins": 0.08874449878931046,
2381
+ "rewards/rejected": -0.25708574056625366,
2382
+ "step": 310
2383
+ },
2384
+ {
2385
+ "epoch": 0.3598875351452671,
2386
+ "grad_norm": 81.4185321584637,
2387
+ "learning_rate": 1.9178769034155887e-07,
2388
+ "logits/chosen": -1.6560229063034058,
2389
+ "logits/rejected": -1.7177590131759644,
2390
+ "logps/chosen": -144.23033142089844,
2391
+ "logps/rejected": -166.01162719726562,
2392
+ "loss": 0.6303,
2393
+ "rewards/accuracies": 0.65625,
2394
+ "rewards/chosen": -0.19495287537574768,
2395
+ "rewards/margins": 0.08614547550678253,
2396
+ "rewards/rejected": -0.281098335981369,
2397
+ "step": 312
2398
+ },
2399
+ {
2400
+ "epoch": 0.36219450652440344,
2401
+ "grad_norm": 70.47869326950462,
2402
+ "learning_rate": 1.9163421771526151e-07,
2403
+ "logits/chosen": -1.5131672620773315,
2404
+ "logits/rejected": -1.548357367515564,
2405
+ "logps/chosen": -146.3427734375,
2406
+ "logps/rejected": -159.85092163085938,
2407
+ "loss": 0.6536,
2408
+ "rewards/accuracies": 0.75,
2409
+ "rewards/chosen": -0.1731819212436676,
2410
+ "rewards/margins": 0.1254611313343048,
2411
+ "rewards/rejected": -0.29864302277565,
2412
+ "step": 314
2413
+ },
2414
+ {
2415
+ "epoch": 0.36450147790353976,
2416
+ "grad_norm": 79.69549984021036,
2417
+ "learning_rate": 1.914793868488021e-07,
2418
+ "logits/chosen": -1.512197732925415,
2419
+ "logits/rejected": -1.4396047592163086,
2420
+ "logps/chosen": -97.64339447021484,
2421
+ "logps/rejected": -117.3057632446289,
2422
+ "loss": 0.6579,
2423
+ "rewards/accuracies": 0.5625,
2424
+ "rewards/chosen": -0.1673259437084198,
2425
+ "rewards/margins": 0.045555103570222855,
2426
+ "rewards/rejected": -0.21288102865219116,
2427
+ "step": 316
2428
+ },
2429
+ {
2430
+ "epoch": 0.3668084492826761,
2431
+ "grad_norm": 82.99383875929993,
2432
+ "learning_rate": 1.9132320003714754e-07,
2433
+ "logits/chosen": -1.5376619100570679,
2434
+ "logits/rejected": -1.5551142692565918,
2435
+ "logps/chosen": -207.0707244873047,
2436
+ "logps/rejected": -242.56712341308594,
2437
+ "loss": 0.6439,
2438
+ "rewards/accuracies": 0.65625,
2439
+ "rewards/chosen": -0.24572816491127014,
2440
+ "rewards/margins": 0.16944444179534912,
2441
+ "rewards/rejected": -0.41517263650894165,
2442
+ "step": 318
2443
+ },
2444
+ {
2445
+ "epoch": 0.3691154206618124,
2446
+ "grad_norm": 78.2099765504223,
2447
+ "learning_rate": 1.9116565959536327e-07,
2448
+ "logits/chosen": -1.4779236316680908,
2449
+ "logits/rejected": -1.4861027002334595,
2450
+ "logps/chosen": -193.60748291015625,
2451
+ "logps/rejected": -232.04690551757812,
2452
+ "loss": 0.6534,
2453
+ "rewards/accuracies": 0.625,
2454
+ "rewards/chosen": -0.16232052445411682,
2455
+ "rewards/margins": 0.13388732075691223,
2456
+ "rewards/rejected": -0.29620781540870667,
2457
+ "step": 320
2458
+ },
2459
+ {
2460
+ "epoch": 0.37142239204094873,
2461
+ "grad_norm": 74.80406821040707,
2462
+ "learning_rate": 1.9100676785857857e-07,
2463
+ "logits/chosen": -1.6256941556930542,
2464
+ "logits/rejected": -1.5659886598587036,
2465
+ "logps/chosen": -170.6388702392578,
2466
+ "logps/rejected": -198.07733154296875,
2467
+ "loss": 0.6395,
2468
+ "rewards/accuracies": 0.6875,
2469
+ "rewards/chosen": -0.17732584476470947,
2470
+ "rewards/margins": 0.1462487280368805,
2471
+ "rewards/rejected": -0.32357457280158997,
2472
+ "step": 322
2473
+ },
2474
+ {
2475
+ "epoch": 0.37372936342008506,
2476
+ "grad_norm": 81.93843569632895,
2477
+ "learning_rate": 1.9084652718195236e-07,
2478
+ "logits/chosen": -1.5257925987243652,
2479
+ "logits/rejected": -1.4617056846618652,
2480
+ "logps/chosen": -208.795166015625,
2481
+ "logps/rejected": -243.7969970703125,
2482
+ "loss": 0.6648,
2483
+ "rewards/accuracies": 0.53125,
2484
+ "rewards/chosen": -0.2373400181531906,
2485
+ "rewards/margins": 0.16046729683876038,
2486
+ "rewards/rejected": -0.3978073298931122,
2487
+ "step": 324
2488
+ },
2489
+ {
2490
+ "epoch": 0.3760363347992214,
2491
+ "grad_norm": 68.63199696676665,
2492
+ "learning_rate": 1.9068493994063798e-07,
2493
+ "logits/chosen": -1.4899076223373413,
2494
+ "logits/rejected": -1.5616645812988281,
2495
+ "logps/chosen": -133.66110229492188,
2496
+ "logps/rejected": -236.15924072265625,
2497
+ "loss": 0.6245,
2498
+ "rewards/accuracies": 0.65625,
2499
+ "rewards/chosen": -0.15444569289684296,
2500
+ "rewards/margins": 0.2277567982673645,
2501
+ "rewards/rejected": -0.38220247626304626,
2502
+ "step": 326
2503
+ },
2504
+ {
2505
+ "epoch": 0.3783433061783577,
2506
+ "grad_norm": 77.96696778978115,
2507
+ "learning_rate": 1.905220085297482e-07,
2508
+ "logits/chosen": -1.5441091060638428,
2509
+ "logits/rejected": -1.6405153274536133,
2510
+ "logps/chosen": -204.56991577148438,
2511
+ "logps/rejected": -610.9658203125,
2512
+ "loss": 0.6369,
2513
+ "rewards/accuracies": 0.78125,
2514
+ "rewards/chosen": -0.25125816464424133,
2515
+ "rewards/margins": 0.27758753299713135,
2516
+ "rewards/rejected": -0.5288456678390503,
2517
+ "step": 328
2518
+ },
2519
+ {
2520
+ "epoch": 0.38065027755749403,
2521
+ "grad_norm": 70.94819657566394,
2522
+ "learning_rate": 1.9035773536431955e-07,
2523
+ "logits/chosen": -1.5916917324066162,
2524
+ "logits/rejected": -1.529220461845398,
2525
+ "logps/chosen": -137.5714111328125,
2526
+ "logps/rejected": -160.11544799804688,
2527
+ "loss": 0.628,
2528
+ "rewards/accuracies": 0.65625,
2529
+ "rewards/chosen": -0.20854628086090088,
2530
+ "rewards/margins": 0.11146115511655807,
2531
+ "rewards/rejected": -0.32000741362571716,
2532
+ "step": 330
2533
+ },
2534
+ {
2535
+ "epoch": 0.38295724893663036,
2536
+ "grad_norm": 74.31467840644032,
2537
+ "learning_rate": 1.901921228792766e-07,
2538
+ "logits/chosen": -1.5668599605560303,
2539
+ "logits/rejected": -1.6017038822174072,
2540
+ "logps/chosen": -253.0677947998047,
2541
+ "logps/rejected": -266.9024658203125,
2542
+ "loss": 0.6419,
2543
+ "rewards/accuracies": 0.59375,
2544
+ "rewards/chosen": -0.2701232433319092,
2545
+ "rewards/margins": 0.1171327605843544,
2546
+ "rewards/rejected": -0.387255996465683,
2547
+ "step": 332
2548
+ },
2549
+ {
2550
+ "epoch": 0.3852642203157667,
2551
+ "grad_norm": 80.19418315617096,
2552
+ "learning_rate": 1.9002517352939596e-07,
2553
+ "logits/chosen": -1.538657784461975,
2554
+ "logits/rejected": -1.4902359247207642,
2555
+ "logps/chosen": -151.844482421875,
2556
+ "logps/rejected": -182.43423461914062,
2557
+ "loss": 0.6542,
2558
+ "rewards/accuracies": 0.625,
2559
+ "rewards/chosen": -0.20499791204929352,
2560
+ "rewards/margins": 0.14708584547042847,
2561
+ "rewards/rejected": -0.3520837724208832,
2562
+ "step": 334
2563
+ },
2564
+ {
2565
+ "epoch": 0.38757119169490306,
2566
+ "grad_norm": 78.45881437768317,
2567
+ "learning_rate": 1.898568897892697e-07,
2568
+ "logits/chosen": -1.502273440361023,
2569
+ "logits/rejected": -1.567176342010498,
2570
+ "logps/chosen": -149.17568969726562,
2571
+ "logps/rejected": -218.93869018554688,
2572
+ "loss": 0.6324,
2573
+ "rewards/accuracies": 0.71875,
2574
+ "rewards/chosen": -0.21270516514778137,
2575
+ "rewards/margins": 0.24096481502056122,
2576
+ "rewards/rejected": -0.4536699950695038,
2577
+ "step": 336
2578
+ },
2579
+ {
2580
+ "epoch": 0.3898781630740394,
2581
+ "grad_norm": 69.72871536048268,
2582
+ "learning_rate": 1.8968727415326882e-07,
2583
+ "logits/chosen": -1.595134973526001,
2584
+ "logits/rejected": -1.6751508712768555,
2585
+ "logps/chosen": -112.13485717773438,
2586
+ "logps/rejected": -138.27838134765625,
2587
+ "loss": 0.6302,
2588
+ "rewards/accuracies": 0.6875,
2589
+ "rewards/chosen": -0.11406655609607697,
2590
+ "rewards/margins": 0.13377144932746887,
2591
+ "rewards/rejected": -0.24783800542354584,
2592
+ "step": 338
2593
+ },
2594
+ {
2595
+ "epoch": 0.3921851344531757,
2596
+ "grad_norm": 66.47735099680594,
2597
+ "learning_rate": 1.8951632913550623e-07,
2598
+ "logits/chosen": -1.6112767457962036,
2599
+ "logits/rejected": -1.5350615978240967,
2600
+ "logps/chosen": -212.4505615234375,
2601
+ "logps/rejected": -239.0753173828125,
2602
+ "loss": 0.621,
2603
+ "rewards/accuracies": 0.625,
2604
+ "rewards/chosen": -0.12918683886528015,
2605
+ "rewards/margins": 0.254965603351593,
2606
+ "rewards/rejected": -0.3841524124145508,
2607
+ "step": 340
2608
+ },
2609
+ {
2610
+ "epoch": 0.39449210583231203,
2611
+ "grad_norm": 81.17863346925296,
2612
+ "learning_rate": 1.8934405726979945e-07,
2613
+ "logits/chosen": -1.4070253372192383,
2614
+ "logits/rejected": -1.4879088401794434,
2615
+ "logps/chosen": -166.3784942626953,
2616
+ "logps/rejected": -204.57489013671875,
2617
+ "loss": 0.6395,
2618
+ "rewards/accuracies": 0.65625,
2619
+ "rewards/chosen": -0.31329959630966187,
2620
+ "rewards/margins": 0.13568538427352905,
2621
+ "rewards/rejected": -0.4489849805831909,
2622
+ "step": 342
2623
+ },
2624
+ {
2625
+ "epoch": 0.39679907721144836,
2626
+ "grad_norm": 72.25844304700202,
2627
+ "learning_rate": 1.8917046110963314e-07,
2628
+ "logits/chosen": -1.6808464527130127,
2629
+ "logits/rejected": -1.6618741750717163,
2630
+ "logps/chosen": -184.7408905029297,
2631
+ "logps/rejected": -213.8212127685547,
2632
+ "loss": 0.6414,
2633
+ "rewards/accuracies": 0.65625,
2634
+ "rewards/chosen": -0.1948232203722,
2635
+ "rewards/margins": 0.18943095207214355,
2636
+ "rewards/rejected": -0.3842541575431824,
2637
+ "step": 344
2638
+ },
2639
+ {
2640
+ "epoch": 0.3991060485905847,
2641
+ "grad_norm": 69.12287284056892,
2642
+ "learning_rate": 1.8899554322812116e-07,
2643
+ "logits/chosen": -1.677032470703125,
2644
+ "logits/rejected": -1.6319351196289062,
2645
+ "logps/chosen": -114.67143249511719,
2646
+ "logps/rejected": -125.2265625,
2647
+ "loss": 0.6256,
2648
+ "rewards/accuracies": 0.78125,
2649
+ "rewards/chosen": -0.18165619671344757,
2650
+ "rewards/margins": 0.17791113257408142,
2651
+ "rewards/rejected": -0.3595673143863678,
2652
+ "step": 346
2653
+ },
2654
+ {
2655
+ "epoch": 0.401413019969721,
2656
+ "grad_norm": 68.82861341006546,
2657
+ "learning_rate": 1.8881930621796846e-07,
2658
+ "logits/chosen": -1.531043291091919,
2659
+ "logits/rejected": -1.4552069902420044,
2660
+ "logps/chosen": -172.90670776367188,
2661
+ "logps/rejected": -228.29833984375,
2662
+ "loss": 0.6321,
2663
+ "rewards/accuracies": 0.65625,
2664
+ "rewards/chosen": -0.21518906950950623,
2665
+ "rewards/margins": 0.16281384229660034,
2666
+ "rewards/rejected": -0.37800291180610657,
2667
+ "step": 348
2668
+ },
2669
+ {
2670
+ "epoch": 0.40371999134885733,
2671
+ "grad_norm": 79.01675049183694,
2672
+ "learning_rate": 1.8864175269143273e-07,
2673
+ "logits/chosen": -1.628811001777649,
2674
+ "logits/rejected": -1.5073944330215454,
2675
+ "logps/chosen": -162.4159393310547,
2676
+ "logps/rejected": -173.65521240234375,
2677
+ "loss": 0.6361,
2678
+ "rewards/accuracies": 0.71875,
2679
+ "rewards/chosen": -0.17217856645584106,
2680
+ "rewards/margins": 0.20255069434642792,
2681
+ "rewards/rejected": -0.3747292459011078,
2682
+ "step": 350
2683
+ },
2684
+ {
2685
+ "epoch": 0.40602696272799366,
2686
+ "grad_norm": 80.14358020089544,
2687
+ "learning_rate": 1.8846288528028552e-07,
2688
+ "logits/chosen": -1.2868863344192505,
2689
+ "logits/rejected": -1.4563894271850586,
2690
+ "logps/chosen": -176.4993438720703,
2691
+ "logps/rejected": -219.99745178222656,
2692
+ "loss": 0.6388,
2693
+ "rewards/accuracies": 0.65625,
2694
+ "rewards/chosen": -0.34355729818344116,
2695
+ "rewards/margins": 0.19085751473903656,
2696
+ "rewards/rejected": -0.5344148278236389,
2697
+ "step": 352
2698
+ },
2699
+ {
2700
+ "epoch": 0.40833393410713,
2701
+ "grad_norm": 72.34750725400806,
2702
+ "learning_rate": 1.8828270663577336e-07,
2703
+ "logits/chosen": -1.5702780485153198,
2704
+ "logits/rejected": -1.6198755502700806,
2705
+ "logps/chosen": -135.76097106933594,
2706
+ "logps/rejected": -133.5688018798828,
2707
+ "loss": 0.6593,
2708
+ "rewards/accuracies": 0.625,
2709
+ "rewards/chosen": -0.28700345754623413,
2710
+ "rewards/margins": 0.014538988471031189,
2711
+ "rewards/rejected": -0.3015424311161041,
2712
+ "step": 354
2713
+ },
2714
+ {
2715
+ "epoch": 0.4106409054862663,
2716
+ "grad_norm": 71.70524840332104,
2717
+ "learning_rate": 1.8810121942857845e-07,
2718
+ "logits/chosen": -1.5310659408569336,
2719
+ "logits/rejected": -1.547040343284607,
2720
+ "logps/chosen": -137.63137817382812,
2721
+ "logps/rejected": -175.15028381347656,
2722
+ "loss": 0.6293,
2723
+ "rewards/accuracies": 0.75,
2724
+ "rewards/chosen": -0.1476406753063202,
2725
+ "rewards/margins": 0.20084424316883087,
2726
+ "rewards/rejected": -0.34848493337631226,
2727
+ "step": 356
2728
+ },
2729
+ {
2730
+ "epoch": 0.41294787686540263,
2731
+ "grad_norm": 77.60677795627835,
2732
+ "learning_rate": 1.8791842634877896e-07,
2733
+ "logits/chosen": -1.546626091003418,
2734
+ "logits/rejected": -1.6076010465621948,
2735
+ "logps/chosen": -136.61058044433594,
2736
+ "logps/rejected": -187.11056518554688,
2737
+ "loss": 0.6506,
2738
+ "rewards/accuracies": 0.625,
2739
+ "rewards/chosen": -0.2092825025320053,
2740
+ "rewards/margins": 0.11802927404642105,
2741
+ "rewards/rejected": -0.32731181383132935,
2742
+ "step": 358
2743
+ },
2744
+ {
2745
+ "epoch": 0.41525484824453895,
2746
+ "grad_norm": 76.22986147865214,
2747
+ "learning_rate": 1.8773433010580933e-07,
2748
+ "logits/chosen": -1.5016052722930908,
2749
+ "logits/rejected": -1.6018908023834229,
2750
+ "logps/chosen": -129.33348083496094,
2751
+ "logps/rejected": -151.12342834472656,
2752
+ "loss": 0.627,
2753
+ "rewards/accuracies": 0.5625,
2754
+ "rewards/chosen": -0.1853492707014084,
2755
+ "rewards/margins": 0.10909079760313034,
2756
+ "rewards/rejected": -0.2944400906562805,
2757
+ "step": 360
2758
+ },
2759
+ {
2760
+ "epoch": 0.4175618196236753,
2761
+ "grad_norm": 71.86807271397895,
2762
+ "learning_rate": 1.8754893342842e-07,
2763
+ "logits/chosen": -1.5751183032989502,
2764
+ "logits/rejected": -1.4908232688903809,
2765
+ "logps/chosen": -187.5486602783203,
2766
+ "logps/rejected": -194.04296875,
2767
+ "loss": 0.6223,
2768
+ "rewards/accuracies": 0.71875,
2769
+ "rewards/chosen": -0.27427998185157776,
2770
+ "rewards/margins": 0.1835474967956543,
2771
+ "rewards/rejected": -0.45782750844955444,
2772
+ "step": 362
2773
+ },
2774
+ {
2775
+ "epoch": 0.4198687910028116,
2776
+ "grad_norm": 70.36519300815779,
2777
+ "learning_rate": 1.8736223906463695e-07,
2778
+ "logits/chosen": -1.6419646739959717,
2779
+ "logits/rejected": -1.6212923526763916,
2780
+ "logps/chosen": -165.32421875,
2781
+ "logps/rejected": -171.27830505371094,
2782
+ "loss": 0.6154,
2783
+ "rewards/accuracies": 0.6875,
2784
+ "rewards/chosen": -0.21126417815685272,
2785
+ "rewards/margins": 0.188466876745224,
2786
+ "rewards/rejected": -0.3997310400009155,
2787
+ "step": 364
2788
+ },
2789
+ {
2790
+ "epoch": 0.4221757623819479,
2791
+ "grad_norm": 70.09468918933095,
2792
+ "learning_rate": 1.8717424978172102e-07,
2793
+ "logits/chosen": -1.3921918869018555,
2794
+ "logits/rejected": -1.469792127609253,
2795
+ "logps/chosen": -167.81964111328125,
2796
+ "logps/rejected": -210.77825927734375,
2797
+ "loss": 0.6308,
2798
+ "rewards/accuracies": 0.8125,
2799
+ "rewards/chosen": -0.2520577609539032,
2800
+ "rewards/margins": 0.21120049059391022,
2801
+ "rewards/rejected": -0.463258296251297,
2802
+ "step": 366
2803
+ },
2804
+ {
2805
+ "epoch": 0.42448273376108425,
2806
+ "grad_norm": 83.57733506311956,
2807
+ "learning_rate": 1.8698496836612691e-07,
2808
+ "logits/chosen": -1.494173288345337,
2809
+ "logits/rejected": -1.5522290468215942,
2810
+ "logps/chosen": -163.31491088867188,
2811
+ "logps/rejected": -189.11239624023438,
2812
+ "loss": 0.6605,
2813
+ "rewards/accuracies": 0.71875,
2814
+ "rewards/chosen": -0.2657621204853058,
2815
+ "rewards/margins": 0.16207075119018555,
2816
+ "rewards/rejected": -0.42783284187316895,
2817
+ "step": 368
2818
+ },
2819
+ {
2820
+ "epoch": 0.4267897051402206,
2821
+ "grad_norm": 81.29498139829452,
2822
+ "learning_rate": 1.8679439762346184e-07,
2823
+ "logits/chosen": -1.5649724006652832,
2824
+ "logits/rejected": -1.6319153308868408,
2825
+ "logps/chosen": -208.2643585205078,
2826
+ "logps/rejected": -215.9363555908203,
2827
+ "loss": 0.6724,
2828
+ "rewards/accuracies": 0.71875,
2829
+ "rewards/chosen": -0.27036455273628235,
2830
+ "rewards/margins": 0.1651400327682495,
2831
+ "rewards/rejected": -0.43550461530685425,
2832
+ "step": 370
2833
+ },
2834
+ {
2835
+ "epoch": 0.42909667651935696,
2836
+ "grad_norm": 76.18451864107462,
2837
+ "learning_rate": 1.8660254037844388e-07,
2838
+ "logits/chosen": -1.4427084922790527,
2839
+ "logits/rejected": -1.5188959836959839,
2840
+ "logps/chosen": -171.85968017578125,
2841
+ "logps/rejected": -233.1151580810547,
2842
+ "loss": 0.629,
2843
+ "rewards/accuracies": 0.71875,
2844
+ "rewards/chosen": -0.27071186900138855,
2845
+ "rewards/margins": 0.2559873163700104,
2846
+ "rewards/rejected": -0.5266991853713989,
2847
+ "step": 372
2848
+ },
2849
+ {
2850
+ "epoch": 0.4314036478984933,
2851
+ "grad_norm": 82.63010621157098,
2852
+ "learning_rate": 1.8640939947486023e-07,
2853
+ "logits/chosen": -1.5887802839279175,
2854
+ "logits/rejected": -1.355837106704712,
2855
+ "logps/chosen": -242.5066375732422,
2856
+ "logps/rejected": -230.2034912109375,
2857
+ "loss": 0.6329,
2858
+ "rewards/accuracies": 0.59375,
2859
+ "rewards/chosen": -0.3870730698108673,
2860
+ "rewards/margins": 0.15506887435913086,
2861
+ "rewards/rejected": -0.5421419143676758,
2862
+ "step": 374
2863
+ },
2864
+ {
2865
+ "epoch": 0.4337106192776296,
2866
+ "grad_norm": 59.14499379914714,
2867
+ "learning_rate": 1.8621497777552505e-07,
2868
+ "logits/chosen": -1.420657992362976,
2869
+ "logits/rejected": -1.4776450395584106,
2870
+ "logps/chosen": -127.46673583984375,
2871
+ "logps/rejected": -184.2600860595703,
2872
+ "loss": 0.5869,
2873
+ "rewards/accuracies": 0.875,
2874
+ "rewards/chosen": -0.15772147476673126,
2875
+ "rewards/margins": 0.3883221745491028,
2876
+ "rewards/rejected": -0.5460436344146729,
2877
+ "step": 376
2878
+ },
2879
+ {
2880
+ "epoch": 0.43601759065676593,
2881
+ "grad_norm": 76.51933767322383,
2882
+ "learning_rate": 1.8601927816223695e-07,
2883
+ "logits/chosen": -1.3575465679168701,
2884
+ "logits/rejected": -1.3156774044036865,
2885
+ "logps/chosen": -218.0836944580078,
2886
+ "logps/rejected": -228.03778076171875,
2887
+ "loss": 0.6557,
2888
+ "rewards/accuracies": 0.6875,
2889
+ "rewards/chosen": -0.4051874279975891,
2890
+ "rewards/margins": 0.143568217754364,
2891
+ "rewards/rejected": -0.5487555861473083,
2892
+ "step": 378
2893
+ },
2894
+ {
2895
+ "epoch": 0.43832456203590225,
2896
+ "grad_norm": 61.424133205634206,
2897
+ "learning_rate": 1.8582230353573624e-07,
2898
+ "logits/chosen": -1.4618622064590454,
2899
+ "logits/rejected": -1.4945478439331055,
2900
+ "logps/chosen": -95.66145324707031,
2901
+ "logps/rejected": -135.7235870361328,
2902
+ "loss": 0.6206,
2903
+ "rewards/accuracies": 0.75,
2904
+ "rewards/chosen": -0.1784054934978485,
2905
+ "rewards/margins": 0.23733605444431305,
2906
+ "rewards/rejected": -0.415741503238678,
2907
+ "step": 380
2908
+ },
2909
+ {
2910
+ "epoch": 0.4406315334150386,
2911
+ "grad_norm": 64.92661329207279,
2912
+ "learning_rate": 1.8562405681566214e-07,
2913
+ "logits/chosen": -1.5636019706726074,
2914
+ "logits/rejected": -1.5756021738052368,
2915
+ "logps/chosen": -201.42442321777344,
2916
+ "logps/rejected": -188.35606384277344,
2917
+ "loss": 0.6289,
2918
+ "rewards/accuracies": 0.625,
2919
+ "rewards/chosen": -0.3109050691127777,
2920
+ "rewards/margins": 0.10487519204616547,
2921
+ "rewards/rejected": -0.415780246257782,
2922
+ "step": 382
2923
+ },
2924
+ {
2925
+ "epoch": 0.4429385047941749,
2926
+ "grad_norm": 83.39366061705226,
2927
+ "learning_rate": 1.854245409405092e-07,
2928
+ "logits/chosen": -1.6649830341339111,
2929
+ "logits/rejected": -1.5097665786743164,
2930
+ "logps/chosen": -217.35536193847656,
2931
+ "logps/rejected": -223.5187225341797,
2932
+ "loss": 0.6113,
2933
+ "rewards/accuracies": 0.6875,
2934
+ "rewards/chosen": -0.2543387711048126,
2935
+ "rewards/margins": 0.2463696151971817,
2936
+ "rewards/rejected": -0.5007083415985107,
2937
+ "step": 384
2938
+ },
2939
+ {
2940
+ "epoch": 0.4452454761733112,
2941
+ "grad_norm": 74.49558456416251,
2942
+ "learning_rate": 1.852237588675841e-07,
2943
+ "logits/chosen": -1.582183599472046,
2944
+ "logits/rejected": -1.7068113088607788,
2945
+ "logps/chosen": -162.75521850585938,
2946
+ "logps/rejected": -220.6885986328125,
2947
+ "loss": 0.5992,
2948
+ "rewards/accuracies": 0.75,
2949
+ "rewards/chosen": -0.21387754380702972,
2950
+ "rewards/margins": 0.31847310066223145,
2951
+ "rewards/rejected": -0.5323505997657776,
2952
+ "step": 386
2953
+ },
2954
+ {
2955
+ "epoch": 0.44755244755244755,
2956
+ "grad_norm": 72.0795411450381,
2957
+ "learning_rate": 1.850217135729614e-07,
2958
+ "logits/chosen": -1.605985164642334,
2959
+ "logits/rejected": -1.5858122110366821,
2960
+ "logps/chosen": -196.78073120117188,
2961
+ "logps/rejected": -213.26580810546875,
2962
+ "loss": 0.6034,
2963
+ "rewards/accuracies": 0.625,
2964
+ "rewards/chosen": -0.44325143098831177,
2965
+ "rewards/margins": 0.07666480541229248,
2966
+ "rewards/rejected": -0.5199161767959595,
2967
+ "step": 388
2968
+ },
2969
+ {
2970
+ "epoch": 0.4498594189315839,
2971
+ "grad_norm": 72.48651390442274,
2972
+ "learning_rate": 1.8481840805143987e-07,
2973
+ "logits/chosen": -1.5632058382034302,
2974
+ "logits/rejected": -1.5244344472885132,
2975
+ "logps/chosen": -127.80747985839844,
2976
+ "logps/rejected": -152.81256103515625,
2977
+ "loss": 0.6163,
2978
+ "rewards/accuracies": 0.875,
2979
+ "rewards/chosen": -0.1298586130142212,
2980
+ "rewards/margins": 0.42240971326828003,
2981
+ "rewards/rejected": -0.5522683262825012,
2982
+ "step": 390
2983
+ },
2984
+ {
2985
+ "epoch": 0.4521663903107202,
2986
+ "grad_norm": 74.34299341635638,
2987
+ "learning_rate": 1.8461384531649773e-07,
2988
+ "logits/chosen": -1.4820444583892822,
2989
+ "logits/rejected": -1.605046033859253,
2990
+ "logps/chosen": -105.68638610839844,
2991
+ "logps/rejected": -156.26785278320312,
2992
+ "loss": 0.6202,
2993
+ "rewards/accuracies": 0.75,
2994
+ "rewards/chosen": -0.1893598437309265,
2995
+ "rewards/margins": 0.2589360773563385,
2996
+ "rewards/rejected": -0.4482958912849426,
2997
+ "step": 392
2998
+ },
2999
+ {
3000
+ "epoch": 0.4544733616898565,
3001
+ "grad_norm": 76.36773452235572,
3002
+ "learning_rate": 1.844080284002482e-07,
3003
+ "logits/chosen": -1.5065568685531616,
3004
+ "logits/rejected": -1.5656404495239258,
3005
+ "logps/chosen": -158.7242889404297,
3006
+ "logps/rejected": -228.84844970703125,
3007
+ "loss": 0.6139,
3008
+ "rewards/accuracies": 0.75,
3009
+ "rewards/chosen": -0.251006543636322,
3010
+ "rewards/margins": 0.21102304756641388,
3011
+ "rewards/rejected": -0.46202951669692993,
3012
+ "step": 394
3013
+ },
3014
+ {
3015
+ "epoch": 0.45678033306899285,
3016
+ "grad_norm": 71.03674812873284,
3017
+ "learning_rate": 1.8420096035339452e-07,
3018
+ "logits/chosen": -1.5289005041122437,
3019
+ "logits/rejected": -1.527197003364563,
3020
+ "logps/chosen": -200.40029907226562,
3021
+ "logps/rejected": -212.3697967529297,
3022
+ "loss": 0.6187,
3023
+ "rewards/accuracies": 0.625,
3024
+ "rewards/chosen": -0.2883009612560272,
3025
+ "rewards/margins": 0.30317747592926025,
3026
+ "rewards/rejected": -0.5914784073829651,
3027
+ "step": 396
3028
+ },
3029
+ {
3030
+ "epoch": 0.4590873044481292,
3031
+ "grad_norm": 81.19707296013529,
3032
+ "learning_rate": 1.8399264424518465e-07,
3033
+ "logits/chosen": -1.494114875793457,
3034
+ "logits/rejected": -1.4553757905960083,
3035
+ "logps/chosen": -173.10043334960938,
3036
+ "logps/rejected": -222.2396240234375,
3037
+ "loss": 0.5955,
3038
+ "rewards/accuracies": 0.59375,
3039
+ "rewards/chosen": -0.3878926932811737,
3040
+ "rewards/margins": 0.3027462959289551,
3041
+ "rewards/rejected": -0.6906389594078064,
3042
+ "step": 398
3043
+ },
3044
+ {
3045
+ "epoch": 0.4613942758272655,
3046
+ "grad_norm": 89.13135103863338,
3047
+ "learning_rate": 1.8378308316336582e-07,
3048
+ "logits/chosen": -1.618680715560913,
3049
+ "logits/rejected": -1.5578938722610474,
3050
+ "logps/chosen": -191.10128784179688,
3051
+ "logps/rejected": -280.5110778808594,
3052
+ "loss": 0.6411,
3053
+ "rewards/accuracies": 0.625,
3054
+ "rewards/chosen": -0.4683380126953125,
3055
+ "rewards/margins": 0.19769813120365143,
3056
+ "rewards/rejected": -0.6660361289978027,
3057
+ "step": 400
3058
+ },
3059
+ {
3060
+ "epoch": 0.4613942758272655,
3061
+ "eval_logits/chosen": -1.4853571653366089,
3062
+ "eval_logits/rejected": -1.3932629823684692,
3063
+ "eval_logps/chosen": -189.0384521484375,
3064
+ "eval_logps/rejected": -156.24160766601562,
3065
+ "eval_loss": 0.654194176197052,
3066
+ "eval_rewards/accuracies": 0.6800000071525574,
3067
+ "eval_rewards/chosen": -0.38562828302383423,
3068
+ "eval_rewards/margins": 0.18699264526367188,
3069
+ "eval_rewards/rejected": -0.5726209282875061,
3070
+ "eval_runtime": 26.5299,
3071
+ "eval_samples_per_second": 3.769,
3072
+ "eval_steps_per_second": 0.942,
3073
+ "step": 400
3074
+ },
3075
+ {
3076
+ "epoch": 0.4637012472064018,
3077
+ "grad_norm": 69.21606890792003,
3078
+ "learning_rate": 1.8357228021413883e-07,
3079
+ "logits/chosen": -1.5431230068206787,
3080
+ "logits/rejected": -1.7365866899490356,
3081
+ "logps/chosen": -147.3966827392578,
3082
+ "logps/rejected": -170.9712371826172,
3083
+ "loss": 0.6581,
3084
+ "rewards/accuracies": 0.59375,
3085
+ "rewards/chosen": -0.30663323402404785,
3086
+ "rewards/margins": 0.11269617080688477,
3087
+ "rewards/rejected": -0.4193294048309326,
3088
+ "step": 402
3089
+ },
3090
+ {
3091
+ "epoch": 0.46600821858553815,
3092
+ "grad_norm": 78.7990153253576,
3093
+ "learning_rate": 1.8336023852211194e-07,
3094
+ "logits/chosen": -1.5721492767333984,
3095
+ "logits/rejected": -1.4822769165039062,
3096
+ "logps/chosen": -148.9419403076172,
3097
+ "logps/rejected": -158.44668579101562,
3098
+ "loss": 0.609,
3099
+ "rewards/accuracies": 0.75,
3100
+ "rewards/chosen": -0.27455994486808777,
3101
+ "rewards/margins": 0.3990754187107086,
3102
+ "rewards/rejected": -0.6736353039741516,
3103
+ "step": 404
3104
+ },
3105
+ {
3106
+ "epoch": 0.4683151899646745,
3107
+ "grad_norm": 67.81492283153628,
3108
+ "learning_rate": 1.8314696123025453e-07,
3109
+ "logits/chosen": -1.6370363235473633,
3110
+ "logits/rejected": -1.5174671411514282,
3111
+ "logps/chosen": -145.17050170898438,
3112
+ "logps/rejected": -142.74551391601562,
3113
+ "loss": 0.6312,
3114
+ "rewards/accuracies": 0.71875,
3115
+ "rewards/chosen": -0.28109437227249146,
3116
+ "rewards/margins": 0.2069387137889862,
3117
+ "rewards/rejected": -0.48803308606147766,
3118
+ "step": 406
3119
+ },
3120
+ {
3121
+ "epoch": 0.47062216134381085,
3122
+ "grad_norm": 78.2843593072173,
3123
+ "learning_rate": 1.8293245149985053e-07,
3124
+ "logits/chosen": -1.5488444566726685,
3125
+ "logits/rejected": -1.4798938035964966,
3126
+ "logps/chosen": -161.83570861816406,
3127
+ "logps/rejected": -162.7615509033203,
3128
+ "loss": 0.6484,
3129
+ "rewards/accuracies": 0.65625,
3130
+ "rewards/chosen": -0.2718978822231293,
3131
+ "rewards/margins": 0.15639187395572662,
3132
+ "rewards/rejected": -0.4282897710800171,
3133
+ "step": 408
3134
+ },
3135
+ {
3136
+ "epoch": 0.4729291327229472,
3137
+ "grad_norm": 73.10449012391845,
3138
+ "learning_rate": 1.827167125104517e-07,
3139
+ "logits/chosen": -1.4978845119476318,
3140
+ "logits/rejected": -1.4839560985565186,
3141
+ "logps/chosen": -148.445556640625,
3142
+ "logps/rejected": -161.85986328125,
3143
+ "loss": 0.6481,
3144
+ "rewards/accuracies": 0.59375,
3145
+ "rewards/chosen": -0.27761712670326233,
3146
+ "rewards/margins": 0.09577606618404388,
3147
+ "rewards/rejected": -0.3733932077884674,
3148
+ "step": 410
3149
+ },
3150
+ {
3151
+ "epoch": 0.4752361041020835,
3152
+ "grad_norm": 77.23312704566136,
3153
+ "learning_rate": 1.8249974745983021e-07,
3154
+ "logits/chosen": -1.4896149635314941,
3155
+ "logits/rejected": -1.4279950857162476,
3156
+ "logps/chosen": -136.3888397216797,
3157
+ "logps/rejected": -184.14625549316406,
3158
+ "loss": 0.6186,
3159
+ "rewards/accuracies": 0.75,
3160
+ "rewards/chosen": -0.3546374440193176,
3161
+ "rewards/margins": 0.3140718638896942,
3162
+ "rewards/rejected": -0.6687093377113342,
3163
+ "step": 412
3164
+ },
3165
+ {
3166
+ "epoch": 0.4775430754812198,
3167
+ "grad_norm": 65.58481770102698,
3168
+ "learning_rate": 1.822815595639316e-07,
3169
+ "logits/chosen": -1.4790016412734985,
3170
+ "logits/rejected": -1.525940179824829,
3171
+ "logps/chosen": -162.99288940429688,
3172
+ "logps/rejected": -190.2974853515625,
3173
+ "loss": 0.6112,
3174
+ "rewards/accuracies": 0.6875,
3175
+ "rewards/chosen": -0.36069726943969727,
3176
+ "rewards/margins": 0.20576652884483337,
3177
+ "rewards/rejected": -0.5664637684822083,
3178
+ "step": 414
3179
+ },
3180
+ {
3181
+ "epoch": 0.47985004686035615,
3182
+ "grad_norm": 68.7972400850831,
3183
+ "learning_rate": 1.820621520568268e-07,
3184
+ "logits/chosen": -1.5574984550476074,
3185
+ "logits/rejected": -1.4820420742034912,
3186
+ "logps/chosen": -178.15878295898438,
3187
+ "logps/rejected": -191.66177368164062,
3188
+ "loss": 0.6,
3189
+ "rewards/accuracies": 0.75,
3190
+ "rewards/chosen": -0.314214825630188,
3191
+ "rewards/margins": 0.32970941066741943,
3192
+ "rewards/rejected": -0.6439242362976074,
3193
+ "step": 416
3194
+ },
3195
+ {
3196
+ "epoch": 0.4821570182394925,
3197
+ "grad_norm": 77.22458475405976,
3198
+ "learning_rate": 1.8184152819066434e-07,
3199
+ "logits/chosen": -1.5454033613204956,
3200
+ "logits/rejected": -1.5681257247924805,
3201
+ "logps/chosen": -206.4539031982422,
3202
+ "logps/rejected": -221.17599487304688,
3203
+ "loss": 0.6395,
3204
+ "rewards/accuracies": 0.5625,
3205
+ "rewards/chosen": -0.4876091778278351,
3206
+ "rewards/margins": 0.06031504273414612,
3207
+ "rewards/rejected": -0.5479242205619812,
3208
+ "step": 418
3209
+ },
3210
+ {
3211
+ "epoch": 0.4844639896186288,
3212
+ "grad_norm": 69.59230881656185,
3213
+ "learning_rate": 1.8161969123562217e-07,
3214
+ "logits/chosen": -1.54752516746521,
3215
+ "logits/rejected": -1.5821384191513062,
3216
+ "logps/chosen": -182.0235137939453,
3217
+ "logps/rejected": -163.29364013671875,
3218
+ "loss": 0.6107,
3219
+ "rewards/accuracies": 0.75,
3220
+ "rewards/chosen": -0.3480142056941986,
3221
+ "rewards/margins": 0.3120378255844116,
3222
+ "rewards/rejected": -0.6600520610809326,
3223
+ "step": 420
3224
+ },
3225
+ {
3226
+ "epoch": 0.4867709609977651,
3227
+ "grad_norm": 68.29468448816121,
3228
+ "learning_rate": 1.813966444798591e-07,
3229
+ "logits/chosen": -1.513810634613037,
3230
+ "logits/rejected": -1.4666978120803833,
3231
+ "logps/chosen": -204.99462890625,
3232
+ "logps/rejected": -204.5595245361328,
3233
+ "loss": 0.6143,
3234
+ "rewards/accuracies": 0.75,
3235
+ "rewards/chosen": -0.3375055491924286,
3236
+ "rewards/margins": 0.3794183135032654,
3237
+ "rewards/rejected": -0.7169238328933716,
3238
+ "step": 422
3239
+ },
3240
+ {
3241
+ "epoch": 0.48907793237690145,
3242
+ "grad_norm": 73.69015362328696,
3243
+ "learning_rate": 1.8117239122946611e-07,
3244
+ "logits/chosen": -1.3477180004119873,
3245
+ "logits/rejected": -1.4509586095809937,
3246
+ "logps/chosen": -118.67777252197266,
3247
+ "logps/rejected": -176.48667907714844,
3248
+ "loss": 0.6192,
3249
+ "rewards/accuracies": 0.625,
3250
+ "rewards/chosen": -0.3034321069717407,
3251
+ "rewards/margins": 0.12479298561811447,
3252
+ "rewards/rejected": -0.4282251298427582,
3253
+ "step": 424
3254
+ },
3255
+ {
3256
+ "epoch": 0.49138490375603777,
3257
+ "grad_norm": 78.31541581493791,
3258
+ "learning_rate": 1.809469348084174e-07,
3259
+ "logits/chosen": -1.459653377532959,
3260
+ "logits/rejected": -1.5776402950286865,
3261
+ "logps/chosen": -159.45347595214844,
3262
+ "logps/rejected": -189.2720489501953,
3263
+ "loss": 0.6554,
3264
+ "rewards/accuracies": 0.65625,
3265
+ "rewards/chosen": -0.37468722462654114,
3266
+ "rewards/margins": 0.1383470892906189,
3267
+ "rewards/rejected": -0.5130342841148376,
3268
+ "step": 426
3269
+ },
3270
+ {
3271
+ "epoch": 0.4936918751351741,
3272
+ "grad_norm": 130.5379676824635,
3273
+ "learning_rate": 1.8072027855852095e-07,
3274
+ "logits/chosen": -1.4528967142105103,
3275
+ "logits/rejected": -1.423844814300537,
3276
+ "logps/chosen": -172.85316467285156,
3277
+ "logps/rejected": -215.22189331054688,
3278
+ "loss": 0.6639,
3279
+ "rewards/accuracies": 0.71875,
3280
+ "rewards/chosen": -0.41784724593162537,
3281
+ "rewards/margins": 0.3192124366760254,
3282
+ "rewards/rejected": -0.7370596528053284,
3283
+ "step": 428
3284
+ },
3285
+ {
3286
+ "epoch": 0.4959988465143104,
3287
+ "grad_norm": 63.21984381769687,
3288
+ "learning_rate": 1.8049242583936918e-07,
3289
+ "logits/chosen": -1.5084190368652344,
3290
+ "logits/rejected": -1.4574109315872192,
3291
+ "logps/chosen": -165.896484375,
3292
+ "logps/rejected": -227.423828125,
3293
+ "loss": 0.5893,
3294
+ "rewards/accuracies": 0.8125,
3295
+ "rewards/chosen": -0.25652381777763367,
3296
+ "rewards/margins": 0.47441697120666504,
3297
+ "rewards/rejected": -0.7309407591819763,
3298
+ "step": 430
3299
+ },
3300
+ {
3301
+ "epoch": 0.49830581789344675,
3302
+ "grad_norm": 71.69590925642426,
3303
+ "learning_rate": 1.802633800282891e-07,
3304
+ "logits/chosen": -1.516315221786499,
3305
+ "logits/rejected": -1.6526371240615845,
3306
+ "logps/chosen": -229.77777099609375,
3307
+ "logps/rejected": -292.7660827636719,
3308
+ "loss": 0.5979,
3309
+ "rewards/accuracies": 0.78125,
3310
+ "rewards/chosen": -0.3780279755592346,
3311
+ "rewards/margins": 0.49888893961906433,
3312
+ "rewards/rejected": -0.8769169449806213,
3313
+ "step": 432
3314
+ },
3315
+ {
3316
+ "epoch": 0.5006127892725831,
3317
+ "grad_norm": 72.54608833334152,
3318
+ "learning_rate": 1.8003314452029213e-07,
3319
+ "logits/chosen": -1.5792149305343628,
3320
+ "logits/rejected": -1.550574779510498,
3321
+ "logps/chosen": -226.616455078125,
3322
+ "logps/rejected": -228.4210205078125,
3323
+ "loss": 0.6046,
3324
+ "rewards/accuracies": 0.6875,
3325
+ "rewards/chosen": -0.5066580176353455,
3326
+ "rewards/margins": 0.34013134241104126,
3327
+ "rewards/rejected": -0.8467893600463867,
3328
+ "step": 434
3329
+ },
3330
+ {
3331
+ "epoch": 0.5029197606517194,
3332
+ "grad_norm": 73.04169645370872,
3333
+ "learning_rate": 1.7980172272802395e-07,
3334
+ "logits/chosen": -1.5109785795211792,
3335
+ "logits/rejected": -1.499125361442566,
3336
+ "logps/chosen": -154.92233276367188,
3337
+ "logps/rejected": -175.07643127441406,
3338
+ "loss": 0.5817,
3339
+ "rewards/accuracies": 0.8125,
3340
+ "rewards/chosen": -0.25602594017982483,
3341
+ "rewards/margins": 0.5013114809989929,
3342
+ "rewards/rejected": -0.7573373913764954,
3343
+ "step": 436
3344
+ },
3345
+ {
3346
+ "epoch": 0.5052267320308558,
3347
+ "grad_norm": 69.05059334922119,
3348
+ "learning_rate": 1.7956911808171373e-07,
3349
+ "logits/chosen": -1.561600923538208,
3350
+ "logits/rejected": -1.5301151275634766,
3351
+ "logps/chosen": -217.26930236816406,
3352
+ "logps/rejected": -240.7093048095703,
3353
+ "loss": 0.6151,
3354
+ "rewards/accuracies": 0.59375,
3355
+ "rewards/chosen": -0.46973368525505066,
3356
+ "rewards/margins": 0.2093038558959961,
3357
+ "rewards/rejected": -0.6790375113487244,
3358
+ "step": 438
3359
+ },
3360
+ {
3361
+ "epoch": 0.507533703409992,
3362
+ "grad_norm": 74.68873536524164,
3363
+ "learning_rate": 1.793353340291235e-07,
3364
+ "logits/chosen": -1.3198765516281128,
3365
+ "logits/rejected": -1.4805912971496582,
3366
+ "logps/chosen": -175.9479217529297,
3367
+ "logps/rejected": -226.83265686035156,
3368
+ "loss": 0.6134,
3369
+ "rewards/accuracies": 0.6875,
3370
+ "rewards/chosen": -0.5549490451812744,
3371
+ "rewards/margins": 0.23202911019325256,
3372
+ "rewards/rejected": -0.7869781851768494,
3373
+ "step": 440
3374
+ },
3375
+ {
3376
+ "epoch": 0.5098406747891284,
3377
+ "grad_norm": 73.37532376774183,
3378
+ "learning_rate": 1.7910037403549692e-07,
3379
+ "logits/chosen": -1.4717934131622314,
3380
+ "logits/rejected": -1.5461549758911133,
3381
+ "logps/chosen": -159.91883850097656,
3382
+ "logps/rejected": -204.87376403808594,
3383
+ "loss": 0.6459,
3384
+ "rewards/accuracies": 0.5625,
3385
+ "rewards/chosen": -0.4326345920562744,
3386
+ "rewards/margins": 0.22945694625377655,
3387
+ "rewards/rejected": -0.6620914936065674,
3388
+ "step": 442
3389
+ },
3390
+ {
3391
+ "epoch": 0.5121476461682647,
3392
+ "grad_norm": 69.28741446430803,
3393
+ "learning_rate": 1.7886424158350782e-07,
3394
+ "logits/chosen": -1.5604138374328613,
3395
+ "logits/rejected": -1.663907766342163,
3396
+ "logps/chosen": -158.54408264160156,
3397
+ "logps/rejected": -192.7698516845703,
3398
+ "loss": 0.5921,
3399
+ "rewards/accuracies": 0.78125,
3400
+ "rewards/chosen": -0.3090921640396118,
3401
+ "rewards/margins": 0.3891502916812897,
3402
+ "rewards/rejected": -0.6982424855232239,
3403
+ "step": 444
3404
+ },
3405
+ {
3406
+ "epoch": 0.5144546175474011,
3407
+ "grad_norm": 77.66154968693108,
3408
+ "learning_rate": 1.7862694017320886e-07,
3409
+ "logits/chosen": -1.3435657024383545,
3410
+ "logits/rejected": -1.3843066692352295,
3411
+ "logps/chosen": -174.62672424316406,
3412
+ "logps/rejected": -288.0128173828125,
3413
+ "loss": 0.6145,
3414
+ "rewards/accuracies": 0.6875,
3415
+ "rewards/chosen": -0.4857187271118164,
3416
+ "rewards/margins": 0.4818662703037262,
3417
+ "rewards/rejected": -0.9675850868225098,
3418
+ "step": 446
3419
+ },
3420
+ {
3421
+ "epoch": 0.5167615889265373,
3422
+ "grad_norm": 86.0701716220196,
3423
+ "learning_rate": 1.7838847332197937e-07,
3424
+ "logits/chosen": -1.4369436502456665,
3425
+ "logits/rejected": -1.5111709833145142,
3426
+ "logps/chosen": -193.0187225341797,
3427
+ "logps/rejected": -258.660400390625,
3428
+ "loss": 0.6179,
3429
+ "rewards/accuracies": 0.71875,
3430
+ "rewards/chosen": -0.4460400640964508,
3431
+ "rewards/margins": 0.4027029871940613,
3432
+ "rewards/rejected": -0.8487430810928345,
3433
+ "step": 448
3434
+ },
3435
+ {
3436
+ "epoch": 0.5190685603056737,
3437
+ "grad_norm": 84.40844346826594,
3438
+ "learning_rate": 1.7814884456447335e-07,
3439
+ "logits/chosen": -1.5306761264801025,
3440
+ "logits/rejected": -1.4944154024124146,
3441
+ "logps/chosen": -195.49612426757812,
3442
+ "logps/rejected": -222.01425170898438,
3443
+ "loss": 0.6006,
3444
+ "rewards/accuracies": 0.78125,
3445
+ "rewards/chosen": -0.2904947102069855,
3446
+ "rewards/margins": 0.5166550874710083,
3447
+ "rewards/rejected": -0.8071498870849609,
3448
+ "step": 450
3449
+ },
3450
+ {
3451
+ "epoch": 0.52137553168481,
3452
+ "grad_norm": 86.3712126774886,
3453
+ "learning_rate": 1.7790805745256703e-07,
3454
+ "logits/chosen": -1.3275847434997559,
3455
+ "logits/rejected": -1.38175630569458,
3456
+ "logps/chosen": -136.90707397460938,
3457
+ "logps/rejected": -184.36331176757812,
3458
+ "loss": 0.6767,
3459
+ "rewards/accuracies": 0.625,
3460
+ "rewards/chosen": -0.44699156284332275,
3461
+ "rewards/margins": 0.12617343664169312,
3462
+ "rewards/rejected": -0.5731649398803711,
3463
+ "step": 452
3464
+ },
3465
+ {
3466
+ "epoch": 0.5236825030639464,
3467
+ "grad_norm": 66.61833278109548,
3468
+ "learning_rate": 1.7766611555530635e-07,
3469
+ "logits/chosen": -1.6141921281814575,
3470
+ "logits/rejected": -1.5151243209838867,
3471
+ "logps/chosen": -156.77407836914062,
3472
+ "logps/rejected": -154.7230682373047,
3473
+ "loss": 0.5733,
3474
+ "rewards/accuracies": 0.6875,
3475
+ "rewards/chosen": -0.3759933114051819,
3476
+ "rewards/margins": 0.17464786767959595,
3477
+ "rewards/rejected": -0.5506411790847778,
3478
+ "step": 454
3479
+ },
3480
+ {
3481
+ "epoch": 0.5259894744430826,
3482
+ "grad_norm": 69.26758309677136,
3483
+ "learning_rate": 1.774230224588538e-07,
3484
+ "logits/chosen": -1.3204282522201538,
3485
+ "logits/rejected": -1.4286822080612183,
3486
+ "logps/chosen": -152.52542114257812,
3487
+ "logps/rejected": -232.16189575195312,
3488
+ "loss": 0.5494,
3489
+ "rewards/accuracies": 0.78125,
3490
+ "rewards/chosen": -0.4260653853416443,
3491
+ "rewards/margins": 0.5102941989898682,
3492
+ "rewards/rejected": -0.9363595247268677,
3493
+ "step": 456
3494
+ },
3495
+ {
3496
+ "epoch": 0.528296445822219,
3497
+ "grad_norm": 81.07739462727531,
3498
+ "learning_rate": 1.771787817664356e-07,
3499
+ "logits/chosen": -1.508811116218567,
3500
+ "logits/rejected": -1.5395921468734741,
3501
+ "logps/chosen": -134.4735565185547,
3502
+ "logps/rejected": -166.41592407226562,
3503
+ "loss": 0.6351,
3504
+ "rewards/accuracies": 0.5,
3505
+ "rewards/chosen": -0.49481019377708435,
3506
+ "rewards/margins": 0.1262877732515335,
3507
+ "rewards/rejected": -0.6210979223251343,
3508
+ "step": 458
3509
+ },
3510
+ {
3511
+ "epoch": 0.5306034172013554,
3512
+ "grad_norm": 86.01343093557993,
3513
+ "learning_rate": 1.769333970982879e-07,
3514
+ "logits/chosen": -1.518664836883545,
3515
+ "logits/rejected": -1.3482635021209717,
3516
+ "logps/chosen": -173.78538513183594,
3517
+ "logps/rejected": -160.53573608398438,
3518
+ "loss": 0.5857,
3519
+ "rewards/accuracies": 0.625,
3520
+ "rewards/chosen": -0.49463319778442383,
3521
+ "rewards/margins": 0.202806293964386,
3522
+ "rewards/rejected": -0.6974395513534546,
3523
+ "step": 460
3524
+ },
3525
+ {
3526
+ "epoch": 0.5329103885804917,
3527
+ "grad_norm": 85.16027410016599,
3528
+ "learning_rate": 1.766868720916035e-07,
3529
+ "logits/chosen": -1.359481930732727,
3530
+ "logits/rejected": -1.3029265403747559,
3531
+ "logps/chosen": -134.05616760253906,
3532
+ "logps/rejected": -134.0654754638672,
3533
+ "loss": 0.6487,
3534
+ "rewards/accuracies": 0.625,
3535
+ "rewards/chosen": -0.4239296019077301,
3536
+ "rewards/margins": 0.03123108297586441,
3537
+ "rewards/rejected": -0.4551607072353363,
3538
+ "step": 462
3539
+ },
3540
+ {
3541
+ "epoch": 0.535217359959628,
3542
+ "grad_norm": 84.5629811685175,
3543
+ "learning_rate": 1.7643921040047766e-07,
3544
+ "logits/chosen": -1.6018937826156616,
3545
+ "logits/rejected": -1.6816954612731934,
3546
+ "logps/chosen": -237.3992919921875,
3547
+ "logps/rejected": -253.08688354492188,
3548
+ "loss": 0.597,
3549
+ "rewards/accuracies": 0.59375,
3550
+ "rewards/chosen": -0.6288573741912842,
3551
+ "rewards/margins": 0.15610165894031525,
3552
+ "rewards/rejected": -0.7849590182304382,
3553
+ "step": 464
3554
+ },
3555
+ {
3556
+ "epoch": 0.5375243313387643,
3557
+ "grad_norm": 80.72916842158041,
3558
+ "learning_rate": 1.7619041569585418e-07,
3559
+ "logits/chosen": -1.4444328546524048,
3560
+ "logits/rejected": -1.4673030376434326,
3561
+ "logps/chosen": -170.2801971435547,
3562
+ "logps/rejected": -214.7718963623047,
3563
+ "loss": 0.6181,
3564
+ "rewards/accuracies": 0.75,
3565
+ "rewards/chosen": -0.564181923866272,
3566
+ "rewards/margins": 0.2302751988172531,
3567
+ "rewards/rejected": -0.7944571375846863,
3568
+ "step": 466
3569
+ },
3570
+ {
3571
+ "epoch": 0.5398313027179007,
3572
+ "grad_norm": 76.00828750498393,
3573
+ "learning_rate": 1.759404916654707e-07,
3574
+ "logits/chosen": -1.4668854475021362,
3575
+ "logits/rejected": -1.421462059020996,
3576
+ "logps/chosen": -360.7674560546875,
3577
+ "logps/rejected": -301.1515197753906,
3578
+ "loss": 0.6139,
3579
+ "rewards/accuracies": 0.75,
3580
+ "rewards/chosen": -0.6432144641876221,
3581
+ "rewards/margins": 0.3255874514579773,
3582
+ "rewards/rejected": -0.9688019156455994,
3583
+ "step": 468
3584
+ },
3585
+ {
3586
+ "epoch": 0.542138274097037,
3587
+ "grad_norm": 75.00038820917719,
3588
+ "learning_rate": 1.756894420138043e-07,
3589
+ "logits/chosen": -1.5766559839248657,
3590
+ "logits/rejected": -1.656800627708435,
3591
+ "logps/chosen": -216.8627471923828,
3592
+ "logps/rejected": -270.90850830078125,
3593
+ "loss": 0.615,
3594
+ "rewards/accuracies": 0.78125,
3595
+ "rewards/chosen": -0.4949862062931061,
3596
+ "rewards/margins": 0.4039486050605774,
3597
+ "rewards/rejected": -0.8989347815513611,
3598
+ "step": 470
3599
+ },
3600
+ {
3601
+ "epoch": 0.5444452454761733,
3602
+ "grad_norm": 86.17675092820859,
3603
+ "learning_rate": 1.754372704620164e-07,
3604
+ "logits/chosen": -1.4618090391159058,
3605
+ "logits/rejected": -1.5533053874969482,
3606
+ "logps/chosen": -202.59561157226562,
3607
+ "logps/rejected": -221.70413208007812,
3608
+ "loss": 0.6478,
3609
+ "rewards/accuracies": 0.75,
3610
+ "rewards/chosen": -0.44822004437446594,
3611
+ "rewards/margins": 0.28794264793395996,
3612
+ "rewards/rejected": -0.7361626625061035,
3613
+ "step": 472
3614
+ },
3615
+ {
3616
+ "epoch": 0.5467522168553096,
3617
+ "grad_norm": 72.36150215283246,
3618
+ "learning_rate": 1.7518398074789774e-07,
3619
+ "logits/chosen": -1.4804517030715942,
3620
+ "logits/rejected": -1.5212501287460327,
3621
+ "logps/chosen": -195.58935546875,
3622
+ "logps/rejected": -247.99276733398438,
3623
+ "loss": 0.553,
3624
+ "rewards/accuracies": 0.8125,
3625
+ "rewards/chosen": -0.44707149267196655,
3626
+ "rewards/margins": 0.6286894679069519,
3627
+ "rewards/rejected": -1.0757609605789185,
3628
+ "step": 474
3629
+ },
3630
+ {
3631
+ "epoch": 0.549059188234446,
3632
+ "grad_norm": 73.94947964279808,
3633
+ "learning_rate": 1.7492957662581294e-07,
3634
+ "logits/chosen": -1.3577089309692383,
3635
+ "logits/rejected": -1.4486963748931885,
3636
+ "logps/chosen": -133.3319091796875,
3637
+ "logps/rejected": -188.2812957763672,
3638
+ "loss": 0.6001,
3639
+ "rewards/accuracies": 0.65625,
3640
+ "rewards/chosen": -0.34889039397239685,
3641
+ "rewards/margins": 0.3021068871021271,
3642
+ "rewards/rejected": -0.6509972214698792,
3643
+ "step": 476
3644
+ },
3645
+ {
3646
+ "epoch": 0.5513661596135823,
3647
+ "grad_norm": 74.0047644626624,
3648
+ "learning_rate": 1.7467406186664473e-07,
3649
+ "logits/chosen": -1.5747010707855225,
3650
+ "logits/rejected": -1.5058567523956299,
3651
+ "logps/chosen": -216.6630401611328,
3652
+ "logps/rejected": -223.66598510742188,
3653
+ "loss": 0.6345,
3654
+ "rewards/accuracies": 0.6875,
3655
+ "rewards/chosen": -0.5371094346046448,
3656
+ "rewards/margins": 0.3996596932411194,
3657
+ "rewards/rejected": -0.9367691874504089,
3658
+ "step": 478
3659
+ },
3660
+ {
3661
+ "epoch": 0.5536731309927186,
3662
+ "grad_norm": 50.915761396824145,
3663
+ "learning_rate": 1.7441744025773834e-07,
3664
+ "logits/chosen": -1.4014126062393188,
3665
+ "logits/rejected": -1.569306492805481,
3666
+ "logps/chosen": -156.43629455566406,
3667
+ "logps/rejected": -228.84625244140625,
3668
+ "loss": 0.5975,
3669
+ "rewards/accuracies": 0.5625,
3670
+ "rewards/chosen": -0.399608850479126,
3671
+ "rewards/margins": 0.29513585567474365,
3672
+ "rewards/rejected": -0.6947447061538696,
3673
+ "step": 480
3674
+ },
3675
+ {
3676
+ "epoch": 0.5559801023718549,
3677
+ "grad_norm": 80.40246802194461,
3678
+ "learning_rate": 1.74159715602845e-07,
3679
+ "logits/chosen": -1.49760103225708,
3680
+ "logits/rejected": -1.4302232265472412,
3681
+ "logps/chosen": -152.4906005859375,
3682
+ "logps/rejected": -165.43942260742188,
3683
+ "loss": 0.6511,
3684
+ "rewards/accuracies": 0.5625,
3685
+ "rewards/chosen": -0.4252295196056366,
3686
+ "rewards/margins": 0.12136977910995483,
3687
+ "rewards/rejected": -0.5465993285179138,
3688
+ "step": 482
3689
+ },
3690
+ {
3691
+ "epoch": 0.5582870737509913,
3692
+ "grad_norm": 70.56990492477674,
3693
+ "learning_rate": 1.739008917220659e-07,
3694
+ "logits/chosen": -1.4919289350509644,
3695
+ "logits/rejected": -1.5267033576965332,
3696
+ "logps/chosen": -187.85191345214844,
3697
+ "logps/rejected": -220.8524169921875,
3698
+ "loss": 0.5689,
3699
+ "rewards/accuracies": 0.78125,
3700
+ "rewards/chosen": -0.5196070671081543,
3701
+ "rewards/margins": 0.3590528666973114,
3702
+ "rewards/rejected": -0.8786599636077881,
3703
+ "step": 484
3704
+ },
3705
+ {
3706
+ "epoch": 0.5605940451301276,
3707
+ "grad_norm": 78.98020718967784,
3708
+ "learning_rate": 1.7364097245179527e-07,
3709
+ "logits/chosen": -1.599880337715149,
3710
+ "logits/rejected": -1.5224246978759766,
3711
+ "logps/chosen": -196.72555541992188,
3712
+ "logps/rejected": -213.14309692382812,
3713
+ "loss": 0.5892,
3714
+ "rewards/accuracies": 0.71875,
3715
+ "rewards/chosen": -0.5108906030654907,
3716
+ "rewards/margins": 0.1902090609073639,
3717
+ "rewards/rejected": -0.701099693775177,
3718
+ "step": 486
3719
+ },
3720
+ {
3721
+ "epoch": 0.5629010165092639,
3722
+ "grad_norm": 75.35371757401214,
3723
+ "learning_rate": 1.733799616446637e-07,
3724
+ "logits/chosen": -1.4978597164154053,
3725
+ "logits/rejected": -1.5102261304855347,
3726
+ "logps/chosen": -186.15167236328125,
3727
+ "logps/rejected": -226.00375366210938,
3728
+ "loss": 0.6112,
3729
+ "rewards/accuracies": 0.75,
3730
+ "rewards/chosen": -0.43081170320510864,
3731
+ "rewards/margins": 0.36774906516075134,
3732
+ "rewards/rejected": -0.7985607385635376,
3733
+ "step": 488
3734
+ },
3735
+ {
3736
+ "epoch": 0.5652079878884002,
3737
+ "grad_norm": 75.43303696622675,
3738
+ "learning_rate": 1.7311786316948108e-07,
3739
+ "logits/chosen": -1.418121337890625,
3740
+ "logits/rejected": -1.4920923709869385,
3741
+ "logps/chosen": -179.17889404296875,
3742
+ "logps/rejected": -229.40098571777344,
3743
+ "loss": 0.5938,
3744
+ "rewards/accuracies": 0.6875,
3745
+ "rewards/chosen": -0.6558996438980103,
3746
+ "rewards/margins": 0.27045130729675293,
3747
+ "rewards/rejected": -0.9263509511947632,
3748
+ "step": 490
3749
+ },
3750
+ {
3751
+ "epoch": 0.5675149592675366,
3752
+ "grad_norm": 71.0686050492484,
3753
+ "learning_rate": 1.7285468091117904e-07,
3754
+ "logits/chosen": -1.4989047050476074,
3755
+ "logits/rejected": -1.4156945943832397,
3756
+ "logps/chosen": -153.10214233398438,
3757
+ "logps/rejected": -172.13262939453125,
3758
+ "loss": 0.5901,
3759
+ "rewards/accuracies": 0.625,
3760
+ "rewards/chosen": -0.4824844300746918,
3761
+ "rewards/margins": 0.44079095125198364,
3762
+ "rewards/rejected": -0.9232754707336426,
3763
+ "step": 492
3764
+ },
3765
+ {
3766
+ "epoch": 0.569821930646673,
3767
+ "grad_norm": 67.99918941849218,
3768
+ "learning_rate": 1.7259041877075352e-07,
3769
+ "logits/chosen": -1.430630087852478,
3770
+ "logits/rejected": -1.3989218473434448,
3771
+ "logps/chosen": -209.73452758789062,
3772
+ "logps/rejected": -254.0313720703125,
3773
+ "loss": 0.5729,
3774
+ "rewards/accuracies": 0.6875,
3775
+ "rewards/chosen": -0.5363369584083557,
3776
+ "rewards/margins": 0.5890082120895386,
3777
+ "rewards/rejected": -1.125345230102539,
3778
+ "step": 494
3779
+ },
3780
+ {
3781
+ "epoch": 0.5721289020258092,
3782
+ "grad_norm": 78.40754956054191,
3783
+ "learning_rate": 1.7232508066520698e-07,
3784
+ "logits/chosen": -1.5510261058807373,
3785
+ "logits/rejected": -1.5487847328186035,
3786
+ "logps/chosen": -211.16983032226562,
3787
+ "logps/rejected": -240.33824157714844,
3788
+ "loss": 0.5772,
3789
+ "rewards/accuracies": 0.6875,
3790
+ "rewards/chosen": -0.4531714916229248,
3791
+ "rewards/margins": 0.2688879370689392,
3792
+ "rewards/rejected": -0.7220594882965088,
3793
+ "step": 496
3794
+ },
3795
+ {
3796
+ "epoch": 0.5744358734049456,
3797
+ "grad_norm": 61.990430466819326,
3798
+ "learning_rate": 1.7205867052749023e-07,
3799
+ "logits/chosen": -1.363396167755127,
3800
+ "logits/rejected": -1.3964465856552124,
3801
+ "logps/chosen": -147.12242126464844,
3802
+ "logps/rejected": -180.23667907714844,
3803
+ "loss": 0.6459,
3804
+ "rewards/accuracies": 0.71875,
3805
+ "rewards/chosen": -0.5750865340232849,
3806
+ "rewards/margins": 0.11157172918319702,
3807
+ "rewards/rejected": -0.6866582632064819,
3808
+ "step": 498
3809
+ },
3810
+ {
3811
+ "epoch": 0.5767428447840819,
3812
+ "grad_norm": 76.0573953537264,
3813
+ "learning_rate": 1.717911923064442e-07,
3814
+ "logits/chosen": -1.5747530460357666,
3815
+ "logits/rejected": -1.4509817361831665,
3816
+ "logps/chosen": -181.61216735839844,
3817
+ "logps/rejected": -153.97573852539062,
3818
+ "loss": 0.6012,
3819
+ "rewards/accuracies": 0.59375,
3820
+ "rewards/chosen": -0.5273740887641907,
3821
+ "rewards/margins": 0.1454104781150818,
3822
+ "rewards/rejected": -0.6727845668792725,
3823
+ "step": 500
3824
+ },
3825
+ {
3826
+ "epoch": 0.5767428447840819,
3827
+ "eval_logits/chosen": -1.440444827079773,
3828
+ "eval_logits/rejected": -1.3533989191055298,
3829
+ "eval_logps/chosen": -191.4648895263672,
3830
+ "eval_logps/rejected": -158.6099395751953,
3831
+ "eval_loss": 0.636239767074585,
3832
+ "eval_rewards/accuracies": 0.6800000071525574,
3833
+ "eval_rewards/chosen": -0.628268837928772,
3834
+ "eval_rewards/margins": 0.18118661642074585,
3835
+ "eval_rewards/rejected": -0.809455394744873,
3836
+ "eval_runtime": 37.9799,
3837
+ "eval_samples_per_second": 2.633,
3838
+ "eval_steps_per_second": 0.658,
3839
+ "step": 500
3840
+ },
3841
+ {
3842
+ "epoch": 0.5790498161632183,
3843
+ "grad_norm": 77.78220283215643,
3844
+ "learning_rate": 1.7152264996674135e-07,
3845
+ "logits/chosen": -1.4428610801696777,
3846
+ "logits/rejected": -1.2872042655944824,
3847
+ "logps/chosen": -184.39501953125,
3848
+ "logps/rejected": -238.38723754882812,
3849
+ "loss": 0.5953,
3850
+ "rewards/accuracies": 0.75,
3851
+ "rewards/chosen": -0.6475786566734314,
3852
+ "rewards/margins": 0.2779845893383026,
3853
+ "rewards/rejected": -0.9255632758140564,
3854
+ "step": 502
3855
+ },
3856
+ {
3857
+ "epoch": 0.5813567875423545,
3858
+ "grad_norm": 93.29916680291039,
3859
+ "learning_rate": 1.71253047488827e-07,
3860
+ "logits/chosen": -1.4898688793182373,
3861
+ "logits/rejected": -1.5620332956314087,
3862
+ "logps/chosen": -178.47802734375,
3863
+ "logps/rejected": -205.5224609375,
3864
+ "loss": 0.6703,
3865
+ "rewards/accuracies": 0.625,
3866
+ "rewards/chosen": -0.5680350065231323,
3867
+ "rewards/margins": 0.18766377866268158,
3868
+ "rewards/rejected": -0.7556988000869751,
3869
+ "step": 504
3870
+ },
3871
+ {
3872
+ "epoch": 0.5836637589214909,
3873
+ "grad_norm": 77.19105499219319,
3874
+ "learning_rate": 1.7098238886886024e-07,
3875
+ "logits/chosen": -1.4835506677627563,
3876
+ "logits/rejected": -1.5302045345306396,
3877
+ "logps/chosen": -203.8736114501953,
3878
+ "logps/rejected": -228.69265747070312,
3879
+ "loss": 0.5951,
3880
+ "rewards/accuracies": 0.53125,
3881
+ "rewards/chosen": -0.47867119312286377,
3882
+ "rewards/margins": 0.22942683100700378,
3883
+ "rewards/rejected": -0.7080979943275452,
3884
+ "step": 506
3885
+ },
3886
+ {
3887
+ "epoch": 0.5859707303006272,
3888
+ "grad_norm": 67.4261860354,
3889
+ "learning_rate": 1.7071067811865473e-07,
3890
+ "logits/chosen": -1.4649958610534668,
3891
+ "logits/rejected": -1.4145183563232422,
3892
+ "logps/chosen": -199.42066955566406,
3893
+ "logps/rejected": -235.40292358398438,
3894
+ "loss": 0.5368,
3895
+ "rewards/accuracies": 0.78125,
3896
+ "rewards/chosen": -0.4195340573787689,
3897
+ "rewards/margins": 0.551209032535553,
3898
+ "rewards/rejected": -0.9707430601119995,
3899
+ "step": 508
3900
+ },
3901
+ {
3902
+ "epoch": 0.5882777016797636,
3903
+ "grad_norm": 87.85240065033273,
3904
+ "learning_rate": 1.7043791926561932e-07,
3905
+ "logits/chosen": -1.5964919328689575,
3906
+ "logits/rejected": -1.561856746673584,
3907
+ "logps/chosen": -201.67276000976562,
3908
+ "logps/rejected": -234.04359436035156,
3909
+ "loss": 0.651,
3910
+ "rewards/accuracies": 0.65625,
3911
+ "rewards/chosen": -0.6162290573120117,
3912
+ "rewards/margins": 0.4439167082309723,
3913
+ "rewards/rejected": -1.0601458549499512,
3914
+ "step": 510
3915
+ },
3916
+ {
3917
+ "epoch": 0.5905846730588998,
3918
+ "grad_norm": 62.42968300457303,
3919
+ "learning_rate": 1.7016411635269815e-07,
3920
+ "logits/chosen": -1.4615092277526855,
3921
+ "logits/rejected": -1.4488492012023926,
3922
+ "logps/chosen": -151.2560577392578,
3923
+ "logps/rejected": -176.4474334716797,
3924
+ "loss": 0.609,
3925
+ "rewards/accuracies": 0.75,
3926
+ "rewards/chosen": -0.33995571732521057,
3927
+ "rewards/margins": 0.2483442723751068,
3928
+ "rewards/rejected": -0.5882999897003174,
3929
+ "step": 512
3930
+ },
3931
+ {
3932
+ "epoch": 0.5928916444380362,
3933
+ "grad_norm": 74.39629379240114,
3934
+ "learning_rate": 1.6988927343831091e-07,
3935
+ "logits/chosen": -1.5747379064559937,
3936
+ "logits/rejected": -1.4773468971252441,
3937
+ "logps/chosen": -198.891845703125,
3938
+ "logps/rejected": -210.0729522705078,
3939
+ "loss": 0.61,
3940
+ "rewards/accuracies": 0.78125,
3941
+ "rewards/chosen": -0.47531554102897644,
3942
+ "rewards/margins": 0.47791624069213867,
3943
+ "rewards/rejected": -0.9532317519187927,
3944
+ "step": 514
3945
+ },
3946
+ {
3947
+ "epoch": 0.5951986158171725,
3948
+ "grad_norm": 70.19350216590036,
3949
+ "learning_rate": 1.6961339459629266e-07,
3950
+ "logits/chosen": -1.4481630325317383,
3951
+ "logits/rejected": -1.4714566469192505,
3952
+ "logps/chosen": -190.8370361328125,
3953
+ "logps/rejected": -242.71621704101562,
3954
+ "loss": 0.5872,
3955
+ "rewards/accuracies": 0.78125,
3956
+ "rewards/chosen": -0.5172877907752991,
3957
+ "rewards/margins": 0.48140281438827515,
3958
+ "rewards/rejected": -0.998690664768219,
3959
+ "step": 516
3960
+ },
3961
+ {
3962
+ "epoch": 0.5975055871963089,
3963
+ "grad_norm": 73.75535823993799,
3964
+ "learning_rate": 1.6933648391583328e-07,
3965
+ "logits/chosen": -1.531792163848877,
3966
+ "logits/rejected": -1.4680547714233398,
3967
+ "logps/chosen": -144.9717559814453,
3968
+ "logps/rejected": -172.87686157226562,
3969
+ "loss": 0.6006,
3970
+ "rewards/accuracies": 0.59375,
3971
+ "rewards/chosen": -0.3757992386817932,
3972
+ "rewards/margins": 0.35130438208580017,
3973
+ "rewards/rejected": -0.7271036505699158,
3974
+ "step": 518
3975
+ },
3976
+ {
3977
+ "epoch": 0.5998125585754451,
3978
+ "grad_norm": 69.85303523035323,
3979
+ "learning_rate": 1.6905854550141714e-07,
3980
+ "logits/chosen": -1.5805073976516724,
3981
+ "logits/rejected": -1.5384862422943115,
3982
+ "logps/chosen": -171.9115753173828,
3983
+ "logps/rejected": -169.82862854003906,
3984
+ "loss": 0.5875,
3985
+ "rewards/accuracies": 0.78125,
3986
+ "rewards/chosen": -0.5081273317337036,
3987
+ "rewards/margins": 0.2863667607307434,
3988
+ "rewards/rejected": -0.794494092464447,
3989
+ "step": 520
3990
+ },
3991
+ {
3992
+ "epoch": 0.6021195299545815,
3993
+ "grad_norm": 69.03602758187714,
3994
+ "learning_rate": 1.6877958347276197e-07,
3995
+ "logits/chosen": -1.4844419956207275,
3996
+ "logits/rejected": -1.4906061887741089,
3997
+ "logps/chosen": -149.6005859375,
3998
+ "logps/rejected": -163.59097290039062,
3999
+ "loss": 0.6013,
4000
+ "rewards/accuracies": 0.65625,
4001
+ "rewards/chosen": -0.42841285467147827,
4002
+ "rewards/margins": 0.30834630131721497,
4003
+ "rewards/rejected": -0.7367592453956604,
4004
+ "step": 522
4005
+ },
4006
+ {
4007
+ "epoch": 0.6044265013337178,
4008
+ "grad_norm": 80.75337933099041,
4009
+ "learning_rate": 1.6849960196475805e-07,
4010
+ "logits/chosen": -1.5245236158370972,
4011
+ "logits/rejected": -1.5345442295074463,
4012
+ "logps/chosen": -148.5638885498047,
4013
+ "logps/rejected": -178.37429809570312,
4014
+ "loss": 0.5909,
4015
+ "rewards/accuracies": 0.75,
4016
+ "rewards/chosen": -0.3656730651855469,
4017
+ "rewards/margins": 0.3520704507827759,
4018
+ "rewards/rejected": -0.7177435159683228,
4019
+ "step": 524
4020
+ },
4021
+ {
4022
+ "epoch": 0.6067334727128542,
4023
+ "grad_norm": 79.6488573037571,
4024
+ "learning_rate": 1.682186051274067e-07,
4025
+ "logits/chosen": -1.4462357759475708,
4026
+ "logits/rejected": -1.4616801738739014,
4027
+ "logps/chosen": -144.83853149414062,
4028
+ "logps/rejected": -191.320556640625,
4029
+ "loss": 0.5847,
4030
+ "rewards/accuracies": 0.75,
4031
+ "rewards/chosen": -0.6087457537651062,
4032
+ "rewards/margins": 0.3239368498325348,
4033
+ "rewards/rejected": -0.9326826930046082,
4034
+ "step": 526
4035
+ },
4036
+ {
4037
+ "epoch": 0.6090404440919904,
4038
+ "grad_norm": 82.53815106903608,
4039
+ "learning_rate": 1.6793659712575895e-07,
4040
+ "logits/chosen": -1.5642480850219727,
4041
+ "logits/rejected": -1.4599685668945312,
4042
+ "logps/chosen": -215.29837036132812,
4043
+ "logps/rejected": -199.14767456054688,
4044
+ "loss": 0.5928,
4045
+ "rewards/accuracies": 0.5625,
4046
+ "rewards/chosen": -0.5695382356643677,
4047
+ "rewards/margins": 0.271673321723938,
4048
+ "rewards/rejected": -0.8412115573883057,
4049
+ "step": 528
4050
+ },
4051
+ {
4052
+ "epoch": 0.6113474154711268,
4053
+ "grad_norm": 86.53571512694035,
4054
+ "learning_rate": 1.676535821398537e-07,
4055
+ "logits/chosen": -1.3208836317062378,
4056
+ "logits/rejected": -1.3146097660064697,
4057
+ "logps/chosen": -189.41128540039062,
4058
+ "logps/rejected": -232.5477294921875,
4059
+ "loss": 0.6013,
4060
+ "rewards/accuracies": 0.65625,
4061
+ "rewards/chosen": -0.654186487197876,
4062
+ "rewards/margins": 0.4602148234844208,
4063
+ "rewards/rejected": -1.1144013404846191,
4064
+ "step": 530
4065
+ },
4066
+ {
4067
+ "epoch": 0.6136543868502632,
4068
+ "grad_norm": 70.64851504723866,
4069
+ "learning_rate": 1.6736956436465573e-07,
4070
+ "logits/chosen": -1.3590030670166016,
4071
+ "logits/rejected": -1.4608113765716553,
4072
+ "logps/chosen": -148.809326171875,
4073
+ "logps/rejected": -203.59759521484375,
4074
+ "loss": 0.5861,
4075
+ "rewards/accuracies": 0.71875,
4076
+ "rewards/chosen": -0.496415913105011,
4077
+ "rewards/margins": 0.31767329573631287,
4078
+ "rewards/rejected": -0.814089298248291,
4079
+ "step": 532
4080
+ },
4081
+ {
4082
+ "epoch": 0.6159613582293995,
4083
+ "grad_norm": 73.57136513502368,
4084
+ "learning_rate": 1.6708454800999366e-07,
4085
+ "logits/chosen": -1.4504910707473755,
4086
+ "logits/rejected": -1.4983229637145996,
4087
+ "logps/chosen": -166.2091522216797,
4088
+ "logps/rejected": -206.8488311767578,
4089
+ "loss": 0.6153,
4090
+ "rewards/accuracies": 0.75,
4091
+ "rewards/chosen": -0.49555644392967224,
4092
+ "rewards/margins": 0.3523869812488556,
4093
+ "rewards/rejected": -0.8479433655738831,
4094
+ "step": 534
4095
+ },
4096
+ {
4097
+ "epoch": 0.6182683296085358,
4098
+ "grad_norm": 67.83021038753246,
4099
+ "learning_rate": 1.667985373004974e-07,
4100
+ "logits/chosen": -1.4747323989868164,
4101
+ "logits/rejected": -1.3922568559646606,
4102
+ "logps/chosen": -159.47254943847656,
4103
+ "logps/rejected": -177.21884155273438,
4104
+ "loss": 0.5691,
4105
+ "rewards/accuracies": 0.8125,
4106
+ "rewards/chosen": -0.2918567657470703,
4107
+ "rewards/margins": 0.5216075778007507,
4108
+ "rewards/rejected": -0.8134642839431763,
4109
+ "step": 536
4110
+ },
4111
+ {
4112
+ "epoch": 0.6205753009876721,
4113
+ "grad_norm": 75.55693314924734,
4114
+ "learning_rate": 1.6651153647553567e-07,
4115
+ "logits/chosen": -1.6021491289138794,
4116
+ "logits/rejected": -1.6126930713653564,
4117
+ "logps/chosen": -165.55172729492188,
4118
+ "logps/rejected": -197.1583251953125,
4119
+ "loss": 0.5986,
4120
+ "rewards/accuracies": 0.65625,
4121
+ "rewards/chosen": -0.505136251449585,
4122
+ "rewards/margins": 0.2592867612838745,
4123
+ "rewards/rejected": -0.7644230127334595,
4124
+ "step": 538
4125
+ },
4126
+ {
4127
+ "epoch": 0.6228822723668085,
4128
+ "grad_norm": 74.57237448077612,
4129
+ "learning_rate": 1.6622354978915304e-07,
4130
+ "logits/chosen": -1.3560292720794678,
4131
+ "logits/rejected": -1.4895740747451782,
4132
+ "logps/chosen": -152.60386657714844,
4133
+ "logps/rejected": -200.48497009277344,
4134
+ "loss": 0.5976,
4135
+ "rewards/accuracies": 0.75,
4136
+ "rewards/chosen": -0.450514554977417,
4137
+ "rewards/margins": 0.42979568243026733,
4138
+ "rewards/rejected": -0.8803102374076843,
4139
+ "step": 540
4140
+ },
4141
+ {
4142
+ "epoch": 0.6251892437459448,
4143
+ "grad_norm": 76.07758708375029,
4144
+ "learning_rate": 1.6593458151000687e-07,
4145
+ "logits/chosen": -1.418495535850525,
4146
+ "logits/rejected": -1.5285032987594604,
4147
+ "logps/chosen": -174.468017578125,
4148
+ "logps/rejected": -212.58534240722656,
4149
+ "loss": 0.6021,
4150
+ "rewards/accuracies": 0.625,
4151
+ "rewards/chosen": -0.4992409944534302,
4152
+ "rewards/margins": 0.357663631439209,
4153
+ "rewards/rejected": -0.8569046854972839,
4154
+ "step": 542
4155
+ },
4156
+ {
4157
+ "epoch": 0.6274962151250811,
4158
+ "grad_norm": 67.61668250943133,
4159
+ "learning_rate": 1.6564463592130426e-07,
4160
+ "logits/chosen": -1.6000475883483887,
4161
+ "logits/rejected": -1.5714551210403442,
4162
+ "logps/chosen": -129.46788024902344,
4163
+ "logps/rejected": -137.58729553222656,
4164
+ "loss": 0.6027,
4165
+ "rewards/accuracies": 0.6875,
4166
+ "rewards/chosen": -0.4155838191509247,
4167
+ "rewards/margins": 0.31966376304626465,
4168
+ "rewards/rejected": -0.7352475523948669,
4169
+ "step": 544
4170
+ },
4171
+ {
4172
+ "epoch": 0.6298031865042174,
4173
+ "grad_norm": 67.37831547087359,
4174
+ "learning_rate": 1.6535371732073823e-07,
4175
+ "logits/chosen": -1.5627467632293701,
4176
+ "logits/rejected": -1.4833993911743164,
4177
+ "logps/chosen": -115.5599594116211,
4178
+ "logps/rejected": -121.90804290771484,
4179
+ "loss": 0.5859,
4180
+ "rewards/accuracies": 0.625,
4181
+ "rewards/chosen": -0.286516010761261,
4182
+ "rewards/margins": 0.36314332485198975,
4183
+ "rewards/rejected": -0.6496593356132507,
4184
+ "step": 546
4185
+ },
4186
+ {
4187
+ "epoch": 0.6321101578833538,
4188
+ "grad_norm": 79.67037148877638,
4189
+ "learning_rate": 1.650618300204242e-07,
4190
+ "logits/chosen": -1.4731521606445312,
4191
+ "logits/rejected": -1.5530614852905273,
4192
+ "logps/chosen": -218.06552124023438,
4193
+ "logps/rejected": -257.6269226074219,
4194
+ "loss": 0.6104,
4195
+ "rewards/accuracies": 0.5625,
4196
+ "rewards/chosen": -0.7696484923362732,
4197
+ "rewards/margins": 0.28321802616119385,
4198
+ "rewards/rejected": -1.0528665781021118,
4199
+ "step": 548
4200
+ },
4201
+ {
4202
+ "epoch": 0.63441712926249,
4203
+ "grad_norm": 67.9423797863854,
4204
+ "learning_rate": 1.6476897834683618e-07,
4205
+ "logits/chosen": -1.4056189060211182,
4206
+ "logits/rejected": -1.4078246355056763,
4207
+ "logps/chosen": -147.92111206054688,
4208
+ "logps/rejected": -188.60968017578125,
4209
+ "loss": 0.6018,
4210
+ "rewards/accuracies": 0.75,
4211
+ "rewards/chosen": -0.5256268978118896,
4212
+ "rewards/margins": 0.4678364396095276,
4213
+ "rewards/rejected": -0.9934633374214172,
4214
+ "step": 550
4215
+ },
4216
+ {
4217
+ "epoch": 0.6367241006416264,
4218
+ "grad_norm": 68.15375283996126,
4219
+ "learning_rate": 1.644751666407424e-07,
4220
+ "logits/chosen": -1.2929272651672363,
4221
+ "logits/rejected": -1.3170608282089233,
4222
+ "logps/chosen": -207.3567352294922,
4223
+ "logps/rejected": -262.3974609375,
4224
+ "loss": 0.5823,
4225
+ "rewards/accuracies": 0.6875,
4226
+ "rewards/chosen": -0.7716534733772278,
4227
+ "rewards/margins": 0.6446899771690369,
4228
+ "rewards/rejected": -1.4163434505462646,
4229
+ "step": 552
4230
+ },
4231
+ {
4232
+ "epoch": 0.6390310720207627,
4233
+ "grad_norm": 71.41650018580867,
4234
+ "learning_rate": 1.6418039925714115e-07,
4235
+ "logits/chosen": -1.3858839273452759,
4236
+ "logits/rejected": -1.3953114748001099,
4237
+ "logps/chosen": -160.35096740722656,
4238
+ "logps/rejected": -186.47933959960938,
4239
+ "loss": 0.5559,
4240
+ "rewards/accuracies": 0.65625,
4241
+ "rewards/chosen": -0.5581396222114563,
4242
+ "rewards/margins": 0.3457927703857422,
4243
+ "rewards/rejected": -0.9039323329925537,
4244
+ "step": 554
4245
+ },
4246
+ {
4247
+ "epoch": 0.6413380433998991,
4248
+ "grad_norm": 76.78836475295354,
4249
+ "learning_rate": 1.6388468056519612e-07,
4250
+ "logits/chosen": -1.4668548107147217,
4251
+ "logits/rejected": -1.4067307710647583,
4252
+ "logps/chosen": -212.10546875,
4253
+ "logps/rejected": -193.7842254638672,
4254
+ "loss": 0.5721,
4255
+ "rewards/accuracies": 0.71875,
4256
+ "rewards/chosen": -0.618504524230957,
4257
+ "rewards/margins": 0.36426225304603577,
4258
+ "rewards/rejected": -0.9827668070793152,
4259
+ "step": 556
4260
+ },
4261
+ {
4262
+ "epoch": 0.6436450147790354,
4263
+ "grad_norm": 66.95864858123714,
4264
+ "learning_rate": 1.6358801494817172e-07,
4265
+ "logits/chosen": -1.4181556701660156,
4266
+ "logits/rejected": -1.409440279006958,
4267
+ "logps/chosen": -139.5923309326172,
4268
+ "logps/rejected": -183.9441375732422,
4269
+ "loss": 0.5663,
4270
+ "rewards/accuracies": 0.71875,
4271
+ "rewards/chosen": -0.42550671100616455,
4272
+ "rewards/margins": 0.626122236251831,
4273
+ "rewards/rejected": -1.0516289472579956,
4274
+ "step": 558
4275
+ },
4276
+ {
4277
+ "epoch": 0.6459519861581717,
4278
+ "grad_norm": 88.18680458715171,
4279
+ "learning_rate": 1.6329040680336805e-07,
4280
+ "logits/chosen": -1.468677282333374,
4281
+ "logits/rejected": -1.5043675899505615,
4282
+ "logps/chosen": -161.72213745117188,
4283
+ "logps/rejected": -206.85214233398438,
4284
+ "loss": 0.572,
4285
+ "rewards/accuracies": 0.6875,
4286
+ "rewards/chosen": -0.5167573690414429,
4287
+ "rewards/margins": 0.36671191453933716,
4288
+ "rewards/rejected": -0.8834693431854248,
4289
+ "step": 560
4290
+ },
4291
+ {
4292
+ "epoch": 0.648258957537308,
4293
+ "grad_norm": 71.84112642036989,
4294
+ "learning_rate": 1.6299186054205575e-07,
4295
+ "logits/chosen": -1.5098912715911865,
4296
+ "logits/rejected": -1.4657700061798096,
4297
+ "logps/chosen": -177.00067138671875,
4298
+ "logps/rejected": -190.06985473632812,
4299
+ "loss": 0.5365,
4300
+ "rewards/accuracies": 0.78125,
4301
+ "rewards/chosen": -0.3948441743850708,
4302
+ "rewards/margins": 0.5432202816009521,
4303
+ "rewards/rejected": -0.9380643963813782,
4304
+ "step": 562
4305
+ },
4306
+ {
4307
+ "epoch": 0.6505659289164444,
4308
+ "grad_norm": 77.21845596596229,
4309
+ "learning_rate": 1.6269238058941067e-07,
4310
+ "logits/chosen": -1.5354855060577393,
4311
+ "logits/rejected": -1.4872441291809082,
4312
+ "logps/chosen": -220.86279296875,
4313
+ "logps/rejected": -242.259765625,
4314
+ "loss": 0.6141,
4315
+ "rewards/accuracies": 0.71875,
4316
+ "rewards/chosen": -0.5020161867141724,
4317
+ "rewards/margins": 0.3912605345249176,
4318
+ "rewards/rejected": -0.8932766914367676,
4319
+ "step": 564
4320
+ },
4321
+ {
4322
+ "epoch": 0.6528729002955808,
4323
+ "grad_norm": 77.14842839642075,
4324
+ "learning_rate": 1.6239197138444807e-07,
4325
+ "logits/chosen": -1.4313609600067139,
4326
+ "logits/rejected": -1.4305431842803955,
4327
+ "logps/chosen": -99.62786865234375,
4328
+ "logps/rejected": -128.8907928466797,
4329
+ "loss": 0.5895,
4330
+ "rewards/accuracies": 0.75,
4331
+ "rewards/chosen": -0.1888483613729477,
4332
+ "rewards/margins": 0.4503237307071686,
4333
+ "rewards/rejected": -0.6391721367835999,
4334
+ "step": 566
4335
+ },
4336
+ {
4337
+ "epoch": 0.655179871674717,
4338
+ "grad_norm": 62.79374975719681,
4339
+ "learning_rate": 1.6209063737995714e-07,
4340
+ "logits/chosen": -1.4637759923934937,
4341
+ "logits/rejected": -1.4549309015274048,
4342
+ "logps/chosen": -144.82948303222656,
4343
+ "logps/rejected": -185.9346466064453,
4344
+ "loss": 0.5515,
4345
+ "rewards/accuracies": 0.71875,
4346
+ "rewards/chosen": -0.44154876470565796,
4347
+ "rewards/margins": 0.37137869000434875,
4348
+ "rewards/rejected": -0.8129273653030396,
4349
+ "step": 568
4350
+ },
4351
+ {
4352
+ "epoch": 0.6574868430538534,
4353
+ "grad_norm": 77.33084496555169,
4354
+ "learning_rate": 1.6178838304243472e-07,
4355
+ "logits/chosen": -1.491298794746399,
4356
+ "logits/rejected": -1.5582300424575806,
4357
+ "logps/chosen": -193.7870635986328,
4358
+ "logps/rejected": -242.5855712890625,
4359
+ "loss": 0.5723,
4360
+ "rewards/accuracies": 0.8125,
4361
+ "rewards/chosen": -0.5082133412361145,
4362
+ "rewards/margins": 0.6296249628067017,
4363
+ "rewards/rejected": -1.1378382444381714,
4364
+ "step": 570
4365
+ },
4366
+ {
4367
+ "epoch": 0.6597938144329897,
4368
+ "grad_norm": 67.02472308421605,
4369
+ "learning_rate": 1.6148521285201927e-07,
4370
+ "logits/chosen": -1.4817756414413452,
4371
+ "logits/rejected": -1.402366042137146,
4372
+ "logps/chosen": -154.45765686035156,
4373
+ "logps/rejected": -178.16561889648438,
4374
+ "loss": 0.5564,
4375
+ "rewards/accuracies": 0.75,
4376
+ "rewards/chosen": -0.3961385488510132,
4377
+ "rewards/margins": 0.5840703248977661,
4378
+ "rewards/rejected": -0.9802089333534241,
4379
+ "step": 572
4380
+ },
4381
+ {
4382
+ "epoch": 0.6621007858121261,
4383
+ "grad_norm": 73.0106659319347,
4384
+ "learning_rate": 1.6118113130242432e-07,
4385
+ "logits/chosen": -1.4550271034240723,
4386
+ "logits/rejected": -1.4115763902664185,
4387
+ "logps/chosen": -221.6585235595703,
4388
+ "logps/rejected": -195.1796417236328,
4389
+ "loss": 0.5774,
4390
+ "rewards/accuracies": 0.65625,
4391
+ "rewards/chosen": -0.8219617009162903,
4392
+ "rewards/margins": 0.16280440986156464,
4393
+ "rewards/rejected": -0.9847662448883057,
4394
+ "step": 574
4395
+ },
4396
+ {
4397
+ "epoch": 0.6644077571912623,
4398
+ "grad_norm": 77.31259598468839,
4399
+ "learning_rate": 1.6087614290087206e-07,
4400
+ "logits/chosen": -1.4929287433624268,
4401
+ "logits/rejected": -1.4764537811279297,
4402
+ "logps/chosen": -230.29653930664062,
4403
+ "logps/rejected": -284.22412109375,
4404
+ "loss": 0.5818,
4405
+ "rewards/accuracies": 0.78125,
4406
+ "rewards/chosen": -0.6301875114440918,
4407
+ "rewards/margins": 0.7476638555526733,
4408
+ "rewards/rejected": -1.3778512477874756,
4409
+ "step": 576
4410
+ },
4411
+ {
4412
+ "epoch": 0.6667147285703987,
4413
+ "grad_norm": 69.04855850678052,
4414
+ "learning_rate": 1.605702521680263e-07,
4415
+ "logits/chosen": -1.3067015409469604,
4416
+ "logits/rejected": -1.338529348373413,
4417
+ "logps/chosen": -147.36080932617188,
4418
+ "logps/rejected": -193.80665588378906,
4419
+ "loss": 0.5757,
4420
+ "rewards/accuracies": 0.71875,
4421
+ "rewards/chosen": -0.6234080791473389,
4422
+ "rewards/margins": 0.39194294810295105,
4423
+ "rewards/rejected": -1.0153510570526123,
4424
+ "step": 578
4425
+ },
4426
+ {
4427
+ "epoch": 0.669021699949535,
4428
+ "grad_norm": 81.45402825293101,
4429
+ "learning_rate": 1.6026346363792565e-07,
4430
+ "logits/chosen": -1.4524238109588623,
4431
+ "logits/rejected": -1.3550243377685547,
4432
+ "logps/chosen": -187.0885772705078,
4433
+ "logps/rejected": -177.09780883789062,
4434
+ "loss": 0.6058,
4435
+ "rewards/accuracies": 0.6875,
4436
+ "rewards/chosen": -0.7711231708526611,
4437
+ "rewards/margins": 0.17797166109085083,
4438
+ "rewards/rejected": -0.9490947127342224,
4439
+ "step": 580
4440
+ },
4441
+ {
4442
+ "epoch": 0.6713286713286714,
4443
+ "grad_norm": 65.47602685653504,
4444
+ "learning_rate": 1.5995578185791616e-07,
4445
+ "logits/chosen": -1.387951374053955,
4446
+ "logits/rejected": -1.3309695720672607,
4447
+ "logps/chosen": -158.39202880859375,
4448
+ "logps/rejected": -186.85105895996094,
4449
+ "loss": 0.5825,
4450
+ "rewards/accuracies": 0.75,
4451
+ "rewards/chosen": -0.48583418130874634,
4452
+ "rewards/margins": 0.503716230392456,
4453
+ "rewards/rejected": -0.9895503520965576,
4454
+ "step": 582
4455
+ },
4456
+ {
4457
+ "epoch": 0.6736356427078076,
4458
+ "grad_norm": 76.89288613284735,
4459
+ "learning_rate": 1.596472113885841e-07,
4460
+ "logits/chosen": -1.4493763446807861,
4461
+ "logits/rejected": -1.4876127243041992,
4462
+ "logps/chosen": -180.78541564941406,
4463
+ "logps/rejected": -220.08172607421875,
4464
+ "loss": 0.5822,
4465
+ "rewards/accuracies": 0.65625,
4466
+ "rewards/chosen": -0.5832819938659668,
4467
+ "rewards/margins": 0.494464248418808,
4468
+ "rewards/rejected": -1.0777461528778076,
4469
+ "step": 584
4470
+ },
4471
+ {
4472
+ "epoch": 0.675942614086944,
4473
+ "grad_norm": 82.2690699212878,
4474
+ "learning_rate": 1.5933775680368822e-07,
4475
+ "logits/chosen": -1.4559937715530396,
4476
+ "logits/rejected": -1.5102128982543945,
4477
+ "logps/chosen": -169.15960693359375,
4478
+ "logps/rejected": -176.64280700683594,
4479
+ "loss": 0.6272,
4480
+ "rewards/accuracies": 0.59375,
4481
+ "rewards/chosen": -0.5040290355682373,
4482
+ "rewards/margins": 0.27444028854370117,
4483
+ "rewards/rejected": -0.7784693241119385,
4484
+ "step": 586
4485
+ },
4486
+ {
4487
+ "epoch": 0.6782495854660803,
4488
+ "grad_norm": 76.21062906880101,
4489
+ "learning_rate": 1.5902742269009194e-07,
4490
+ "logits/chosen": -1.348806381225586,
4491
+ "logits/rejected": -1.293540358543396,
4492
+ "logps/chosen": -135.5105438232422,
4493
+ "logps/rejected": -156.5147705078125,
4494
+ "loss": 0.5875,
4495
+ "rewards/accuracies": 0.8125,
4496
+ "rewards/chosen": -0.5231513977050781,
4497
+ "rewards/margins": 0.4782097041606903,
4498
+ "rewards/rejected": -1.0013611316680908,
4499
+ "step": 588
4500
+ },
4501
+ {
4502
+ "epoch": 0.6805565568452167,
4503
+ "grad_norm": 75.50192821178838,
4504
+ "learning_rate": 1.5871621364769553e-07,
4505
+ "logits/chosen": -1.5168403387069702,
4506
+ "logits/rejected": -1.4424357414245605,
4507
+ "logps/chosen": -183.81605529785156,
4508
+ "logps/rejected": -171.45872497558594,
4509
+ "loss": 0.6035,
4510
+ "rewards/accuracies": 0.65625,
4511
+ "rewards/chosen": -0.7719120979309082,
4512
+ "rewards/margins": 0.2601196765899658,
4513
+ "rewards/rejected": -1.0320318937301636,
4514
+ "step": 590
4515
+ },
4516
+ {
4517
+ "epoch": 0.6828635282243529,
4518
+ "grad_norm": 84.93892075040027,
4519
+ "learning_rate": 1.5840413428936766e-07,
4520
+ "logits/chosen": -1.3720101118087769,
4521
+ "logits/rejected": -1.391021490097046,
4522
+ "logps/chosen": -171.98031616210938,
4523
+ "logps/rejected": -176.23892211914062,
4524
+ "loss": 0.599,
4525
+ "rewards/accuracies": 0.65625,
4526
+ "rewards/chosen": -0.7516859769821167,
4527
+ "rewards/margins": 0.21854539215564728,
4528
+ "rewards/rejected": -0.9702314138412476,
4529
+ "step": 592
4530
+ },
4531
+ {
4532
+ "epoch": 0.6851704996034893,
4533
+ "grad_norm": 66.70595859312724,
4534
+ "learning_rate": 1.5809118924087733e-07,
4535
+ "logits/chosen": -1.4547669887542725,
4536
+ "logits/rejected": -1.430787205696106,
4537
+ "logps/chosen": -177.32481384277344,
4538
+ "logps/rejected": -208.61553955078125,
4539
+ "loss": 0.6102,
4540
+ "rewards/accuracies": 0.625,
4541
+ "rewards/chosen": -0.5358410477638245,
4542
+ "rewards/margins": 0.26219645142555237,
4543
+ "rewards/rejected": -0.7980375289916992,
4544
+ "step": 594
4545
+ },
4546
+ {
4547
+ "epoch": 0.6874774709826256,
4548
+ "grad_norm": 82.62176636567787,
4549
+ "learning_rate": 1.5777738314082511e-07,
4550
+ "logits/chosen": -1.4137248992919922,
4551
+ "logits/rejected": -1.404469609260559,
4552
+ "logps/chosen": -164.01600646972656,
4553
+ "logps/rejected": -184.97645568847656,
4554
+ "loss": 0.6472,
4555
+ "rewards/accuracies": 0.6875,
4556
+ "rewards/chosen": -0.5918564200401306,
4557
+ "rewards/margins": 0.21411672234535217,
4558
+ "rewards/rejected": -0.8059731721878052,
4559
+ "step": 596
4560
+ },
4561
+ {
4562
+ "epoch": 0.689784442361762,
4563
+ "grad_norm": 72.16505210857706,
4564
+ "learning_rate": 1.5746272064057439e-07,
4565
+ "logits/chosen": -1.3921738862991333,
4566
+ "logits/rejected": -1.3382896184921265,
4567
+ "logps/chosen": -199.48634338378906,
4568
+ "logps/rejected": -226.77871704101562,
4569
+ "loss": 0.5858,
4570
+ "rewards/accuracies": 0.65625,
4571
+ "rewards/chosen": -0.5180130004882812,
4572
+ "rewards/margins": 0.4014572501182556,
4573
+ "rewards/rejected": -0.9194702506065369,
4574
+ "step": 598
4575
+ },
4576
+ {
4577
+ "epoch": 0.6920914137408983,
4578
+ "grad_norm": 78.66776375616931,
4579
+ "learning_rate": 1.5714720640418247e-07,
4580
+ "logits/chosen": -1.511127233505249,
4581
+ "logits/rejected": -1.5256671905517578,
4582
+ "logps/chosen": -182.10826110839844,
4583
+ "logps/rejected": -198.63510131835938,
4584
+ "loss": 0.618,
4585
+ "rewards/accuracies": 0.65625,
4586
+ "rewards/chosen": -0.6393432021141052,
4587
+ "rewards/margins": 0.16456884145736694,
4588
+ "rewards/rejected": -0.8039120435714722,
4589
+ "step": 600
4590
+ },
4591
+ {
4592
+ "epoch": 0.6920914137408983,
4593
+ "eval_logits/chosen": -1.4086966514587402,
4594
+ "eval_logits/rejected": -1.3254387378692627,
4595
+ "eval_logps/chosen": -191.96621704101562,
4596
+ "eval_logps/rejected": -160.9102325439453,
4597
+ "eval_loss": 0.6056262850761414,
4598
+ "eval_rewards/accuracies": 0.7200000286102295,
4599
+ "eval_rewards/chosen": -0.6784057021141052,
4600
+ "eval_rewards/margins": 0.3610783815383911,
4601
+ "eval_rewards/rejected": -1.0394840240478516,
4602
+ "eval_runtime": 37.022,
4603
+ "eval_samples_per_second": 2.701,
4604
+ "eval_steps_per_second": 0.675,
4605
+ "step": 600
4606
  }
4607
  ],
4608
  "logging_steps": 2,