hZzy committed
Commit 9ff305a · verified · 1 parent: 5af2d0e

Model save

README.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ license: apache-2.0
+ base_model: hZzy/qwen2.5-0.5b-sft-news-IFT
+ tags:
+ - trl
+ - expo
+ - generated_from_trainer
+ model-index:
+ - name: qwen2.5-0.5b-expo-DPO-ES-TRY
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/zhiyuzha-university-of-florida/huggingface/runs/gcfd4lf7)
+ # qwen2.5-0.5b-expo-DPO-ES-TRY
+
+ This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6965
+ - Logps: -103.5456
+ - Logits: -2.4456
+ - Objective: 0.7050
+ - Dpo Loss: 0.7050
+ - Regularize: 0.7050
+ - Ranking Simple: 0.5735
+ - Ranking Idealized: 0.6046
+ - Ranking Idealized Expo: 0.5280
+ - Dpo Wo Beta: -3.2166
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a hedged reconstruction sketch follows this list):
+ - learning_rate: 5e-06
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 6
+ - gradient_accumulation_steps: 6
+ - total_train_batch_size: 72
+ - total_eval_batch_size: 12
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
+
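The training script itself is not part of this commit, so the exact `expo` objective and its extra metrics (`objective`, `regularize`, `ranking_simple`, `dpo_wo_beta`, …) cannot be reproduced here. The following is only a minimal sketch of how a comparable DPO run with these hyperparameters and early stopping could be wired up with `trl`; the dataset name, `beta`, and the best-model metric are assumptions, not values recorded in this commit.

```python
# Hedged sketch only: the actual "expo" training code is not included in this commit,
# and the extra metrics in the results table come from that custom loop, not stock trl.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, EarlyStoppingCallback
from trl import DPOConfig, DPOTrainer

base = "hZzy/qwen2.5-0.5b-sft-news-IFT"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# Placeholder: the card lists the dataset as unknown; DPO expects prompt/chosen/rejected columns.
dataset = load_dataset("your/preference-dataset")

args = DPOConfig(
    output_dir="./qwen2.5-0.5b/qwen2.5-0.5b-expo-DPO-ES-TRY",
    beta=0.1,                          # assumption: beta is not recorded in this commit
    learning_rate=5e-6,
    per_device_train_batch_size=2,     # 2 x 6 GPUs x grad-accum 6 = effective batch 72
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=6,
    num_train_epochs=3,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    eval_strategy="steps",
    eval_steps=50,
    save_steps=50,
    logging_steps=50,
    load_best_model_at_end=True,
    metric_for_best_model="eval_ranking_simple",  # inferred from best_metric in trainer_state.json
    greater_is_better=True,
)

trainer = DPOTrainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,               # older trl API; newer releases use processing_class=
    callbacks=[EarlyStoppingCallback(early_stopping_patience=4)],
)
trainer.train()
```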
+ ### Training results
+
+ | Training Loss | Epoch | Step | Dpo Loss | Dpo Wo Beta | Logits | Logps | Validation Loss | Objective | Ranking Idealized | Ranking Idealized Expo | Ranking Simple | Regularize |
+ |:-------------:|:------:|:----:|:--------:|:-----------:|:-------:|:---------:|:---------------:|:---------:|:-----------------:|:----------------------:|:--------------:|:----------:|
+ | 0.6857 | 0.0709 | 50 | 0.6927 | -1.2807 | -1.9606 | -88.9841 | 0.6914 | 0.6927 | 0.6046 | 0.5280 | 0.5362 | 0.6927 |
+ | 0.6524 | 0.1417 | 100 | 0.7010 | -1.8911 | -2.0579 | -98.6358 | 0.6922 | 0.7010 | 0.6046 | 0.5280 | 0.5269 | 0.7010 |
+ | 0.6123 | 0.2126 | 150 | 0.7015 | -2.1166 | -1.9033 | -102.8927 | 0.6967 | 0.7015 | 0.6046 | 0.5280 | 0.5280 | 0.7015 |
+ | 0.5779 | 0.2834 | 200 | 0.6816 | -2.1417 | -2.0716 | -106.4944 | 0.6794 | 0.6816 | 0.6046 | 0.5280 | 0.5507 | 0.6816 |
+ | 0.5709 | 0.3543 | 250 | 0.6817 | -2.2676 | -2.2470 | -87.7326 | 0.6883 | 0.6817 | 0.6046 | 0.5280 | 0.5424 | 0.6817 |
+ | 0.5563 | 0.4251 | 300 | 0.6619 | -2.3796 | -2.2697 | -89.5089 | 0.6811 | 0.6619 | 0.6046 | 0.5280 | 0.5735 | 0.6619 |
+ | 0.5321 | 0.4960 | 350 | 0.6773 | -2.6295 | -2.3683 | -99.0927 | 0.6926 | 0.6773 | 0.6046 | 0.5280 | 0.5735 | 0.6773 |
+ | 0.4963 | 0.5668 | 400 | 0.6836 | -2.6913 | -2.2508 | -106.7073 | 0.6914 | 0.6836 | 0.6046 | 0.5280 | 0.5673 | 0.6836 |
+ | 0.4745 | 0.6377 | 450 | 0.6815 | -2.6738 | -2.2347 | -105.8669 | 0.6938 | 0.6815 | 0.6046 | 0.5280 | 0.5631 | 0.6815 |
+ | 0.4867 | 0.7085 | 500 | 0.6995 | -2.7257 | -2.2182 | -105.1848 | 0.7040 | 0.6995 | 0.6046 | 0.5280 | 0.5507 | 0.6995 |
+ | 0.4582 | 0.7794 | 550 | 0.7027 | -3.1023 | -2.3855 | -102.6643 | 0.6995 | 0.7027 | 0.6046 | 0.5280 | 0.5683 | 0.7027 |
+ | 0.4339 | 0.8503 | 600 | 0.7050 | -3.2166 | -2.4456 | -103.5456 | 0.6965 | 0.7050 | 0.6046 | 0.5280 | 0.5735 | 0.7050 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
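The card stops at the framework versions and does not show how to run the model. A minimal inference sketch, assuming the checkpoint is published as `hZzy/qwen2.5-0.5b-expo-DPO-ES-TRY` (committer namespace plus model name) and loads as a standard `transformers` causal LM:

```python
# Minimal inference sketch (not part of the original card). The repo id and the
# prompt below are assumptions/placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "hZzy/qwen2.5-0.5b-expo-DPO-ES-TRY"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="auto")

prompt = "Write a short news headline about renewable energy."
inputs = tokenizer(prompt, return_tensors="pt")

# generation_config.json in this commit supplies max_new_tokens/eos/pad defaults;
# they can still be overridden per call, as done here.
output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.7)
completion = output_ids[0][inputs["input_ids"].shape[1]:]
print(tokenizer.decode(completion, skip_special_tokens=True))
```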
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.8502598016060463,
+   "total_flos": 0.0,
+   "train_loss": 0.15444423039754232,
+   "train_runtime": 3628.8607,
+   "train_samples": 50802,
+   "train_samples_per_second": 41.998,
+   "train_steps_per_second": 0.583
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 151644,
+   "eos_token_id": 151645,
+   "max_new_tokens": 2048,
+   "pad_token_id": 151645,
+   "transformers_version": "4.42.0"
+ }
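These values become the default generation settings whenever the checkpoint is loaded; a short sketch (same assumed repo id as above) that inspects and overrides them:

```python
# Inspect the defaults that this generation_config.json provides and override one of them.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("hZzy/qwen2.5-0.5b-expo-DPO-ES-TRY")  # assumed repo id
print(gen_cfg.max_new_tokens, gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)

gen_cfg.max_new_tokens = 256  # per-call kwargs to model.generate() also override these defaults
```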
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f9f67c930cb079817a30b62a9d5ce9955a9ce2bfd805f44199310b726da82e8
+ oid sha256:5891934e598bd576da95bb0536f8170134dd4e331ed360f27dd151d750c7dfe4
  size 1975192208
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.8502598016060463,
+   "total_flos": 0.0,
+   "train_loss": 0.15444423039754232,
+   "train_runtime": 3628.8607,
+   "train_samples": 50802,
+   "train_samples_per_second": 41.998,
+   "train_steps_per_second": 0.583
+ }
trainer_state.json ADDED
@@ -0,0 +1,463 @@
+ {
+   "best_metric": 0.5734989643096924,
+   "best_model_checkpoint": "./qwen2.5-0.5b/qwen2.5-0.5b-expo-DPO-ES-TRY/checkpoint-300",
+   "epoch": 0.8502598016060463,
+   "eval_steps": 50,
+   "global_step": 600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "dpo_loss": 0.6931471824645996,
+       "dpo_wo_beta": -0.6931471824645996,
+       "epoch": 0.0014170996693434106,
+       "grad_norm": 25.66138500619404,
+       "learning_rate": 2.358490566037736e-08,
+       "logits": -1.7146095037460327,
+       "logps": -79.01810455322266,
+       "loss": 0.6931,
+       "objective": 0.6931471824645996,
+       "ranking_idealized": 0.3333333432674408,
+       "ranking_idealized_expo": 0.3333333432674408,
+       "ranking_simple": 0.3333333432674408,
+       "regularize": 0.6931471824645996,
+       "step": 1
+     },
+     {
+       "dpo_loss": 0.6873584985733032,
+       "dpo_wo_beta": -0.8693115711212158,
+       "epoch": 0.07085498346717052,
+       "grad_norm": 25.93169363980804,
+       "learning_rate": 1.179245283018868e-06,
+       "logits": -1.890508770942688,
+       "logps": -82.390869140625,
+       "loss": 0.6857,
+       "objective": 0.6873584985733032,
+       "ranking_idealized": 0.6258503198623657,
+       "ranking_idealized_expo": 0.5408163070678711,
+       "ranking_simple": 0.5408163070678711,
+       "regularize": 0.6873584985733032,
+       "step": 50
+     },
+     {
+       "epoch": 0.07085498346717052,
+       "eval_dpo_loss": 0.6927401423454285,
+       "eval_dpo_wo_beta": -1.280670404434204,
+       "eval_logits": -1.960647463798523,
+       "eval_logps": -88.98406219482422,
+       "eval_loss": 0.6913570761680603,
+       "eval_objective": 0.6927401423454285,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5362318754196167,
+       "eval_regularize": 0.6927401423454285,
+       "eval_runtime": 318.1168,
+       "eval_samples_per_second": 18.201,
+       "eval_steps_per_second": 1.518,
+       "step": 50
+     },
+     {
+       "dpo_loss": 0.6656978130340576,
+       "dpo_wo_beta": -1.517911434173584,
+       "epoch": 0.14170996693434104,
+       "grad_norm": 29.18536320021953,
+       "learning_rate": 2.358490566037736e-06,
+       "logits": -2.088792085647583,
+       "logps": -82.19146728515625,
+       "loss": 0.6524,
+       "objective": 0.6656978130340576,
+       "ranking_idealized": 0.5950000286102295,
+       "ranking_idealized_expo": 0.5149999856948853,
+       "ranking_simple": 0.5233333110809326,
+       "regularize": 0.6656978130340576,
+       "step": 100
+     },
+     {
+       "epoch": 0.14170996693434104,
+       "eval_dpo_loss": 0.7010491490364075,
+       "eval_dpo_wo_beta": -1.8911339044570923,
+       "eval_logits": -2.0578620433807373,
+       "eval_logps": -98.63578033447266,
+       "eval_loss": 0.6921781897544861,
+       "eval_objective": 0.7010491490364075,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5269151329994202,
+       "eval_regularize": 0.7010491490364075,
+       "eval_runtime": 317.6312,
+       "eval_samples_per_second": 18.229,
+       "eval_steps_per_second": 1.521,
+       "step": 100
+     },
+     {
+       "dpo_loss": 0.6302288770675659,
+       "dpo_wo_beta": -1.9832934141159058,
+       "epoch": 0.21256495040151158,
+       "grad_norm": 31.92092339525771,
+       "learning_rate": 3.5377358490566038e-06,
+       "logits": -2.047361135482788,
+       "logps": -90.97752380371094,
+       "loss": 0.6123,
+       "objective": 0.6302288770675659,
+       "ranking_idealized": 0.6050000190734863,
+       "ranking_idealized_expo": 0.528333306312561,
+       "ranking_simple": 0.5799999833106995,
+       "regularize": 0.6302288770675659,
+       "step": 150
+     },
+     {
+       "epoch": 0.21256495040151158,
+       "eval_dpo_loss": 0.7015214562416077,
+       "eval_dpo_wo_beta": -2.1165764331817627,
+       "eval_logits": -1.9032589197158813,
+       "eval_logps": -102.8927001953125,
+       "eval_loss": 0.6967350840568542,
+       "eval_objective": 0.7015214562416077,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5279502868652344,
+       "eval_regularize": 0.7015214562416077,
+       "eval_runtime": 317.4305,
+       "eval_samples_per_second": 18.24,
+       "eval_steps_per_second": 1.522,
+       "step": 150
+     },
+     {
+       "dpo_loss": 0.5571741461753845,
+       "dpo_wo_beta": -1.7987542152404785,
+       "epoch": 0.2834199338686821,
+       "grad_norm": 22.667783186241508,
+       "learning_rate": 4.716981132075472e-06,
+       "logits": -2.1097896099090576,
+       "logps": -93.05426025390625,
+       "loss": 0.5779,
+       "objective": 0.5571741461753845,
+       "ranking_idealized": 0.6333333253860474,
+       "ranking_idealized_expo": 0.5433333516120911,
+       "ranking_simple": 0.6449999809265137,
+       "regularize": 0.5571741461753845,
+       "step": 200
+     },
+     {
+       "epoch": 0.2834199338686821,
+       "eval_dpo_loss": 0.6816009283065796,
+       "eval_dpo_wo_beta": -2.1417369842529297,
+       "eval_logits": -2.071585178375244,
+       "eval_logps": -106.49442291259766,
+       "eval_loss": 0.6793810129165649,
+       "eval_objective": 0.6816009283065796,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5507246255874634,
+       "eval_regularize": 0.6816009283065796,
+       "eval_runtime": 316.4091,
+       "eval_samples_per_second": 18.299,
+       "eval_steps_per_second": 1.527,
+       "step": 200
+     },
+     {
+       "dpo_loss": 0.5780055522918701,
+       "dpo_wo_beta": -2.330664873123169,
+       "epoch": 0.35427491733585265,
+       "grad_norm": 19.831646672236253,
+       "learning_rate": 4.995082357614404e-06,
+       "logits": -2.255446434020996,
+       "logps": -94.9359130859375,
+       "loss": 0.5709,
+       "objective": 0.5780055522918701,
+       "ranking_idealized": 0.5699999928474426,
+       "ranking_idealized_expo": 0.4833333194255829,
+       "ranking_simple": 0.6083333492279053,
+       "regularize": 0.5780055522918701,
+       "step": 250
+     },
+     {
+       "epoch": 0.35427491733585265,
+       "eval_dpo_loss": 0.6816768646240234,
+       "eval_dpo_wo_beta": -2.267601728439331,
+       "eval_logits": -2.246999740600586,
+       "eval_logps": -87.73257446289062,
+       "eval_loss": 0.6882591843605042,
+       "eval_objective": 0.6816768646240234,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.542443037033081,
+       "eval_regularize": 0.6816768646240234,
+       "eval_runtime": 316.123,
+       "eval_samples_per_second": 18.316,
+       "eval_steps_per_second": 1.528,
+       "step": 250
+     },
+     {
+       "dpo_loss": 0.5427210330963135,
+       "dpo_wo_beta": -2.1861753463745117,
+       "epoch": 0.42512990080302315,
+       "grad_norm": 27.500670384351913,
+       "learning_rate": 4.973664984850435e-06,
+       "logits": -2.136077404022217,
+       "logps": -81.2485122680664,
+       "loss": 0.5563,
+       "objective": 0.5427210330963135,
+       "ranking_idealized": 0.6299999952316284,
+       "ranking_idealized_expo": 0.5649999976158142,
+       "ranking_simple": 0.6583333611488342,
+       "regularize": 0.5427210330963135,
+       "step": 300
+     },
+     {
+       "epoch": 0.42512990080302315,
+       "eval_dpo_loss": 0.6618562340736389,
+       "eval_dpo_wo_beta": -2.3796472549438477,
+       "eval_logits": -2.269749641418457,
+       "eval_logps": -89.50890350341797,
+       "eval_loss": 0.6810693740844727,
+       "eval_objective": 0.6618562340736389,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5734989643096924,
+       "eval_regularize": 0.6618562340736389,
+       "eval_runtime": 319.8742,
+       "eval_samples_per_second": 18.101,
+       "eval_steps_per_second": 1.51,
+       "step": 300
+     },
+     {
+       "dpo_loss": 0.5661785006523132,
+       "dpo_wo_beta": -2.481991767883301,
+       "epoch": 0.49598488427019366,
+       "grad_norm": 19.802376166923445,
+       "learning_rate": 4.9354031766005005e-06,
+       "logits": -2.372645616531372,
+       "logps": -87.0498046875,
+       "loss": 0.5321,
+       "objective": 0.5661785006523132,
+       "ranking_idealized": 0.6016666889190674,
+       "ranking_idealized_expo": 0.5233333110809326,
+       "ranking_simple": 0.6499999761581421,
+       "regularize": 0.5661785006523132,
+       "step": 350
+     },
+     {
+       "epoch": 0.49598488427019366,
+       "eval_dpo_loss": 0.6773233413696289,
+       "eval_dpo_wo_beta": -2.6295416355133057,
+       "eval_logits": -2.3682754039764404,
+       "eval_logps": -99.09272766113281,
+       "eval_loss": 0.6925813555717468,
+       "eval_objective": 0.6773233413696289,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5734989643096924,
+       "eval_regularize": 0.6773233413696289,
+       "eval_runtime": 316.8112,
+       "eval_samples_per_second": 18.276,
+       "eval_steps_per_second": 1.525,
+       "step": 350
+     },
+     {
+       "dpo_loss": 0.5010849237442017,
+       "dpo_wo_beta": -2.259007453918457,
+       "epoch": 0.5668398677373642,
+       "grad_norm": 15.355707906669174,
+       "learning_rate": 4.880557476860893e-06,
+       "logits": -2.343498468399048,
+       "logps": -100.16142272949219,
+       "loss": 0.4963,
+       "objective": 0.5010849237442017,
+       "ranking_idealized": 0.6200000047683716,
+       "ranking_idealized_expo": 0.5199999809265137,
+       "ranking_simple": 0.6883333325386047,
+       "regularize": 0.5010849237442017,
+       "step": 400
+     },
+     {
+       "epoch": 0.5668398677373642,
+       "eval_dpo_loss": 0.6835893392562866,
+       "eval_dpo_wo_beta": -2.691314697265625,
+       "eval_logits": -2.250782012939453,
+       "eval_logps": -106.707275390625,
+       "eval_loss": 0.6914249062538147,
+       "eval_objective": 0.6835893392562866,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.567287802696228,
+       "eval_regularize": 0.6835893392562866,
+       "eval_runtime": 320.5789,
+       "eval_samples_per_second": 18.061,
+       "eval_steps_per_second": 1.507,
+       "step": 400
+     },
+     {
+       "dpo_loss": 0.47148290276527405,
+       "dpo_wo_beta": -2.062403678894043,
+       "epoch": 0.6376948512045347,
+       "grad_norm": 19.132752913050712,
+       "learning_rate": 4.80950135772933e-06,
+       "logits": -2.2268483638763428,
+       "logps": -102.33979034423828,
+       "loss": 0.4745,
+       "objective": 0.47148290276527405,
+       "ranking_idealized": 0.5916666388511658,
+       "ranking_idealized_expo": 0.4933333396911621,
+       "ranking_simple": 0.675000011920929,
+       "regularize": 0.47148290276527405,
+       "step": 450
+     },
+     {
+       "epoch": 0.6376948512045347,
+       "eval_dpo_loss": 0.681454062461853,
+       "eval_dpo_wo_beta": -2.673811912536621,
+       "eval_logits": -2.2347095012664795,
+       "eval_logps": -105.86691284179688,
+       "eval_loss": 0.6937749981880188,
+       "eval_objective": 0.681454062461853,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5631470084190369,
+       "eval_regularize": 0.681454062461853,
+       "eval_runtime": 319.8507,
+       "eval_samples_per_second": 18.102,
+       "eval_steps_per_second": 1.51,
+       "step": 450
+     },
+     {
+       "dpo_loss": 0.4877893626689911,
+       "dpo_wo_beta": -2.4107022285461426,
+       "epoch": 0.7085498346717053,
+       "grad_norm": 18.950528232263615,
+       "learning_rate": 4.72271867624463e-06,
+       "logits": -2.197690963745117,
+       "logps": -101.995361328125,
+       "loss": 0.4867,
+       "objective": 0.4877893626689911,
+       "ranking_idealized": 0.5933333039283752,
+       "ranking_idealized_expo": 0.5216666460037231,
+       "ranking_simple": 0.699999988079071,
+       "regularize": 0.4877893626689911,
+       "step": 500
+     },
+     {
+       "epoch": 0.7085498346717053,
+       "eval_dpo_loss": 0.6994954347610474,
+       "eval_dpo_wo_beta": -2.7257001399993896,
+       "eval_logits": -2.2181758880615234,
+       "eval_logps": -105.1847915649414,
+       "eval_loss": 0.7040360569953918,
+       "eval_objective": 0.6994954347610474,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5507246255874634,
+       "eval_regularize": 0.6994954347610474,
+       "eval_runtime": 315.9927,
+       "eval_samples_per_second": 18.323,
+       "eval_steps_per_second": 1.529,
+       "step": 500
+     },
+     {
+       "dpo_loss": 0.4837046265602112,
+       "dpo_wo_beta": -2.136967897415161,
+       "epoch": 0.7794048181388757,
+       "grad_norm": 17.007084301353213,
+       "learning_rate": 4.620800379559508e-06,
+       "logits": -2.328810214996338,
+       "logps": -102.86935424804688,
+       "loss": 0.4582,
+       "objective": 0.4837046265602112,
+       "ranking_idealized": 0.5716666579246521,
+       "ranking_idealized_expo": 0.47833332419395447,
+       "ranking_simple": 0.6866666674613953,
+       "regularize": 0.4837046265602112,
+       "step": 550
+     },
+     {
+       "epoch": 0.7794048181388757,
+       "eval_dpo_loss": 0.7027432918548584,
+       "eval_dpo_wo_beta": -3.1023459434509277,
+       "eval_logits": -2.3854899406433105,
+       "eval_logps": -102.664306640625,
+       "eval_loss": 0.6995241045951843,
+       "eval_objective": 0.7027432918548584,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5683229565620422,
+       "eval_regularize": 0.7027432918548584,
+       "eval_runtime": 317.401,
+       "eval_samples_per_second": 18.242,
+       "eval_steps_per_second": 1.522,
+       "step": 550
+     },
+     {
+       "dpo_loss": 0.42498964071273804,
+       "dpo_wo_beta": -1.8326289653778076,
+       "epoch": 0.8502598016060463,
+       "grad_norm": 15.283086339392508,
+       "learning_rate": 4.504440480882651e-06,
+       "logits": -2.4975786209106445,
+       "logps": -99.132568359375,
+       "loss": 0.4339,
+       "objective": 0.42498964071273804,
+       "ranking_idealized": 0.5350000262260437,
+       "ranking_idealized_expo": 0.4749999940395355,
+       "ranking_simple": 0.7083333134651184,
+       "regularize": 0.42498964071273804,
+       "step": 600
+     },
+     {
+       "epoch": 0.8502598016060463,
+       "eval_dpo_loss": 0.7050178647041321,
+       "eval_dpo_wo_beta": -3.216639995574951,
+       "eval_logits": -2.445580005645752,
+       "eval_logps": -103.54557037353516,
+       "eval_loss": 0.6964531540870667,
+       "eval_objective": 0.7050178647041321,
+       "eval_ranking_idealized": 0.6045548915863037,
+       "eval_ranking_idealized_expo": 0.5279502868652344,
+       "eval_ranking_simple": 0.5734989643096924,
+       "eval_regularize": 0.7050178647041321,
+       "eval_runtime": 317.5725,
+       "eval_samples_per_second": 18.232,
+       "eval_steps_per_second": 1.521,
+       "step": 600
+     },
+     {
+       "epoch": 0.8502598016060463,
+       "step": 600,
+       "total_flos": 0.0,
+       "train_loss": 0.15444423039754232,
+       "train_runtime": 3628.8607,
+       "train_samples_per_second": 41.998,
+       "train_steps_per_second": 0.583
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 2115,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 50,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 4,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
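Everything needed to re-plot the training curves is in the `log_history` array above; a small parsing sketch, assuming the file has been downloaded locally as `trainer_state.json`:

```python
# Sketch for pulling the eval curve out of the trainer_state.json shown above.
# Assumes the file is available locally; the path is a placeholder.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the ones carrying eval_* keys.
eval_rows = [e for e in state["log_history"] if "eval_ranking_simple" in e]
for row in eval_rows:
    print(f'step {row["step"]:>4}: '
          f'eval_loss={row["eval_loss"]:.4f}  '
          f'dpo_wo_beta={row["eval_dpo_wo_beta"]:.4f}  '
          f'ranking_simple={row["eval_ranking_simple"]:.4f}')

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
```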