hZzy committed
Commit 073e9a9 · verified · 1 Parent(s): 90513ed

Model save

README.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ license: apache-2.0
+ base_model: hZzy/qwen2.5-0.5b-sft-news-IFT
+ tags:
+ - trl
+ - expo
+ - generated_from_trainer
+ model-index:
+ - name: qwen2.5-0.5b-expo-L2EXPO-ES-10
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/zhiyuzha-university-of-florida/huggingface/runs/akswogl4)
+ # qwen2.5-0.5b-expo-L2EXPO-ES-10
+
+ This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 49.4271
+ - Logps: -79.4245
+ - Logits: -0.4875
+ - Objective: 49.7428
+ - Dpo Loss: 25.2219
+ - Regularize: 49.7428
+ - Ranking Simple: 0.5259
+ - Ranking Idealized: 0.5212
+ - Ranking Idealized Expo: 0.5212
+ - Wo Beta: 14.1923
+
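+ A minimal inference sketch, assuming this checkpoint is published under the repo id `hZzy/qwen2.5-0.5b-expo-L2EXPO-ES-10` and loads with the standard `transformers` Auto classes:
+
+ ```python
+ # Hedged example: the repo id below is assumed from the model name above.
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "hZzy/qwen2.5-0.5b-expo-L2EXPO-ES-10"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id)
+
+ # generation_config.json in this commit sets the eos/pad token ids and max_new_tokens=2048.
+ prompt = "Summarize the key points of today's technology news."
+ inputs = tokenizer(prompt, return_tensors="pt")
+ output_ids = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+ ```
+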
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 3
+ - gradient_accumulation_steps: 12
+ - total_train_batch_size: 144
+ - total_eval_batch_size: 12
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 5
+
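+ As a quick sanity check, the effective batch sizes above follow from the per-device settings; a small sketch of the arithmetic, using only numbers listed in this card:
+
+ ```python
+ # Effective batch sizes implied by the hyperparameters above.
+ per_device_train_batch_size = 4
+ per_device_eval_batch_size = 4
+ num_devices = 3
+ gradient_accumulation_steps = 12
+
+ total_train_batch_size = per_device_train_batch_size * num_devices * gradient_accumulation_steps
+ total_eval_batch_size = per_device_eval_batch_size * num_devices
+
+ assert total_train_batch_size == 144  # matches total_train_batch_size above
+ assert total_eval_batch_size == 12    # matches total_eval_batch_size above
+ ```
+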
+ ### Training results
+
+ | Training Loss | Epoch | Step | Dpo Loss | Logits | Logps | Validation Loss | Objective | Ranking Idealized | Ranking Idealized Expo | Ranking Simple | Regularize | Wo Beta |
+ |:-------------:|:------:|:----:|:--------:|:-------:|:--------:|:---------------:|:---------:|:-----------------:|:----------------------:|:--------------:|:----------:|:-------:|
+ | 4.4544 | 0.1417 | 50 | 3.0769 | -1.4000 | -90.0695 | 6.1132 | 6.2354 | 0.5212 | 0.5212 | 0.5243 | 6.2354 | 16.0705 |
+ | 17.3779 | 0.2834 | 100 | 7.9374 | -1.3238 | -85.5257 | 16.1760 | 16.0037 | 0.5212 | 0.5212 | 0.5259 | 16.0037 | 15.7780 |
+ | 28.1478 | 0.4251 | 150 | 14.7239 | -1.0824 | -82.4808 | 28.7309 | 28.1308 | 0.5212 | 0.5212 | 0.5228 | 28.1308 | 15.4096 |
+ | 35.2522 | 0.5668 | 200 | 18.9116 | -0.8236 | -75.2715 | 38.5263 | 37.4366 | 0.5212 | 0.5212 | 0.5295 | 37.4366 | 14.6816 |
+ | 37.8556 | 0.7085 | 250 | 22.7495 | -0.6024 | -76.2798 | 44.8164 | 44.5795 | 0.5212 | 0.5212 | 0.5223 | 44.5795 | 14.3182 |
+ | 36.0351 | 0.8503 | 300 | 22.1457 | -0.7057 | -79.1833 | 44.3831 | 43.8777 | 0.5212 | 0.5212 | 0.5254 | 43.8777 | 14.2675 |
+ | 32.9882 | 0.9920 | 350 | 23.0098 | -0.6345 | -80.3166 | 46.6946 | 45.5953 | 0.5212 | 0.5212 | 0.5248 | 45.5953 | 14.1690 |
+ | 30.7247 | 1.1337 | 400 | 24.6183 | -0.4810 | -82.4111 | 48.3805 | 48.0656 | 0.5212 | 0.5212 | 0.5166 | 48.0656 | 14.1059 |
+ | 29.6491 | 1.2754 | 450 | 24.9495 | -0.5861 | -81.5285 | 48.5237 | 48.8411 | 0.5212 | 0.5212 | 0.5243 | 48.8411 | 14.4793 |
+ | 28.3933 | 1.4171 | 500 | 24.8156 | -0.5585 | -79.8843 | 47.8150 | 47.9210 | 0.5212 | 0.5212 | 0.5212 | 47.9210 | 14.3458 |
+ | 26.3026 | 1.5588 | 550 | 24.4583 | -0.5594 | -79.5567 | 48.0081 | 48.2215 | 0.5212 | 0.5212 | 0.5228 | 48.2215 | 14.1587 |
+ | 25.1162 | 1.7005 | 600 | 25.2219 | -0.4875 | -79.4245 | 49.4271 | 49.7428 | 0.5212 | 0.5212 | 0.5259 | 49.7428 | 14.1923 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.0
+ - Pytorch 2.3.0+cu121
+ - Datasets 3.2.0
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.7005196032120926,
+ "total_flos": 0.0,
+ "train_loss": 11.682157999674478,
+ "train_runtime": 6847.3936,
+ "train_samples": 50802,
+ "train_samples_per_second": 37.096,
+ "train_steps_per_second": 0.257
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "bos_token_id": 151644,
+ "eos_token_id": 151645,
+ "max_new_tokens": 2048,
+ "pad_token_id": 151645,
+ "transformers_version": "4.42.0"
+ }
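
For reference, the generation defaults recorded in `generation_config.json` can be reconstructed as a `transformers` `GenerationConfig` object. A minimal sketch, copying the values from the file above:

```python
# Sketch mirroring generation_config.json above (values copied from the file).
from transformers import GenerationConfig

gen_config = GenerationConfig(
    bos_token_id=151644,
    eos_token_id=151645,
    pad_token_id=151645,
    max_new_tokens=2048,
)
print(gen_config)
```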
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:885880a0f693d78bc61596df26ec0b3de297265ae8a05a136be87c747080c3a1
+ oid sha256:e7d43689d4e962884458db5e4b5c53982d6fb23fdd3781e0051ade3d98446dcc
  size 1975192208
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.7005196032120926,
+ "total_flos": 0.0,
+ "train_loss": 11.682157999674478,
+ "train_runtime": 6847.3936,
+ "train_samples": 50802,
+ "train_samples_per_second": 37.096,
+ "train_steps_per_second": 0.257
+ }
trainer_state.json ADDED
@@ -0,0 +1,463 @@
+ {
+ "best_metric": 0.5295031070709229,
+ "best_model_checkpoint": "./qwen2.5-0.5b/qwen2.5-0.5b-expo-L2EXPO-ES-10/checkpoint-200",
+ "epoch": 1.7005196032120926,
+ "eval_steps": 50,
+ "global_step": 600,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "dpo_loss": 0.6931471824645996,
+ "epoch": 0.002834199338686821,
+ "grad_norm": 3688.5083619666543,
+ "learning_rate": 2.840909090909091e-08,
+ "logits": -1.359458565711975,
+ "logps": -84.69721221923828,
+ "loss": 0.3913,
+ "objective": 0.3618059456348419,
+ "ranking_idealized": 0.5833333134651184,
+ "ranking_idealized_expo": 0.5833333134651184,
+ "ranking_simple": 0.5833333134651184,
+ "regularize": 0.3618059456348419,
+ "step": 1,
+ "wo_beta": 14.830931663513184
+ },
+ {
+ "dpo_loss": 2.238539934158325,
+ "epoch": 0.14170996693434104,
+ "grad_norm": 3276.735561173721,
+ "learning_rate": 1.4204545454545458e-06,
+ "logits": -1.448372483253479,
+ "logps": -84.56832885742188,
+ "loss": 4.4544,
+ "objective": 4.151083946228027,
+ "ranking_idealized": 0.5225340127944946,
+ "ranking_idealized_expo": 0.5216836929321289,
+ "ranking_simple": 0.5199829936027527,
+ "regularize": 4.151083946228027,
+ "step": 50,
+ "wo_beta": 15.634044647216797
+ },
+ {
+ "epoch": 0.14170996693434104,
+ "eval_dpo_loss": 3.076866626739502,
+ "eval_logits": -1.4000310897827148,
+ "eval_logps": -90.06948852539062,
+ "eval_loss": 6.113166332244873,
+ "eval_objective": 6.235374927520752,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5243270993232727,
+ "eval_regularize": 6.235374927520752,
+ "eval_runtime": 307.8342,
+ "eval_samples_per_second": 18.809,
+ "eval_steps_per_second": 1.569,
+ "eval_wo_beta": 16.07046890258789,
+ "step": 50
+ },
+ {
+ "dpo_loss": 8.729989051818848,
+ "epoch": 0.2834199338686821,
+ "grad_norm": 2869.8187087544343,
+ "learning_rate": 2.8409090909090916e-06,
+ "logits": -1.3595805168151855,
+ "logps": -82.20881652832031,
+ "loss": 17.3779,
+ "objective": 18.13115882873535,
+ "ranking_idealized": 0.5141666531562805,
+ "ranking_idealized_expo": 0.5137500166893005,
+ "ranking_simple": 0.5199999809265137,
+ "regularize": 18.13115882873535,
+ "step": 100,
+ "wo_beta": 15.266292572021484
+ },
+ {
+ "epoch": 0.2834199338686821,
+ "eval_dpo_loss": 7.9374237060546875,
+ "eval_logits": -1.3237512111663818,
+ "eval_logps": -85.5257339477539,
+ "eval_loss": 16.1760196685791,
+ "eval_objective": 16.003671646118164,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5258799195289612,
+ "eval_regularize": 16.003671646118164,
+ "eval_runtime": 307.1481,
+ "eval_samples_per_second": 18.851,
+ "eval_steps_per_second": 1.573,
+ "eval_wo_beta": 15.778022766113281,
+ "step": 100
+ },
+ {
+ "dpo_loss": 14.852143287658691,
+ "epoch": 0.42512990080302315,
+ "grad_norm": 2245.895525757807,
+ "learning_rate": 4.2613636363636365e-06,
+ "logits": -1.168060302734375,
+ "logps": -79.34931945800781,
+ "loss": 28.1478,
+ "objective": 28.49669647216797,
+ "ranking_idealized": 0.5287500023841858,
+ "ranking_idealized_expo": 0.527916669845581,
+ "ranking_simple": 0.5287500023841858,
+ "regularize": 28.49669647216797,
+ "step": 150,
+ "wo_beta": 15.188308715820312
+ },
+ {
+ "epoch": 0.42512990080302315,
+ "eval_dpo_loss": 14.723884582519531,
+ "eval_logits": -1.0823547840118408,
+ "eval_logps": -82.48079681396484,
+ "eval_loss": 28.730850219726562,
+ "eval_objective": 28.130752563476562,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.522774338722229,
+ "eval_regularize": 28.130752563476562,
+ "eval_runtime": 307.3658,
+ "eval_samples_per_second": 18.837,
+ "eval_steps_per_second": 1.571,
+ "eval_wo_beta": 15.409582138061523,
+ "step": 150
+ },
+ {
+ "dpo_loss": 17.784215927124023,
+ "epoch": 0.5668398677373642,
+ "grad_norm": 1921.2331717681436,
+ "learning_rate": 4.997168347957521e-06,
+ "logits": -0.9321090579032898,
+ "logps": -74.18836212158203,
+ "loss": 35.2522,
+ "objective": 35.61735534667969,
+ "ranking_idealized": 0.51583331823349,
+ "ranking_idealized_expo": 0.51541668176651,
+ "ranking_simple": 0.5066666603088379,
+ "regularize": 35.61735534667969,
+ "step": 200,
+ "wo_beta": 15.46297550201416
+ },
+ {
+ "epoch": 0.5668398677373642,
+ "eval_dpo_loss": 18.911605834960938,
+ "eval_logits": -0.8236067295074463,
+ "eval_logps": -75.27147674560547,
+ "eval_loss": 38.52634048461914,
+ "eval_objective": 37.43656921386719,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5295031070709229,
+ "eval_regularize": 37.43656921386719,
+ "eval_runtime": 307.428,
+ "eval_samples_per_second": 18.834,
+ "eval_steps_per_second": 1.571,
+ "eval_wo_beta": 14.68159294128418,
+ "step": 200
+ },
+ {
+ "dpo_loss": 19.207956314086914,
+ "epoch": 0.7085498346717053,
+ "grad_norm": 1961.1411038553429,
+ "learning_rate": 4.973122855144066e-06,
+ "logits": -0.7678465843200684,
+ "logps": -71.71759033203125,
+ "loss": 37.8556,
+ "objective": 38.2454833984375,
+ "ranking_idealized": 0.5166666507720947,
+ "ranking_idealized_expo": 0.5162500143051147,
+ "ranking_simple": 0.5183333158493042,
+ "regularize": 38.2454833984375,
+ "step": 250,
+ "wo_beta": 15.593232154846191
+ },
+ {
+ "epoch": 0.7085498346717053,
+ "eval_dpo_loss": 22.749528884887695,
+ "eval_logits": -0.602376401424408,
+ "eval_logps": -76.27977752685547,
+ "eval_loss": 44.81644821166992,
+ "eval_objective": 44.5794677734375,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5222567319869995,
+ "eval_regularize": 44.5794677734375,
+ "eval_runtime": 309.9732,
+ "eval_samples_per_second": 18.679,
+ "eval_steps_per_second": 1.558,
+ "eval_wo_beta": 14.318175315856934,
+ "step": 250
+ },
+ {
+ "dpo_loss": 18.925874710083008,
+ "epoch": 0.8502598016060463,
+ "grad_norm": 1894.7438599816405,
+ "learning_rate": 4.924776641419513e-06,
+ "logits": -0.6545333862304688,
+ "logps": -74.65443420410156,
+ "loss": 36.0351,
+ "objective": 36.791263580322266,
+ "ranking_idealized": 0.4962500035762787,
+ "ranking_idealized_expo": 0.4950000047683716,
+ "ranking_simple": 0.5074999928474426,
+ "regularize": 36.791263580322266,
+ "step": 300,
+ "wo_beta": 15.123795509338379
+ },
+ {
+ "epoch": 0.8502598016060463,
+ "eval_dpo_loss": 22.145748138427734,
+ "eval_logits": -0.7056564688682556,
+ "eval_logps": -79.1832504272461,
+ "eval_loss": 44.38310623168945,
+ "eval_objective": 43.87770462036133,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5253623127937317,
+ "eval_regularize": 43.87770462036133,
+ "eval_runtime": 307.3056,
+ "eval_samples_per_second": 18.841,
+ "eval_steps_per_second": 1.572,
+ "eval_wo_beta": 14.267467498779297,
+ "step": 300
+ },
+ {
+ "dpo_loss": 16.664411544799805,
+ "epoch": 0.9919697685403873,
+ "grad_norm": 1652.397965840821,
+ "learning_rate": 4.8526047530778175e-06,
+ "logits": -0.6539748907089233,
+ "logps": -77.53305053710938,
+ "loss": 32.9882,
+ "objective": 31.92953109741211,
+ "ranking_idealized": 0.5262500047683716,
+ "ranking_idealized_expo": 0.5254166722297668,
+ "ranking_simple": 0.5174999833106995,
+ "regularize": 31.92953109741211,
+ "step": 350,
+ "wo_beta": 15.103469848632812
+ },
+ {
+ "epoch": 0.9919697685403873,
+ "eval_dpo_loss": 23.009784698486328,
+ "eval_logits": -0.6344824433326721,
+ "eval_logps": -80.31657409667969,
+ "eval_loss": 46.694580078125,
+ "eval_objective": 45.59531021118164,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5248447060585022,
+ "eval_regularize": 45.59531021118164,
+ "eval_runtime": 307.1817,
+ "eval_samples_per_second": 18.849,
+ "eval_steps_per_second": 1.572,
+ "eval_wo_beta": 14.169025421142578,
+ "step": 350
+ },
+ {
+ "dpo_loss": 15.38097858428955,
+ "epoch": 1.1336797354747283,
+ "grad_norm": 1767.7891148738722,
+ "learning_rate": 4.757316345716554e-06,
+ "logits": -0.5037474036216736,
+ "logps": -77.35700225830078,
+ "loss": 30.7247,
+ "objective": 30.997066497802734,
+ "ranking_idealized": 0.5333333611488342,
+ "ranking_idealized_expo": 0.5320833325386047,
+ "ranking_simple": 0.5258333086967468,
+ "regularize": 30.997066497802734,
+ "step": 400,
+ "wo_beta": 15.239886283874512
+ },
+ {
+ "epoch": 1.1336797354747283,
+ "eval_dpo_loss": 24.61825180053711,
+ "eval_logits": -0.4809892475605011,
+ "eval_logps": -82.41106414794922,
+ "eval_loss": 48.38045120239258,
+ "eval_objective": 48.06561279296875,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5165631175041199,
+ "eval_regularize": 48.06561279296875,
+ "eval_runtime": 307.5308,
+ "eval_samples_per_second": 18.827,
+ "eval_steps_per_second": 1.571,
+ "eval_wo_beta": 14.105908393859863,
+ "step": 400
+ },
+ {
+ "dpo_loss": 15.528545379638672,
+ "epoch": 1.2753897024090695,
+ "grad_norm": 1599.4345499147116,
+ "learning_rate": 4.639847716126855e-06,
+ "logits": -0.4860953390598297,
+ "logps": -78.5374755859375,
+ "loss": 29.6491,
+ "objective": 30.698896408081055,
+ "ranking_idealized": 0.5195833444595337,
+ "ranking_idealized_expo": 0.5191666483879089,
+ "ranking_simple": 0.5208333134651184,
+ "regularize": 30.698896408081055,
+ "step": 450,
+ "wo_beta": 15.879746437072754
+ },
+ {
+ "epoch": 1.2753897024090695,
+ "eval_dpo_loss": 24.949462890625,
+ "eval_logits": -0.5861236453056335,
+ "eval_logps": -81.5285415649414,
+ "eval_loss": 48.523677825927734,
+ "eval_objective": 48.84111022949219,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5243270993232727,
+ "eval_regularize": 48.84111022949219,
+ "eval_runtime": 311.2891,
+ "eval_samples_per_second": 18.6,
+ "eval_steps_per_second": 1.552,
+ "eval_wo_beta": 14.479339599609375,
+ "step": 450
+ },
+ {
+ "dpo_loss": 14.001568794250488,
+ "epoch": 1.4170996693434104,
+ "grad_norm": 1563.7145297724219,
+ "learning_rate": 4.501353102310901e-06,
+ "logits": -0.5200133323669434,
+ "logps": -76.25775909423828,
+ "loss": 28.3933,
+ "objective": 28.495651245117188,
+ "ranking_idealized": 0.49791666865348816,
+ "ranking_idealized_expo": 0.4970833361148834,
+ "ranking_simple": 0.5062500238418579,
+ "regularize": 28.495651245117188,
+ "step": 500,
+ "wo_beta": 15.288237571716309
+ },
+ {
+ "epoch": 1.4170996693434104,
+ "eval_dpo_loss": 24.81561851501465,
+ "eval_logits": -0.5585454106330872,
+ "eval_logps": -79.88430786132812,
+ "eval_loss": 47.81501388549805,
+ "eval_objective": 47.92095947265625,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5212215185165405,
+ "eval_regularize": 47.92095947265625,
+ "eval_runtime": 307.162,
+ "eval_samples_per_second": 18.85,
+ "eval_steps_per_second": 1.572,
+ "eval_wo_beta": 14.345782279968262,
+ "step": 500
+ },
+ {
+ "dpo_loss": 13.645363807678223,
+ "epoch": 1.5588096362777515,
+ "grad_norm": 1564.1500708905278,
+ "learning_rate": 4.34319334202531e-06,
+ "logits": -0.5150942802429199,
+ "logps": -77.01350402832031,
+ "loss": 26.3026,
+ "objective": 26.619535446166992,
+ "ranking_idealized": 0.5112500190734863,
+ "ranking_idealized_expo": 0.5104166865348816,
+ "ranking_simple": 0.5120833516120911,
+ "regularize": 26.619535446166992,
+ "step": 550,
+ "wo_beta": 14.997577667236328
+ },
+ {
+ "epoch": 1.5588096362777515,
+ "eval_dpo_loss": 24.458288192749023,
+ "eval_logits": -0.5593612194061279,
+ "eval_logps": -79.55674743652344,
+ "eval_loss": 48.00811767578125,
+ "eval_objective": 48.22153854370117,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.522774338722229,
+ "eval_regularize": 48.22153854370117,
+ "eval_runtime": 307.4518,
+ "eval_samples_per_second": 18.832,
+ "eval_steps_per_second": 1.571,
+ "eval_wo_beta": 14.15873908996582,
+ "step": 550
+ },
+ {
+ "dpo_loss": 13.081768989562988,
+ "epoch": 1.7005196032120926,
+ "grad_norm": 1567.2994510773917,
+ "learning_rate": 4.16692250129073e-06,
+ "logits": -0.4002828598022461,
+ "logps": -76.4526138305664,
+ "loss": 25.1162,
+ "objective": 25.69132423400879,
+ "ranking_idealized": 0.51541668176651,
+ "ranking_idealized_expo": 0.5149999856948853,
+ "ranking_simple": 0.5137500166893005,
+ "regularize": 25.69132423400879,
+ "step": 600,
+ "wo_beta": 14.990047454833984
+ },
+ {
+ "epoch": 1.7005196032120926,
+ "eval_dpo_loss": 25.221858978271484,
+ "eval_logits": -0.4875362813472748,
+ "eval_logps": -79.4244613647461,
+ "eval_loss": 49.42710494995117,
+ "eval_objective": 49.74281311035156,
+ "eval_ranking_idealized": 0.5212215185165405,
+ "eval_ranking_idealized_expo": 0.5212215185165405,
+ "eval_ranking_simple": 0.5258799195289612,
+ "eval_regularize": 49.74281311035156,
+ "eval_runtime": 311.292,
+ "eval_samples_per_second": 18.6,
+ "eval_steps_per_second": 1.552,
+ "eval_wo_beta": 14.192330360412598,
+ "step": 600
+ },
+ {
+ "epoch": 1.7005196032120926,
+ "step": 600,
+ "total_flos": 0.0,
+ "train_loss": 11.682157999674478,
+ "train_runtime": 6847.3936,
+ "train_samples_per_second": 37.096,
+ "train_steps_per_second": 0.257
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1760,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 50,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
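
The trainer_state.json above records evaluation and checkpointing every 50 steps, early stopping with patience 5 and threshold 0.0, model selection tracking a best metric of 0.5295 at checkpoint-200 (matching eval_ranking_simple at step 200), and training halting at step 600 of a possible 1760. A hedged sketch of a Trainer configuration consistent with that state; the actual training script is not part of this commit, and these are standard `transformers` options shown as an assumption:

```python
# Sketch of an early-stopping setup consistent with trainer_state.json above.
# Argument values are copied from the card and the trainer state; the metric
# choice "eval_ranking_simple" is inferred from best_metric, not confirmed here.
from transformers import TrainingArguments, EarlyStoppingCallback

training_args = TrainingArguments(
    output_dir="./qwen2.5-0.5b/qwen2.5-0.5b-expo-L2EXPO-ES-10",
    eval_strategy="steps",
    eval_steps=50,
    save_steps=50,
    logging_steps=50,
    num_train_epochs=5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=12,
    learning_rate=5e-6,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    load_best_model_at_end=True,
    metric_for_best_model="eval_ranking_simple",
    greater_is_better=True,
)

# Would be passed to the Trainer via its callbacks argument, e.g.
# Trainer(..., args=training_args, callbacks=[early_stopping]).
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=5,
    early_stopping_threshold=0.0,
)
```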