hZzy committed on
Commit 37857a7
1 Parent(s): a7fe872

Model save

README.md ADDED
@@ -0,0 +1,87 @@
+ ---
+ license: apache-2.0
+ base_model: hZzy/qwen2.5-0.5b-sft-news-IFT
+ tags:
+ - trl
+ - expo
+ - generated_from_trainer
+ model-index:
+ - name: qwen2.5-0.5b-expo-L2EXPO-ES-1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/zhiyuzha-university-of-florida/huggingface/runs/72yqfbdr)
+ # qwen2.5-0.5b-expo-L2EXPO-ES-1
+
+ This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 5.0286
+ - Logps: -83.8820
+ - Logits: -0.4938
+ - Objective: 5.0013
+ - Dpo Loss: 2.6194
+ - Regularize: 5.0013
+ - Ranking Simple: 0.5197
+ - Ranking Idealized: 0.5295
+ - Ranking Idealized Expo: 0.5212
+ - Wo Beta: 14.2504
+
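+ Of the metrics above, "Dpo Loss" presumably refers to the standard sigmoid DPO objective of Rafailov et al.; the very first value logged in trainer_state.json is ln 2 ≈ 0.6931, which is what that loss evaluates to before any preference margin is learned. Under that assumption it reads as
+
+ $$\mathcal{L}_{\mathrm{DPO}} = -\log \sigma\left(\beta\left[\log\frac{\pi_\theta(y_w \mid x)}{\pi_{\mathrm{ref}}(y_w \mid x)} - \log\frac{\pi_\theta(y_l \mid x)}{\pi_{\mathrm{ref}}(y_l \mid x)}\right]\right)$$
+
+ where $y_w$ and $y_l$ are the chosen and rejected responses. The remaining EXPO-specific metrics (Objective, Regularize, Wo Beta) are not documented in this card.
+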
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 3
+ - gradient_accumulation_steps: 12
+ - total_train_batch_size: 144
+ - total_eval_batch_size: 12
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 5
+
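+ As a hedged illustration (the actual training script is not part of this commit), the list above maps onto `transformers.TrainingArguments` roughly as follows; the `output_dir` is hypothetical and the custom EXPO/L2EXPO trainer class itself is not shown:
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Sketch of the reported hyperparameters. Effective train batch size:
+ # 4 per device x 3 GPUs x 12 accumulation steps = 144.
+ training_args = TrainingArguments(
+     output_dir="qwen2.5-0.5b-expo-L2EXPO-ES-1",  # hypothetical
+     learning_rate=5e-6,
+     per_device_train_batch_size=4,
+     per_device_eval_batch_size=4,
+     gradient_accumulation_steps=12,
+     num_train_epochs=5,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     seed=42,
+     evaluation_strategy="steps",
+     eval_steps=50,        # from trainer_state.json
+     save_steps=50,        # from trainer_state.json
+     logging_steps=50,     # from trainer_state.json
+ )
+ # Adam betas (0.9, 0.999) and epsilon 1e-08 are the optimizer defaults.
+ ```
+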
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Logps | Logits | Objective | Dpo Loss | Regularize | Ranking Simple | Ranking Idealized | Ranking Idealized Expo | Wo Beta |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:-------:|:---------:|:--------:|:----------:|:--------------:|:-----------------:|:----------------------:|:-------:|
+ | 0.6418 | 0.1417 | 50 | 0.7369 | -89.5788 | -1.4384 | 0.7343 | 0.7480 | 0.7343 | 0.5248 | 0.5295 | 0.5212 | 16.0414 |
+ | 1.7208 | 0.2834 | 100 | 1.7082 | -87.8064 | -1.3168 | 1.6950 | 1.0867 | 1.6950 | 0.5228 | 0.5295 | 0.5212 | 15.5148 |
+ | 2.841 | 0.4251 | 150 | 2.9302 | -83.1791 | -1.1086 | 2.8768 | 1.6352 | 2.8768 | 0.5300 | 0.5295 | 0.5212 | 15.0680 |
+ | 3.5072 | 0.5668 | 200 | 4.2317 | -80.2960 | -0.8688 | 4.2210 | 2.3120 | 4.2210 | 0.5155 | 0.5295 | 0.5212 | 14.5319 |
+ | 3.7707 | 0.7085 | 250 | 4.3648 | -80.5389 | -0.7639 | 4.3627 | 2.2988 | 4.3627 | 0.5212 | 0.5295 | 0.5212 | 14.5663 |
+ | 3.5773 | 0.8503 | 300 | 4.3904 | -83.8565 | -0.5388 | 4.3972 | 2.2955 | 4.3972 | 0.5238 | 0.5295 | 0.5212 | 14.3098 |
+ | 3.359 | 0.9920 | 350 | 4.6868 | -82.1212 | -0.5555 | 4.6293 | 2.4176 | 4.6293 | 0.5264 | 0.5295 | 0.5212 | 14.3177 |
+ | 3.0892 | 1.1337 | 400 | 4.8991 | -80.1851 | -0.4846 | 4.9208 | 2.5732 | 4.9208 | 0.5238 | 0.5295 | 0.5212 | 14.1271 |
+ | 3.001 | 1.2754 | 450 | 4.8651 | -82.0773 | -0.5097 | 4.8038 | 2.4966 | 4.8038 | 0.5233 | 0.5295 | 0.5212 | 14.2309 |
+ | 2.8358 | 1.4171 | 500 | 4.8734 | -81.9592 | -0.4937 | 4.8544 | 2.5685 | 4.8544 | 0.5243 | 0.5295 | 0.5212 | 14.2662 |
+ | 2.6622 | 1.5588 | 550 | 4.8760 | -81.5020 | -0.5513 | 4.9098 | 2.5441 | 4.9098 | 0.5243 | 0.5295 | 0.5212 | 14.2522 |
+ | 2.5417 | 1.7005 | 600 | 5.0324 | -83.9181 | -0.5043 | 5.0251 | 2.5863 | 5.0251 | 0.5259 | 0.5295 | 0.5212 | 14.2325 |
+ | 2.435 | 1.8422 | 650 | 5.0286 | -83.8820 | -0.4938 | 5.0013 | 2.6194 | 5.0013 | 0.5197 | 0.5295 | 0.5212 | 14.2504 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.8422295701464337,
+ "total_flos": 0.0,
+ "train_loss": 2.767508169504312,
+ "train_runtime": 17905.6458,
+ "train_samples": 50802,
+ "train_samples_per_second": 14.186,
+ "train_steps_per_second": 0.098
+ }
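The throughput figures above appear to be derived from the planned training length (5 epochs, 1760 steps per trainer_state.json below) rather than the early-stopped actuals (epoch 1.84, step 650). A quick check under that assumption:

```python
# Hypothetical re-derivation of the reported throughput numbers.
train_samples = 50802
num_train_epochs = 5          # planned epochs from the model card
max_steps = 1760              # planned steps from trainer_state.json
train_runtime = 17905.6458    # seconds

print(round(train_samples * num_train_epochs / train_runtime, 3))  # 14.186 samples/s
print(round(max_steps / train_runtime, 3))                         # 0.098 steps/s
```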
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "bos_token_id": 151644,
+ "eos_token_id": 151645,
+ "max_new_tokens": 2048,
+ "pad_token_id": 151645,
+ "transformers_version": "4.42.0"
+ }
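For reference, a minimal usage sketch built around these generation defaults; the repository id is inferred from the model name in the card above and may differ:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "hZzy/qwen2.5-0.5b-expo-L2EXPO-ES-1"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "Write a one-sentence news summary about renewable energy."
inputs = tokenizer(prompt, return_tensors="pt")
# max_new_tokens and pad_token_id mirror generation_config.json.
outputs = model.generate(**inputs, max_new_tokens=2048, pad_token_id=151645)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```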
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:44b22b98f3afad888c9dd62ee3d2c5133354c47a8cab6ee2d1f5c1bc460d1ff2
+ oid sha256:44a7364df93e48c468324c2c61297c1eef8f3a03445af895598cde31920d19d3
  size 1975192208
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.8422295701464337,
+ "total_flos": 0.0,
+ "train_loss": 2.767508169504312,
+ "train_runtime": 17905.6458,
+ "train_samples": 50802,
+ "train_samples_per_second": 14.186,
+ "train_steps_per_second": 0.098
+ }
trainer_state.json ADDED
@@ -0,0 +1,496 @@
1
+ {
2
+ "best_metric": 14.127137184143066,
3
+ "best_model_checkpoint": "./qwen2.5-0.5b/qwen2.5-0.5b-expo-L2EXPO-ES-1/checkpoint-400",
4
+ "epoch": 1.8422295701464337,
5
+ "eval_steps": 50,
6
+ "global_step": 650,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "dpo_loss": 0.6931471824645996,
13
+ "epoch": 0.002834199338686821,
14
+ "grad_norm": 368.84791774460115,
15
+ "learning_rate": 2.840909090909091e-08,
16
+ "logits": -1.359458565711975,
17
+ "logps": -84.69721221923828,
18
+ "loss": 0.3913,
19
+ "objective": 0.3618059456348419,
20
+ "ranking_idealized": 0.5833333134651184,
21
+ "ranking_idealized_expo": 0.5833333134651184,
22
+ "ranking_simple": 0.5833333134651184,
23
+ "regularize": 0.3618059456348419,
24
+ "step": 1,
25
+ "wo_beta": 14.830931663513184
26
+ },
27
+ {
28
+ "dpo_loss": 0.6998967528343201,
29
+ "epoch": 0.14170996693434104,
30
+ "grad_norm": 396.7221362251878,
31
+ "learning_rate": 1.4204545454545458e-06,
32
+ "logits": -1.4534623622894287,
33
+ "logps": -84.4651870727539,
34
+ "loss": 0.6418,
35
+ "objective": 0.6346251964569092,
36
+ "ranking_idealized": 0.5289115905761719,
37
+ "ranking_idealized_expo": 0.5221088528633118,
38
+ "ranking_simple": 0.5246598720550537,
39
+ "regularize": 0.6346251964569092,
40
+ "step": 50,
41
+ "wo_beta": 15.657191276550293
42
+ },
43
+ {
44
+ "epoch": 0.14170996693434104,
45
+ "eval_dpo_loss": 0.7479657530784607,
46
+ "eval_logits": -1.4384208917617798,
47
+ "eval_logps": -89.57877349853516,
48
+ "eval_loss": 0.7368742823600769,
49
+ "eval_objective": 0.7343389391899109,
50
+ "eval_ranking_idealized": 0.5295031070709229,
51
+ "eval_ranking_idealized_expo": 0.5212215185165405,
52
+ "eval_ranking_simple": 0.5248447060585022,
53
+ "eval_regularize": 0.7343389391899109,
54
+ "eval_runtime": 308.869,
55
+ "eval_samples_per_second": 18.746,
56
+ "eval_steps_per_second": 1.564,
57
+ "eval_wo_beta": 16.0413818359375,
58
+ "step": 50
59
+ },
60
+ {
61
+ "dpo_loss": 0.9691944122314453,
62
+ "epoch": 0.2834199338686821,
63
+ "grad_norm": 273.93971490638415,
64
+ "learning_rate": 2.8409090909090916e-06,
65
+ "logits": -1.3613545894622803,
66
+ "logps": -82.8901596069336,
67
+ "loss": 1.7208,
68
+ "objective": 1.7239964008331299,
69
+ "ranking_idealized": 0.5241666436195374,
70
+ "ranking_idealized_expo": 0.5137500166893005,
71
+ "ranking_simple": 0.5274999737739563,
72
+ "regularize": 1.7239964008331299,
73
+ "step": 100,
74
+ "wo_beta": 15.285738945007324
75
+ },
76
+ {
77
+ "epoch": 0.2834199338686821,
78
+ "eval_dpo_loss": 1.0867407321929932,
79
+ "eval_logits": -1.3167681694030762,
80
+ "eval_logps": -87.80636596679688,
81
+ "eval_loss": 1.7081643342971802,
82
+ "eval_objective": 1.6949896812438965,
83
+ "eval_ranking_idealized": 0.5295031070709229,
84
+ "eval_ranking_idealized_expo": 0.5212215185165405,
85
+ "eval_ranking_simple": 0.522774338722229,
86
+ "eval_regularize": 1.6949896812438965,
87
+ "eval_runtime": 307.8806,
88
+ "eval_samples_per_second": 18.806,
89
+ "eval_steps_per_second": 1.569,
90
+ "eval_wo_beta": 15.514751434326172,
91
+ "step": 100
92
+ },
93
+ {
94
+ "dpo_loss": 1.4132683277130127,
95
+ "epoch": 0.42512990080302315,
96
+ "grad_norm": 224.02387198038068,
97
+ "learning_rate": 4.2613636363636365e-06,
98
+ "logits": -1.1957015991210938,
99
+ "logps": -80.9156723022461,
100
+ "loss": 2.841,
101
+ "objective": 2.8095057010650635,
102
+ "ranking_idealized": 0.5333333611488342,
103
+ "ranking_idealized_expo": 0.527916669845581,
104
+ "ranking_simple": 0.5325000286102295,
105
+ "regularize": 2.8095057010650635,
106
+ "step": 150,
107
+ "wo_beta": 15.042305946350098
108
+ },
109
+ {
110
+ "epoch": 0.42512990080302315,
111
+ "eval_dpo_loss": 1.6352450847625732,
112
+ "eval_logits": -1.1085715293884277,
113
+ "eval_logps": -83.1790771484375,
114
+ "eval_loss": 2.930232048034668,
115
+ "eval_objective": 2.876790761947632,
116
+ "eval_ranking_idealized": 0.5295031070709229,
117
+ "eval_ranking_idealized_expo": 0.5212215185165405,
118
+ "eval_ranking_simple": 0.5300207138061523,
119
+ "eval_regularize": 2.876790761947632,
120
+ "eval_runtime": 311.4753,
121
+ "eval_samples_per_second": 18.589,
122
+ "eval_steps_per_second": 1.551,
123
+ "eval_wo_beta": 15.068045616149902,
124
+ "step": 150
125
+ },
126
+ {
127
+ "dpo_loss": 1.8845717906951904,
128
+ "epoch": 0.5668398677373642,
129
+ "grad_norm": 197.31465488920477,
130
+ "learning_rate": 4.997168347957521e-06,
131
+ "logits": -0.9064983129501343,
132
+ "logps": -77.56580352783203,
133
+ "loss": 3.5072,
134
+ "objective": 3.5702998638153076,
135
+ "ranking_idealized": 0.5204166769981384,
136
+ "ranking_idealized_expo": 0.51541668176651,
137
+ "ranking_simple": 0.5104166865348816,
138
+ "regularize": 3.5702998638153076,
139
+ "step": 200,
140
+ "wo_beta": 15.2503023147583
141
+ },
142
+ {
143
+ "epoch": 0.5668398677373642,
144
+ "eval_dpo_loss": 2.312016725540161,
145
+ "eval_logits": -0.8688302040100098,
146
+ "eval_logps": -80.29598236083984,
147
+ "eval_loss": 4.2316670417785645,
148
+ "eval_objective": 4.220970153808594,
149
+ "eval_ranking_idealized": 0.5295031070709229,
150
+ "eval_ranking_idealized_expo": 0.5212215185165405,
151
+ "eval_ranking_simple": 0.5155279636383057,
152
+ "eval_regularize": 4.220970153808594,
153
+ "eval_runtime": 307.8579,
154
+ "eval_samples_per_second": 18.807,
155
+ "eval_steps_per_second": 1.569,
156
+ "eval_wo_beta": 14.531935691833496,
157
+ "step": 200
158
+ },
159
+ {
160
+ "dpo_loss": 1.959755301475525,
161
+ "epoch": 0.7085498346717053,
162
+ "grad_norm": 180.7425737123104,
163
+ "learning_rate": 4.973122855144066e-06,
164
+ "logits": -0.8095456957817078,
165
+ "logps": -75.73948669433594,
166
+ "loss": 3.7707,
167
+ "objective": 3.773972272872925,
168
+ "ranking_idealized": 0.5249999761581421,
169
+ "ranking_idealized_expo": 0.5162500143051147,
170
+ "ranking_simple": 0.518750011920929,
171
+ "regularize": 3.773972272872925,
172
+ "step": 250,
173
+ "wo_beta": 15.669358253479004
174
+ },
175
+ {
176
+ "epoch": 0.7085498346717053,
177
+ "eval_dpo_loss": 2.2987730503082275,
178
+ "eval_logits": -0.7639057636260986,
179
+ "eval_logps": -80.5389175415039,
180
+ "eval_loss": 4.364786148071289,
181
+ "eval_objective": 4.362744331359863,
182
+ "eval_ranking_idealized": 0.5295031070709229,
183
+ "eval_ranking_idealized_expo": 0.5212215185165405,
184
+ "eval_ranking_simple": 0.5212215185165405,
185
+ "eval_regularize": 4.362744331359863,
186
+ "eval_runtime": 308.0276,
187
+ "eval_samples_per_second": 18.797,
188
+ "eval_steps_per_second": 1.568,
189
+ "eval_wo_beta": 14.566258430480957,
190
+ "step": 250
191
+ },
192
+ {
193
+ "dpo_loss": 1.8572473526000977,
194
+ "epoch": 0.8502598016060463,
195
+ "grad_norm": 169.7352822578473,
196
+ "learning_rate": 4.924776641419513e-06,
197
+ "logits": -0.485324501991272,
198
+ "logps": -79.25447082519531,
199
+ "loss": 3.5773,
200
+ "objective": 3.6851494312286377,
201
+ "ranking_idealized": 0.5062500238418579,
202
+ "ranking_idealized_expo": 0.4950000047683716,
203
+ "ranking_simple": 0.5112500190734863,
204
+ "regularize": 3.6851494312286377,
205
+ "step": 300,
206
+ "wo_beta": 15.083699226379395
207
+ },
208
+ {
209
+ "epoch": 0.8502598016060463,
210
+ "eval_dpo_loss": 2.2955195903778076,
211
+ "eval_logits": -0.5387536883354187,
212
+ "eval_logps": -83.8565444946289,
213
+ "eval_loss": 4.3904218673706055,
214
+ "eval_objective": 4.397186279296875,
215
+ "eval_ranking_idealized": 0.5295031070709229,
216
+ "eval_ranking_idealized_expo": 0.5212215185165405,
217
+ "eval_ranking_simple": 0.523809552192688,
218
+ "eval_regularize": 4.397186279296875,
219
+ "eval_runtime": 308.1076,
220
+ "eval_samples_per_second": 18.792,
221
+ "eval_steps_per_second": 1.568,
222
+ "eval_wo_beta": 14.309849739074707,
223
+ "step": 300
224
+ },
225
+ {
226
+ "dpo_loss": 1.6745129823684692,
227
+ "epoch": 0.9919697685403873,
228
+ "grad_norm": 175.43829028737628,
229
+ "learning_rate": 4.8526047530778175e-06,
230
+ "logits": -0.49194690585136414,
231
+ "logps": -78.66015625,
232
+ "loss": 3.359,
233
+ "objective": 3.3252944946289062,
234
+ "ranking_idealized": 0.5354166626930237,
235
+ "ranking_idealized_expo": 0.5254166722297668,
236
+ "ranking_simple": 0.5391666889190674,
237
+ "regularize": 3.3252944946289062,
238
+ "step": 350,
239
+ "wo_beta": 15.092531204223633
240
+ },
241
+ {
242
+ "epoch": 0.9919697685403873,
243
+ "eval_dpo_loss": 2.417555809020996,
244
+ "eval_logits": -0.555489718914032,
245
+ "eval_logps": -82.1212387084961,
246
+ "eval_loss": 4.6867547035217285,
247
+ "eval_objective": 4.629337787628174,
248
+ "eval_ranking_idealized": 0.5295031070709229,
249
+ "eval_ranking_idealized_expo": 0.5212215185165405,
250
+ "eval_ranking_simple": 0.5263975262641907,
251
+ "eval_regularize": 4.629337787628174,
252
+ "eval_runtime": 307.7738,
253
+ "eval_samples_per_second": 18.813,
254
+ "eval_steps_per_second": 1.569,
255
+ "eval_wo_beta": 14.317716598510742,
256
+ "step": 350
257
+ },
258
+ {
259
+ "dpo_loss": 1.6242923736572266,
260
+ "epoch": 1.1336797354747283,
261
+ "grad_norm": 167.92800627071566,
262
+ "learning_rate": 4.757316345716554e-06,
263
+ "logits": -0.42070272564888,
264
+ "logps": -77.89451599121094,
265
+ "loss": 3.0892,
266
+ "objective": 3.102174997329712,
267
+ "ranking_idealized": 0.5412499904632568,
268
+ "ranking_idealized_expo": 0.5320833325386047,
269
+ "ranking_simple": 0.5333333611488342,
270
+ "regularize": 3.102174997329712,
271
+ "step": 400,
272
+ "wo_beta": 15.249672889709473
273
+ },
274
+ {
275
+ "epoch": 1.1336797354747283,
276
+ "eval_dpo_loss": 2.573197364807129,
277
+ "eval_logits": -0.484580934047699,
278
+ "eval_logps": -80.18506622314453,
279
+ "eval_loss": 4.899093151092529,
280
+ "eval_objective": 4.920805931091309,
281
+ "eval_ranking_idealized": 0.5295031070709229,
282
+ "eval_ranking_idealized_expo": 0.5212215185165405,
283
+ "eval_ranking_simple": 0.523809552192688,
284
+ "eval_regularize": 4.920805931091309,
285
+ "eval_runtime": 307.8724,
286
+ "eval_samples_per_second": 18.806,
287
+ "eval_steps_per_second": 1.569,
288
+ "eval_wo_beta": 14.127137184143066,
289
+ "step": 400
290
+ },
291
+ {
292
+ "dpo_loss": 1.5614336729049683,
293
+ "epoch": 1.2753897024090695,
294
+ "grad_norm": 168.51104996086647,
295
+ "learning_rate": 4.639847716126855e-06,
296
+ "logits": -0.46561330556869507,
297
+ "logps": -76.85901641845703,
298
+ "loss": 3.001,
299
+ "objective": 3.0770418643951416,
300
+ "ranking_idealized": 0.5245833396911621,
301
+ "ranking_idealized_expo": 0.5191666483879089,
302
+ "ranking_simple": 0.5337499976158142,
303
+ "regularize": 3.0770418643951416,
304
+ "step": 450,
305
+ "wo_beta": 15.840205192565918
306
+ },
307
+ {
308
+ "epoch": 1.2753897024090695,
309
+ "eval_dpo_loss": 2.4965932369232178,
310
+ "eval_logits": -0.5097361207008362,
311
+ "eval_logps": -82.07725524902344,
312
+ "eval_loss": 4.865055561065674,
313
+ "eval_objective": 4.8037590980529785,
314
+ "eval_ranking_idealized": 0.5295031070709229,
315
+ "eval_ranking_idealized_expo": 0.5212215185165405,
316
+ "eval_ranking_simple": 0.5232919454574585,
317
+ "eval_regularize": 4.8037590980529785,
318
+ "eval_runtime": 308.1386,
319
+ "eval_samples_per_second": 18.79,
320
+ "eval_steps_per_second": 1.567,
321
+ "eval_wo_beta": 14.230860710144043,
322
+ "step": 450
323
+ },
324
+ {
325
+ "dpo_loss": 1.4661251306533813,
326
+ "epoch": 1.4170996693434104,
327
+ "grad_norm": 153.41700493191905,
328
+ "learning_rate": 4.501353102310901e-06,
329
+ "logits": -0.41387155652046204,
330
+ "logps": -77.91703033447266,
331
+ "loss": 2.8358,
332
+ "objective": 2.893219232559204,
333
+ "ranking_idealized": 0.5054166913032532,
334
+ "ranking_idealized_expo": 0.4970833361148834,
335
+ "ranking_simple": 0.5058333277702332,
336
+ "regularize": 2.893219232559204,
337
+ "step": 500,
338
+ "wo_beta": 15.346451759338379
339
+ },
340
+ {
341
+ "epoch": 1.4170996693434104,
342
+ "eval_dpo_loss": 2.5684561729431152,
343
+ "eval_logits": -0.4937358796596527,
344
+ "eval_logps": -81.95916748046875,
345
+ "eval_loss": 4.873396873474121,
346
+ "eval_objective": 4.854368686676025,
347
+ "eval_ranking_idealized": 0.5295031070709229,
348
+ "eval_ranking_idealized_expo": 0.5212215185165405,
349
+ "eval_ranking_simple": 0.5243270993232727,
350
+ "eval_regularize": 4.854368686676025,
351
+ "eval_runtime": 307.7716,
352
+ "eval_samples_per_second": 18.813,
353
+ "eval_steps_per_second": 1.569,
354
+ "eval_wo_beta": 14.26622486114502,
355
+ "step": 500
356
+ },
357
+ {
358
+ "dpo_loss": 1.3315966129302979,
359
+ "epoch": 1.5588096362777515,
360
+ "grad_norm": 153.30075040252478,
361
+ "learning_rate": 4.34319334202531e-06,
362
+ "logits": -0.4832386374473572,
363
+ "logps": -78.64344787597656,
364
+ "loss": 2.6622,
365
+ "objective": 2.6787972450256348,
366
+ "ranking_idealized": 0.5199999809265137,
367
+ "ranking_idealized_expo": 0.5108333230018616,
368
+ "ranking_simple": 0.5249999761581421,
369
+ "regularize": 2.6787972450256348,
370
+ "step": 550,
371
+ "wo_beta": 15.080702781677246
372
+ },
373
+ {
374
+ "epoch": 1.5588096362777515,
375
+ "eval_dpo_loss": 2.544066905975342,
376
+ "eval_logits": -0.551251232624054,
377
+ "eval_logps": -81.50196075439453,
378
+ "eval_loss": 4.876008033752441,
379
+ "eval_objective": 4.909796714782715,
380
+ "eval_ranking_idealized": 0.5295031070709229,
381
+ "eval_ranking_idealized_expo": 0.5212215185165405,
382
+ "eval_ranking_simple": 0.5243270993232727,
383
+ "eval_regularize": 4.909796714782715,
384
+ "eval_runtime": 307.8096,
385
+ "eval_samples_per_second": 18.81,
386
+ "eval_steps_per_second": 1.569,
387
+ "eval_wo_beta": 14.252228736877441,
388
+ "step": 550
389
+ },
390
+ {
391
+ "dpo_loss": 1.3174678087234497,
392
+ "epoch": 1.7005196032120926,
393
+ "grad_norm": 164.16680167227398,
394
+ "learning_rate": 4.16692250129073e-06,
395
+ "logits": -0.4034644663333893,
396
+ "logps": -80.2961654663086,
397
+ "loss": 2.5417,
398
+ "objective": 2.5419461727142334,
399
+ "ranking_idealized": 0.5220833420753479,
400
+ "ranking_idealized_expo": 0.5149999856948853,
401
+ "ranking_simple": 0.5220833420753479,
402
+ "regularize": 2.5419461727142334,
403
+ "step": 600,
404
+ "wo_beta": 15.080598831176758
405
+ },
406
+ {
407
+ "epoch": 1.7005196032120926,
408
+ "eval_dpo_loss": 2.5863354206085205,
409
+ "eval_logits": -0.5043439269065857,
410
+ "eval_logps": -83.9180908203125,
411
+ "eval_loss": 5.032442092895508,
412
+ "eval_objective": 5.0250935554504395,
413
+ "eval_ranking_idealized": 0.5295031070709229,
414
+ "eval_ranking_idealized_expo": 0.5212215185165405,
415
+ "eval_ranking_simple": 0.5258799195289612,
416
+ "eval_regularize": 5.0250935554504395,
417
+ "eval_runtime": 308.2064,
418
+ "eval_samples_per_second": 18.786,
419
+ "eval_steps_per_second": 1.567,
420
+ "eval_wo_beta": 14.232461929321289,
421
+ "step": 600
422
+ },
423
+ {
424
+ "dpo_loss": 1.3203412294387817,
425
+ "epoch": 1.8422295701464337,
426
+ "grad_norm": 160.7017872607094,
427
+ "learning_rate": 3.974272604254906e-06,
428
+ "logits": -0.48519474267959595,
429
+ "logps": -80.25173950195312,
430
+ "loss": 2.435,
431
+ "objective": 2.4747252464294434,
432
+ "ranking_idealized": 0.5408333539962769,
433
+ "ranking_idealized_expo": 0.527916669845581,
434
+ "ranking_simple": 0.5333333611488342,
435
+ "regularize": 2.4747252464294434,
436
+ "step": 650,
437
+ "wo_beta": 15.727615356445312
438
+ },
439
+ {
440
+ "epoch": 1.8422295701464337,
441
+ "eval_dpo_loss": 2.6193807125091553,
442
+ "eval_logits": -0.49383923411369324,
443
+ "eval_logps": -83.8819808959961,
444
+ "eval_loss": 5.0286407470703125,
445
+ "eval_objective": 5.001297950744629,
446
+ "eval_ranking_idealized": 0.5295031070709229,
447
+ "eval_ranking_idealized_expo": 0.5212215185165405,
448
+ "eval_ranking_simple": 0.5196687579154968,
449
+ "eval_regularize": 5.001297950744629,
450
+ "eval_runtime": 307.7261,
451
+ "eval_samples_per_second": 18.815,
452
+ "eval_steps_per_second": 1.57,
453
+ "eval_wo_beta": 14.250398635864258,
454
+ "step": 650
455
+ },
456
+ {
457
+ "epoch": 1.8422295701464337,
458
+ "step": 650,
459
+ "total_flos": 0.0,
460
+ "train_loss": 2.767508169504312,
461
+ "train_runtime": 17905.6458,
462
+ "train_samples_per_second": 14.186,
463
+ "train_steps_per_second": 0.098
464
+ }
465
+ ],
466
+ "logging_steps": 50,
467
+ "max_steps": 1760,
468
+ "num_input_tokens_seen": 0,
469
+ "num_train_epochs": 5,
470
+ "save_steps": 50,
471
+ "stateful_callbacks": {
472
+ "EarlyStoppingCallback": {
473
+ "args": {
474
+ "early_stopping_patience": 5,
475
+ "early_stopping_threshold": 0.0
476
+ },
477
+ "attributes": {
478
+ "early_stopping_patience_counter": 0
479
+ }
480
+ },
481
+ "TrainerControl": {
482
+ "args": {
483
+ "should_epoch_stop": false,
484
+ "should_evaluate": false,
485
+ "should_log": false,
486
+ "should_save": true,
487
+ "should_training_stop": true
488
+ },
489
+ "attributes": {}
490
+ }
491
+ },
492
+ "total_flos": 0.0,
493
+ "train_batch_size": 4,
494
+ "trial_name": null,
495
+ "trial_params": null
496
+ }
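The EarlyStoppingCallback block above implies a setup along these lines; a hedged sketch, assuming the monitored metric is the "wo_beta" evaluation value (best_metric matches eval_wo_beta at step 400):

```python
from transformers import TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="./qwen2.5-0.5b/qwen2.5-0.5b-expo-L2EXPO-ES-1",
    evaluation_strategy="steps",
    eval_steps=50,
    save_steps=50,
    load_best_model_at_end=True,         # required for early stopping
    metric_for_best_model="wo_beta",     # assumption inferred from best_metric
    greater_is_better=False,
)

# These would be passed to the (custom EXPO) trainer together with the model
# and datasets, which are not shown here. Training stops once the monitored
# metric fails to improve for 5 consecutive evaluations.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=5,
    early_stopping_threshold=0.0,
)
```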