hZzy committed on
Commit
0200f4d
1 Parent(s): a927414

Model save

README.md ADDED
@@ -0,0 +1,93 @@
+ ---
+ license: apache-2.0
+ base_model: hZzy/qwen2.5-0.5b-sft-news-IFT
+ tags:
+ - trl
+ - expo
+ - generated_from_trainer
+ model-index:
+ - name: qwen2.5-0.5b-expo-DPO-ES-1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/zhiyuzha-university-of-florida/huggingface/runs/qhqu3qtk)
+ # qwen2.5-0.5b-expo-DPO-ES-1
+
+ This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.3403
+ - Logps: -83.2967
+ - Logits: -0.7158
+ - Objective: 2.2649
+ - Dpo Loss: 2.2649
+ - Regularize: 2.2649
+ - Ranking Simple: 0.5331
+ - Ranking Idealized: 0.5295
+ - Ranking Idealized Expo: 0.5212
+ - Wo Beta: 6.8864
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 3
+ - gradient_accumulation_steps: 12
+ - total_train_batch_size: 144
+ - total_eval_batch_size: 12
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Logps | Logits | Objective | Dpo Loss | Regularize | Ranking Simple | Ranking Idealized | Ranking Idealized Expo | Wo Beta |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:-------:|:---------:|:--------:|:----------:|:--------------:|:-----------------:|:----------------------:|:-------:|
+ | 0.7017 | 0.1417 | 50 | 0.8470 | -93.0243 | -1.4582 | 0.8570 | 0.8570 | 0.8570 | 0.5238 | 0.5295 | 0.5212 | 7.8507 |
+ | 0.8112 | 0.2834 | 100 | 1.0529 | -86.6835 | -1.4382 | 1.0273 | 1.0273 | 1.0273 | 0.5285 | 0.5295 | 0.5212 | 7.4982 |
+ | 1.0895 | 0.4251 | 150 | 1.4497 | -84.4337 | -1.2965 | 1.4010 | 1.4010 | 1.4010 | 0.5321 | 0.5295 | 0.5212 | 7.2692 |
+ | 1.2363 | 0.5668 | 200 | 1.7035 | -77.7201 | -1.2956 | 1.6116 | 1.6116 | 1.6116 | 0.5321 | 0.5295 | 0.5212 | 7.2264 |
+ | 1.3152 | 0.7085 | 250 | 1.9222 | -92.7241 | -1.2565 | 1.8319 | 1.8319 | 1.8319 | 0.5311 | 0.5295 | 0.5212 | 7.1856 |
+ | 1.1899 | 0.8503 | 300 | 2.0298 | -90.9373 | -0.9785 | 1.9588 | 1.9588 | 1.9588 | 0.5367 | 0.5295 | 0.5212 | 6.9336 |
+ | 1.1443 | 0.9920 | 350 | 2.1654 | -82.1414 | -1.0214 | 2.0541 | 2.0541 | 2.0541 | 0.5435 | 0.5295 | 0.5212 | 7.0024 |
+ | 0.725 | 1.1337 | 400 | 2.2884 | -84.2526 | -0.7535 | 2.2360 | 2.2360 | 2.2360 | 0.5336 | 0.5295 | 0.5212 | 7.1525 |
+ | 0.7629 | 1.2754 | 450 | 2.1606 | -80.4165 | -0.8866 | 2.0671 | 2.0671 | 2.0671 | 0.5321 | 0.5295 | 0.5212 | 6.7949 |
+ | 0.8044 | 1.4171 | 500 | 2.2094 | -82.3927 | -0.7503 | 2.0981 | 2.0981 | 2.0981 | 0.5347 | 0.5295 | 0.5212 | 6.8050 |
+ | 0.7105 | 1.5588 | 550 | 2.1697 | -84.9780 | -0.6734 | 2.0733 | 2.0733 | 2.0733 | 0.5321 | 0.5295 | 0.5212 | 6.8722 |
+ | 0.6925 | 1.7005 | 600 | 2.1957 | -81.5342 | -0.7411 | 2.0558 | 2.0558 | 2.0558 | 0.5357 | 0.5295 | 0.5212 | 6.7186 |
+ | 0.6883 | 1.8422 | 650 | 2.2080 | -82.7303 | -0.6908 | 2.1330 | 2.1330 | 2.1330 | 0.5383 | 0.5295 | 0.5212 | 6.8081 |
+ | 0.6486 | 1.9839 | 700 | 2.3243 | -83.2882 | -0.6651 | 2.2471 | 2.2471 | 2.2471 | 0.5378 | 0.5295 | 0.5212 | 6.6815 |
+ | 0.3793 | 2.1256 | 750 | 2.2675 | -84.2296 | -0.7879 | 2.1825 | 2.1825 | 2.1825 | 0.5409 | 0.5295 | 0.5212 | 6.8794 |
+ | 0.3314 | 2.2674 | 800 | 2.2106 | -84.3675 | -0.6651 | 2.1041 | 2.1041 | 2.1041 | 0.5414 | 0.5295 | 0.5212 | 6.7463 |
+ | 0.3301 | 2.4091 | 850 | 2.2964 | -84.8913 | -0.6177 | 2.2221 | 2.2221 | 2.2221 | 0.5388 | 0.5295 | 0.5212 | 6.8020 |
+ | 0.3509 | 2.5508 | 900 | 2.2796 | -84.3833 | -0.6097 | 2.2099 | 2.2099 | 2.2099 | 0.5393 | 0.5295 | 0.5212 | 6.7934 |
+ | 0.321 | 2.6925 | 950 | 2.3403 | -83.2967 | -0.7158 | 2.2649 | 2.2649 | 2.2649 | 0.5331 | 0.5295 | 0.5212 | 6.8864 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
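As a usage reference (not part of this commit), a minimal inference sketch with the standard transformers causal-LM API; the repo id `hZzy/qwen2.5-0.5b-expo-DPO-ES-1` and the prompt are assumptions for illustration:

```python
# Minimal inference sketch; the repo id and prompt are illustrative assumptions,
# not taken from this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "hZzy/qwen2.5-0.5b-expo-DPO-ES-1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Summarize today's top business story in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```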
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.69248937175248,
+   "total_flos": 0.0,
+   "train_loss": 0.7490891541932758,
+   "train_runtime": 28216.8085,
+   "train_samples": 50802,
+   "train_samples_per_second": 9.002,
+   "train_steps_per_second": 0.062
+ }
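Note that the throughput figures appear to be normalized by the scheduled run length rather than by the 2.69 epochs actually completed before early stopping: 5 epochs × 50802 samples = 254010 samples, and 254010 / 28216.8 s ≈ 9.002 samples/s; likewise 1760 scheduled steps / 28216.8 s ≈ 0.062 steps/s.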
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 151644,
+   "eos_token_id": 151645,
+   "max_new_tokens": 2048,
+   "pad_token_id": 151645,
+   "transformers_version": "4.42.0"
+ }
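For reference, these defaults map directly onto transformers' `GenerationConfig`; a sketch (the values mirror the JSON above, the repo id is an assumption):

```python
# Sketch: the same generation defaults expressed with transformers' GenerationConfig.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    bos_token_id=151644,
    eos_token_id=151645,
    pad_token_id=151645,
    max_new_tokens=2048,
)
# Loading from the Hub would pick up the same file, e.g.
# GenerationConfig.from_pretrained("hZzy/qwen2.5-0.5b-expo-DPO-ES-1")  # repo id assumed
```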
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:67ea4f637539ff138e995b360f68b133c612f4b6e652f6b3c9366920b7a336c9
+ oid sha256:298052dd057c2cd532a59aca77a07211f0fb5c7cd9cfae8f6f2297e3de96205c
  size 1975192208
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.69248937175248,
+   "total_flos": 0.0,
+   "train_loss": 0.7490891541932758,
+   "train_runtime": 28216.8085,
+   "train_samples": 50802,
+   "train_samples_per_second": 9.002,
+   "train_steps_per_second": 0.062
+ }
trainer_state.json ADDED
@@ -0,0 +1,694 @@
1
+ {
2
+ "best_metric": 6.681451797485352,
3
+ "best_model_checkpoint": "./qwen2.5-0.5b/qwen2.5-0.5b-expo-DPO-ES-1/checkpoint-700",
4
+ "epoch": 2.69248937175248,
5
+ "eval_steps": 50,
6
+ "global_step": 950,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "dpo_loss": 0.6931471824645996,
13
+ "epoch": 0.002834199338686821,
14
+ "grad_norm": 184.4249864582654,
15
+ "learning_rate": 2.840909090909091e-08,
16
+ "logits": -1.359458565711975,
17
+ "logps": -84.69721221923828,
18
+ "loss": 0.6931,
19
+ "objective": 0.6931471824645996,
20
+ "ranking_idealized": 0.5833333134651184,
21
+ "ranking_idealized_expo": 0.5833333134651184,
22
+ "ranking_simple": 0.5833333134651184,
23
+ "regularize": 0.6931471824645996,
24
+ "step": 1,
25
+ "wo_beta": 5.271125316619873
26
+ },
27
+ {
28
+ "dpo_loss": 0.7105385065078735,
29
+ "epoch": 0.14170996693434104,
30
+ "grad_norm": 184.86918480832395,
31
+ "learning_rate": 1.4204545454545458e-06,
32
+ "logits": -1.4640510082244873,
33
+ "logps": -84.16496276855469,
34
+ "loss": 0.7017,
35
+ "objective": 0.7105385065078735,
36
+ "ranking_idealized": 0.5289115905761719,
37
+ "ranking_idealized_expo": 0.5221088528633118,
38
+ "ranking_simple": 0.5225340127944946,
39
+ "regularize": 0.7105385065078735,
40
+ "step": 50,
41
+ "wo_beta": 7.0693583488464355
42
+ },
43
+ {
44
+ "epoch": 0.14170996693434104,
45
+ "eval_dpo_loss": 0.8569971919059753,
46
+ "eval_logits": -1.4581880569458008,
47
+ "eval_logps": -93.02434539794922,
48
+ "eval_loss": 0.8469977974891663,
49
+ "eval_objective": 0.8569971919059753,
50
+ "eval_ranking_idealized": 0.5295031070709229,
51
+ "eval_ranking_idealized_expo": 0.5212215185165405,
52
+ "eval_ranking_simple": 0.523809552192688,
53
+ "eval_regularize": 0.8569971919059753,
54
+ "eval_runtime": 308.8499,
55
+ "eval_samples_per_second": 18.747,
56
+ "eval_steps_per_second": 1.564,
57
+ "eval_wo_beta": 7.850653171539307,
58
+ "step": 50
59
+ },
60
+ {
61
+ "dpo_loss": 0.804880678653717,
62
+ "epoch": 0.2834199338686821,
63
+ "grad_norm": 165.0032665851806,
64
+ "learning_rate": 2.8409090909090916e-06,
65
+ "logits": -1.454908847808838,
66
+ "logps": -84.21910095214844,
67
+ "loss": 0.8112,
68
+ "objective": 0.804880678653717,
69
+ "ranking_idealized": 0.5241666436195374,
70
+ "ranking_idealized_expo": 0.5137500166893005,
71
+ "ranking_simple": 0.543749988079071,
72
+ "regularize": 0.804880678653717,
73
+ "step": 100,
74
+ "wo_beta": 6.3764801025390625
75
+ },
76
+ {
77
+ "epoch": 0.2834199338686821,
78
+ "eval_dpo_loss": 1.0273478031158447,
79
+ "eval_logits": -1.4382472038269043,
80
+ "eval_logps": -86.68347930908203,
81
+ "eval_loss": 1.0528956651687622,
82
+ "eval_objective": 1.0273478031158447,
83
+ "eval_ranking_idealized": 0.5295031070709229,
84
+ "eval_ranking_idealized_expo": 0.5212215185165405,
85
+ "eval_ranking_simple": 0.5284678936004639,
86
+ "eval_regularize": 1.0273478031158447,
87
+ "eval_runtime": 325.8896,
88
+ "eval_samples_per_second": 17.767,
89
+ "eval_steps_per_second": 1.482,
90
+ "eval_wo_beta": 7.498155117034912,
91
+ "step": 100
92
+ },
93
+ {
94
+ "dpo_loss": 1.0717346668243408,
95
+ "epoch": 0.42512990080302315,
96
+ "grad_norm": 132.12956442526567,
97
+ "learning_rate": 4.2613636363636365e-06,
98
+ "logits": -1.3764687776565552,
99
+ "logps": -75.39175415039062,
100
+ "loss": 1.0895,
101
+ "objective": 1.0717346668243408,
102
+ "ranking_idealized": 0.5333333611488342,
103
+ "ranking_idealized_expo": 0.527916669845581,
104
+ "ranking_simple": 0.5645833611488342,
105
+ "regularize": 1.0717346668243408,
106
+ "step": 150,
107
+ "wo_beta": 6.200186252593994
108
+ },
109
+ {
110
+ "epoch": 0.42512990080302315,
111
+ "eval_dpo_loss": 1.4009853601455688,
112
+ "eval_logits": -1.2964659929275513,
113
+ "eval_logps": -84.43374633789062,
114
+ "eval_loss": 1.4497475624084473,
115
+ "eval_objective": 1.4009853601455688,
116
+ "eval_ranking_idealized": 0.5295031070709229,
117
+ "eval_ranking_idealized_expo": 0.5212215185165405,
118
+ "eval_ranking_simple": 0.5320910811424255,
119
+ "eval_regularize": 1.4009853601455688,
120
+ "eval_runtime": 308.2116,
121
+ "eval_samples_per_second": 18.786,
122
+ "eval_steps_per_second": 1.567,
123
+ "eval_wo_beta": 7.269238471984863,
124
+ "step": 150
125
+ },
126
+ {
127
+ "dpo_loss": 1.2378712892532349,
128
+ "epoch": 0.5668398677373642,
129
+ "grad_norm": 120.89571295715444,
130
+ "learning_rate": 4.997168347957521e-06,
131
+ "logits": -1.3378713130950928,
132
+ "logps": -75.85318756103516,
133
+ "loss": 1.2363,
134
+ "objective": 1.2378712892532349,
135
+ "ranking_idealized": 0.5204166769981384,
136
+ "ranking_idealized_expo": 0.51541668176651,
137
+ "ranking_simple": 0.5699999928474426,
138
+ "regularize": 1.2378712892532349,
139
+ "step": 200,
140
+ "wo_beta": 6.13140869140625
141
+ },
142
+ {
143
+ "epoch": 0.5668398677373642,
144
+ "eval_dpo_loss": 1.6115626096725464,
145
+ "eval_logits": -1.295562505722046,
146
+ "eval_logps": -77.72007751464844,
147
+ "eval_loss": 1.7034852504730225,
148
+ "eval_objective": 1.6115626096725464,
149
+ "eval_ranking_idealized": 0.5295031070709229,
150
+ "eval_ranking_idealized_expo": 0.5212215185165405,
151
+ "eval_ranking_simple": 0.5320910811424255,
152
+ "eval_regularize": 1.6115626096725464,
153
+ "eval_runtime": 334.6734,
154
+ "eval_samples_per_second": 17.3,
155
+ "eval_steps_per_second": 1.443,
156
+ "eval_wo_beta": 7.226394176483154,
157
+ "step": 200
158
+ },
159
+ {
160
+ "dpo_loss": 1.2734622955322266,
161
+ "epoch": 0.7085498346717053,
162
+ "grad_norm": 126.97038898052826,
163
+ "learning_rate": 4.973122855144066e-06,
164
+ "logits": -1.1745208501815796,
165
+ "logps": -80.53473663330078,
166
+ "loss": 1.3152,
167
+ "objective": 1.2734622955322266,
168
+ "ranking_idealized": 0.5249999761581421,
169
+ "ranking_idealized_expo": 0.5162500143051147,
170
+ "ranking_simple": 0.5929166674613953,
171
+ "regularize": 1.2734622955322266,
172
+ "step": 250,
173
+ "wo_beta": 6.077574253082275
174
+ },
175
+ {
176
+ "epoch": 0.7085498346717053,
177
+ "eval_dpo_loss": 1.8319419622421265,
178
+ "eval_logits": -1.2564637660980225,
179
+ "eval_logps": -92.72406005859375,
180
+ "eval_loss": 1.9222145080566406,
181
+ "eval_objective": 1.8319419622421265,
182
+ "eval_ranking_idealized": 0.5295031070709229,
183
+ "eval_ranking_idealized_expo": 0.5212215185165405,
184
+ "eval_ranking_simple": 0.5310559272766113,
185
+ "eval_regularize": 1.8319419622421265,
186
+ "eval_runtime": 308.15,
187
+ "eval_samples_per_second": 18.79,
188
+ "eval_steps_per_second": 1.567,
189
+ "eval_wo_beta": 7.185611248016357,
190
+ "step": 250
191
+ },
192
+ {
193
+ "dpo_loss": 1.1058255434036255,
194
+ "epoch": 0.8502598016060463,
195
+ "grad_norm": 93.62858320092595,
196
+ "learning_rate": 4.924776641419513e-06,
197
+ "logits": -1.0275437831878662,
198
+ "logps": -86.21664428710938,
199
+ "loss": 1.1899,
200
+ "objective": 1.1058255434036255,
201
+ "ranking_idealized": 0.5062500238418579,
202
+ "ranking_idealized_expo": 0.4950000047683716,
203
+ "ranking_simple": 0.5833333134651184,
204
+ "regularize": 1.1058255434036255,
205
+ "step": 300,
206
+ "wo_beta": 5.932236194610596
207
+ },
208
+ {
209
+ "epoch": 0.8502598016060463,
210
+ "eval_dpo_loss": 1.958790898323059,
211
+ "eval_logits": -0.9785082340240479,
212
+ "eval_logps": -90.93729400634766,
213
+ "eval_loss": 2.0297622680664062,
214
+ "eval_objective": 1.958790898323059,
215
+ "eval_ranking_idealized": 0.5295031070709229,
216
+ "eval_ranking_idealized_expo": 0.5212215185165405,
217
+ "eval_ranking_simple": 0.5367494821548462,
218
+ "eval_regularize": 1.958790898323059,
219
+ "eval_runtime": 320.477,
220
+ "eval_samples_per_second": 18.067,
221
+ "eval_steps_per_second": 1.507,
222
+ "eval_wo_beta": 6.933594703674316,
223
+ "step": 300
224
+ },
225
+ {
226
+ "dpo_loss": 1.104241967201233,
227
+ "epoch": 0.9919697685403873,
228
+ "grad_norm": 81.69582378962005,
229
+ "learning_rate": 4.8526047530778175e-06,
230
+ "logits": -0.8600361943244934,
231
+ "logps": -83.0993423461914,
232
+ "loss": 1.1443,
233
+ "objective": 1.104241967201233,
234
+ "ranking_idealized": 0.5354166626930237,
235
+ "ranking_idealized_expo": 0.5254166722297668,
236
+ "ranking_simple": 0.6141666769981384,
237
+ "regularize": 1.104241967201233,
238
+ "step": 350,
239
+ "wo_beta": 5.234530925750732
240
+ },
241
+ {
242
+ "epoch": 0.9919697685403873,
243
+ "eval_dpo_loss": 2.0541160106658936,
244
+ "eval_logits": -1.0213720798492432,
245
+ "eval_logps": -82.14143371582031,
246
+ "eval_loss": 2.165400743484497,
247
+ "eval_objective": 2.0541160106658936,
248
+ "eval_ranking_idealized": 0.5295031070709229,
249
+ "eval_ranking_idealized_expo": 0.5212215185165405,
250
+ "eval_ranking_simple": 0.54347825050354,
251
+ "eval_regularize": 2.0541160106658936,
252
+ "eval_runtime": 352.7203,
253
+ "eval_samples_per_second": 16.415,
254
+ "eval_steps_per_second": 1.369,
255
+ "eval_wo_beta": 7.002377033233643,
256
+ "step": 350
257
+ },
258
+ {
259
+ "dpo_loss": 0.7012434005737305,
260
+ "epoch": 1.1336797354747283,
261
+ "grad_norm": 72.02321161366883,
262
+ "learning_rate": 4.757316345716554e-06,
263
+ "logits": -0.8228326439857483,
264
+ "logps": -77.82636260986328,
265
+ "loss": 0.725,
266
+ "objective": 0.7012434005737305,
267
+ "ranking_idealized": 0.5412499904632568,
268
+ "ranking_idealized_expo": 0.5320833325386047,
269
+ "ranking_simple": 0.6487500071525574,
270
+ "regularize": 0.7012434005737305,
271
+ "step": 400,
272
+ "wo_beta": 4.493627548217773
273
+ },
274
+ {
275
+ "epoch": 1.1336797354747283,
276
+ "eval_dpo_loss": 2.2360119819641113,
277
+ "eval_logits": -0.7534947991371155,
278
+ "eval_logps": -84.25261688232422,
279
+ "eval_loss": 2.288405179977417,
280
+ "eval_objective": 2.2360119819641113,
281
+ "eval_ranking_idealized": 0.5295031070709229,
282
+ "eval_ranking_idealized_expo": 0.5212215185165405,
283
+ "eval_ranking_simple": 0.533643901348114,
284
+ "eval_regularize": 2.2360119819641113,
285
+ "eval_runtime": 343.9192,
286
+ "eval_samples_per_second": 16.835,
287
+ "eval_steps_per_second": 1.404,
288
+ "eval_wo_beta": 7.152450084686279,
289
+ "step": 400
290
+ },
291
+ {
292
+ "dpo_loss": 0.787284255027771,
293
+ "epoch": 1.2753897024090695,
294
+ "grad_norm": 67.24645157140542,
295
+ "learning_rate": 4.639847716126855e-06,
296
+ "logits": -0.7304993867874146,
297
+ "logps": -79.94586181640625,
298
+ "loss": 0.7629,
299
+ "objective": 0.787284255027771,
300
+ "ranking_idealized": 0.5245833396911621,
301
+ "ranking_idealized_expo": 0.5191666483879089,
302
+ "ranking_simple": 0.6483333110809326,
303
+ "regularize": 0.787284255027771,
304
+ "step": 450,
305
+ "wo_beta": 5.019043922424316
306
+ },
307
+ {
308
+ "epoch": 1.2753897024090695,
309
+ "eval_dpo_loss": 2.0671327114105225,
310
+ "eval_logits": -0.8865557909011841,
311
+ "eval_logps": -80.41649627685547,
312
+ "eval_loss": 2.160627841949463,
313
+ "eval_objective": 2.0671327114105225,
314
+ "eval_ranking_idealized": 0.5295031070709229,
315
+ "eval_ranking_idealized_expo": 0.5212215185165405,
316
+ "eval_ranking_simple": 0.5320910811424255,
317
+ "eval_regularize": 2.0671327114105225,
318
+ "eval_runtime": 361.4999,
319
+ "eval_samples_per_second": 16.017,
320
+ "eval_steps_per_second": 1.336,
321
+ "eval_wo_beta": 6.794886589050293,
322
+ "step": 450
323
+ },
324
+ {
325
+ "dpo_loss": 0.7948352098464966,
326
+ "epoch": 1.4170996693434104,
327
+ "grad_norm": 80.7228445203749,
328
+ "learning_rate": 4.501353102310901e-06,
329
+ "logits": -0.7204355001449585,
330
+ "logps": -76.1670150756836,
331
+ "loss": 0.8044,
332
+ "objective": 0.7948352098464966,
333
+ "ranking_idealized": 0.5054166913032532,
334
+ "ranking_idealized_expo": 0.4970833361148834,
335
+ "ranking_simple": 0.6416666507720947,
336
+ "regularize": 0.7948352098464966,
337
+ "step": 500,
338
+ "wo_beta": 4.868573188781738
339
+ },
340
+ {
341
+ "epoch": 1.4170996693434104,
342
+ "eval_dpo_loss": 2.0981180667877197,
343
+ "eval_logits": -0.7502567172050476,
344
+ "eval_logps": -82.39266967773438,
345
+ "eval_loss": 2.2094054222106934,
346
+ "eval_objective": 2.0981180667877197,
347
+ "eval_ranking_idealized": 0.5295031070709229,
348
+ "eval_ranking_idealized_expo": 0.5212215185165405,
349
+ "eval_ranking_simple": 0.534679114818573,
350
+ "eval_regularize": 2.0981180667877197,
351
+ "eval_runtime": 308.1869,
352
+ "eval_samples_per_second": 18.787,
353
+ "eval_steps_per_second": 1.567,
354
+ "eval_wo_beta": 6.8049798011779785,
355
+ "step": 500
356
+ },
357
+ {
358
+ "dpo_loss": 0.7342749834060669,
359
+ "epoch": 1.5588096362777515,
360
+ "grad_norm": 72.59655396244632,
361
+ "learning_rate": 4.34319334202531e-06,
362
+ "logits": -0.6162157654762268,
363
+ "logps": -79.39734649658203,
364
+ "loss": 0.7105,
365
+ "objective": 0.7342749834060669,
366
+ "ranking_idealized": 0.5199999809265137,
367
+ "ranking_idealized_expo": 0.5108333230018616,
368
+ "ranking_simple": 0.6587499976158142,
369
+ "regularize": 0.7342749834060669,
370
+ "step": 550,
371
+ "wo_beta": 4.5973711013793945
372
+ },
373
+ {
374
+ "epoch": 1.5588096362777515,
375
+ "eval_dpo_loss": 2.073348045349121,
376
+ "eval_logits": -0.6734257340431213,
377
+ "eval_logps": -84.9780044555664,
378
+ "eval_loss": 2.16965651512146,
379
+ "eval_objective": 2.073348045349121,
380
+ "eval_ranking_idealized": 0.5295031070709229,
381
+ "eval_ranking_idealized_expo": 0.5212215185165405,
382
+ "eval_ranking_simple": 0.5320910811424255,
383
+ "eval_regularize": 2.073348045349121,
384
+ "eval_runtime": 374.3868,
385
+ "eval_samples_per_second": 15.465,
386
+ "eval_steps_per_second": 1.29,
387
+ "eval_wo_beta": 6.8721699714660645,
388
+ "step": 550
389
+ },
390
+ {
391
+ "dpo_loss": 0.6709804534912109,
392
+ "epoch": 1.7005196032120926,
393
+ "grad_norm": 58.045592434636404,
394
+ "learning_rate": 4.16692250129073e-06,
395
+ "logits": -0.5785077810287476,
396
+ "logps": -79.41590118408203,
397
+ "loss": 0.6925,
398
+ "objective": 0.6709804534912109,
399
+ "ranking_idealized": 0.5220833420753479,
400
+ "ranking_idealized_expo": 0.5149999856948853,
401
+ "ranking_simple": 0.6520833373069763,
402
+ "regularize": 0.6709804534912109,
403
+ "step": 600,
404
+ "wo_beta": 4.63525390625
405
+ },
406
+ {
407
+ "epoch": 1.7005196032120926,
408
+ "eval_dpo_loss": 2.055753707885742,
409
+ "eval_logits": -0.7411422729492188,
410
+ "eval_logps": -81.53422546386719,
411
+ "eval_loss": 2.195674419403076,
412
+ "eval_objective": 2.055753707885742,
413
+ "eval_ranking_idealized": 0.5295031070709229,
414
+ "eval_ranking_idealized_expo": 0.5212215185165405,
415
+ "eval_ranking_simple": 0.5357142686843872,
416
+ "eval_regularize": 2.055753707885742,
417
+ "eval_runtime": 308.1508,
418
+ "eval_samples_per_second": 18.79,
419
+ "eval_steps_per_second": 1.567,
420
+ "eval_wo_beta": 6.718571662902832,
421
+ "step": 600
422
+ },
423
+ {
424
+ "dpo_loss": 0.6708642244338989,
425
+ "epoch": 1.8422295701464337,
426
+ "grad_norm": 71.07668633244941,
427
+ "learning_rate": 3.974272604254906e-06,
428
+ "logits": -0.6567726135253906,
429
+ "logps": -78.93568420410156,
430
+ "loss": 0.6883,
431
+ "objective": 0.6708642244338989,
432
+ "ranking_idealized": 0.5408333539962769,
433
+ "ranking_idealized_expo": 0.527916669845581,
434
+ "ranking_simple": 0.6583333611488342,
435
+ "regularize": 0.6708642244338989,
436
+ "step": 650,
437
+ "wo_beta": 4.770144939422607
438
+ },
439
+ {
440
+ "epoch": 1.8422295701464337,
441
+ "eval_dpo_loss": 2.1330411434173584,
442
+ "eval_logits": -0.6908087134361267,
443
+ "eval_logps": -82.73030853271484,
444
+ "eval_loss": 2.208021640777588,
445
+ "eval_objective": 2.1330411434173584,
446
+ "eval_ranking_idealized": 0.5295031070709229,
447
+ "eval_ranking_idealized_expo": 0.5212215185165405,
448
+ "eval_ranking_simple": 0.5383023023605347,
449
+ "eval_regularize": 2.1330411434173584,
450
+ "eval_runtime": 351.6869,
451
+ "eval_samples_per_second": 16.464,
452
+ "eval_steps_per_second": 1.373,
453
+ "eval_wo_beta": 6.808131694793701,
454
+ "step": 650
455
+ },
456
+ {
457
+ "dpo_loss": 0.6912000179290771,
458
+ "epoch": 1.9839395370807746,
459
+ "grad_norm": 70.15640375480324,
460
+ "learning_rate": 3.767136614452458e-06,
461
+ "logits": -0.5081126093864441,
462
+ "logps": -78.9532699584961,
463
+ "loss": 0.6486,
464
+ "objective": 0.6912000179290771,
465
+ "ranking_idealized": 0.5170833468437195,
466
+ "ranking_idealized_expo": 0.5112500190734863,
467
+ "ranking_simple": 0.6445833444595337,
468
+ "regularize": 0.6912000179290771,
469
+ "step": 700,
470
+ "wo_beta": 4.81070613861084
471
+ },
472
+ {
473
+ "epoch": 1.9839395370807746,
474
+ "eval_dpo_loss": 2.2471413612365723,
475
+ "eval_logits": -0.6650525331497192,
476
+ "eval_logps": -83.28822326660156,
477
+ "eval_loss": 2.3242855072021484,
478
+ "eval_objective": 2.2471413612365723,
479
+ "eval_ranking_idealized": 0.5295031070709229,
480
+ "eval_ranking_idealized_expo": 0.5212215185165405,
481
+ "eval_ranking_simple": 0.5377846956253052,
482
+ "eval_regularize": 2.2471413612365723,
483
+ "eval_runtime": 308.3677,
484
+ "eval_samples_per_second": 18.776,
485
+ "eval_steps_per_second": 1.566,
486
+ "eval_wo_beta": 6.681451797485352,
487
+ "step": 700
488
+ },
489
+ {
490
+ "dpo_loss": 0.3827283978462219,
491
+ "epoch": 2.1256495040151155,
492
+ "grad_norm": 45.41487893439101,
493
+ "learning_rate": 3.547549834686222e-06,
494
+ "logits": -0.5765664577484131,
495
+ "logps": -79.59234619140625,
496
+ "loss": 0.3793,
497
+ "objective": 0.3827283978462219,
498
+ "ranking_idealized": 0.5216666460037231,
499
+ "ranking_idealized_expo": 0.5112500190734863,
500
+ "ranking_simple": 0.6779166460037231,
501
+ "regularize": 0.3827283978462219,
502
+ "step": 750,
503
+ "wo_beta": 4.318643569946289
504
+ },
505
+ {
506
+ "epoch": 2.1256495040151155,
507
+ "eval_dpo_loss": 2.182473659515381,
508
+ "eval_logits": -0.7878842353820801,
509
+ "eval_logps": -84.22957611083984,
510
+ "eval_loss": 2.2674732208251953,
511
+ "eval_objective": 2.182473659515381,
512
+ "eval_ranking_idealized": 0.5295031070709229,
513
+ "eval_ranking_idealized_expo": 0.5212215185165405,
514
+ "eval_ranking_simple": 0.5408902764320374,
515
+ "eval_regularize": 2.182473659515381,
516
+ "eval_runtime": 325.9016,
517
+ "eval_samples_per_second": 17.766,
518
+ "eval_steps_per_second": 1.482,
519
+ "eval_wo_beta": 6.879360675811768,
520
+ "step": 750
521
+ },
522
+ {
523
+ "dpo_loss": 0.3352104723453522,
524
+ "epoch": 2.2673594709494567,
525
+ "grad_norm": 40.69824942472133,
526
+ "learning_rate": 3.3176699082935546e-06,
527
+ "logits": -0.6111759543418884,
528
+ "logps": -80.46817779541016,
529
+ "loss": 0.3314,
530
+ "objective": 0.3352104723453522,
531
+ "ranking_idealized": 0.5195833444595337,
532
+ "ranking_idealized_expo": 0.512499988079071,
533
+ "ranking_simple": 0.6787499785423279,
534
+ "regularize": 0.3352104723453522,
535
+ "step": 800,
536
+ "wo_beta": 3.9888339042663574
537
+ },
538
+ {
539
+ "epoch": 2.2673594709494567,
540
+ "eval_dpo_loss": 2.104069471359253,
541
+ "eval_logits": -0.665122926235199,
542
+ "eval_logps": -84.3675308227539,
543
+ "eval_loss": 2.2106120586395264,
544
+ "eval_objective": 2.104069471359253,
545
+ "eval_ranking_idealized": 0.5295031070709229,
546
+ "eval_ranking_idealized_expo": 0.5212215185165405,
547
+ "eval_ranking_simple": 0.5414078831672668,
548
+ "eval_regularize": 2.104069471359253,
549
+ "eval_runtime": 341.6693,
550
+ "eval_samples_per_second": 16.946,
551
+ "eval_steps_per_second": 1.414,
552
+ "eval_wo_beta": 6.746298313140869,
553
+ "step": 800
554
+ },
555
+ {
556
+ "dpo_loss": 0.3527311086654663,
557
+ "epoch": 2.409069437883798,
558
+ "grad_norm": 34.29885501684047,
559
+ "learning_rate": 3.0797556183036582e-06,
560
+ "logits": -0.582695722579956,
561
+ "logps": -80.7869873046875,
562
+ "loss": 0.3301,
563
+ "objective": 0.3527311086654663,
564
+ "ranking_idealized": 0.5254166722297668,
565
+ "ranking_idealized_expo": 0.5141666531562805,
566
+ "ranking_simple": 0.6770833134651184,
567
+ "regularize": 0.3527311086654663,
568
+ "step": 850,
569
+ "wo_beta": 4.281794548034668
570
+ },
571
+ {
572
+ "epoch": 2.409069437883798,
573
+ "eval_dpo_loss": 2.222111940383911,
574
+ "eval_logits": -0.6176895499229431,
575
+ "eval_logps": -84.89134979248047,
576
+ "eval_loss": 2.296431303024292,
577
+ "eval_objective": 2.222111940383911,
578
+ "eval_ranking_idealized": 0.5295031070709229,
579
+ "eval_ranking_idealized_expo": 0.5212215185165405,
580
+ "eval_ranking_simple": 0.5388198494911194,
581
+ "eval_regularize": 2.222111940383911,
582
+ "eval_runtime": 308.4106,
583
+ "eval_samples_per_second": 18.774,
584
+ "eval_steps_per_second": 1.566,
585
+ "eval_wo_beta": 6.801980495452881,
586
+ "step": 850
587
+ },
588
+ {
589
+ "dpo_loss": 0.3560827672481537,
590
+ "epoch": 2.550779404818139,
591
+ "grad_norm": 59.77914010101811,
592
+ "learning_rate": 2.8361446928038298e-06,
593
+ "logits": -0.5723668336868286,
594
+ "logps": -79.74351501464844,
595
+ "loss": 0.3509,
596
+ "objective": 0.3560827672481537,
597
+ "ranking_idealized": 0.5249999761581421,
598
+ "ranking_idealized_expo": 0.5183333158493042,
599
+ "ranking_simple": 0.675000011920929,
600
+ "regularize": 0.3560827672481537,
601
+ "step": 900,
602
+ "wo_beta": 4.2809739112854
603
+ },
604
+ {
605
+ "epoch": 2.550779404818139,
606
+ "eval_dpo_loss": 2.2098634243011475,
607
+ "eval_logits": -0.6096944808959961,
608
+ "eval_logps": -84.38330841064453,
609
+ "eval_loss": 2.2795627117156982,
610
+ "eval_objective": 2.2098634243011475,
611
+ "eval_ranking_idealized": 0.5295031070709229,
612
+ "eval_ranking_idealized_expo": 0.5212215185165405,
613
+ "eval_ranking_simple": 0.5393374562263489,
614
+ "eval_regularize": 2.2098634243011475,
615
+ "eval_runtime": 342.057,
616
+ "eval_samples_per_second": 16.927,
617
+ "eval_steps_per_second": 1.412,
618
+ "eval_wo_beta": 6.793368339538574,
619
+ "step": 900
620
+ },
621
+ {
622
+ "dpo_loss": 0.3693172037601471,
623
+ "epoch": 2.69248937175248,
624
+ "grad_norm": 43.046650555153526,
625
+ "learning_rate": 2.5892308345974517e-06,
626
+ "logits": -0.5750654339790344,
627
+ "logps": -79.79653930664062,
628
+ "loss": 0.321,
629
+ "objective": 0.3693172037601471,
630
+ "ranking_idealized": 0.5162500143051147,
631
+ "ranking_idealized_expo": 0.5058333277702332,
632
+ "ranking_simple": 0.6879166960716248,
633
+ "regularize": 0.3693172037601471,
634
+ "step": 950,
635
+ "wo_beta": 4.201274871826172
636
+ },
637
+ {
638
+ "epoch": 2.69248937175248,
639
+ "eval_dpo_loss": 2.264862298965454,
640
+ "eval_logits": -0.7158052921295166,
641
+ "eval_logps": -83.29668426513672,
642
+ "eval_loss": 2.3403165340423584,
643
+ "eval_objective": 2.264862298965454,
644
+ "eval_ranking_idealized": 0.5295031070709229,
645
+ "eval_ranking_idealized_expo": 0.5212215185165405,
646
+ "eval_ranking_simple": 0.5331262946128845,
647
+ "eval_regularize": 2.264862298965454,
648
+ "eval_runtime": 315.8555,
649
+ "eval_samples_per_second": 18.331,
650
+ "eval_steps_per_second": 1.529,
651
+ "eval_wo_beta": 6.886437892913818,
652
+ "step": 950
653
+ },
654
+ {
655
+ "epoch": 2.69248937175248,
656
+ "step": 950,
657
+ "total_flos": 0.0,
658
+ "train_loss": 0.7490891541932758,
659
+ "train_runtime": 28216.8085,
660
+ "train_samples_per_second": 9.002,
661
+ "train_steps_per_second": 0.062
662
+ }
663
+ ],
664
+ "logging_steps": 50,
665
+ "max_steps": 1760,
666
+ "num_input_tokens_seen": 0,
667
+ "num_train_epochs": 5,
668
+ "save_steps": 50,
669
+ "stateful_callbacks": {
670
+ "EarlyStoppingCallback": {
671
+ "args": {
672
+ "early_stopping_patience": 5,
673
+ "early_stopping_threshold": 0.0
674
+ },
675
+ "attributes": {
676
+ "early_stopping_patience_counter": 0
677
+ }
678
+ },
679
+ "TrainerControl": {
680
+ "args": {
681
+ "should_epoch_stop": false,
682
+ "should_evaluate": false,
683
+ "should_log": false,
684
+ "should_save": true,
685
+ "should_training_stop": true
686
+ },
687
+ "attributes": {}
688
+ }
689
+ },
690
+ "total_flos": 0.0,
691
+ "train_batch_size": 4,
692
+ "trial_name": null,
693
+ "trial_params": null
694
+ }
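The `stateful_callbacks` entry above implies early stopping on the tracked metric with patience 5 and evaluation/checkpointing every 50 steps; a minimal sketch of that setup with the standard transformers callback (the surrounding trainer code is not part of this commit, so everything outside `EarlyStoppingCallback` is illustrative):

```python
# Sketch of the early-stopping setup implied by trainer_state.json above.
from transformers import EarlyStoppingCallback

early_stopping = EarlyStoppingCallback(
    early_stopping_patience=5,      # matches stateful_callbacks.EarlyStoppingCallback.args
    early_stopping_threshold=0.0,
)
# Passed to the trainer together with eval_steps=50 / save_steps=50, a
# metric_for_best_model, and load_best_model_at_end=True (the best checkpoint here
# is step 700 with metric 6.6815), so training stops after 5 evaluations without
# improvement -- consistent with the run ending at step 950 of 1760.
```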