hZzy committed on
Commit
6a901b2
1 Parent(s): 1c9a8ec

Model save

Browse files
README.md ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: hZzy/qwen2.5-0.5b-sft-news-IFT
4
+ tags:
5
+ - trl
6
+ - expo
7
+ - generated_from_trainer
8
+ model-index:
9
+ - name: qwen2.5-0.5b-expo-DPO-ES2-0.1
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
+ should probably proofread and complete it, then remove this comment. -->
15
+
16
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/zhiyuzha-university-of-florida/huggingface/runs/vy4xlg1g)
17
+ # qwen2.5-0.5b-expo-DPO-ES2-0.1
18
+
19
+ This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on an unknown dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 0.6897
22
+ - Logps: -102.1435
23
+ - Logits: -1.9358
24
+ - Objective: 0.6916
25
+ - Dpo Loss: 0.6916
26
+ - Regularize: 0.6916
27
+ - Ranking Simple: 0.5419
28
+ - Ranking Idealized: 0.6030
29
+ - Ranking Idealized Expo: 0.5223
30
+ - Wo Beta: 8.3961
31
+
32
+ ## Model description
33
+
34
+ More information needed
35
+
36
+ ## Intended uses & limitations
37
+
38
+ More information needed
39
+
40
+ ## Training and evaluation data
41
+
42
+ More information needed
43
+
44
+ ## Training procedure
45
+
46
+ ### Training hyperparameters
47
+
48
+ The following hyperparameters were used during training:
49
+ - learning_rate: 1e-06
50
+ - train_batch_size: 4
51
+ - eval_batch_size: 4
52
+ - seed: 42
53
+ - distributed_type: multi-GPU
54
+ - num_devices: 3
55
+ - gradient_accumulation_steps: 12
56
+ - total_train_batch_size: 144
57
+ - total_eval_batch_size: 12
58
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
59
+ - lr_scheduler_type: cosine
60
+ - lr_scheduler_warmup_ratio: 0.1
61
+ - num_epochs: 5
62
+
63
+ ### Training results
64
+
65
+ | Training Loss | Epoch | Step | Validation Loss | Logps | Logits | Objective | Dpo Loss | Regularize | Ranking Simple | Ranking Idealized | Ranking Idealized Expo | Wo Beta |
66
+ |:-------------:|:------:|:----:|:---------------:|:---------:|:-------:|:---------:|:--------:|:----------:|:--------------:|:-----------------:|:----------------------:|:-------:|
67
+ | 0.689 | 0.1417 | 50 | 0.6875 | -90.0815 | -1.4869 | 0.6892 | 0.6892 | 0.6892 | 0.5259 | 0.6030 | 0.5223 | 7.8857 |
68
+ | 0.6673 | 0.2834 | 100 | 0.6808 | -90.9674 | -1.6164 | 0.6836 | 0.6836 | 0.6836 | 0.5331 | 0.6030 | 0.5223 | 7.8643 |
69
+ | 0.6376 | 0.4251 | 150 | 0.6785 | -94.6386 | -1.6873 | 0.6833 | 0.6833 | 0.6833 | 0.5342 | 0.6030 | 0.5223 | 8.1745 |
70
+ | 0.5955 | 0.5668 | 200 | 0.6808 | -100.2786 | -1.8583 | 0.6818 | 0.6818 | 0.6818 | 0.5342 | 0.6030 | 0.5223 | 7.9037 |
71
+ | 0.5623 | 0.7085 | 250 | 0.6757 | -97.3034 | -1.9407 | 0.6757 | 0.6757 | 0.6757 | 0.5362 | 0.6030 | 0.5223 | 7.9161 |
72
+ | 0.5255 | 0.8503 | 300 | 0.7037 | -102.4820 | -2.0313 | 0.7119 | 0.7119 | 0.7119 | 0.5352 | 0.6030 | 0.5223 | 8.7956 |
73
+ | 0.4939 | 0.9920 | 350 | 0.6897 | -102.1435 | -1.9358 | 0.6916 | 0.6916 | 0.6916 | 0.5419 | 0.6030 | 0.5223 | 8.3961 |
74
+
75
+
76
+ ### Framework versions
77
+
78
+ - Transformers 4.42.0
79
+ - Pytorch 2.3.0+cu121
80
+ - Datasets 2.19.1
81
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9919697685403873,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.5958910301753453,
5
+ "train_runtime": 9674.8125,
6
+ "train_samples": 50802,
7
+ "train_samples_per_second": 26.255,
8
+ "train_steps_per_second": 0.182
9
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151644,
3
+ "eos_token_id": 151645,
4
+ "max_new_tokens": 2048,
5
+ "pad_token_id": 151645,
6
+ "transformers_version": "4.42.0"
7
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4bb2b9d3c397a29b6bdd3a6278e23b513b8ecf28c9b29562dee47dc8290907f7
3
  size 1975192208
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac600b94196565ee85509c7575016178d5c3e3197230633e6a5523519a22c68f
3
  size 1975192208
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9919697685403873,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.5958910301753453,
5
+ "train_runtime": 9674.8125,
6
+ "train_samples": 50802,
7
+ "train_samples_per_second": 26.255,
8
+ "train_steps_per_second": 0.182
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 7.86433744430542,
3
+ "best_model_checkpoint": "./qwen2.5-0.5b/qwen2.5-0.5b-expo-DPO-ES2-0.1/checkpoint-100",
4
+ "epoch": 0.9919697685403873,
5
+ "eval_steps": 50,
6
+ "global_step": 350,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "dpo_loss": 0.6931471824645996,
13
+ "epoch": 0.002834199338686821,
14
+ "grad_norm": 18.442528136913708,
15
+ "learning_rate": 5.681818181818181e-09,
16
+ "logits": -1.359458565711975,
17
+ "logps": -84.69721221923828,
18
+ "loss": 0.6931,
19
+ "objective": 0.6931471824645996,
20
+ "ranking_idealized": 0.6458333134651184,
21
+ "ranking_idealized_expo": 0.5833333134651184,
22
+ "ranking_simple": 0.5833333134651184,
23
+ "regularize": 0.6931471824645996,
24
+ "step": 1,
25
+ "wo_beta": 5.271125316619873
26
+ },
27
+ {
28
+ "dpo_loss": 0.6897825002670288,
29
+ "epoch": 0.14170996693434104,
30
+ "grad_norm": 17.687331232419115,
31
+ "learning_rate": 2.840909090909091e-07,
32
+ "logits": -1.467690110206604,
33
+ "logps": -83.78176879882812,
34
+ "loss": 0.689,
35
+ "objective": 0.6897825002670288,
36
+ "ranking_idealized": 0.608418345451355,
37
+ "ranking_idealized_expo": 0.5229591727256775,
38
+ "ranking_simple": 0.5259353518486023,
39
+ "regularize": 0.6897825002670288,
40
+ "step": 50,
41
+ "wo_beta": 7.163306713104248
42
+ },
43
+ {
44
+ "epoch": 0.14170996693434104,
45
+ "eval_dpo_loss": 0.6892488598823547,
46
+ "eval_logits": -1.486922264099121,
47
+ "eval_logps": -90.08146667480469,
48
+ "eval_loss": 0.6874589920043945,
49
+ "eval_objective": 0.6892488598823547,
50
+ "eval_ranking_idealized": 0.6030020713806152,
51
+ "eval_ranking_idealized_expo": 0.5222567319869995,
52
+ "eval_ranking_simple": 0.5258799195289612,
53
+ "eval_regularize": 0.6892488598823547,
54
+ "eval_runtime": 308.1362,
55
+ "eval_samples_per_second": 18.79,
56
+ "eval_steps_per_second": 1.567,
57
+ "eval_wo_beta": 7.8856682777404785,
58
+ "step": 50
59
+ },
60
+ {
61
+ "dpo_loss": 0.6650223731994629,
62
+ "epoch": 0.2834199338686821,
63
+ "grad_norm": 19.38239421391755,
64
+ "learning_rate": 5.681818181818182e-07,
65
+ "logits": -1.5395574569702148,
66
+ "logps": -83.69255828857422,
67
+ "loss": 0.6673,
68
+ "objective": 0.6650223731994629,
69
+ "ranking_idealized": 0.6016666889190674,
70
+ "ranking_idealized_expo": 0.5141666531562805,
71
+ "ranking_simple": 0.527916669845581,
72
+ "regularize": 0.6650223731994629,
73
+ "step": 100,
74
+ "wo_beta": 6.79261589050293
75
+ },
76
+ {
77
+ "epoch": 0.2834199338686821,
78
+ "eval_dpo_loss": 0.6835533380508423,
79
+ "eval_logits": -1.6164112091064453,
80
+ "eval_logps": -90.96741485595703,
81
+ "eval_loss": 0.6808017492294312,
82
+ "eval_objective": 0.6835533380508423,
83
+ "eval_ranking_idealized": 0.6030020713806152,
84
+ "eval_ranking_idealized_expo": 0.5222567319869995,
85
+ "eval_ranking_simple": 0.5331262946128845,
86
+ "eval_regularize": 0.6835533380508423,
87
+ "eval_runtime": 307.7224,
88
+ "eval_samples_per_second": 18.816,
89
+ "eval_steps_per_second": 1.57,
90
+ "eval_wo_beta": 7.86433744430542,
91
+ "step": 100
92
+ },
93
+ {
94
+ "dpo_loss": 0.6406400799751282,
95
+ "epoch": 0.42512990080302315,
96
+ "grad_norm": 18.95367759941799,
97
+ "learning_rate": 8.522727272727273e-07,
98
+ "logits": -1.6548144817352295,
99
+ "logps": -86.31576538085938,
100
+ "loss": 0.6376,
101
+ "objective": 0.6406400799751282,
102
+ "ranking_idealized": 0.6066666841506958,
103
+ "ranking_idealized_expo": 0.5287500023841858,
104
+ "ranking_simple": 0.5554166436195374,
105
+ "regularize": 0.6406400799751282,
106
+ "step": 150,
107
+ "wo_beta": 6.808902740478516
108
+ },
109
+ {
110
+ "epoch": 0.42512990080302315,
111
+ "eval_dpo_loss": 0.683335542678833,
112
+ "eval_logits": -1.6873152256011963,
113
+ "eval_logps": -94.63862609863281,
114
+ "eval_loss": 0.6785484552383423,
115
+ "eval_objective": 0.683335542678833,
116
+ "eval_ranking_idealized": 0.6030020713806152,
117
+ "eval_ranking_idealized_expo": 0.5222567319869995,
118
+ "eval_ranking_simple": 0.5341615080833435,
119
+ "eval_regularize": 0.683335542678833,
120
+ "eval_runtime": 308.25,
121
+ "eval_samples_per_second": 18.783,
122
+ "eval_steps_per_second": 1.567,
123
+ "eval_wo_beta": 8.17447566986084,
124
+ "step": 150
125
+ },
126
+ {
127
+ "dpo_loss": 0.595325231552124,
128
+ "epoch": 0.5668398677373642,
129
+ "grad_norm": 18.26661888886481,
130
+ "learning_rate": 9.99433669591504e-07,
131
+ "logits": -1.7552146911621094,
132
+ "logps": -90.12830352783203,
133
+ "loss": 0.5955,
134
+ "objective": 0.595325231552124,
135
+ "ranking_idealized": 0.5924999713897705,
136
+ "ranking_idealized_expo": 0.5166666507720947,
137
+ "ranking_simple": 0.5649999976158142,
138
+ "regularize": 0.595325231552124,
139
+ "step": 200,
140
+ "wo_beta": 6.764244556427002
141
+ },
142
+ {
143
+ "epoch": 0.5668398677373642,
144
+ "eval_dpo_loss": 0.6818291544914246,
145
+ "eval_logits": -1.8582650423049927,
146
+ "eval_logps": -100.27864837646484,
147
+ "eval_loss": 0.6808217167854309,
148
+ "eval_objective": 0.6818291544914246,
149
+ "eval_ranking_idealized": 0.6030020713806152,
150
+ "eval_ranking_idealized_expo": 0.5222567319869995,
151
+ "eval_ranking_simple": 0.5341615080833435,
152
+ "eval_regularize": 0.6818291544914246,
153
+ "eval_runtime": 307.6797,
154
+ "eval_samples_per_second": 18.818,
155
+ "eval_steps_per_second": 1.57,
156
+ "eval_wo_beta": 7.903720855712891,
157
+ "step": 200
158
+ },
159
+ {
160
+ "dpo_loss": 0.5614926218986511,
161
+ "epoch": 0.7085498346717053,
162
+ "grad_norm": 17.975386844775777,
163
+ "learning_rate": 9.94624571028813e-07,
164
+ "logits": -1.847279667854309,
165
+ "logps": -92.22396850585938,
166
+ "loss": 0.5623,
167
+ "objective": 0.5614926218986511,
168
+ "ranking_idealized": 0.5991666913032532,
169
+ "ranking_idealized_expo": 0.5170833468437195,
170
+ "ranking_simple": 0.5924999713897705,
171
+ "regularize": 0.5614926218986511,
172
+ "step": 250,
173
+ "wo_beta": 6.590356349945068
174
+ },
175
+ {
176
+ "epoch": 0.7085498346717053,
177
+ "eval_dpo_loss": 0.675693154335022,
178
+ "eval_logits": -1.9406613111495972,
179
+ "eval_logps": -97.30335998535156,
180
+ "eval_loss": 0.6757029294967651,
181
+ "eval_objective": 0.675693154335022,
182
+ "eval_ranking_idealized": 0.6030020713806152,
183
+ "eval_ranking_idealized_expo": 0.5222567319869995,
184
+ "eval_ranking_simple": 0.5362318754196167,
185
+ "eval_regularize": 0.675693154335022,
186
+ "eval_runtime": 307.504,
187
+ "eval_samples_per_second": 18.829,
188
+ "eval_steps_per_second": 1.571,
189
+ "eval_wo_beta": 7.916144847869873,
190
+ "step": 250
191
+ },
192
+ {
193
+ "dpo_loss": 0.5231651067733765,
194
+ "epoch": 0.8502598016060463,
195
+ "grad_norm": 21.98858332084632,
196
+ "learning_rate": 9.849553282839024e-07,
197
+ "logits": -1.9162583351135254,
198
+ "logps": -92.107177734375,
199
+ "loss": 0.5255,
200
+ "objective": 0.5231651067733765,
201
+ "ranking_idealized": 0.5799999833106995,
202
+ "ranking_idealized_expo": 0.4970833361148834,
203
+ "ranking_simple": 0.5849999785423279,
204
+ "regularize": 0.5231651067733765,
205
+ "step": 300,
206
+ "wo_beta": 6.37797212600708
207
+ },
208
+ {
209
+ "epoch": 0.8502598016060463,
210
+ "eval_dpo_loss": 0.7118534445762634,
211
+ "eval_logits": -2.031287431716919,
212
+ "eval_logps": -102.48200988769531,
213
+ "eval_loss": 0.7037488222122192,
214
+ "eval_objective": 0.7118534445762634,
215
+ "eval_ranking_idealized": 0.6030020713806152,
216
+ "eval_ranking_idealized_expo": 0.5222567319869995,
217
+ "eval_ranking_simple": 0.5351966619491577,
218
+ "eval_regularize": 0.7118534445762634,
219
+ "eval_runtime": 307.7923,
220
+ "eval_samples_per_second": 18.811,
221
+ "eval_steps_per_second": 1.569,
222
+ "eval_wo_beta": 8.7955904006958,
223
+ "step": 300
224
+ },
225
+ {
226
+ "dpo_loss": 0.4828701913356781,
227
+ "epoch": 0.9919697685403873,
228
+ "grad_norm": 21.148105751270688,
229
+ "learning_rate": 9.705209506155634e-07,
230
+ "logits": -1.863725185394287,
231
+ "logps": -97.61394500732422,
232
+ "loss": 0.4939,
233
+ "objective": 0.4828701913356781,
234
+ "ranking_idealized": 0.60916668176651,
235
+ "ranking_idealized_expo": 0.5270833373069763,
236
+ "ranking_simple": 0.6449999809265137,
237
+ "regularize": 0.4828701913356781,
238
+ "step": 350,
239
+ "wo_beta": 5.530836582183838
240
+ },
241
+ {
242
+ "epoch": 0.9919697685403873,
243
+ "eval_dpo_loss": 0.6916147470474243,
244
+ "eval_logits": -1.9357593059539795,
245
+ "eval_logps": -102.14349365234375,
246
+ "eval_loss": 0.6897423267364502,
247
+ "eval_objective": 0.6916147470474243,
248
+ "eval_ranking_idealized": 0.6030020713806152,
249
+ "eval_ranking_idealized_expo": 0.5222567319869995,
250
+ "eval_ranking_simple": 0.5419254899024963,
251
+ "eval_regularize": 0.6916147470474243,
252
+ "eval_runtime": 309.7171,
253
+ "eval_samples_per_second": 18.694,
254
+ "eval_steps_per_second": 1.559,
255
+ "eval_wo_beta": 8.39614486694336,
256
+ "step": 350
257
+ },
258
+ {
259
+ "epoch": 0.9919697685403873,
260
+ "step": 350,
261
+ "total_flos": 0.0,
262
+ "train_loss": 0.5958910301753453,
263
+ "train_runtime": 9674.8125,
264
+ "train_samples_per_second": 26.255,
265
+ "train_steps_per_second": 0.182
266
+ }
267
+ ],
268
+ "logging_steps": 50,
269
+ "max_steps": 1760,
270
+ "num_input_tokens_seen": 0,
271
+ "num_train_epochs": 5,
272
+ "save_steps": 50,
273
+ "stateful_callbacks": {
274
+ "EarlyStoppingCallback": {
275
+ "args": {
276
+ "early_stopping_patience": 5,
277
+ "early_stopping_threshold": 0.0
278
+ },
279
+ "attributes": {
280
+ "early_stopping_patience_counter": 0
281
+ }
282
+ },
283
+ "TrainerControl": {
284
+ "args": {
285
+ "should_epoch_stop": false,
286
+ "should_evaluate": false,
287
+ "should_log": false,
288
+ "should_save": true,
289
+ "should_training_stop": true
290
+ },
291
+ "attributes": {}
292
+ }
293
+ },
294
+ "total_flos": 0.0,
295
+ "train_batch_size": 4,
296
+ "trial_name": null,
297
+ "trial_params": null
298
+ }