End of training
Files changed:
- README.md +16 -9
- all_results.json +3 -3
- config.json +1 -1
- eval_results.json +4 -4
README.md
CHANGED
@@ -2,11 +2,18 @@
 license: apache-2.0
 base_model: hZzy/qwen2.5-0.5b-sft-news-IFT
 tags:
+- alignment-handbook
+- ndcg
+- trl
+- expo
+- generated_from_trainer
 - trl
 - expo
 - alignment-handbook
 - ndcg
 - generated_from_trainer
+datasets:
+- hZzy/train_pairwise
 model-index:
 - name: qwen2.5-0.5b-expo-DPO-ES-TRY
   results: []
@@ -18,18 +25,18 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/zhiyuzha-university-of-florida/huggingface/runs/jz5qh3m8)
 # qwen2.5-0.5b-expo-DPO-ES-TRY

-This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on
+This model is a fine-tuned version of [hZzy/qwen2.5-0.5b-sft-news-IFT](https://huggingface.co/hZzy/qwen2.5-0.5b-sft-news-IFT) on the hZzy/train_pairwise dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Logps: -
-- Logits: -1.
-- Objective: 0.
-- Dpo Loss: 0.
-- Regularize: 0.
-- Ranking Simple: 0.
+- Loss: 0.6866
+- Logps: -91.4116
+- Logits: -1.5339
+- Objective: 0.6926
+- Dpo Loss: 0.6926
+- Regularize: 0.6926
+- Ranking Simple: 0.5052
 - Ranking Idealized: 0.5888
 - Ranking Idealized Expo: 0.5093
-- Dpo Wo Beta: -
+- Dpo Wo Beta: -0.9551

 ## Model description

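The reported Dpo Loss of 0.6926 sits almost exactly at log 2 ≈ 0.6931, the value the standard DPO objective takes when the policy's reward margin over the reference is zero; Ranking Simple at 0.5052 tells the same near-chance story. Below is a minimal sketch of the plain DPO loss for reference only — the trainer's exact regularized objective (the Objective and Regularize rows) is not part of this diff, so the beta value and the absence of extra terms here are assumptions.

```python
import torch
import torch.nn.functional as F

def dpo_loss(policy_chosen_logps: torch.Tensor,
             policy_rejected_logps: torch.Tensor,
             ref_chosen_logps: torch.Tensor,
             ref_rejected_logps: torch.Tensor,
             beta: float = 0.1) -> torch.Tensor:
    """Plain DPO objective (Rafailov et al., 2023).

    Each tensor holds the summed token log-probabilities of the chosen or
    rejected response under the policy or the frozen reference model.
    """
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)
    # -log sigmoid(margin): equals log(2) ~ 0.693 when the margin is zero,
    # which is why a policy that has barely moved away from its reference
    # reports a DPO loss close to 0.69.
    return -F.logsigmoid(chosen_rewards - rejected_rewards).mean()
```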
all_results.json
CHANGED
@@ -10,10 +10,10 @@
 "eval_ranking_idealized_expo": 0.5092975497245789,
 "eval_ranking_simple": 0.5051652789115906,
 "eval_regularize": 0.6925506591796875,
-"eval_runtime": 210.
+"eval_runtime": 210.7989,
 "eval_samples": 5790,
-"eval_samples_per_second": 27.
-"eval_steps_per_second": 1.
+"eval_samples_per_second": 27.467,
+"eval_steps_per_second": 1.148,
 "total_flos": 0.0,
 "train_loss": 0.29064991334338247,
 "train_runtime": 6418.0206,
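To pull these training statistics without cloning the repo, a small sketch using huggingface_hub is shown below; the repo id is assumed from the model-index name in the README and may differ from the actual published path.

```python
import json
from huggingface_hub import hf_hub_download

# Repo id assumed from the model-index name in the README; adjust if the
# model is published under a different namespace or name.
path = hf_hub_download("hZzy/qwen2.5-0.5b-expo-DPO-ES-TRY", "all_results.json")
with open(path) as f:
    results = json.load(f)

print(results["train_loss"])     # 0.29064991334338247
print(results["train_runtime"])  # 6418.0206 seconds
```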
config.json
CHANGED
@@ -23,7 +23,7 @@
 "tie_word_embeddings": true,
 "torch_dtype": "float32",
 "transformers_version": "4.42.0",
-"use_cache":
+"use_cache": true,
 "use_mrope": false,
 "use_sliding_window": false,
 "vocab_size": 151665
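The only functional change here is use_cache being switched to true, which lets generation reuse the key/value cache instead of recomputing attention over the full prefix at every step; trainers commonly disable it during training (for example when gradient checkpointing is on) and re-enable it for inference. A quick way to confirm the flag after download — the repo id is again an assumption taken from the README:

```python
from transformers import AutoConfig

# Repo id assumed from the model-index name in the README.
config = AutoConfig.from_pretrained("hZzy/qwen2.5-0.5b-expo-DPO-ES-TRY")
print(config.use_cache)             # True after this commit
print(config.transformers_version)  # "4.42.0"
```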
eval_results.json
CHANGED
@@ -1,5 +1,5 @@
 {
-"epoch":
+"epoch": 1.83656117146906,
 "eval_dpo_loss": 0.6925506591796875,
 "eval_dpo_wo_beta": -0.9551481008529663,
 "eval_logits": -1.5338705778121948,
@@ -10,8 +10,8 @@
 "eval_ranking_idealized_expo": 0.5092975497245789,
 "eval_ranking_simple": 0.5051652789115906,
 "eval_regularize": 0.6925506591796875,
-"eval_runtime": 210.
+"eval_runtime": 210.7989,
 "eval_samples": 5790,
-"eval_samples_per_second": 27.
-"eval_steps_per_second": 1.
+"eval_samples_per_second": 27.467,
+"eval_steps_per_second": 1.148
 }
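The new throughput fields are internally consistent, which is a quick sanity check on the values: 5790 samples over 210.7989 seconds is about 27.467 samples per second, and dividing samples per second by steps per second gives an effective evaluation batch of roughly 24. A minimal check, assuming eval_results.json has been downloaded to the working directory:

```python
import json

with open("eval_results.json") as f:
    results = json.load(f)

# 5790 / 210.7989 ≈ 27.467, matching eval_samples_per_second.
print(results["eval_samples"] / results["eval_runtime"])
# 27.467 / 1.148 ≈ 23.9, i.e. an effective eval batch size of about 24.
print(results["eval_samples_per_second"] / results["eval_steps_per_second"])
```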