Model save
Browse files
- README.md +76 -0
- all_results.json +8 -0
- generation_config.json +7 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +261 -0
- runs/Feb29_22-08-46_ip-26-0-161-178/events.out.tfevents.1709244785.ip-26-0-161-178.1167714.0 +2 -2
- train_results.json +8 -0
- trainer_state.json +814 -0
README.md
ADDED
@@ -0,0 +1,76 @@
+---
+license: other
+base_model: lewtun/gemma-7b-sft-full-ultrachat-v0
+tags:
+- trl
+- dpo
+- generated_from_trainer
+model-index:
+- name: gemma-7b-dpo-full-ultrafeedback-beta-0.01
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# gemma-7b-dpo-full-ultrafeedback-beta-0.01
+
+This model is a fine-tuned version of [lewtun/gemma-7b-sft-full-ultrachat-v0](https://huggingface.co/lewtun/gemma-7b-sft-full-ultrachat-v0) on the None dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.4718
+- Rewards/chosen: -0.8508
+- Rewards/rejected: -2.1538
+- Rewards/accuracies: 0.7817
+- Rewards/margins: 1.3030
+- Logps/rejected: -1100.8470
+- Logps/chosen: -990.8950
+- Logits/rejected: 89.1600
+- Logits/chosen: 104.0108
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-07
+- train_batch_size: 2
+- eval_batch_size: 4
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 8
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 128
+- total_eval_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_ratio: 0.1
+- num_epochs: 1
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+|:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+| 0.552         | 0.21  | 100  | 0.5756          | -2.8657        | -3.5901          | 0.7460             | 0.7243          | -1244.4771     | -1192.3933   | 82.3244         | 96.5612       |
+| 0.501         | 0.42  | 200  | 0.4914          | -1.6427        | -2.6660          | 0.7817             | 1.0233          | -1152.0745     | -1070.0895   | 91.1202         | 105.1467      |
+| 0.4893        | 0.63  | 300  | 0.4810          | -1.6604        | -2.8398          | 0.7619             | 1.1794          | -1169.4480     | -1071.8550   | 87.4237         | 101.9799      |
+| 0.4759        | 0.84  | 400  | 0.4718          | -0.8508        | -2.1538          | 0.7817             | 1.3030          | -1100.8470     | -990.8950    | 89.1600         | 104.0108      |
+
+
+### Framework versions
+
+- Transformers 4.39.0.dev0
+- Pytorch 2.1.2+cu121
+- Datasets 2.14.6
+- Tokenizers 0.15.1
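The auto-generated card stops short of a usage example. Below is a minimal inference sketch, assuming the checkpoint is published on the Hub under the model-index name above (the owning `org/` prefix is not shown in this commit, so `model_id` is hypothetical) and that the tokenizer carries a chat template inherited from the UltraChat SFT base:

```python
# Minimal inference sketch; model_id is a hypothetical repo id, and the chat
# template is assumed to come from the UltraChat SFT base model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gemma-7b-dpo-full-ultrafeedback-beta-0.01"  # hypothetical; prepend the owning org

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Explain DPO in one paragraph."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```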
all_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "train_loss": 0.5152078939433867,
+    "train_runtime": 5321.4428,
+    "train_samples": 61135,
+    "train_samples_per_second": 11.488,
+    "train_steps_per_second": 0.09
+}
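These throughput figures are self-consistent and easy to check by hand: `train_samples / train_runtime` reproduces `train_samples_per_second`, and dividing the 477 optimizer steps recorded in `trainer_state.json` (further down in this commit) by the runtime reproduces the rounded `train_steps_per_second`:

```python
# Sanity check of the reported throughput in all_results.json.
train_samples = 61135
train_runtime = 5321.4428   # seconds
global_step = 477           # from trainer_state.json below

print(round(train_samples / train_runtime, 3))  # 11.488 -> train_samples_per_second
print(round(global_step / train_runtime, 2))    # 0.09   -> train_steps_per_second
```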
generation_config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.39.0.dev0"
+}
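These fields become the model's generation defaults: `GenerationConfig.from_pretrained` reads this file, and `generate()` falls back to it for the special-token ids. A small sketch, reusing the hypothetical `model_id` from the usage example above:

```python
# Load the generation defaults saved in generation_config.json.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained(model_id)  # hypothetical repo id as above
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 2 1 0
```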
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34d37d3e20e6926aee530b2cab69fbb64f6a22df46d727a472fd44b710f424e8
+size 4995496656
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17111c7a14501043c274618bb8ec08e66ddf9447e102b75cb2e3ebf07dffad6f
+size 4982953168
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d3bbbffa1f9a6cd81d6b5b7273d1d26d399317f65517cbf5c74bd5f1d7e99c1
+size 4982953200
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91e5b1fb7457fbf470a4844162ff99e5397754c5576b938a46e11c613151cfbc
+size 2113988336
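The four `.safetensors` entries above are Git LFS pointer files rather than the weights themselves: three `key value` lines giving the pointer spec version, the SHA-256 of the actual blob, and its size in bytes. A minimal parser, assuming exactly that layout:

```python
# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:91e5b1fb7457fbf470a4844162ff99e5397754c5576b938a46e11c613151cfbc
size 2113988336"""

info = parse_lfs_pointer(pointer)
print(info["oid"])        # sha256 digest of the fourth shard's real blob
print(int(info["size"]))  # 2113988336 bytes (~2.1 GB)
```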
model.safetensors.index.json
ADDED
@@ -0,0 +1,261 @@
+{
+  "metadata": {
+    "total_size": 17075361792
+  },
+  "weight_map": {
+    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.norm.weight": "model-00004-of-00004.safetensors"
+  }
+}
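`weight_map` tells a loader which shard holds each tensor, so a single tensor can be read without opening the other three files. (`total_size` counts tensor bytes only, which is presumably why it is slightly smaller than the summed shard file sizes; each safetensors file also carries a small JSON header.) A sketch, assuming the index and shards have been downloaded locally:

```python
# Resolve one tensor to its shard via the index, then read just that tensor.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.15.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00004.safetensors"

with safe_open(shard, framework="pt") as st:
    tensor = st.get_tensor(name)
print(shard, tuple(tensor.shape))
```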
runs/Feb29_22-08-46_ip-26-0-161-178/events.out.tfevents.1709244785.ip-26-0-161-178.1167714.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b5278b627d72d88fd1f2e5d65580a25bb7b3aa9b7fb3793b1bfc38c30b7afa80
+size 41062
train_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "train_loss": 0.5152078939433867,
+    "train_runtime": 5321.4428,
+    "train_samples": 61135,
+    "train_samples_per_second": 11.488,
+    "train_steps_per_second": 0.09
+}
trainer_state.json
ADDED
@@ -0,0 +1,814 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.998691442030882,
+  "eval_steps": 100,
+  "global_step": 477,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0,
+      "grad_norm": 19.889135020542987,
+      "learning_rate": 1.0416666666666666e-08,
+      "logits/chosen": 173.92840576171875,
+      "logits/rejected": 133.92694091796875,
+      "logps/chosen": -944.73779296875,
+      "logps/rejected": -877.1348876953125,
+      "loss": 0.6931,
+      "rewards/accuracies": 0.0,
+      "rewards/chosen": 0.0,
+      "rewards/margins": 0.0,
+      "rewards/rejected": 0.0,
+      "step": 1
+    },
+    {
+      "epoch": 0.02,
+      "grad_norm": 18.648812633756638,
+      "learning_rate": 1.0416666666666667e-07,
+      "logits/chosen": 158.15008544921875,
+      "logits/rejected": 151.00912475585938,
+      "logps/chosen": -866.2621459960938,
+      "logps/rejected": -858.798583984375,
+      "loss": 0.6925,
+      "rewards/accuracies": 0.4444444477558136,
+      "rewards/chosen": 0.0011810450814664364,
+      "rewards/margins": -9.146943193627521e-05,
+      "rewards/rejected": 0.0012725150445476174,
+      "step": 10
+    },
+    {
+      "epoch": 0.04,
+      "grad_norm": 132.24935780648104,
+      "learning_rate": 2.0833333333333333e-07,
+      "logits/chosen": 162.58755493164062,
+      "logits/rejected": 141.87896728515625,
+      "logps/chosen": -871.9114990234375,
+      "logps/rejected": -856.8277587890625,
+      "loss": 0.69,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": 0.02589397504925728,
+      "rewards/margins": 0.012839567847549915,
+      "rewards/rejected": 0.013054406270384789,
+      "step": 20
+    },
+    {
+      "epoch": 0.06,
+      "grad_norm": 29.299152561983977,
+      "learning_rate": 3.1249999999999997e-07,
+      "logits/chosen": 148.5764617919922,
+      "logits/rejected": 148.60226440429688,
+      "logps/chosen": -834.642578125,
+      "logps/rejected": -821.4388427734375,
+      "loss": 0.6792,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": 0.09783056378364563,
+      "rewards/margins": 0.019630378112196922,
+      "rewards/rejected": 0.07820017635822296,
+      "step": 30
+    },
+    {
+      "epoch": 0.08,
+      "grad_norm": 20.662529763504946,
+      "learning_rate": 4.1666666666666667e-07,
+      "logits/chosen": 149.3659210205078,
+      "logits/rejected": 131.38018798828125,
+      "logps/chosen": -907.97509765625,
+      "logps/rejected": -912.3134765625,
+      "loss": 0.6489,
+      "rewards/accuracies": 0.606249988079071,
+      "rewards/chosen": 0.009358148090541363,
+      "rewards/margins": 0.11060555279254913,
+      "rewards/rejected": -0.1012474074959755,
+      "step": 40
+    },
+    {
+      "epoch": 0.1,
+      "grad_norm": 37.45560682117618,
+      "learning_rate": 4.999731868769026e-07,
+      "logits/chosen": 141.44911193847656,
+      "logits/rejected": 129.70591735839844,
+      "logps/chosen": -925.8624877929688,
+      "logps/rejected": -942.8903198242188,
+      "loss": 0.6263,
+      "rewards/accuracies": 0.7562500238418579,
+      "rewards/chosen": -0.42348790168762207,
+      "rewards/margins": 0.23968985676765442,
+      "rewards/rejected": -0.6631777286529541,
+      "step": 50
+    },
+    {
+      "epoch": 0.13,
+      "grad_norm": 35.27997957962436,
+      "learning_rate": 4.990353313429303e-07,
+      "logits/chosen": 104.57405090332031,
+      "logits/rejected": 102.98997497558594,
+      "logps/chosen": -994.7630615234375,
+      "logps/rejected": -1038.8465576171875,
+      "loss": 0.6063,
+      "rewards/accuracies": 0.6312500238418579,
+      "rewards/chosen": -1.1830905675888062,
+      "rewards/margins": 0.3269960284233093,
+      "rewards/rejected": -1.5100867748260498,
+      "step": 60
+    },
+    {
+      "epoch": 0.15,
+      "grad_norm": 49.67365299996332,
+      "learning_rate": 4.967625656594781e-07,
+      "logits/chosen": 119.69749450683594,
+      "logits/rejected": 109.80323791503906,
+      "logps/chosen": -1019.2648315429688,
+      "logps/rejected": -1040.623779296875,
+      "loss": 0.5565,
+      "rewards/accuracies": 0.6875,
+      "rewards/chosen": -1.3080466985702515,
+      "rewards/margins": 0.5215330123901367,
+      "rewards/rejected": -1.8295797109603882,
+      "step": 70
+    },
+    {
+      "epoch": 0.17,
+      "grad_norm": 37.66437513199864,
+      "learning_rate": 4.93167072587771e-07,
+      "logits/chosen": 120.96675109863281,
+      "logits/rejected": 99.65242767333984,
+      "logps/chosen": -1079.8636474609375,
+      "logps/rejected": -1099.0491943359375,
+      "loss": 0.5567,
+      "rewards/accuracies": 0.7749999761581421,
+      "rewards/chosen": -1.4773412942886353,
+      "rewards/margins": 0.6472301483154297,
+      "rewards/rejected": -2.1245713233947754,
+      "step": 80
+    },
+    {
+      "epoch": 0.19,
+      "grad_norm": 40.6528309450039,
+      "learning_rate": 4.882681251368548e-07,
+      "logits/chosen": 102.38163757324219,
+      "logits/rejected": 100.23868560791016,
+      "logps/chosen": -1027.5,
+      "logps/rejected": -1059.419921875,
+      "loss": 0.5699,
+      "rewards/accuracies": 0.699999988079071,
+      "rewards/chosen": -1.292600393295288,
+      "rewards/margins": 0.5152118802070618,
+      "rewards/rejected": -1.8078124523162842,
+      "step": 90
+    },
+    {
+      "epoch": 0.21,
+      "grad_norm": 38.60858209436158,
+      "learning_rate": 4.820919832540181e-07,
+      "logits/chosen": 118.79133605957031,
+      "logits/rejected": 104.7783432006836,
+      "logps/chosen": -1127.0018310546875,
+      "logps/rejected": -1152.863037109375,
+      "loss": 0.552,
+      "rewards/accuracies": 0.6499999761581421,
+      "rewards/chosen": -2.3822240829467773,
+      "rewards/margins": 0.6033691167831421,
+      "rewards/rejected": -2.985593318939209,
+      "step": 100
+    },
+    {
+      "epoch": 0.21,
+      "eval_logits/chosen": 96.5611801147461,
+      "eval_logits/rejected": 82.32440185546875,
+      "eval_logps/chosen": -1192.393310546875,
+      "eval_logps/rejected": -1244.47705078125,
+      "eval_loss": 0.5756222009658813,
+      "eval_rewards/accuracies": 0.7460317611694336,
+      "eval_rewards/chosen": -2.8657495975494385,
+      "eval_rewards/margins": 0.7243059873580933,
+      "eval_rewards/rejected": -3.590055465698242,
+      "eval_runtime": 98.8707,
+      "eval_samples_per_second": 20.228,
+      "eval_steps_per_second": 0.637,
+      "step": 100
+    },
+    {
+      "epoch": 0.23,
+      "grad_norm": 34.77575362187783,
+      "learning_rate": 4.7467175306295647e-07,
+      "logits/chosen": 113.10517883300781,
+      "logits/rejected": 108.6837158203125,
+      "logps/chosen": -1058.0145263671875,
+      "logps/rejected": -1144.630859375,
+      "loss": 0.5379,
+      "rewards/accuracies": 0.7562500238418579,
+      "rewards/chosen": -1.9207611083984375,
+      "rewards/margins": 0.8025757074356079,
+      "rewards/rejected": -2.723336696624756,
+      "step": 110
+    },
+    {
+      "epoch": 0.25,
+      "grad_norm": 37.99281988815398,
+      "learning_rate": 4.6604720940421207e-07,
+      "logits/chosen": 127.37345123291016,
+      "logits/rejected": 113.18028259277344,
+      "logps/chosen": -1034.3157958984375,
+      "logps/rejected": -1087.8922119140625,
+      "loss": 0.561,
+      "rewards/accuracies": 0.75,
+      "rewards/chosen": -1.195056676864624,
+      "rewards/margins": 0.6624664068222046,
+      "rewards/rejected": -1.857522964477539,
+      "step": 120
+    },
+    {
+      "epoch": 0.27,
+      "grad_norm": 30.358803498156306,
+      "learning_rate": 4.5626458262912735e-07,
+      "logits/chosen": 117.21207427978516,
+      "logits/rejected": 122.45979309082031,
+      "logps/chosen": -1013.4964599609375,
+      "logps/rejected": -1076.5015869140625,
+      "loss": 0.5326,
+      "rewards/accuracies": 0.71875,
+      "rewards/chosen": -1.4635175466537476,
+      "rewards/margins": 0.6195371150970459,
+      "rewards/rejected": -2.083055019378662,
+      "step": 130
+    },
+    {
+      "epoch": 0.29,
+      "grad_norm": 33.33546006946111,
+      "learning_rate": 4.453763107901675e-07,
+      "logits/chosen": 120.96626281738281,
+      "logits/rejected": 99.00543212890625,
+      "logps/chosen": -1037.393798828125,
+      "logps/rejected": -1113.715576171875,
+      "loss": 0.5061,
+      "rewards/accuracies": 0.7437499761581421,
+      "rewards/chosen": -1.6467615365982056,
+      "rewards/margins": 0.959250807762146,
+      "rewards/rejected": -2.6060123443603516,
+      "step": 140
+    },
+    {
+      "epoch": 0.31,
+      "grad_norm": 36.216638034956645,
+      "learning_rate": 4.3344075855595097e-07,
+      "logits/chosen": 107.6335678100586,
+      "logits/rejected": 96.20887756347656,
+      "logps/chosen": -982.7149658203125,
+      "logps/rejected": -1042.314697265625,
+      "loss": 0.5158,
+      "rewards/accuracies": 0.7749999761581421,
+      "rewards/chosen": -0.9869440197944641,
+      "rewards/margins": 0.917373538017273,
+      "rewards/rejected": -1.9043172597885132,
+      "step": 150
+    },
+    {
+      "epoch": 0.33,
+      "grad_norm": 41.67256305682121,
+      "learning_rate": 4.2052190435769554e-07,
+      "logits/chosen": 113.42124938964844,
+      "logits/rejected": 102.60457611083984,
+      "logps/chosen": -1003.9383544921875,
+      "logps/rejected": -1058.088134765625,
+      "loss": 0.5019,
+      "rewards/accuracies": 0.6812499761581421,
+      "rewards/chosen": -1.3599907159805298,
+      "rewards/margins": 0.8325563669204712,
+      "rewards/rejected": -2.192547082901001,
+      "step": 160
+    },
+    {
+      "epoch": 0.36,
+      "grad_norm": 45.33240260875551,
+      "learning_rate": 4.0668899744407567e-07,
+      "logits/chosen": 111.82502746582031,
+      "logits/rejected": 104.21772766113281,
+      "logps/chosen": -1027.629638671875,
+      "logps/rejected": -1113.077880859375,
+      "loss": 0.5128,
+      "rewards/accuracies": 0.7250000238418579,
+      "rewards/chosen": -1.418601632118225,
+      "rewards/margins": 0.8126633763313293,
+      "rewards/rejected": -2.231265068054199,
+      "step": 170
+    },
+    {
+      "epoch": 0.38,
+      "grad_norm": 39.41765859124985,
+      "learning_rate": 3.920161866827889e-07,
+      "logits/chosen": 119.5909423828125,
+      "logits/rejected": 101.44880676269531,
+      "logps/chosen": -926.8933715820312,
+      "logps/rejected": -969.1472778320312,
+      "loss": 0.5088,
+      "rewards/accuracies": 0.706250011920929,
+      "rewards/chosen": -0.4561997354030609,
+      "rewards/margins": 0.7785950899124146,
+      "rewards/rejected": -1.2347948551177979,
+      "step": 180
+    },
+    {
+      "epoch": 0.4,
+      "grad_norm": 43.60352127706551,
+      "learning_rate": 3.765821230985757e-07,
+      "logits/chosen": 129.9263916015625,
+      "logits/rejected": 123.3591079711914,
+      "logps/chosen": -996.8531494140625,
+      "logps/rejected": -1059.179443359375,
+      "loss": 0.5073,
+      "rewards/accuracies": 0.706250011920929,
+      "rewards/chosen": -1.3905432224273682,
+      "rewards/margins": 0.9462689161300659,
+      "rewards/rejected": -2.3368120193481445,
+      "step": 190
+    },
+    {
+      "epoch": 0.42,
+      "grad_norm": 41.35808766754113,
+      "learning_rate": 3.604695382782159e-07,
+      "logits/chosen": 124.6067886352539,
+      "logits/rejected": 118.35124206542969,
+      "logps/chosen": -1090.52783203125,
+      "logps/rejected": -1147.1068115234375,
+      "loss": 0.501,
+      "rewards/accuracies": 0.7749999761581421,
+      "rewards/chosen": -2.105625629425049,
+      "rewards/margins": 0.8636106252670288,
+      "rewards/rejected": -2.969236373901367,
+      "step": 200
+    },
+    {
+      "epoch": 0.42,
+      "eval_logits/chosen": 105.14665222167969,
+      "eval_logits/rejected": 91.12017059326172,
+      "eval_logps/chosen": -1070.0894775390625,
+      "eval_logps/rejected": -1152.074462890625,
+      "eval_loss": 0.49135974049568176,
+      "eval_rewards/accuracies": 0.7817460298538208,
+      "eval_rewards/chosen": -1.6427117586135864,
+      "eval_rewards/margins": 1.0233176946640015,
+      "eval_rewards/rejected": -2.666029453277588,
+      "eval_runtime": 98.2325,
+      "eval_samples_per_second": 20.36,
+      "eval_steps_per_second": 0.641,
+      "step": 200
+    },
+    {
+      "epoch": 0.44,
+      "grad_norm": 40.009657639886804,
+      "learning_rate": 3.4376480090239047e-07,
+      "logits/chosen": 119.9991683959961,
+      "logits/rejected": 104.25712585449219,
+      "logps/chosen": -1026.137451171875,
+      "logps/rejected": -1069.336669921875,
+      "loss": 0.4974,
+      "rewards/accuracies": 0.7250000238418579,
+      "rewards/chosen": -1.1238807439804077,
+      "rewards/margins": 0.9766911268234253,
+      "rewards/rejected": -2.100571870803833,
+      "step": 210
+    },
+    {
+      "epoch": 0.46,
+      "grad_norm": 37.28316867834671,
+      "learning_rate": 3.265574537815398e-07,
+      "logits/chosen": 124.17179107666016,
+      "logits/rejected": 104.73355865478516,
+      "logps/chosen": -962.50244140625,
+      "logps/rejected": -1003.451171875,
+      "loss": 0.4961,
+      "rewards/accuracies": 0.7437499761581421,
+      "rewards/chosen": -0.7499821782112122,
+      "rewards/margins": 0.8301311731338501,
+      "rewards/rejected": -1.580113172531128,
+      "step": 220
+    },
+    {
+      "epoch": 0.48,
+      "grad_norm": 54.558673930247686,
+      "learning_rate": 3.0893973387735683e-07,
+      "logits/chosen": 130.6039581298828,
+      "logits/rejected": 105.5712661743164,
+      "logps/chosen": -944.1550903320312,
+      "logps/rejected": -1030.9761962890625,
+      "loss": 0.4864,
+      "rewards/accuracies": 0.7437499761581421,
+      "rewards/chosen": -0.652384877204895,
+      "rewards/margins": 1.2353802919387817,
+      "rewards/rejected": -1.8877651691436768,
+      "step": 230
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 44.93883051182552,
+      "learning_rate": 2.910060778827554e-07,
+      "logits/chosen": 124.2732162475586,
+      "logits/rejected": 106.26287841796875,
+      "logps/chosen": -974.0313720703125,
+      "logps/rejected": -1065.3079833984375,
+      "loss": 0.4814,
+      "rewards/accuracies": 0.7562500238418579,
+      "rewards/chosen": -0.758819043636322,
+      "rewards/margins": 1.1402368545532227,
+      "rewards/rejected": -1.8990558385849,
+      "step": 240
+    },
+    {
+      "epoch": 0.52,
+      "grad_norm": 44.64312531732698,
+      "learning_rate": 2.7285261601056697e-07,
+      "logits/chosen": 116.86869812011719,
+      "logits/rejected": 101.10758972167969,
+      "logps/chosen": -952.7760620117188,
+      "logps/rejected": -1048.2093505859375,
+      "loss": 0.4998,
+      "rewards/accuracies": 0.78125,
+      "rewards/chosen": -0.9232794046401978,
+      "rewards/margins": 1.1742398738861084,
+      "rewards/rejected": -2.0975191593170166,
+      "step": 250
+    },
+    {
+      "epoch": 0.54,
+      "grad_norm": 40.68735732627764,
+      "learning_rate": 2.5457665670441937e-07,
+      "logits/chosen": 123.28076171875,
+      "logits/rejected": 102.635986328125,
+      "logps/chosen": -893.8860473632812,
+      "logps/rejected": -1004.1375732421875,
+      "loss": 0.4785,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": -0.23751500248908997,
+      "rewards/margins": 1.1629530191421509,
+      "rewards/rejected": -1.4004679918289185,
+      "step": 260
+    },
+    {
+      "epoch": 0.57,
+      "grad_norm": 38.53534572807841,
+      "learning_rate": 2.3627616503391812e-07,
+      "logits/chosen": 118.7505874633789,
+      "logits/rejected": 110.69478607177734,
+      "logps/chosen": -951.8123779296875,
+      "logps/rejected": -1055.506103515625,
+      "loss": 0.4888,
+      "rewards/accuracies": 0.731249988079071,
+      "rewards/chosen": -0.709445595741272,
+      "rewards/margins": 1.0170724391937256,
+      "rewards/rejected": -1.7265180349349976,
+      "step": 270
+    },
+    {
+      "epoch": 0.59,
+      "grad_norm": 39.95351928619943,
+      "learning_rate": 2.1804923757009882e-07,
+      "logits/chosen": 130.1121063232422,
+      "logits/rejected": 109.01686096191406,
+      "logps/chosen": -1051.2080078125,
+      "logps/rejected": -1150.492431640625,
+      "loss": 0.4577,
+      "rewards/accuracies": 0.7749999761581421,
+      "rewards/chosen": -1.1860597133636475,
+      "rewards/margins": 1.2530558109283447,
+      "rewards/rejected": -2.439115285873413,
+      "step": 280
+    },
+    {
+      "epoch": 0.61,
+      "grad_norm": 36.79184277903521,
+      "learning_rate": 1.9999357655598891e-07,
+      "logits/chosen": 125.7824935913086,
+      "logits/rejected": 115.7079086303711,
+      "logps/chosen": -1027.8924560546875,
+      "logps/rejected": -1112.7578125,
+      "loss": 0.4803,
+      "rewards/accuracies": 0.768750011920929,
+      "rewards/chosen": -1.5402090549468994,
+      "rewards/margins": 1.2050487995147705,
+      "rewards/rejected": -2.74525785446167,
+      "step": 290
+    },
+    {
+      "epoch": 0.63,
+      "grad_norm": 42.82758424377552,
+      "learning_rate": 1.8220596619089573e-07,
+      "logits/chosen": 122.8412094116211,
+      "logits/rejected": 110.88859558105469,
+      "logps/chosen": -1007.1027221679688,
+      "logps/rejected": -1088.6361083984375,
+      "loss": 0.4893,
+      "rewards/accuracies": 0.75,
+      "rewards/chosen": -1.636523962020874,
+      "rewards/margins": 1.004431962966919,
+      "rewards/rejected": -2.640956163406372,
+      "step": 300
+    },
+    {
+      "epoch": 0.63,
+      "eval_logits/chosen": 101.97990417480469,
+      "eval_logits/rejected": 87.42365264892578,
+      "eval_logps/chosen": -1071.85498046875,
+      "eval_logps/rejected": -1169.447998046875,
+      "eval_loss": 0.48103758692741394,
+      "eval_rewards/accuracies": 0.761904776096344,
+      "eval_rewards/chosen": -1.6603679656982422,
+      "eval_rewards/margins": 1.1793973445892334,
+      "eval_rewards/rejected": -2.8397650718688965,
+      "eval_runtime": 101.0727,
+      "eval_samples_per_second": 19.788,
+      "eval_steps_per_second": 0.623,
+      "step": 300
+    },
+    {
+      "epoch": 0.65,
+      "grad_norm": 38.54631864265249,
+      "learning_rate": 1.647817538357072e-07,
+      "logits/chosen": 113.38005065917969,
+      "logits/rejected": 110.6764144897461,
+      "logps/chosen": -995.9137573242188,
+      "logps/rejected": -1127.708251953125,
+      "loss": 0.4446,
+      "rewards/accuracies": 0.78125,
+      "rewards/chosen": -1.5860974788665771,
+      "rewards/margins": 1.1870568990707397,
+      "rewards/rejected": -2.7731544971466064,
+      "step": 310
+    },
+    {
+      "epoch": 0.67,
+      "grad_norm": 44.46063064657573,
+      "learning_rate": 1.478143389201113e-07,
+      "logits/chosen": 119.3581771850586,
+      "logits/rejected": 113.27059173583984,
+      "logps/chosen": -1008.1510620117188,
+      "logps/rejected": -1137.126220703125,
+      "loss": 0.4747,
+      "rewards/accuracies": 0.768750011920929,
+      "rewards/chosen": -1.4959969520568848,
+      "rewards/margins": 1.4020049571990967,
+      "rewards/rejected": -2.898001194000244,
+      "step": 320
+    },
+    {
+      "epoch": 0.69,
+      "grad_norm": 34.37130611293822,
+      "learning_rate": 1.3139467229135998e-07,
+      "logits/chosen": 129.7974090576172,
+      "logits/rejected": 115.29783630371094,
+      "logps/chosen": -1069.2001953125,
+      "logps/rejected": -1156.9002685546875,
+      "loss": 0.4666,
+      "rewards/accuracies": 0.75,
+      "rewards/chosen": -1.5326635837554932,
+      "rewards/margins": 1.1574156284332275,
+      "rewards/rejected": -2.6900792121887207,
+      "step": 330
+    },
+    {
+      "epoch": 0.71,
+      "grad_norm": 37.51413061347318,
+      "learning_rate": 1.1561076868822755e-07,
+      "logits/chosen": 135.8931884765625,
+      "logits/rejected": 121.91889953613281,
+      "logps/chosen": -1037.731689453125,
+      "logps/rejected": -1122.47412109375,
+      "loss": 0.466,
+      "rewards/accuracies": 0.7124999761581421,
+      "rewards/chosen": -1.680929183959961,
+      "rewards/margins": 1.011832356452942,
+      "rewards/rejected": -2.6927616596221924,
+      "step": 340
+    },
+    {
+      "epoch": 0.73,
+      "grad_norm": 44.78811791068193,
+      "learning_rate": 1.0054723495346482e-07,
+      "logits/chosen": 131.4265899658203,
+      "logits/rejected": 113.61830139160156,
+      "logps/chosen": -1038.681396484375,
+      "logps/rejected": -1120.1895751953125,
+      "loss": 0.4816,
+      "rewards/accuracies": 0.6937500238418579,
+      "rewards/chosen": -1.5251903533935547,
+      "rewards/margins": 1.061643362045288,
+      "rewards/rejected": -2.5868337154388428,
+      "step": 350
+    },
+    {
+      "epoch": 0.75,
+      "grad_norm": 48.873185347547825,
+      "learning_rate": 8.628481651367875e-08,
+      "logits/chosen": 131.0110321044922,
+      "logits/rejected": 106.7862548828125,
+      "logps/chosen": -1003.0614013671875,
+      "logps/rejected": -1117.0228271484375,
+      "loss": 0.4628,
+      "rewards/accuracies": 0.71875,
+      "rewards/chosen": -1.315259337425232,
+      "rewards/margins": 1.2398252487182617,
+      "rewards/rejected": -2.555084466934204,
+      "step": 360
+    },
+    {
+      "epoch": 0.77,
+      "grad_norm": 43.10852524272028,
+      "learning_rate": 7.289996455765748e-08,
+      "logits/chosen": 119.69869232177734,
+      "logits/rejected": 114.07867431640625,
+      "logps/chosen": -1036.690673828125,
+      "logps/rejected": -1146.2222900390625,
+      "loss": 0.4818,
+      "rewards/accuracies": 0.7749999761581421,
+      "rewards/chosen": -1.3266334533691406,
+      "rewards/margins": 1.4441391229629517,
+      "rewards/rejected": -2.7707724571228027,
+      "step": 370
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 38.151146819157155,
+      "learning_rate": 6.046442623320145e-08,
+      "logits/chosen": 122.61408996582031,
+      "logits/rejected": 126.60284423828125,
+      "logps/chosen": -1007.2545166015625,
+      "logps/rejected": -1128.876220703125,
+      "loss": 0.4616,
+      "rewards/accuracies": 0.7437499761581421,
+      "rewards/chosen": -1.2509572505950928,
+      "rewards/margins": 1.2325677871704102,
+      "rewards/rejected": -2.483525037765503,
+      "step": 380
+    },
+    {
+      "epoch": 0.82,
+      "grad_norm": 42.436222296241255,
+      "learning_rate": 4.904486005914027e-08,
+      "logits/chosen": 134.9565887451172,
+      "logits/rejected": 112.53916931152344,
+      "logps/chosen": -989.7498779296875,
+      "logps/rejected": -1100.5008544921875,
+      "loss": 0.466,
+      "rewards/accuracies": 0.7875000238418579,
+      "rewards/chosen": -1.100543737411499,
+      "rewards/margins": 1.3375900983810425,
+      "rewards/rejected": -2.438133955001831,
+      "step": 390
+    },
+    {
+      "epoch": 0.84,
+      "grad_norm": 45.12243805369195,
+      "learning_rate": 3.8702478614051345e-08,
+      "logits/chosen": 124.96537780761719,
+      "logits/rejected": 101.06147766113281,
+      "logps/chosen": -953.6491088867188,
+      "logps/rejected": -1035.303466796875,
+      "loss": 0.4759,
+      "rewards/accuracies": 0.78125,
+      "rewards/chosen": -0.9369322061538696,
+      "rewards/margins": 1.2312959432601929,
+      "rewards/rejected": -2.1682283878326416,
+      "step": 400
+    },
+    {
+      "epoch": 0.84,
+      "eval_logits/chosen": 104.01083374023438,
+      "eval_logits/rejected": 89.1600341796875,
+      "eval_logps/chosen": -990.8949584960938,
+      "eval_logps/rejected": -1100.8470458984375,
+      "eval_loss": 0.47177109122276306,
+      "eval_rewards/accuracies": 0.7817460298538208,
+      "eval_rewards/chosen": -0.8507668972015381,
+      "eval_rewards/margins": 1.3029894828796387,
+      "eval_rewards/rejected": -2.153756618499756,
+      "eval_runtime": 100.3183,
+      "eval_samples_per_second": 19.937,
+      "eval_steps_per_second": 0.628,
+      "step": 400
+    },
+    {
+      "epoch": 0.86,
+      "grad_norm": 41.02552262522525,
+      "learning_rate": 2.9492720416985e-08,
+      "logits/chosen": 134.49705505371094,
+      "logits/rejected": 125.9228744506836,
+      "logps/chosen": -977.2227783203125,
+      "logps/rejected": -1063.806396484375,
+      "loss": 0.4743,
+      "rewards/accuracies": 0.706250011920929,
+      "rewards/chosen": -0.9780263900756836,
+      "rewards/margins": 0.9129725694656372,
+      "rewards/rejected": -1.8909988403320312,
+      "step": 410
+    },
+    {
+      "epoch": 0.88,
+      "grad_norm": 38.58815640645097,
+      "learning_rate": 2.1464952759020856e-08,
+      "logits/chosen": 125.35734558105469,
+      "logits/rejected": 111.65289306640625,
+      "logps/chosen": -949.1236572265625,
+      "logps/rejected": -1051.959228515625,
+      "loss": 0.4823,
+      "rewards/accuracies": 0.7437499761581421,
+      "rewards/chosen": -0.9723941683769226,
+      "rewards/margins": 1.144566297531128,
+      "rewards/rejected": -2.116960287094116,
+      "step": 420
+    },
+    {
+      "epoch": 0.9,
+      "grad_norm": 31.91004161837456,
+      "learning_rate": 1.4662207078575684e-08,
+      "logits/chosen": 121.03411865234375,
+      "logits/rejected": 110.73746490478516,
+      "logps/chosen": -962.5640869140625,
+      "logps/rejected": -1089.4044189453125,
+      "loss": 0.4505,
+      "rewards/accuracies": 0.78125,
+      "rewards/chosen": -1.0843942165374756,
+      "rewards/margins": 1.454674243927002,
+      "rewards/rejected": -2.5390686988830566,
+      "step": 430
+    },
+    {
+      "epoch": 0.92,
+      "grad_norm": 37.24587161270191,
+      "learning_rate": 9.12094829893642e-09,
+      "logits/chosen": 122.1268081665039,
+      "logits/rejected": 124.87750244140625,
+      "logps/chosen": -947.4508666992188,
+      "logps/rejected": -1111.0018310546875,
+      "loss": 0.4731,
+      "rewards/accuracies": 0.75,
+      "rewards/chosen": -1.1550958156585693,
+      "rewards/margins": 1.279443621635437,
+      "rewards/rejected": -2.434539318084717,
+      "step": 440
+    },
+    {
+      "epoch": 0.94,
+      "grad_norm": 33.88329943101718,
+      "learning_rate": 4.8708793644441086e-09,
+      "logits/chosen": 123.2929916381836,
+      "logits/rejected": 118.64930725097656,
+      "logps/chosen": -1003.01220703125,
+      "logps/rejected": -1144.7047119140625,
+      "loss": 0.4683,
+      "rewards/accuracies": 0.793749988079071,
+      "rewards/chosen": -1.1160547733306885,
+      "rewards/margins": 1.316855549812317,
+      "rewards/rejected": -2.432910442352295,
+      "step": 450
+    },
+    {
+      "epoch": 0.96,
+      "grad_norm": 40.762224983725595,
+      "learning_rate": 1.9347820230782295e-09,
+      "logits/chosen": 126.6349105834961,
+      "logits/rejected": 116.65708923339844,
+      "logps/chosen": -975.12548828125,
+      "logps/rejected": -1110.140625,
+      "loss": 0.4745,
+      "rewards/accuracies": 0.78125,
+      "rewards/chosen": -1.0417004823684692,
+      "rewards/margins": 1.3975470066070557,
+      "rewards/rejected": -2.4392473697662354,
+      "step": 460
+    },
+    {
+      "epoch": 0.98,
+      "grad_norm": 47.51695448791754,
+      "learning_rate": 3.2839470889836627e-10,
+      "logits/chosen": 136.34127807617188,
+      "logits/rejected": 115.61125183105469,
+      "logps/chosen": -1022.4205322265625,
+      "logps/rejected": -1111.9298095703125,
+      "loss": 0.4773,
+      "rewards/accuracies": 0.768750011920929,
+      "rewards/chosen": -1.1639817953109741,
+      "rewards/margins": 1.1374759674072266,
+      "rewards/rejected": -2.3014578819274902,
+      "step": 470
+    },
+    {
+      "epoch": 1.0,
+      "step": 477,
+      "total_flos": 0.0,
+      "train_loss": 0.5152078939433867,
+      "train_runtime": 5321.4428,
+      "train_samples_per_second": 11.488,
+      "train_steps_per_second": 0.09
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 477,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 100,
+  "total_flos": 0.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
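`log_history` preserves the full training trace, so the evaluation table in the README can be regenerated from this file. A short sketch that pulls each evaluation record, assuming a local copy of `trainer_state.json`:

```python
# Extract the evaluation rows (eval every 100 steps) from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry["eval_rewards/margins"])
# Eval loss falls 0.5756 -> 0.4718 from step 100 to 400 while the reward margin
# grows 0.72 -> 1.30, matching the "Training results" table in the README.
```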