Upload folder using huggingface_hub
- README.md +61 -0
- all_results.json +8 -0
- config.json +27 -0
- generation_config.json +6 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +298 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
- train_results.json +8 -0
- trainer_log.jsonl +48 -0
- trainer_state.json +747 -0
- training_args.bin +3 -0
- training_loss.png +0 -0
- training_rewards_accuracies.png +0 -0
README.md
ADDED
@@ -0,0 +1,61 @@
---
library_name: transformers
license: other
base_model: edinlp/mistral-7b-v0.3-sft
tags:
- llama-factory
- full
- generated_from_trainer
model-index:
- name: mistral-dpo
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# mistral-dpo

This model is a fine-tuned version of [edinlp/mistral-7b-v0.3-sft](https://huggingface.co/edinlp/mistral-7b-v0.3-sft) on the ultrafeedback dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-07
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 8
- total_train_batch_size: 128
- total_eval_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 1.0

### Training results


### Framework versions

- Transformers 4.45.0
- Pytorch 2.4.0+cu121
- Datasets 2.21.0
- Tokenizers 0.20.0
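The generated card stops short of a usage example, so a minimal loading sketch follows (it is not part of the uploaded README). The repository id below is a placeholder for wherever this upload lives; adjust it before running.

```python
# Minimal sketch: load the uploaded DPO checkpoint with transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "edinlp/mistral-dpo"  # hypothetical path; replace with the actual repository id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)

inputs = tokenizer("Briefly explain direct preference optimization.", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```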
all_results.json
ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 0.998691442030882,
    "total_flos": 5.005717235969294e+18,
    "train_loss": 0.5631812908364542,
    "train_runtime": 18694.5367,
    "train_samples_per_second": 3.27,
    "train_steps_per_second": 0.026
}
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "_name_or_path": "edinlp/mistral-7b-v0.3-sft",
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.45.0",
  "use_cache": false,
  "vocab_size": 32768
}
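For reference, this configuration implies roughly 7.25B parameters. A small check derived only from the fields above; it also reproduces the shard total recorded later in model.safetensors.index.json:

```python
# Parameter count implied by config.json (a consistency check, not an official figure).
hidden, inter, layers, vocab = 4096, 14336, 32, 32768
kv_dim = 8 * 128                                   # num_key_value_heads * head_dim
attn = 2 * hidden * hidden + 2 * hidden * kv_dim   # q/o plus k/v projections
mlp = 3 * hidden * inter                           # gate/up/down projections
norms = 2 * hidden                                 # two RMSNorm weights per layer
total = layers * (attn + mlp + norms) + 2 * vocab * hidden + hidden  # + embeddings, lm_head, final norm
print(total, total * 2)  # 7248023552 params -> 14496047104 bytes in bfloat16,
                         # matching "total_size" in model.safetensors.index.json
```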
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.45.0"
}
model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b37d818958e849eb28c4133802214d16d3ee2faea2971291e43472e62a07028
size 4949453792
model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3a5025286b1e5740f29f36083aab39ebf3aa149017f07a986bb44b53ebf0024
size 4999819336
model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ba6205856efbab63dbd57d6dd2fc8c7904faffbfa2ff00deb9bed616c707ace4
size 4546807800
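Each .safetensors entry above is a git-lfs pointer (an oid and a byte size) rather than the weights themselves. A sketch for verifying a downloaded shard against its pointer, assuming the file sits in the working directory:

```python
# Verify one shard against the oid/size from its LFS pointer above.
import hashlib
import os

path = "model-00001-of-00003.safetensors"  # local copy of the shard (assumed path)
expected_oid = "4b37d818958e849eb28c4133802214d16d3ee2faea2971291e43472e62a07028"
expected_size = 4949453792

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(os.path.getsize(path) == expected_size and h.hexdigest() == expected_oid)
```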
model.safetensors.index.json
ADDED
@@ -0,0 +1,298 @@
{
  "metadata": {
    "total_size": 14496047104
  },
  "weight_map": {
    "lm_head.weight": "model-00003-of-00003.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.norm.weight": "model-00003-of-00003.safetensors"
  }
}
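The weight map makes it possible to pull individual tensors without materializing the full model; a sketch using the safetensors API, assuming the index and shards are available locally:

```python
# Read a single tensor from the correct shard using the weight map above.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.embed_tokens.weight"
shard = index["weight_map"][name]       # -> "model-00001-of-00003.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)                      # expected (32768, 4096) given config.json
```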
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
size 587404
tokenizer_config.json
ADDED
The diff for this file is too large to render.
train_results.json
ADDED
@@ -0,0 +1,8 @@
{
    "epoch": 0.998691442030882,
    "total_flos": 5.005717235969294e+18,
    "train_loss": 0.5631812908364542,
    "train_runtime": 18694.5367,
    "train_samples_per_second": 3.27,
    "train_steps_per_second": 0.026
}
trainer_log.jsonl
ADDED
@@ -0,0 +1,48 @@
{"current_steps": 10, "total_steps": 477, "loss": 0.692, "accuracy": 0.4281249940395355, "learning_rate": 3.333333333333333e-07, "epoch": 0.02093692750588851, "percentage": 2.1, "elapsed_time": "0:06:25", "remaining_time": "4:59:53"}
{"current_steps": 20, "total_steps": 477, "loss": 0.6728, "accuracy": 0.628125011920929, "learning_rate": 4.998555145953054e-07, "epoch": 0.04187385501177702, "percentage": 4.19, "elapsed_time": "0:12:41", "remaining_time": "4:49:50"}
{"current_steps": 30, "total_steps": 477, "loss": 0.6305, "accuracy": 0.6781250238418579, "learning_rate": 4.98700633214251e-07, "epoch": 0.06281078251766553, "percentage": 6.29, "elapsed_time": "0:19:05", "remaining_time": "4:44:28"}
{"current_steps": 40, "total_steps": 477, "loss": 0.6267, "accuracy": 0.612500011920929, "learning_rate": 4.963962085412632e-07, "epoch": 0.08374771002355404, "percentage": 8.39, "elapsed_time": "0:25:37", "remaining_time": "4:40:02"}
{"current_steps": 50, "total_steps": 477, "loss": 0.6084, "accuracy": 0.6937500238418579, "learning_rate": 4.929528920808854e-07, "epoch": 0.10468463752944256, "percentage": 10.48, "elapsed_time": "0:32:16", "remaining_time": "4:35:35"}
{"current_steps": 60, "total_steps": 477, "loss": 0.5792, "accuracy": 0.7250000238418579, "learning_rate": 4.883865995197318e-07, "epoch": 0.12562156503533106, "percentage": 12.58, "elapsed_time": "0:38:56", "remaining_time": "4:30:36"}
{"current_steps": 70, "total_steps": 477, "loss": 0.5846, "accuracy": 0.6812499761581421, "learning_rate": 4.82718437161051e-07, "epoch": 0.14655849254121958, "percentage": 14.68, "elapsed_time": "0:45:09", "remaining_time": "4:22:33"}
{"current_steps": 80, "total_steps": 477, "loss": 0.5843, "accuracy": 0.703125, "learning_rate": 4.7597460436723613e-07, "epoch": 0.16749542004710807, "percentage": 16.77, "elapsed_time": "0:51:55", "remaining_time": "4:17:41"}
{"current_steps": 90, "total_steps": 477, "loss": 0.5849, "accuracy": 0.684374988079071, "learning_rate": 4.68186272461214e-07, "epoch": 0.1884323475529966, "percentage": 18.87, "elapsed_time": "0:58:22", "remaining_time": "4:11:00"}
{"current_steps": 100, "total_steps": 477, "loss": 0.5834, "accuracy": 0.703125, "learning_rate": 4.593894406464536e-07, "epoch": 0.2093692750588851, "percentage": 20.96, "elapsed_time": "1:04:51", "remaining_time": "4:04:30"}
{"current_steps": 110, "total_steps": 477, "loss": 0.5804, "accuracy": 0.715624988079071, "learning_rate": 4.496247696115597e-07, "epoch": 0.23030620256477363, "percentage": 23.06, "elapsed_time": "1:11:26", "remaining_time": "3:58:20"}
{"current_steps": 120, "total_steps": 477, "loss": 0.5582, "accuracy": 0.721875011920929, "learning_rate": 4.3893739358856455e-07, "epoch": 0.2512431300706621, "percentage": 25.16, "elapsed_time": "1:17:51", "remaining_time": "3:51:38"}
{"current_steps": 130, "total_steps": 477, "loss": 0.5478, "accuracy": 0.737500011920929, "learning_rate": 4.273767117336217e-07, "epoch": 0.2721800575765506, "percentage": 27.25, "elapsed_time": "1:24:29", "remaining_time": "3:45:31"}
{"current_steps": 140, "total_steps": 477, "loss": 0.5548, "accuracy": 0.71875, "learning_rate": 4.1499615979437983e-07, "epoch": 0.29311698508243916, "percentage": 29.35, "elapsed_time": "1:30:25", "remaining_time": "3:37:39"}
{"current_steps": 150, "total_steps": 477, "loss": 0.5703, "accuracy": 0.721875011920929, "learning_rate": 4.018529631194369e-07, "epoch": 0.31405391258832765, "percentage": 31.45, "elapsed_time": "1:37:10", "remaining_time": "3:31:50"}
{"current_steps": 160, "total_steps": 477, "loss": 0.5392, "accuracy": 0.7437499761581421, "learning_rate": 3.8800787215151164e-07, "epoch": 0.33499084009421615, "percentage": 33.54, "elapsed_time": "1:43:51", "remaining_time": "3:25:45"}
{"current_steps": 170, "total_steps": 477, "loss": 0.5505, "accuracy": 0.6968749761581421, "learning_rate": 3.7352488162693715e-07, "epoch": 0.3559277676001047, "percentage": 35.64, "elapsed_time": "1:50:18", "remaining_time": "3:19:11"}
{"current_steps": 180, "total_steps": 477, "loss": 0.5508, "accuracy": 0.675000011920929, "learning_rate": 3.584709347793895e-07, "epoch": 0.3768646951059932, "percentage": 37.74, "elapsed_time": "1:56:47", "remaining_time": "3:12:42"}
{"current_steps": 190, "total_steps": 477, "loss": 0.5632, "accuracy": 0.699999988079071, "learning_rate": 3.4291561391508185e-07, "epoch": 0.39780162261188173, "percentage": 39.83, "elapsed_time": "2:03:06", "remaining_time": "3:05:57"}
{"current_steps": 200, "total_steps": 477, "loss": 0.5475, "accuracy": 0.75, "learning_rate": 3.2693081878964544e-07, "epoch": 0.4187385501177702, "percentage": 41.93, "elapsed_time": "2:09:32", "remaining_time": "2:59:24"}
{"current_steps": 210, "total_steps": 477, "loss": 0.533, "accuracy": 0.778124988079071, "learning_rate": 3.1059043427330314e-07, "epoch": 0.4396754776236587, "percentage": 44.03, "elapsed_time": "2:16:18", "remaining_time": "2:53:18"}
{"current_steps": 220, "total_steps": 477, "loss": 0.5389, "accuracy": 0.734375, "learning_rate": 2.9396998884045234e-07, "epoch": 0.46061240512954726, "percentage": 46.12, "elapsed_time": "2:22:43", "remaining_time": "2:46:43"}
{"current_steps": 230, "total_steps": 477, "loss": 0.5438, "accuracy": 0.7093750238418579, "learning_rate": 2.7714630546218634e-07, "epoch": 0.48154933263543576, "percentage": 48.22, "elapsed_time": "2:28:56", "remaining_time": "2:39:57"}
{"current_steps": 240, "total_steps": 477, "loss": 0.5647, "accuracy": 0.734375, "learning_rate": 2.6019714651539645e-07, "epoch": 0.5024862601413242, "percentage": 50.31, "elapsed_time": "2:35:10", "remaining_time": "2:33:14"}
{"current_steps": 250, "total_steps": 477, "loss": 0.56, "accuracy": 0.7093750238418579, "learning_rate": 2.4320085434975556e-07, "epoch": 0.5234231876472127, "percentage": 52.41, "elapsed_time": "2:42:00", "remaining_time": "2:27:05"}
{"current_steps": 260, "total_steps": 477, "loss": 0.5463, "accuracy": 0.7250000238418579, "learning_rate": 2.2623598917395436e-07, "epoch": 0.5443601151531012, "percentage": 54.51, "elapsed_time": "2:49:55", "remaining_time": "2:21:49"}
{"current_steps": 270, "total_steps": 477, "loss": 0.5256, "accuracy": 0.731249988079071, "learning_rate": 2.0938096593494853e-07, "epoch": 0.5652970426589898, "percentage": 56.6, "elapsed_time": "2:56:27", "remaining_time": "2:15:16"}
{"current_steps": 280, "total_steps": 477, "loss": 0.5551, "accuracy": 0.675000011920929, "learning_rate": 1.9271369186863618e-07, "epoch": 0.5862339701648783, "percentage": 58.7, "elapsed_time": "3:03:00", "remaining_time": "2:08:45"}
{"current_steps": 290, "total_steps": 477, "loss": 0.5278, "accuracy": 0.7718750238418579, "learning_rate": 1.763112063972739e-07, "epoch": 0.6071708976707668, "percentage": 60.8, "elapsed_time": "3:09:25", "remaining_time": "2:02:08"}
{"current_steps": 300, "total_steps": 477, "loss": 0.564, "accuracy": 0.699999988079071, "learning_rate": 1.602493250381003e-07, "epoch": 0.6281078251766553, "percentage": 62.89, "elapsed_time": "3:15:53", "remaining_time": "1:55:34"}
{"current_steps": 310, "total_steps": 477, "loss": 0.5304, "accuracy": 0.734375, "learning_rate": 1.446022889690875e-07, "epoch": 0.6490447526825438, "percentage": 64.99, "elapsed_time": "3:22:36", "remaining_time": "1:49:08"}
{"current_steps": 320, "total_steps": 477, "loss": 0.5819, "accuracy": 0.7406250238418579, "learning_rate": 1.2944242187160015e-07, "epoch": 0.6699816801884323, "percentage": 67.09, "elapsed_time": "3:28:43", "remaining_time": "1:42:24"}
{"current_steps": 330, "total_steps": 477, "loss": 0.5374, "accuracy": 0.768750011920929, "learning_rate": 1.1483979563610069e-07, "epoch": 0.6909186076943209, "percentage": 69.18, "elapsed_time": "3:35:26", "remaining_time": "1:35:58"}
{"current_steps": 340, "total_steps": 477, "loss": 0.5607, "accuracy": 0.753125011920929, "learning_rate": 1.0086190647607529e-07, "epoch": 0.7118555352002094, "percentage": 71.28, "elapsed_time": "3:41:58", "remaining_time": "1:29:26"}
{"current_steps": 350, "total_steps": 477, "loss": 0.5348, "accuracy": 0.703125, "learning_rate": 8.757336294724687e-08, "epoch": 0.7327924627060979, "percentage": 73.38, "elapsed_time": "3:48:05", "remaining_time": "1:22:45"}
{"current_steps": 360, "total_steps": 477, "loss": 0.5477, "accuracy": 0.71875, "learning_rate": 7.503558731410958e-08, "epoch": 0.7537293902119864, "percentage": 75.47, "elapsed_time": "3:54:10", "remaining_time": "1:16:06"}
{"current_steps": 370, "total_steps": 477, "loss": 0.5639, "accuracy": 0.6812499761581421, "learning_rate": 6.330653164412908e-08, "epoch": 0.7746663177178749, "percentage": 77.57, "elapsed_time": "4:00:40", "remaining_time": "1:09:35"}
{"current_steps": 380, "total_steps": 477, "loss": 0.5627, "accuracy": 0.721875011920929, "learning_rate": 5.2440409941877456e-08, "epoch": 0.7956032452237635, "percentage": 79.66, "elapsed_time": "4:06:49", "remaining_time": "1:03:00"}
{"current_steps": 390, "total_steps": 477, "loss": 0.5397, "accuracy": 0.7124999761581421, "learning_rate": 4.248744756122985e-08, "epoch": 0.816540172729652, "percentage": 81.76, "elapsed_time": "4:13:16", "remaining_time": "0:56:29"}
{"current_steps": 400, "total_steps": 477, "loss": 0.5557, "accuracy": 0.7281249761581421, "learning_rate": 3.349364905389032e-08, "epoch": 0.8374771002355405, "percentage": 83.86, "elapsed_time": "4:19:51", "remaining_time": "0:50:01"}
{"current_steps": 410, "total_steps": 477, "loss": 0.5378, "accuracy": 0.737500011920929, "learning_rate": 2.550058552729639e-08, "epoch": 0.8584140277414289, "percentage": 85.95, "elapsed_time": "4:26:20", "remaining_time": "0:43:31"}
{"current_steps": 420, "total_steps": 477, "loss": 0.5338, "accuracy": 0.7437499761581421, "learning_rate": 1.854520249477551e-08, "epoch": 0.8793509552473174, "percentage": 88.05, "elapsed_time": "4:32:55", "remaining_time": "0:37:02"}
{"current_steps": 430, "total_steps": 477, "loss": 0.5455, "accuracy": 0.737500011920929, "learning_rate": 1.265964910610884e-08, "epoch": 0.9002878827532059, "percentage": 90.15, "elapsed_time": "4:39:25", "remaining_time": "0:30:32"}
{"current_steps": 440, "total_steps": 477, "loss": 0.5408, "accuracy": 0.7093750238418579, "learning_rate": 7.871129547831062e-09, "epoch": 0.9212248102590945, "percentage": 92.24, "elapsed_time": "4:46:09", "remaining_time": "0:24:03"}
{"current_steps": 450, "total_steps": 477, "loss": 0.5495, "accuracy": 0.746874988079071, "learning_rate": 4.201777300124249e-09, "epoch": 0.942161737764983, "percentage": 94.34, "elapsed_time": "4:52:27", "remaining_time": "0:17:32"}
{"current_steps": 460, "total_steps": 477, "loss": 0.5443, "accuracy": 0.71875, "learning_rate": 1.6685528315146802e-09, "epoch": 0.9630986652708715, "percentage": 96.44, "elapsed_time": "4:58:49", "remaining_time": "0:11:02"}
{"current_steps": 470, "total_steps": 477, "loss": 0.5439, "accuracy": 0.737500011920929, "learning_rate": 2.831652042480093e-10, "epoch": 0.98403559277676, "percentage": 98.53, "elapsed_time": "5:05:23", "remaining_time": "0:04:32"}
{"current_steps": 477, "total_steps": 477, "epoch": 0.998691442030882, "percentage": 100.0, "elapsed_time": "5:11:32", "remaining_time": "0:00:00"}
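The two PNGs in this upload (training_loss.png and training_rewards_accuracies.png) correspond to the loss and accuracy columns of this log. A sketch of how similar curves can be regenerated from trainer_log.jsonl; the exact styling of the uploaded plots is unknown:

```python
# Re-plot the DPO loss and rewards/accuracies from trainer_log.jsonl.
import json
import matplotlib.pyplot as plt

records = [json.loads(line) for line in open("trainer_log.jsonl")]
logged = [r for r in records if "loss" in r]   # the final summary line has no loss/accuracy
steps = [r["current_steps"] for r in logged]
loss = [r["loss"] for r in logged]
acc = [r["accuracy"] for r in logged]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, loss)
ax1.set_xlabel("step"); ax1.set_ylabel("loss")
ax2.plot(steps, acc)
ax2.set_xlabel("step"); ax2.set_ylabel("rewards/accuracies")
fig.savefig("training_curves.png")
```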
trainer_state.json
ADDED
@@ -0,0 +1,747 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.998691442030882,
  "eval_steps": 500,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02093692750588851,
      "grad_norm": 85.02439880371094,
      "learning_rate": 3.333333333333333e-07,
      "logits/chosen": -3.096651315689087,
      "logits/rejected": -3.0814244747161865,
      "logps/chosen": -295.3846130371094,
      "logps/rejected": -279.3940124511719,
      "loss": 0.692,
      "rewards/accuracies": 0.4281249940395355,
      "rewards/chosen": 0.002148410538211465,
      "rewards/margins": 0.004025185946375132,
      "rewards/rejected": -0.0018767757574096322,
      "step": 10
    },
    {
      "epoch": 0.04187385501177702,
      "grad_norm": 74.03569030761719,
      "learning_rate": 4.998555145953054e-07,
      "logits/chosen": -3.083890199661255,
      "logits/rejected": -3.068505048751831,
      "logps/chosen": -278.1134338378906,
      "logps/rejected": -266.706298828125,
      "loss": 0.6728,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.011356602422893047,
      "rewards/margins": 0.07497048377990723,
      "rewards/rejected": -0.0636138841509819,
      "step": 20
    },
    {
      "epoch": 0.06281078251766553,
      "grad_norm": 67.47853088378906,
      "learning_rate": 4.98700633214251e-07,
      "logits/chosen": -3.0271506309509277,
      "logits/rejected": -3.0370867252349854,
      "logps/chosen": -246.0901336669922,
      "logps/rejected": -250.2740478515625,
      "loss": 0.6305,
      "rewards/accuracies": 0.6781250238418579,
      "rewards/chosen": 0.018177634105086327,
      "rewards/margins": 0.28142982721328735,
      "rewards/rejected": -0.2632521986961365,
      "step": 30
    },
    {
      "epoch": 0.08374771002355404,
      "grad_norm": 75.60296630859375,
      "learning_rate": 4.963962085412632e-07,
      "logits/chosen": -3.030393123626709,
      "logits/rejected": -3.009413242340088,
      "logps/chosen": -298.85662841796875,
      "logps/rejected": -275.070068359375,
      "loss": 0.6267,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.03324083238840103,
      "rewards/margins": 0.2483668327331543,
      "rewards/rejected": -0.28160765767097473,
      "step": 40
    },
    {
      "epoch": 0.10468463752944256,
      "grad_norm": 69.39188385009766,
      "learning_rate": 4.929528920808854e-07,
      "logits/chosen": -3.052746534347534,
      "logits/rejected": -3.066401720046997,
      "logps/chosen": -281.92706298828125,
      "logps/rejected": -246.51901245117188,
      "loss": 0.6084,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.020495222881436348,
      "rewards/margins": 0.40510186553001404,
      "rewards/rejected": -0.42559710144996643,
      "step": 50
    },
    {
      "epoch": 0.12562156503533106,
      "grad_norm": 83.05278015136719,
      "learning_rate": 4.883865995197318e-07,
      "logits/chosen": -3.035808563232422,
      "logits/rejected": -3.0392653942108154,
      "logps/chosen": -290.5362548828125,
      "logps/rejected": -272.5738830566406,
      "loss": 0.5792,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.049367621541023254,
      "rewards/margins": 0.44638770818710327,
      "rewards/rejected": -0.49575528502464294,
      "step": 60
    },
    {
      "epoch": 0.14655849254121958,
      "grad_norm": 68.99510955810547,
      "learning_rate": 4.82718437161051e-07,
      "logits/chosen": -3.0192034244537354,
      "logits/rejected": -3.006897449493408,
      "logps/chosen": -265.6653747558594,
      "logps/rejected": -260.2899169921875,
      "loss": 0.5846,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.11491725593805313,
      "rewards/margins": 0.38759148120880127,
      "rewards/rejected": -0.5025087594985962,
      "step": 70
    },
    {
      "epoch": 0.16749542004710807,
      "grad_norm": 63.006248474121094,
      "learning_rate": 4.7597460436723613e-07,
      "logits/chosen": -3.007894992828369,
      "logits/rejected": -2.984534740447998,
      "logps/chosen": -291.2572326660156,
      "logps/rejected": -261.5260009765625,
      "loss": 0.5843,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.1174750104546547,
      "rewards/margins": 0.4169933795928955,
      "rewards/rejected": -0.5344683527946472,
      "step": 80
    },
    {
      "epoch": 0.1884323475529966,
      "grad_norm": 69.54000854492188,
      "learning_rate": 4.68186272461214e-07,
      "logits/chosen": -3.0481808185577393,
      "logits/rejected": -3.036348819732666,
      "logps/chosen": -273.8735656738281,
      "logps/rejected": -258.81866455078125,
      "loss": 0.5849,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.09798178821802139,
      "rewards/margins": 0.40805816650390625,
      "rewards/rejected": -0.5060399770736694,
      "step": 90
    },
    {
      "epoch": 0.2093692750588851,
      "grad_norm": 75.06998443603516,
      "learning_rate": 4.593894406464536e-07,
      "logits/chosen": -3.038364887237549,
      "logits/rejected": -3.0354368686676025,
      "logps/chosen": -296.1470031738281,
      "logps/rejected": -286.38592529296875,
      "loss": 0.5834,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.08966656774282455,
      "rewards/margins": 0.5078560709953308,
      "rewards/rejected": -0.5975226759910583,
      "step": 100
    },
    {
      "epoch": 0.23030620256477363,
      "grad_norm": 137.9207305908203,
      "learning_rate": 4.496247696115597e-07,
      "logits/chosen": -3.039151191711426,
      "logits/rejected": -3.0391647815704346,
      "logps/chosen": -303.8061828613281,
      "logps/rejected": -295.7118225097656,
      "loss": 0.5804,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.07505225390195847,
      "rewards/margins": 0.6039966344833374,
      "rewards/rejected": -0.6790488958358765,
      "step": 110
    },
    {
      "epoch": 0.2512431300706621,
      "grad_norm": 77.84745788574219,
      "learning_rate": 4.3893739358856455e-07,
      "logits/chosen": -3.008737087249756,
      "logits/rejected": -2.9903557300567627,
      "logps/chosen": -305.4298095703125,
      "logps/rejected": -278.39947509765625,
      "loss": 0.5582,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": -0.14889295399188995,
      "rewards/margins": 0.5994052886962891,
      "rewards/rejected": -0.7482982277870178,
      "step": 120
    },
    {
      "epoch": 0.2721800575765506,
      "grad_norm": 67.5359115600586,
      "learning_rate": 4.273767117336217e-07,
      "logits/chosen": -3.0301320552825928,
      "logits/rejected": -3.012173891067505,
      "logps/chosen": -308.94891357421875,
      "logps/rejected": -295.3975524902344,
      "loss": 0.5478,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.14121344685554504,
      "rewards/margins": 0.6831844449043274,
      "rewards/rejected": -0.82439786195755,
      "step": 130
    },
    {
      "epoch": 0.29311698508243916,
      "grad_norm": 70.47966766357422,
      "learning_rate": 4.1499615979437983e-07,
      "logits/chosen": -2.9864563941955566,
      "logits/rejected": -2.9899039268493652,
      "logps/chosen": -279.08477783203125,
      "logps/rejected": -257.7115173339844,
      "loss": 0.5548,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.09717626124620438,
      "rewards/margins": 0.624592661857605,
      "rewards/rejected": -0.7217689752578735,
      "step": 140
    },
    {
      "epoch": 0.31405391258832765,
      "grad_norm": 90.4140396118164,
      "learning_rate": 4.018529631194369e-07,
      "logits/chosen": -2.9848761558532715,
      "logits/rejected": -2.9709620475769043,
      "logps/chosen": -281.3067932128906,
      "logps/rejected": -271.0277099609375,
      "loss": 0.5703,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": -0.2502523362636566,
      "rewards/margins": 0.6211402416229248,
      "rewards/rejected": -0.871392548084259,
      "step": 150
    },
    {
      "epoch": 0.33499084009421615,
      "grad_norm": 68.7781753540039,
      "learning_rate": 3.8800787215151164e-07,
      "logits/chosen": -3.032036066055298,
      "logits/rejected": -3.009941339492798,
      "logps/chosen": -321.748779296875,
      "logps/rejected": -281.04107666015625,
      "loss": 0.5392,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.12777641415596008,
      "rewards/margins": 0.6283574104309082,
      "rewards/rejected": -0.7561337947845459,
      "step": 160
    },
    {
      "epoch": 0.3559277676001047,
      "grad_norm": 66.1634292602539,
      "learning_rate": 3.7352488162693715e-07,
      "logits/chosen": -3.0462286472320557,
      "logits/rejected": -3.030794620513916,
      "logps/chosen": -274.5036926269531,
      "logps/rejected": -251.90499877929688,
      "loss": 0.5505,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.12955203652381897,
      "rewards/margins": 0.6119082570075989,
      "rewards/rejected": -0.7414603233337402,
      "step": 170
    },
    {
      "epoch": 0.3768646951059932,
      "grad_norm": 75.37867736816406,
      "learning_rate": 3.584709347793895e-07,
      "logits/chosen": -3.058922052383423,
      "logits/rejected": -3.0691912174224854,
      "logps/chosen": -301.69635009765625,
      "logps/rejected": -248.55593872070312,
      "loss": 0.5508,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.2145983725786209,
      "rewards/margins": 0.5311049222946167,
      "rewards/rejected": -0.7457033395767212,
      "step": 180
    },
    {
      "epoch": 0.39780162261188173,
      "grad_norm": 75.07308959960938,
      "learning_rate": 3.4291561391508185e-07,
      "logits/chosen": -3.0233283042907715,
      "logits/rejected": -3.0086400508880615,
      "logps/chosen": -278.5184326171875,
      "logps/rejected": -270.7456970214844,
      "loss": 0.5632,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.28689366579055786,
      "rewards/margins": 0.6087759733200073,
      "rewards/rejected": -0.8956696391105652,
      "step": 190
    },
    {
      "epoch": 0.4187385501177702,
      "grad_norm": 71.18640899658203,
      "learning_rate": 3.2693081878964544e-07,
      "logits/chosen": -3.0013060569763184,
      "logits/rejected": -3.005615472793579,
      "logps/chosen": -292.04852294921875,
      "logps/rejected": -276.50811767578125,
      "loss": 0.5475,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.20541390776634216,
      "rewards/margins": 0.6916528940200806,
      "rewards/rejected": -0.8970667719841003,
      "step": 200
|
310 |
+
},
|
311 |
+
{
|
312 |
+
"epoch": 0.4396754776236587,
|
313 |
+
"grad_norm": 85.28279113769531,
|
314 |
+
"learning_rate": 3.1059043427330314e-07,
|
315 |
+
"logits/chosen": -2.9617443084716797,
|
316 |
+
"logits/rejected": -2.9682388305664062,
|
317 |
+
"logps/chosen": -261.1861572265625,
|
318 |
+
"logps/rejected": -263.7696838378906,
|
319 |
+
"loss": 0.533,
|
320 |
+
"rewards/accuracies": 0.778124988079071,
|
321 |
+
"rewards/chosen": -0.204990416765213,
|
322 |
+
"rewards/margins": 0.7386445999145508,
|
323 |
+
"rewards/rejected": -0.9436351656913757,
|
324 |
+
"step": 210
|
325 |
+
},
|
326 |
+
{
|
327 |
+
"epoch": 0.46061240512954726,
|
328 |
+
"grad_norm": 70.95091247558594,
|
329 |
+
"learning_rate": 2.9396998884045234e-07,
|
330 |
+
"logits/chosen": -3.0342681407928467,
|
331 |
+
"logits/rejected": -3.040320873260498,
|
332 |
+
"logps/chosen": -300.98077392578125,
|
333 |
+
"logps/rejected": -272.7954406738281,
|
334 |
+
"loss": 0.5389,
|
335 |
+
"rewards/accuracies": 0.734375,
|
336 |
+
"rewards/chosen": -0.25737327337265015,
|
337 |
+
"rewards/margins": 0.695563018321991,
|
338 |
+
"rewards/rejected": -0.9529362916946411,
|
339 |
+
"step": 220
|
340 |
+
},
|
341 |
+
{
|
342 |
+
"epoch": 0.48154933263543576,
|
343 |
+
"grad_norm": 64.26698303222656,
|
344 |
+
"learning_rate": 2.7714630546218634e-07,
|
345 |
+
"logits/chosen": -3.1135382652282715,
|
346 |
+
"logits/rejected": -3.1126351356506348,
|
347 |
+
"logps/chosen": -326.8101806640625,
|
348 |
+
"logps/rejected": -296.044921875,
|
349 |
+
"loss": 0.5438,
|
350 |
+
"rewards/accuracies": 0.7093750238418579,
|
351 |
+
"rewards/chosen": -0.2218112051486969,
|
352 |
+
"rewards/margins": 0.7040417790412903,
|
353 |
+
"rewards/rejected": -0.9258529543876648,
|
354 |
+
"step": 230
|
355 |
+
},
|
356 |
+
{
|
357 |
+
"epoch": 0.5024862601413242,
|
358 |
+
"grad_norm": 85.34664154052734,
|
359 |
+
"learning_rate": 2.6019714651539645e-07,
|
360 |
+
"logits/chosen": -3.0325405597686768,
|
361 |
+
"logits/rejected": -3.017796516418457,
|
362 |
+
"logps/chosen": -297.9241638183594,
|
363 |
+
"logps/rejected": -286.4637756347656,
|
364 |
+
"loss": 0.5647,
|
365 |
+
"rewards/accuracies": 0.734375,
|
366 |
+
"rewards/chosen": -0.274208128452301,
|
367 |
+
"rewards/margins": 0.7521761655807495,
|
368 |
+
"rewards/rejected": -1.0263843536376953,
|
369 |
+
"step": 240
|
370 |
+
},
|
371 |
+
{
|
372 |
+
"epoch": 0.5234231876472127,
|
373 |
+
"grad_norm": 70.70326232910156,
|
374 |
+
"learning_rate": 2.4320085434975556e-07,
|
375 |
+
"logits/chosen": -3.0199804306030273,
|
376 |
+
"logits/rejected": -3.01350736618042,
|
377 |
+
"logps/chosen": -284.5586853027344,
|
378 |
+
"logps/rejected": -259.7466125488281,
|
379 |
+
"loss": 0.56,
|
380 |
+
"rewards/accuracies": 0.7093750238418579,
|
381 |
+
"rewards/chosen": -0.2730976641178131,
|
382 |
+
"rewards/margins": 0.7632043957710266,
|
383 |
+
"rewards/rejected": -1.036302089691162,
|
384 |
+
"step": 250
|
385 |
+
},
|
386 |
+
{
|
387 |
+
"epoch": 0.5443601151531012,
|
388 |
+
"grad_norm": 63.27799606323242,
|
389 |
+
"learning_rate": 2.2623598917395436e-07,
|
390 |
+
"logits/chosen": -2.9862048625946045,
|
391 |
+
"logits/rejected": -3.020139217376709,
|
392 |
+
"logps/chosen": -296.0469665527344,
|
393 |
+
"logps/rejected": -276.1849365234375,
|
394 |
+
"loss": 0.5463,
|
395 |
+
"rewards/accuracies": 0.7250000238418579,
|
396 |
+
"rewards/chosen": -0.23287267982959747,
|
397 |
+
"rewards/margins": 0.7090679407119751,
|
398 |
+
"rewards/rejected": -0.9419406652450562,
|
399 |
+
"step": 260
|
400 |
+
},
|
401 |
+
{
|
402 |
+
"epoch": 0.5652970426589898,
|
403 |
+
"grad_norm": 66.7594223022461,
|
404 |
+
"learning_rate": 2.0938096593494853e-07,
|
405 |
+
"logits/chosen": -3.041605234146118,
|
406 |
+
"logits/rejected": -3.052452325820923,
|
407 |
+
"logps/chosen": -286.18707275390625,
|
408 |
+
"logps/rejected": -260.3746032714844,
|
409 |
+
"loss": 0.5256,
|
410 |
+
"rewards/accuracies": 0.731249988079071,
|
411 |
+
"rewards/chosen": -0.11122454702854156,
|
412 |
+
"rewards/margins": 0.802563488483429,
|
413 |
+
"rewards/rejected": -0.9137881398200989,
|
414 |
+
"step": 270
|
415 |
+
},
|
416 |
+
{
|
417 |
+
"epoch": 0.5862339701648783,
|
418 |
+
"grad_norm": 88.30416107177734,
|
419 |
+
"learning_rate": 1.9271369186863618e-07,
|
420 |
+
"logits/chosen": -3.0525062084198,
|
421 |
+
"logits/rejected": -3.0589468479156494,
|
422 |
+
"logps/chosen": -284.6452941894531,
|
423 |
+
"logps/rejected": -277.75067138671875,
|
424 |
+
"loss": 0.5551,
|
425 |
+
"rewards/accuracies": 0.675000011920929,
|
426 |
+
"rewards/chosen": -0.22388038039207458,
|
427 |
+
"rewards/margins": 0.6198626756668091,
|
428 |
+
"rewards/rejected": -0.8437430262565613,
|
429 |
+
"step": 280
|
430 |
+
},
|
431 |
+
{
|
432 |
+
"epoch": 0.6071708976707668,
|
433 |
+
"grad_norm": 65.08110809326172,
|
434 |
+
"learning_rate": 1.763112063972739e-07,
|
435 |
+
"logits/chosen": -3.044279098510742,
|
436 |
+
"logits/rejected": -3.0555179119110107,
|
437 |
+
"logps/chosen": -285.0969543457031,
|
438 |
+
"logps/rejected": -259.02142333984375,
|
439 |
+
"loss": 0.5278,
|
440 |
+
"rewards/accuracies": 0.7718750238418579,
|
441 |
+
"rewards/chosen": -0.16408179700374603,
|
442 |
+
"rewards/margins": 0.8104633092880249,
|
443 |
+
"rewards/rejected": -0.9745450019836426,
|
444 |
+
"step": 290
|
445 |
+
},
|
446 |
+
{
|
447 |
+
"epoch": 0.6281078251766553,
|
448 |
+
"grad_norm": 87.96784210205078,
|
449 |
+
"learning_rate": 1.602493250381003e-07,
|
450 |
+
"logits/chosen": -3.0667061805725098,
|
451 |
+
"logits/rejected": -3.064436435699463,
|
452 |
+
"logps/chosen": -287.88372802734375,
|
453 |
+
"logps/rejected": -248.08615112304688,
|
454 |
+
"loss": 0.564,
|
455 |
+
"rewards/accuracies": 0.699999988079071,
|
456 |
+
"rewards/chosen": -0.27062320709228516,
|
457 |
+
"rewards/margins": 0.6274420022964478,
|
458 |
+
"rewards/rejected": -0.8980652093887329,
|
459 |
+
"step": 300
|
460 |
+
},
|
461 |
+
{
|
462 |
+
"epoch": 0.6490447526825438,
|
463 |
+
"grad_norm": 67.1192398071289,
|
464 |
+
"learning_rate": 1.446022889690875e-07,
|
465 |
+
"logits/chosen": -3.0603392124176025,
|
466 |
+
"logits/rejected": -3.0506479740142822,
|
467 |
+
"logps/chosen": -275.33941650390625,
|
468 |
+
"logps/rejected": -292.2793884277344,
|
469 |
+
"loss": 0.5304,
|
470 |
+
"rewards/accuracies": 0.734375,
|
471 |
+
"rewards/chosen": -0.27399036288261414,
|
472 |
+
"rewards/margins": 0.7174574136734009,
|
473 |
+
"rewards/rejected": -0.9914478063583374,
|
474 |
+
"step": 310
|
475 |
+
},
|
476 |
+
{
|
477 |
+
"epoch": 0.6699816801884323,
|
478 |
+
"grad_norm": 68.73091125488281,
|
479 |
+
"learning_rate": 1.2944242187160015e-07,
|
480 |
+
"logits/chosen": -3.0304224491119385,
|
481 |
+
"logits/rejected": -3.0630006790161133,
|
482 |
+
"logps/chosen": -265.5944519042969,
|
483 |
+
"logps/rejected": -270.86041259765625,
|
484 |
+
"loss": 0.5819,
|
485 |
+
"rewards/accuracies": 0.7406250238418579,
|
486 |
+
"rewards/chosen": -0.20603282749652863,
|
487 |
+
"rewards/margins": 0.8553716540336609,
|
488 |
+
"rewards/rejected": -1.0614043474197388,
|
489 |
+
"step": 320
|
490 |
+
},
|
491 |
+
{
|
492 |
+
"epoch": 0.6909186076943209,
|
493 |
+
"grad_norm": 78.73789978027344,
|
494 |
+
"learning_rate": 1.1483979563610069e-07,
|
495 |
+
"logits/chosen": -3.044661045074463,
|
496 |
+
"logits/rejected": -3.035492181777954,
|
497 |
+
"logps/chosen": -274.28204345703125,
|
498 |
+
"logps/rejected": -274.99151611328125,
|
499 |
+
"loss": 0.5374,
|
500 |
+
"rewards/accuracies": 0.768750011920929,
|
501 |
+
"rewards/chosen": -0.1646738052368164,
|
502 |
+
"rewards/margins": 0.8839667439460754,
|
503 |
+
"rewards/rejected": -1.048640489578247,
|
504 |
+
"step": 330
|
505 |
+
},
|
506 |
+
{
|
507 |
+
"epoch": 0.7118555352002094,
|
508 |
+
"grad_norm": 70.24629211425781,
|
509 |
+
"learning_rate": 1.0086190647607529e-07,
|
510 |
+
"logits/chosen": -3.0631115436553955,
|
511 |
+
"logits/rejected": -3.089351177215576,
|
512 |
+
"logps/chosen": -287.9900817871094,
|
513 |
+
"logps/rejected": -272.482421875,
|
514 |
+
"loss": 0.5607,
|
515 |
+
"rewards/accuracies": 0.753125011920929,
|
516 |
+
"rewards/chosen": -0.11857350915670395,
|
517 |
+
"rewards/margins": 0.8544532060623169,
|
518 |
+
"rewards/rejected": -0.9730268716812134,
|
519 |
+
"step": 340
|
520 |
+
},
|
521 |
+
{
|
522 |
+
"epoch": 0.7327924627060979,
|
523 |
+
"grad_norm": 96.91629791259766,
|
524 |
+
"learning_rate": 8.757336294724687e-08,
|
525 |
+
"logits/chosen": -3.068084239959717,
|
526 |
+
"logits/rejected": -3.0875658988952637,
|
527 |
+
"logps/chosen": -291.7541198730469,
|
528 |
+
"logps/rejected": -258.79132080078125,
|
529 |
+
"loss": 0.5348,
|
530 |
+
"rewards/accuracies": 0.703125,
|
531 |
+
"rewards/chosen": -0.15175102651119232,
|
532 |
+
"rewards/margins": 0.8772052526473999,
|
533 |
+
"rewards/rejected": -1.028956413269043,
|
534 |
+
"step": 350
|
535 |
+
},
|
536 |
+
{
|
537 |
+
"epoch": 0.7537293902119864,
|
538 |
+
"grad_norm": 69.54812622070312,
|
539 |
+
"learning_rate": 7.503558731410958e-08,
|
540 |
+
"logits/chosen": -3.07660174369812,
|
541 |
+
"logits/rejected": -3.0733513832092285,
|
542 |
+
"logps/chosen": -252.8855438232422,
|
543 |
+
"logps/rejected": -264.5438232421875,
|
544 |
+
"loss": 0.5477,
|
545 |
+
"rewards/accuracies": 0.71875,
|
546 |
+
"rewards/chosen": -0.31728893518447876,
|
547 |
+
"rewards/margins": 0.6826174259185791,
|
548 |
+
"rewards/rejected": -0.9999063611030579,
|
549 |
+
"step": 360
|
550 |
+
},
|
551 |
+
{
|
552 |
+
"epoch": 0.7746663177178749,
|
553 |
+
"grad_norm": 68.41463470458984,
|
554 |
+
"learning_rate": 6.330653164412908e-08,
|
555 |
+
"logits/chosen": -3.0837528705596924,
|
556 |
+
"logits/rejected": -3.074859619140625,
|
557 |
+
"logps/chosen": -292.6845703125,
|
558 |
+
"logps/rejected": -274.19189453125,
|
559 |
+
"loss": 0.5639,
|
560 |
+
"rewards/accuracies": 0.6812499761581421,
|
561 |
+
"rewards/chosen": -0.18258486688137054,
|
562 |
+
"rewards/margins": 0.7360013723373413,
|
563 |
+
"rewards/rejected": -0.9185863733291626,
|
564 |
+
"step": 370
|
565 |
+
},
|
566 |
+
{
|
567 |
+
"epoch": 0.7956032452237635,
|
568 |
+
"grad_norm": 73.8513412475586,
|
569 |
+
"learning_rate": 5.2440409941877456e-08,
|
570 |
+
"logits/chosen": -3.080451250076294,
|
571 |
+
"logits/rejected": -3.1014645099639893,
|
572 |
+
"logps/chosen": -282.2720642089844,
|
573 |
+
"logps/rejected": -274.5783996582031,
|
574 |
+
"loss": 0.5627,
|
575 |
+
"rewards/accuracies": 0.721875011920929,
|
576 |
+
"rewards/chosen": -0.17349520325660706,
|
577 |
+
"rewards/margins": 0.7617751359939575,
|
578 |
+
"rewards/rejected": -0.9352704286575317,
|
579 |
+
"step": 380
|
580 |
+
},
|
581 |
+
{
|
582 |
+
"epoch": 0.816540172729652,
|
583 |
+
"grad_norm": 62.425689697265625,
|
584 |
+
"learning_rate": 4.248744756122985e-08,
|
585 |
+
"logits/chosen": -3.1146225929260254,
|
586 |
+
"logits/rejected": -3.1159985065460205,
|
587 |
+
"logps/chosen": -284.4311828613281,
|
588 |
+
"logps/rejected": -270.375244140625,
|
589 |
+
"loss": 0.5397,
|
590 |
+
"rewards/accuracies": 0.7124999761581421,
|
591 |
+
"rewards/chosen": -0.1737302988767624,
|
592 |
+
"rewards/margins": 0.7495090365409851,
|
593 |
+
"rewards/rejected": -0.9232394099235535,
|
594 |
+
"step": 390
|
595 |
+
},
|
596 |
+
{
|
597 |
+
"epoch": 0.8374771002355405,
|
598 |
+
"grad_norm": 67.75579833984375,
|
599 |
+
"learning_rate": 3.349364905389032e-08,
|
600 |
+
"logits/chosen": -3.039133071899414,
|
601 |
+
"logits/rejected": -3.0417704582214355,
|
602 |
+
"logps/chosen": -289.43792724609375,
|
603 |
+
"logps/rejected": -279.08123779296875,
|
604 |
+
"loss": 0.5557,
|
605 |
+
"rewards/accuracies": 0.7281249761581421,
|
606 |
+
"rewards/chosen": -0.19276252388954163,
|
607 |
+
"rewards/margins": 0.713485062122345,
|
608 |
+
"rewards/rejected": -0.906247615814209,
|
609 |
+
"step": 400
|
610 |
+
},
|
611 |
+
{
|
612 |
+
"epoch": 0.8584140277414289,
|
613 |
+
"grad_norm": 60.96617126464844,
|
614 |
+
"learning_rate": 2.550058552729639e-08,
|
615 |
+
"logits/chosen": -3.0589489936828613,
|
616 |
+
"logits/rejected": -3.0491528511047363,
|
617 |
+
"logps/chosen": -298.5786437988281,
|
618 |
+
"logps/rejected": -275.2989807128906,
|
619 |
+
"loss": 0.5378,
|
620 |
+
"rewards/accuracies": 0.737500011920929,
|
621 |
+
"rewards/chosen": -0.22146447002887726,
|
622 |
+
"rewards/margins": 0.7704640626907349,
|
623 |
+
"rewards/rejected": -0.9919285774230957,
|
624 |
+
"step": 410
|
625 |
+
},
|
626 |
+
{
|
627 |
+
"epoch": 0.8793509552473174,
|
628 |
+
"grad_norm": 57.156639099121094,
|
629 |
+
"learning_rate": 1.854520249477551e-08,
|
630 |
+
"logits/chosen": -3.0775399208068848,
|
631 |
+
"logits/rejected": -3.0917420387268066,
|
632 |
+
"logps/chosen": -281.49053955078125,
|
633 |
+
"logps/rejected": -252.451416015625,
|
634 |
+
"loss": 0.5338,
|
635 |
+
"rewards/accuracies": 0.7437499761581421,
|
636 |
+
"rewards/chosen": -0.14828899502754211,
|
637 |
+
"rewards/margins": 0.7465869188308716,
|
638 |
+
"rewards/rejected": -0.8948760032653809,
|
639 |
+
"step": 420
|
640 |
+
},
|
641 |
+
{
|
642 |
+
"epoch": 0.9002878827532059,
|
643 |
+
"grad_norm": 80.24808502197266,
|
644 |
+
"learning_rate": 1.265964910610884e-08,
|
645 |
+
"logits/chosen": -3.1026782989501953,
|
646 |
+
"logits/rejected": -3.111166477203369,
|
647 |
+
"logps/chosen": -285.04193115234375,
|
648 |
+
"logps/rejected": -284.14410400390625,
|
649 |
+
"loss": 0.5455,
|
650 |
+
"rewards/accuracies": 0.737500011920929,
|
651 |
+
"rewards/chosen": -0.1942686289548874,
|
652 |
+
"rewards/margins": 0.8707529306411743,
|
653 |
+
"rewards/rejected": -1.0650215148925781,
|
654 |
+
"step": 430
|
655 |
+
},
|
656 |
+
{
|
657 |
+
"epoch": 0.9212248102590945,
|
658 |
+
"grad_norm": 61.17852020263672,
|
659 |
+
"learning_rate": 7.871129547831062e-09,
|
660 |
+
"logits/chosen": -3.0820913314819336,
|
661 |
+
"logits/rejected": -3.0653717517852783,
|
662 |
+
"logps/chosen": -278.7796325683594,
|
663 |
+
"logps/rejected": -235.0684814453125,
|
664 |
+
"loss": 0.5408,
|
665 |
+
"rewards/accuracies": 0.7093750238418579,
|
666 |
+
"rewards/chosen": -0.23389343917369843,
|
667 |
+
"rewards/margins": 0.6883670091629028,
|
668 |
+
"rewards/rejected": -0.9222604632377625,
|
669 |
+
"step": 440
|
670 |
+
},
|
671 |
+
{
|
672 |
+
"epoch": 0.942161737764983,
|
673 |
+
"grad_norm": 85.3263168334961,
|
674 |
+
"learning_rate": 4.201777300124249e-09,
|
675 |
+
"logits/chosen": -3.0574049949645996,
|
676 |
+
"logits/rejected": -3.0575528144836426,
|
677 |
+
"logps/chosen": -273.01531982421875,
|
678 |
+
"logps/rejected": -243.1544189453125,
|
679 |
+
"loss": 0.5495,
|
680 |
+
"rewards/accuracies": 0.746874988079071,
|
681 |
+
"rewards/chosen": -0.13399073481559753,
|
682 |
+
"rewards/margins": 0.6954258680343628,
|
683 |
+
"rewards/rejected": -0.8294164538383484,
|
684 |
+
"step": 450
|
685 |
+
},
|
686 |
+
{
|
687 |
+
"epoch": 0.9630986652708715,
|
688 |
+
"grad_norm": 67.3755874633789,
|
689 |
+
"learning_rate": 1.6685528315146802e-09,
|
690 |
+
"logits/chosen": -3.0953588485717773,
|
691 |
+
"logits/rejected": -3.0970802307128906,
|
692 |
+
"logps/chosen": -282.9346618652344,
|
693 |
+
"logps/rejected": -261.16497802734375,
|
694 |
+
"loss": 0.5443,
|
695 |
+
"rewards/accuracies": 0.71875,
|
696 |
+
"rewards/chosen": -0.24288193881511688,
|
697 |
+
"rewards/margins": 0.7198012471199036,
|
698 |
+
"rewards/rejected": -0.9626832008361816,
|
699 |
+
"step": 460
|
700 |
+
},
|
701 |
+
{
|
702 |
+
"epoch": 0.98403559277676,
|
703 |
+
"grad_norm": 61.79122543334961,
|
704 |
+
"learning_rate": 2.831652042480093e-10,
|
705 |
+
"logits/chosen": -3.086475372314453,
|
706 |
+
"logits/rejected": -3.0854830741882324,
|
707 |
+
"logps/chosen": -301.7154235839844,
|
708 |
+
"logps/rejected": -291.1816101074219,
|
709 |
+
"loss": 0.5439,
|
710 |
+
"rewards/accuracies": 0.737500011920929,
|
711 |
+
"rewards/chosen": -0.232115238904953,
|
712 |
+
"rewards/margins": 0.7247028350830078,
|
713 |
+
"rewards/rejected": -0.9568179845809937,
|
714 |
+
"step": 470
|
715 |
+
},
|
716 |
+
{
|
717 |
+
"epoch": 0.998691442030882,
|
718 |
+
"step": 477,
|
719 |
+
"total_flos": 5.005717235969294e+18,
|
720 |
+
"train_loss": 0.5631812908364542,
|
721 |
+
"train_runtime": 18694.5367,
|
722 |
+
"train_samples_per_second": 3.27,
|
723 |
+
"train_steps_per_second": 0.026
|
724 |
+
}
|
725 |
+
],
|
726 |
+
"logging_steps": 10,
|
727 |
+
"max_steps": 477,
|
728 |
+
"num_input_tokens_seen": 0,
|
729 |
+
"num_train_epochs": 1,
|
730 |
+
"save_steps": 256,
|
731 |
+
"stateful_callbacks": {
|
732 |
+
"TrainerControl": {
|
733 |
+
"args": {
|
734 |
+
"should_epoch_stop": false,
|
735 |
+
"should_evaluate": false,
|
736 |
+
"should_log": false,
|
737 |
+
"should_save": true,
|
738 |
+
"should_training_stop": true
|
739 |
+
},
|
740 |
+
"attributes": {}
|
741 |
+
}
|
742 |
+
},
|
743 |
+
"total_flos": 5.005717235969294e+18,
|
744 |
+
"train_batch_size": 4,
|
745 |
+
"trial_name": null,
|
746 |
+
"trial_params": null
|
747 |
+
}
|
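The log entries above record the DPO training metrics every 10 steps: loss, grad_norm, learning_rate, the policy log-probabilities and logits for chosen and rejected responses, and the implicit rewards. In each entry, rewards/margins is simply rewards/chosen minus rewards/rejected (at step 90, for example, -0.098 - (-0.506) ≈ 0.408), and the two PNG files in this commit appear to be plots of the loss and rewards/accuracies columns. As a minimal sketch of how those curves can be reproduced from this file (assuming the standard trainer_state.json layout with a top-level "log_history" list; file paths are illustrative only):

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Trainer (path is illustrative).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the final record holds summary
# statistics such as train_runtime and has no "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in logs]

fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))

ax_loss.plot(steps, [entry["loss"] for entry in logs])
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("DPO loss")

ax_acc.plot(steps, [entry["rewards/accuracies"] for entry in logs])
ax_acc.set_xlabel("step")
ax_acc.set_ylabel("rewards/accuracies")

fig.tight_layout()
fig.savefig("training_curves.png")  # output name is illustrative
```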
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8de8cb747fa534b66c18b4a78fd78d28754a3eac6ab3db99190fff463cb838a
size 6648
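training_args.bin is tracked through Git LFS, so the diff only shows the pointer above; the file itself is the pickled training-arguments object that the transformers Trainer saves alongside checkpoints. A small sketch of inspecting it, assuming it was produced that way (only unpickle files you trust, since this requires full pickle loading):

```python
import torch

# The file is a pickled TrainingArguments-like object, not a state dict,
# so it needs non-weights-only loading; transformers must be installed
# for the class to be unpickled.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)
print(args.learning_rate, args.per_device_train_batch_size)
```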
training_loss.png
ADDED
training_rewards_accuracies.png
ADDED