Model save
README.md
ADDED
@@ -0,0 +1,69 @@
---
library_name: peft
license: llama2
base_model: meta-llama/Llama-2-13b-hf
tags:
- trl
- sft
- generated_from_trainer
datasets:
- generator
model-index:
- name: llama2-13b-lora-alpaca-11-v1
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# llama2-13b-lora-alpaca-11-v1

This model is a fine-tuned version of [meta-llama/Llama-2-13b-hf](https://huggingface.co/meta-llama/Llama-2-13b-hf) on the generator dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4780

## Model description

More information needed

## Intended uses & limitations

More information needed
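This repository stores a PEFT LoRA adapter for `meta-llama/Llama-2-13b-hf`, not full model weights. A minimal usage sketch follows; the adapter repository id is a placeholder, access to the gated base model is assumed, and the Alpaca-style prompt format is only an assumption based on the model name.

```python
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM

# Placeholder id for this adapter; replace with the actual Hub path or local directory.
adapter_id = "<your-namespace>/llama2-13b-lora-alpaca-11-v1"

# AutoPeftModelForCausalLM reads the adapter config, loads the base model
# (meta-llama/Llama-2-13b-hf) and attaches the LoRA weights on top of it.
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-hf")

# Assumed Alpaca-style prompt format (not confirmed by this card).
prompt = "### Instruction:\nSummarize what a LoRA adapter does.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```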
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a configuration sketch mirroring them follows the list):
- learning_rate: 0.0002
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 2
- total_train_batch_size: 256
- total_eval_batch_size: 128
- optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1
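As a rough sketch only, the hyperparameters above map onto `transformers.TrainingArguments` roughly as follows; the output path is a placeholder, and the surrounding trainer wiring (TRL `SFTTrainer`, LoRA config, dataset loading) is not reproduced here.

```python
from transformers import TrainingArguments

# Hedged sketch mirroring the listed hyperparameters; not the exact training script.
training_args = TrainingArguments(
    output_dir="llama2-13b-lora-alpaca-11-v1",  # placeholder output path
    learning_rate=2e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=2,   # 16 per device x 8 GPUs x 2 steps = 256 effective batch
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    optim="adamw_torch",             # AdamW with default betas=(0.9, 0.999), eps=1e-8
    seed=42,
)
```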
### Training results

| Training Loss | Epoch  | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 1.467         | 0.9938 | 80   | 1.4780          |


### Framework versions

- PEFT 0.13.2
- Transformers 4.46.2
- Pytorch 2.5.1+cu124
- Datasets 3.1.0
- Tokenizers 0.20.3
all_results.json
ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 0.9937888198757764,
    "total_flos": 8.089924109389005e+17,
    "train_loss": 2.0078666627407076,
    "train_runtime": 559.3432,
    "train_samples": 51241,
    "train_samples_per_second": 36.804,
    "train_steps_per_second": 0.143
}
runs/Nov22_14-21-04_metallic-vm-falls-fin-02/events.out.tfevents.1732285433.metallic-vm-falls-fin-02.44068.0
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a21ca21684994976127bc97d97232074b15467df1c1f5d98ca59cd6be07fc726
+size 9717
train_results.json
ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 0.9937888198757764,
    "total_flos": 8.089924109389005e+17,
    "train_loss": 2.0078666627407076,
    "train_runtime": 559.3432,
    "train_samples": 51241,
    "train_samples_per_second": 36.804,
    "train_steps_per_second": 0.143
}
trainer_state.json
ADDED
@@ -0,0 +1,169 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9937888198757764,
  "eval_steps": 500,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012422360248447204,
      "grad_norm": 1.3089314699172974,
      "learning_rate": 2.5e-05,
      "loss": 3.7777,
      "step": 1
    },
    {
      "epoch": 0.062111801242236024,
      "grad_norm": 2.2394518852233887,
      "learning_rate": 0.000125,
      "loss": 3.9074,
      "step": 5
    },
    {
      "epoch": 0.12422360248447205,
      "grad_norm": 2.3361098766326904,
      "learning_rate": 0.00019961946980917456,
      "loss": 3.5292,
      "step": 10
    },
    {
      "epoch": 0.18633540372670807,
      "grad_norm": 2.7219650745391846,
      "learning_rate": 0.0001953716950748227,
      "loss": 2.9255,
      "step": 15
    },
    {
      "epoch": 0.2484472049689441,
      "grad_norm": 2.1357812881469727,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.5287,
      "step": 20
    },
    {
      "epoch": 0.3105590062111801,
      "grad_norm": 1.425875186920166,
      "learning_rate": 0.0001737277336810124,
      "loss": 2.121,
      "step": 25
    },
    {
      "epoch": 0.37267080745341613,
      "grad_norm": 0.8381262421607971,
      "learning_rate": 0.0001573576436351046,
      "loss": 1.882,
      "step": 30
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 0.4776783585548401,
      "learning_rate": 0.000138268343236509,
      "loss": 1.6893,
      "step": 35
    },
    {
      "epoch": 0.4968944099378882,
      "grad_norm": 0.5259610414505005,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.6159,
      "step": 40
    },
    {
      "epoch": 0.5590062111801242,
      "grad_norm": 0.4893004596233368,
      "learning_rate": 9.563806126346642e-05,
      "loss": 1.5552,
      "step": 45
    },
    {
      "epoch": 0.6211180124223602,
      "grad_norm": 0.3604605495929718,
      "learning_rate": 7.411809548974792e-05,
      "loss": 1.5203,
      "step": 50
    },
    {
      "epoch": 0.6832298136645962,
      "grad_norm": 0.40665724873542786,
      "learning_rate": 5.382513867649663e-05,
      "loss": 1.5131,
      "step": 55
    },
    {
      "epoch": 0.7453416149068323,
      "grad_norm": 0.3896912634372711,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 1.4889,
      "step": 60
    },
    {
      "epoch": 0.8074534161490683,
      "grad_norm": 0.3924713730812073,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 1.4691,
      "step": 65
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.3828902840614319,
      "learning_rate": 9.369221296335006e-06,
      "loss": 1.4818,
      "step": 70
    },
    {
      "epoch": 0.9316770186335404,
      "grad_norm": 0.2997172176837921,
      "learning_rate": 2.3703992880066638e-06,
      "loss": 1.4576,
      "step": 75
    },
    {
      "epoch": 0.9937888198757764,
      "grad_norm": 0.4181534945964813,
      "learning_rate": 0.0,
      "loss": 1.467,
      "step": 80
    },
    {
      "epoch": 0.9937888198757764,
      "eval_loss": 1.4779901504516602,
      "eval_runtime": 2.2938,
      "eval_samples_per_second": 89.373,
      "eval_steps_per_second": 0.872,
      "step": 80
    },
    {
      "epoch": 0.9937888198757764,
      "step": 80,
      "total_flos": 8.089924109389005e+17,
      "train_loss": 2.0078666627407076,
      "train_runtime": 559.3432,
      "train_samples_per_second": 36.804,
      "train_steps_per_second": 0.143
    }
  ],
  "logging_steps": 5,
  "max_steps": 80,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.089924109389005e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
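The logged losses in `trainer_state.json` can be pulled back out for quick inspection; a minimal sketch, assuming the file sits in the current directory:

```python
import json

# Load the trainer state written by transformers.Trainer at the end of training.
with open("trainer_state.json") as f:
    state = json.load(f)

# Print step/loss pairs from the training log (evaluation entries use "eval_loss" instead).
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}: loss {entry['loss']:.4f}")
```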