christineyu commited on
Commit
6774c37
·
verified ·
1 Parent(s): 0b59c8c

Model save

Browse files
Files changed (4) hide show
  1. README.md +57 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +253 -0
README.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: mistralai/Mistral-7B-Instruct-v0.2
3
+ library_name: transformers
4
+ model_name: prometheus-7b-v1.5-beta-1-0109-sum-pref-overfit-v3-flash
5
+ tags:
6
+ - generated_from_trainer
7
+ - trl
8
+ - sft
9
+ license: license
10
+ ---
11
+
12
+ # Model Card for prometheus-7b-v1.5-beta-1-0109-sum-pref-overfit-v3-flash
13
+
14
+ This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
+ generator = pipeline("text-generation", model="christineyu/prometheus-7b-v1.5-beta-1-0109-sum-pref-overfit-v3-flash", device="cuda")
24
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
+ print(output["generated_text"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/myexp/huggingface/runs/7ncrpm53)
31
+
32
+ This model was trained with SFT.
33
+
34
+ ### Framework versions
35
+
36
+ - TRL: 0.12.2
37
+ - Transformers: 4.46.3
38
+ - Pytorch: 2.3.0
39
+ - Datasets: 3.2.0
40
+ - Tokenizers: 0.20.3
41
+
42
+ ## Citations
43
+
44
+
45
+
46
+ Cite TRL as:
47
+
48
+ ```bibtex
49
+ @misc{vonwerra2022trl,
50
+ title = {{TRL: Transformer Reinforcement Learning}},
51
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
52
+ year = 2020,
53
+ journal = {GitHub repository},
54
+ publisher = {GitHub},
55
+ howpublished = {\url{https://github.com/huggingface/trl}}
56
+ }
57
+ ```
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "total_flos": 2.5427771438137344e+16,
4
+ "train_loss": 0.06073189970825845,
5
+ "train_runtime": 598.238,
6
+ "train_samples": 990,
7
+ "train_samples_per_second": 0.236,
8
+ "train_steps_per_second": 0.236
9
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "total_flos": 2.5427771438137344e+16,
4
+ "train_loss": 0.06073189970825845,
5
+ "train_runtime": 598.238,
6
+ "train_samples": 990,
7
+ "train_samples_per_second": 0.236,
8
+ "train_steps_per_second": 0.236
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 500,
6
+ "global_step": 141,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0070921985815602835,
13
+ "grad_norm": 8.373100280761719,
14
+ "learning_rate": 6.666666666666667e-07,
15
+ "loss": 0.5467,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.03546099290780142,
20
+ "grad_norm": 5.184474945068359,
21
+ "learning_rate": 3.3333333333333333e-06,
22
+ "loss": 0.5472,
23
+ "step": 5
24
+ },
25
+ {
26
+ "epoch": 0.07092198581560284,
27
+ "grad_norm": 2.155921697616577,
28
+ "learning_rate": 6.666666666666667e-06,
29
+ "loss": 0.4131,
30
+ "step": 10
31
+ },
32
+ {
33
+ "epoch": 0.10638297872340426,
34
+ "grad_norm": 1.2390508651733398,
35
+ "learning_rate": 1e-05,
36
+ "loss": 0.291,
37
+ "step": 15
38
+ },
39
+ {
40
+ "epoch": 0.14184397163120568,
41
+ "grad_norm": 1.1246978044509888,
42
+ "learning_rate": 9.961196033000862e-06,
43
+ "loss": 0.1965,
44
+ "step": 20
45
+ },
46
+ {
47
+ "epoch": 0.1773049645390071,
48
+ "grad_norm": 1.2201730012893677,
49
+ "learning_rate": 9.84538643114539e-06,
50
+ "loss": 0.101,
51
+ "step": 25
52
+ },
53
+ {
54
+ "epoch": 0.2127659574468085,
55
+ "grad_norm": 2.5533862113952637,
56
+ "learning_rate": 9.654368743221022e-06,
57
+ "loss": 0.0424,
58
+ "step": 30
59
+ },
60
+ {
61
+ "epoch": 0.24822695035460993,
62
+ "grad_norm": 0.4596536457538605,
63
+ "learning_rate": 9.391107866851143e-06,
64
+ "loss": 0.0168,
65
+ "step": 35
66
+ },
67
+ {
68
+ "epoch": 0.28368794326241137,
69
+ "grad_norm": 0.3188921809196472,
70
+ "learning_rate": 9.059690028579285e-06,
71
+ "loss": 0.0103,
72
+ "step": 40
73
+ },
74
+ {
75
+ "epoch": 0.3191489361702128,
76
+ "grad_norm": 0.2514072060585022,
77
+ "learning_rate": 8.665259359149132e-06,
78
+ "loss": 0.0089,
79
+ "step": 45
80
+ },
81
+ {
82
+ "epoch": 0.3546099290780142,
83
+ "grad_norm": 4.992562294006348,
84
+ "learning_rate": 8.213938048432697e-06,
85
+ "loss": 0.0071,
86
+ "step": 50
87
+ },
88
+ {
89
+ "epoch": 0.3900709219858156,
90
+ "grad_norm": 0.2883487343788147,
91
+ "learning_rate": 7.712731319328798e-06,
92
+ "loss": 0.0063,
93
+ "step": 55
94
+ },
95
+ {
96
+ "epoch": 0.425531914893617,
97
+ "grad_norm": 0.8716890215873718,
98
+ "learning_rate": 7.169418695587791e-06,
99
+ "loss": 0.0076,
100
+ "step": 60
101
+ },
102
+ {
103
+ "epoch": 0.46099290780141844,
104
+ "grad_norm": 0.2935401201248169,
105
+ "learning_rate": 6.592433251258423e-06,
106
+ "loss": 0.007,
107
+ "step": 65
108
+ },
109
+ {
110
+ "epoch": 0.49645390070921985,
111
+ "grad_norm": 0.11323031038045883,
112
+ "learning_rate": 5.990730715996989e-06,
113
+ "loss": 0.0053,
114
+ "step": 70
115
+ },
116
+ {
117
+ "epoch": 0.5319148936170213,
118
+ "grad_norm": 0.45351043343544006,
119
+ "learning_rate": 5.373650467932122e-06,
120
+ "loss": 0.0044,
121
+ "step": 75
122
+ },
123
+ {
124
+ "epoch": 0.5673758865248227,
125
+ "grad_norm": 0.22218099236488342,
126
+ "learning_rate": 4.750770571696514e-06,
127
+ "loss": 0.0038,
128
+ "step": 80
129
+ },
130
+ {
131
+ "epoch": 0.6028368794326241,
132
+ "grad_norm": 0.1962103545665741,
133
+ "learning_rate": 4.131759111665349e-06,
134
+ "loss": 0.0032,
135
+ "step": 85
136
+ },
137
+ {
138
+ "epoch": 0.6382978723404256,
139
+ "grad_norm": 0.7398797273635864,
140
+ "learning_rate": 3.526224127945479e-06,
141
+ "loss": 0.0046,
142
+ "step": 90
143
+ },
144
+ {
145
+ "epoch": 0.6737588652482269,
146
+ "grad_norm": 0.21601063013076782,
147
+ "learning_rate": 2.9435644843469434e-06,
148
+ "loss": 0.0038,
149
+ "step": 95
150
+ },
151
+ {
152
+ "epoch": 0.7092198581560284,
153
+ "grad_norm": 0.13288450241088867,
154
+ "learning_rate": 2.39282398310251e-06,
155
+ "loss": 0.0036,
156
+ "step": 100
157
+ },
158
+ {
159
+ "epoch": 0.7446808510638298,
160
+ "grad_norm": 0.1287090927362442,
161
+ "learning_rate": 1.8825509907063328e-06,
162
+ "loss": 0.0037,
163
+ "step": 105
164
+ },
165
+ {
166
+ "epoch": 0.7801418439716312,
167
+ "grad_norm": 0.291775107383728,
168
+ "learning_rate": 1.4206657537014078e-06,
169
+ "loss": 0.003,
170
+ "step": 110
171
+ },
172
+ {
173
+ "epoch": 0.8156028368794326,
174
+ "grad_norm": 0.2999476194381714,
175
+ "learning_rate": 1.0143374638853892e-06,
176
+ "loss": 0.0042,
177
+ "step": 115
178
+ },
179
+ {
180
+ "epoch": 0.851063829787234,
181
+ "grad_norm": 0.2297772467136383,
182
+ "learning_rate": 6.698729810778065e-07,
183
+ "loss": 0.0041,
184
+ "step": 120
185
+ },
186
+ {
187
+ "epoch": 0.8865248226950354,
188
+ "grad_norm": 0.1276993751525879,
189
+ "learning_rate": 3.9261894064796136e-07,
190
+ "loss": 0.0029,
191
+ "step": 125
192
+ },
193
+ {
194
+ "epoch": 0.9219858156028369,
195
+ "grad_norm": 0.2057814598083496,
196
+ "learning_rate": 1.8687876524993987e-07,
197
+ "loss": 0.0049,
198
+ "step": 130
199
+ },
200
+ {
201
+ "epoch": 0.9574468085106383,
202
+ "grad_norm": 0.33941203355789185,
203
+ "learning_rate": 5.584586887435739e-08,
204
+ "loss": 0.0032,
205
+ "step": 135
206
+ },
207
+ {
208
+ "epoch": 0.9929078014184397,
209
+ "grad_norm": 0.39472323656082153,
210
+ "learning_rate": 1.5540899959187727e-09,
211
+ "loss": 0.0026,
212
+ "step": 140
213
+ },
214
+ {
215
+ "epoch": 1.0,
216
+ "eval_loss": 0.010593634098768234,
217
+ "eval_runtime": 1.2814,
218
+ "eval_samples_per_second": 0.78,
219
+ "eval_steps_per_second": 0.78,
220
+ "step": 141
221
+ },
222
+ {
223
+ "epoch": 1.0,
224
+ "step": 141,
225
+ "total_flos": 2.5427771438137344e+16,
226
+ "train_loss": 0.06073189970825845,
227
+ "train_runtime": 598.238,
228
+ "train_samples_per_second": 0.236,
229
+ "train_steps_per_second": 0.236
230
+ }
231
+ ],
232
+ "logging_steps": 5,
233
+ "max_steps": 141,
234
+ "num_input_tokens_seen": 0,
235
+ "num_train_epochs": 1,
236
+ "save_steps": 100,
237
+ "stateful_callbacks": {
238
+ "TrainerControl": {
239
+ "args": {
240
+ "should_epoch_stop": false,
241
+ "should_evaluate": false,
242
+ "should_log": false,
243
+ "should_save": true,
244
+ "should_training_stop": true
245
+ },
246
+ "attributes": {}
247
+ }
248
+ },
249
+ "total_flos": 2.5427771438137344e+16,
250
+ "train_batch_size": 1,
251
+ "trial_name": null,
252
+ "trial_params": null
253
+ }