superbigtree committed
Commit aa16ccf
Parent: 0cf2467

Model save

README.md CHANGED
@@ -2,7 +2,7 @@
 tags:
 - generated_from_trainer
 datasets:
-- ydshieh/coco_dataset_script
+- coco_dataset_script
 model-index:
 - name: clip-roberta-finetuned
   results: []
@@ -13,9 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # clip-roberta-finetuned
 
-This model was trained from scratch on the ydshieh/coco_dataset_script 2017 dataset.
-It achieves the following results on the evaluation set:
-- Loss: 1.5879
+This model was trained from scratch on the coco_dataset_script dataset.
 
 ## Model description
 
@@ -39,9 +37,9 @@ The following hyperparameters were used during training:
 - eval_batch_size: 64
 - seed: 42
 - distributed_type: multi-GPU
-- num_devices: 4
-- total_train_batch_size: 256
-- total_eval_batch_size: 256
+- num_devices: 2
+- total_train_batch_size: 128
+- total_eval_batch_size: 128
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - num_epochs: 3.0
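
The halved totals in the updated card are consistent with the per-device batch size staying at 64 while the device count drops from 4 to 2. A minimal sanity check in Python (the per-device train batch size of 64 is inferred from the totals, not stated in the card):

```python
# Effective batch sizes reported in the updated model card.
num_devices = 2
per_device_batch_size = 64  # card lists eval_batch_size: 64; train size inferred

total_train_batch_size = num_devices * per_device_batch_size  # 128
total_eval_batch_size = num_devices * per_device_batch_size   # 128
print(total_train_batch_size, total_eval_batch_size)
```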
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
     "epoch": 3.0,
-    "eval_loss": 1.5879405736923218,
-    "eval_runtime": 43.7011,
-    "eval_samples_per_second": 572.389,
-    "eval_steps_per_second": 2.243,
-    "train_loss": 0.3107225017701603,
-    "train_runtime": 5244.1454,
-    "train_samples_per_second": 338.522,
-    "train_steps_per_second": 1.323
+    "eval_loss": 1.6256719827651978,
+    "eval_runtime": 85.7309,
+    "eval_samples_per_second": 291.773,
+    "eval_steps_per_second": 2.286,
+    "train_loss": 0.3325984596243642,
+    "train_runtime": 10519.3103,
+    "train_samples_per_second": 168.762,
+    "train_steps_per_second": 1.319
 }
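
For context, these result files are the standard artifacts written at the end of a Transformers example training script. A hedged sketch of the typical sequence (assumes `trainer` is an already configured `transformers.Trainer`; variable names are illustrative, not taken from this repo):

```python
# Roughly how all_results.json, train_results.json, eval_results.json and
# trainer_state.json get written in run_clip.py-style scripts (illustrative).
train_result = trainer.train()
trainer.save_model()

trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)  # train_results.json + all_results.json
trainer.save_state()                                 # trainer_state.json

metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)                # eval_results.json + all_results.json
```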
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 3.0,
-    "eval_loss": 1.5879405736923218,
-    "eval_runtime": 43.7011,
-    "eval_samples_per_second": 572.389,
-    "eval_steps_per_second": 2.243
+    "eval_loss": 1.6256719827651978,
+    "eval_runtime": 85.7309,
+    "eval_samples_per_second": 291.773,
+    "eval_steps_per_second": 2.286
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:438762c56d01e05b354f213a6c855f666e8628760029636e421e41e9926365d0
+oid sha256:d8bf2b8d3d2d96231abb7a52d7dba7eef5263e0aa36c7ade943c82823bcd0976
 size 851603588
runs/Feb08_06-07-11_d0d182b85cf0/events.out.tfevents.1707372446.d0d182b85cf0.139131.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06dfc83f182901e05a77ab1d38a3524d9b8129aea600b5f4a7f956267c4efca7
-size 12582
+oid sha256:874c8b223c2fa45713c9d9615d2eaac58b63d00ffd76056c33d821b510ba2898
+size 12936
tokenizer.json CHANGED
@@ -1,21 +1,7 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 128,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
-  "padding": {
-    "strategy": {
-      "Fixed": 128
-    },
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 1,
-    "pad_type_id": 0,
-    "pad_token": "<pad>"
-  },
+  "truncation": null,
+  "padding": null,
   "added_tokens": [
     {
       "id": 0,
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 3.0,
-    "train_loss": 0.3107225017701603,
-    "train_runtime": 5244.1454,
-    "train_samples_per_second": 338.522,
-    "train_steps_per_second": 1.323
+    "train_loss": 0.3325984596243642,
+    "train_runtime": 10519.3103,
+    "train_samples_per_second": 168.762,
+    "train_steps_per_second": 1.319
 }
trainer_state.json CHANGED
@@ -3,101 +3,185 @@
   "best_model_checkpoint": null,
   "epoch": 3.0,
   "eval_steps": 500,
-  "global_step": 6936,
+  "global_step": 13872,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
+    {
+      "epoch": 0.11,
+      "learning_rate": 4.819780853517878e-05,
+      "loss": 1.0435,
+      "step": 500
+    },
     {
       "epoch": 0.22,
       "learning_rate": 4.639561707035756e-05,
-      "loss": 0.8543,
-      "step": 500
+      "loss": 0.6931,
+      "step": 1000
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 4.459342560553633e-05,
+      "loss": 0.6098,
+      "step": 1500
     },
     {
       "epoch": 0.43,
       "learning_rate": 4.2791234140715114e-05,
-      "loss": 0.512,
-      "step": 1000
+      "loss": 0.5507,
+      "step": 2000
+    },
+    {
+      "epoch": 0.54,
+      "learning_rate": 4.098904267589389e-05,
+      "loss": 0.5055,
+      "step": 2500
     },
     {
       "epoch": 0.65,
       "learning_rate": 3.9186851211072664e-05,
-      "loss": 0.4291,
-      "step": 1500
+      "loss": 0.4718,
+      "step": 3000
+    },
+    {
+      "epoch": 0.76,
+      "learning_rate": 3.7384659746251445e-05,
+      "loss": 0.4472,
+      "step": 3500
     },
     {
       "epoch": 0.87,
       "learning_rate": 3.558246828143022e-05,
-      "loss": 0.3857,
-      "step": 2000
+      "loss": 0.408,
+      "step": 4000
+    },
+    {
+      "epoch": 0.97,
+      "learning_rate": 3.3780276816608994e-05,
+      "loss": 0.386,
+      "step": 4500
     },
     {
       "epoch": 1.08,
       "learning_rate": 3.1978085351787776e-05,
-      "loss": 0.3264,
-      "step": 2500
+      "loss": 0.3326,
+      "step": 5000
+    },
+    {
+      "epoch": 1.19,
+      "learning_rate": 3.0175893886966554e-05,
+      "loss": 0.3051,
+      "step": 5500
     },
     {
       "epoch": 1.3,
       "learning_rate": 2.8373702422145332e-05,
-      "loss": 0.2763,
-      "step": 3000
+      "loss": 0.2956,
+      "step": 6000
+    },
+    {
+      "epoch": 1.41,
+      "learning_rate": 2.657151095732411e-05,
+      "loss": 0.2854,
+      "step": 6500
     },
     {
       "epoch": 1.51,
       "learning_rate": 2.4769319492502884e-05,
-      "loss": 0.2598,
-      "step": 3500
+      "loss": 0.2789,
+      "step": 7000
+    },
+    {
+      "epoch": 1.62,
+      "learning_rate": 2.2967128027681662e-05,
+      "loss": 0.263,
+      "step": 7500
     },
     {
       "epoch": 1.73,
       "learning_rate": 2.116493656286044e-05,
-      "loss": 0.2409,
-      "step": 4000
+      "loss": 0.2532,
+      "step": 8000
+    },
+    {
+      "epoch": 1.84,
+      "learning_rate": 1.936274509803922e-05,
+      "loss": 0.2419,
+      "step": 8500
     },
     {
       "epoch": 1.95,
       "learning_rate": 1.7560553633217993e-05,
-      "loss": 0.2283,
-      "step": 4500
+      "loss": 0.2387,
+      "step": 9000
+    },
+    {
+      "epoch": 2.05,
+      "learning_rate": 1.575836216839677e-05,
+      "loss": 0.206,
+      "step": 9500
     },
     {
       "epoch": 2.16,
       "learning_rate": 1.395617070357555e-05,
-      "loss": 0.185,
-      "step": 5000
+      "loss": 0.1783,
+      "step": 10000
+    },
+    {
+      "epoch": 2.27,
+      "learning_rate": 1.2153979238754325e-05,
+      "loss": 0.1758,
+      "step": 10500
     },
     {
       "epoch": 2.38,
       "learning_rate": 1.0351787773933102e-05,
-      "loss": 0.1686,
-      "step": 5500
+      "loss": 0.1696,
+      "step": 11000
+    },
+    {
+      "epoch": 2.49,
+      "learning_rate": 8.54959630911188e-06,
+      "loss": 0.1632,
+      "step": 11500
     },
     {
       "epoch": 2.6,
       "learning_rate": 6.747404844290659e-06,
-      "loss": 0.1595,
-      "step": 6000
+      "loss": 0.1592,
+      "step": 12000
+    },
+    {
+      "epoch": 2.7,
+      "learning_rate": 4.945213379469435e-06,
+      "loss": 0.1542,
+      "step": 12500
     },
     {
       "epoch": 2.81,
       "learning_rate": 3.143021914648212e-06,
-      "loss": 0.1537,
-      "step": 6500
+      "loss": 0.1542,
+      "step": 13000
+    },
+    {
+      "epoch": 2.92,
+      "learning_rate": 1.3408304498269898e-06,
+      "loss": 0.1481,
+      "step": 13500
     },
     {
       "epoch": 3.0,
-      "step": 6936,
+      "step": 13872,
       "total_flos": 2.370754172808069e+17,
-      "train_loss": 0.3107225017701603,
-      "train_runtime": 5244.1454,
-      "train_samples_per_second": 338.522,
-      "train_steps_per_second": 1.323
+      "train_loss": 0.3325984596243642,
+      "train_runtime": 10519.3103,
+      "train_samples_per_second": 168.762,
+      "train_steps_per_second": 1.319
     }
   ],
   "logging_steps": 500,
-  "max_steps": 6936,
+  "max_steps": 13872,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 500,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b55918f59c2c93817a7e136c35a2527d8564139604465a4d6afa5d9d3ac3812
+oid sha256:3d6660050e1f57c34181cf3294825f8f508833b8386043d9d31a8d0e846841ab
 size 4347
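
training_args.bin is the pickled TrainingArguments object that Trainer stores next to the model, so loading it is a quick way to confirm which hyperparameters actually changed between the two revisions. A hedged sketch (the path is illustrative; recent PyTorch releases need weights_only=False to unpickle it):

```python
import torch

# Inspect the saved TrainingArguments (illustrative path).
args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.per_device_eval_batch_size)
```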