IlyasMoutawwakil (HF staff) committed
Commit 1c896c4 · verified · 1 parent: 01a19b8

Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub

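As the commit message notes, the file was pushed with huggingface_hub. Below is a minimal sketch of how such an upload is typically done with the library's upload_file API; the repo_id is a placeholder, since the target repository is not shown in this commit.

# Hypothetical upload sketch; repo_id is a placeholder, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    path_in_repo="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    repo_id="<namespace>/<benchmark-repo>",  # placeholder, actual repo not shown here
    repo_type="dataset",  # assumption: benchmark results live in a dataset repo
    commit_message="Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub",
)

A real push additionally requires authentication (e.g. huggingface-cli login or passing a token).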
cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
  "backend": {
  "name": "pytorch",
- "version": "2.3.1+rocm5.7",
+ "version": "2.4.0+rocm6.1",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "fill-mask",
  "library": "transformers",
@@ -117,33 +117,33 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1271.5008,
- "max_global_vram": 2958.061568,
- "max_process_vram": 300161.98656,
+ "max_ram": 1631.309824,
+ "max_global_vram": 12.038144,
+ "max_process_vram": 0.0,
  "max_reserved": 2497.708032,
  "max_allocated": 2195.345408
  },
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 0.7368733367919923,
- "mean": 0.14737466735839846,
- "stdev": 0.21022850913148108,
- "p50": 0.042222904205322265,
- "p90": 0.35788311767578135,
- "p95": 0.46285705566406243,
- "p99": 0.5468362060546875,
+ "total": 0.7480139007568359,
+ "mean": 0.1496027801513672,
+ "stdev": 0.2117695812414152,
+ "p50": 0.04354911041259766,
+ "p90": 0.3617914947509766,
+ "p95": 0.46746599884033196,
+ "p99": 0.5520056021118165,
  "values": [
- 0.5678309936523438,
- 0.0429613037109375,
- 0.04184210968017578,
- 0.04201602554321289,
- 0.042222904205322265
+ 0.5731405029296875,
+ 0.044767982482910154,
+ 0.0433351936340332,
+ 0.04354911041259766,
+ 0.043221111297607424
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 67.85426681019158
+ "value": 66.8436775699092
  },
  "energy": null,
  "efficiency": null
@@ -151,30 +151,30 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1271.5008,
- "max_global_vram": 2958.061568,
- "max_process_vram": 300161.98656,
+ "max_ram": 1631.309824,
+ "max_global_vram": 12.038144,
+ "max_process_vram": 0.0,
  "max_reserved": 2497.708032,
  "max_allocated": 2195.345408
  },
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 0.6107922973632813,
- "mean": 0.30539614868164067,
- "stdev": 0.2624348449707032,
- "p50": 0.30539614868164067,
- "p90": 0.5153440246582032,
- "p95": 0.5415875091552734,
- "p99": 0.5625822967529297,
+ "total": 0.6179084854125977,
+ "mean": 0.30895424270629884,
+ "stdev": 0.2641862602233887,
+ "p50": 0.30895424270629884,
+ "p90": 0.5203032508850098,
+ "p95": 0.5467218769073486,
+ "p99": 0.5678567777252198,
  "values": [
- 0.5678309936523438,
- 0.0429613037109375
+ 0.5731405029296875,
+ 0.044767982482910154
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 13.097742120414846
+ "value": 12.946901019910964
  },
  "energy": null,
  "efficiency": null
@@ -182,31 +182,31 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1271.5008,
- "max_global_vram": 2958.061568,
- "max_process_vram": 300161.98656,
+ "max_ram": 1631.309824,
+ "max_global_vram": 12.038144,
+ "max_process_vram": 0.0,
  "max_reserved": 2497.708032,
  "max_allocated": 2195.345408
  },
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 0.12608103942871093,
- "mean": 0.04202701314290364,
- "stdev": 0.00015565273978873092,
- "p50": 0.04201602554321289,
- "p90": 0.04218152847290039,
- "p95": 0.04220221633911133,
- "p99": 0.04221876663208008,
+ "total": 0.13010541534423828,
+ "mean": 0.04336847178141276,
+ "stdev": 0.00013595693234343524,
+ "p50": 0.0433351936340332,
+ "p90": 0.043506327056884765,
+ "p95": 0.04352771873474121,
+ "p99": 0.04354483207702637,
  "values": [
- 0.04184210968017578,
- 0.04201602554321289,
- 0.042222904205322265
+ 0.0433351936340332,
+ 0.04354911041259766,
+ 0.043221111297607424
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 142.76532047610226
+ "value": 138.34935273351118
  },
  "energy": null,
  "efficiency": null