IlyasMoutawwakil committed
Commit c610ae5
1 Parent(s): c4ca28f

Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub

cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
   "name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
   "backend": {
   "name": "pytorch",
-  "version": "2.4.0+rocm6.1",
+  "version": "2.2.2+rocm5.7",
   "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
   "task": "multiple-choice",
   "library": "transformers",
@@ -110,33 +110,33 @@
   "overall": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1641.086976,
-  "max_global_vram": 0.0,
-  "max_process_vram": 0.0,
+  "max_ram": 1135.886336,
+  "max_global_vram": 3149.000704,
+  "max_process_vram": 307436.105728,
   "max_reserved": 2707.423232,
   "max_allocated": 2497.88416
   },
   "latency": {
   "unit": "s",
   "count": 5,
-  "total": 0.7255548782348633,
-  "mean": 0.14511097564697267,
-  "stdev": 0.1980350948981833,
-  "p50": 0.04612321853637695,
-  "p90": 0.34334536895751955,
-  "p95": 0.4422630470275878,
-  "p99": 0.5213971894836426,
+  "total": 0.7310609092712402,
+  "mean": 0.14621218185424806,
+  "stdev": 0.20397802394193557,
+  "p50": 0.044439399719238284,
+  "p90": 0.35034440002441414,
+  "p95": 0.4522560928344726,
+  "p99": 0.5337854470825195,
   "values": [
-  0.5411807250976562,
-  0.04659233474731445,
-  0.04612321853637695,
-  0.045676498413085936,
-  0.04598210144042969
+  0.5541677856445313,
+  0.044439399719238284,
+  0.04372740173339844,
+  0.04411700057983398,
+  0.044609321594238284
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 68.9127748980759
+  "value": 68.39375401680635
   },
   "energy": null,
   "efficiency": null
@@ -144,30 +144,30 @@
   "warmup": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1641.086976,
-  "max_global_vram": 0.0,
-  "max_process_vram": 0.0,
+  "max_ram": 1135.886336,
+  "max_global_vram": 3149.000704,
+  "max_process_vram": 307436.105728,
   "max_reserved": 2707.423232,
   "max_allocated": 2497.88416
   },
   "latency": {
   "unit": "s",
   "count": 2,
-  "total": 0.5877730598449707,
-  "mean": 0.2938865299224854,
-  "stdev": 0.2472941951751709,
-  "p50": 0.2938865299224853,
-  "p90": 0.49172188606262207,
-  "p95": 0.5164513055801392,
-  "p99": 0.5362348411941529,
+  "total": 0.5986071853637696,
+  "mean": 0.2993035926818848,
+  "stdev": 0.2548641929626465,
+  "p50": 0.2993035926818848,
+  "p90": 0.503194947052002,
+  "p95": 0.5286813663482666,
+  "p99": 0.5490705017852784,
   "values": [
-  0.5411807250976562,
-  0.04659233474731445
+  0.5541677856445313,
+  0.044439399719238284
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 13.610695260701563
+  "value": 13.364356786226102
   },
   "energy": null,
   "efficiency": null
@@ -175,31 +175,31 @@
   "train": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1641.086976,
-  "max_global_vram": 0.0,
-  "max_process_vram": 0.0,
+  "max_ram": 1135.886336,
+  "max_global_vram": 3149.000704,
+  "max_process_vram": 307436.105728,
   "max_reserved": 2707.423232,
   "max_allocated": 2497.88416
   },
   "latency": {
   "unit": "s",
   "count": 3,
-  "total": 0.13778181838989256,
-  "mean": 0.04592727279663086,
-  "stdev": 0.00018644812017269756,
-  "p50": 0.04598210144042969,
-  "p90": 0.046094995117187496,
-  "p95": 0.04610910682678222,
-  "p99": 0.046120396194458006,
+  "total": 0.1324537239074707,
+  "mean": 0.044151241302490235,
+  "stdev": 0.00036085544709939834,
+  "p50": 0.04411700057983398,
+  "p90": 0.044510857391357427,
+  "p95": 0.044560089492797855,
+  "p99": 0.0445994751739502,
   "values": [
-  0.04612321853637695,
-  0.045676498413085936,
-  0.04598210144042969
+  0.04372740173339844,
+  0.04411700057983398,
+  0.044609321594238284
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 130.64132996897976
+  "value": 135.89651894252825
   },
   "energy": null,
   "efficiency": null