IlyasMoutawwakil committed
Commit 1e3005f
1 Parent(s): 5148d5e

Upload cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json with huggingface_hub

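The commit message says the file was pushed "with huggingface_hub". Below is a minimal sketch of such an upload; the repo_id is a hypothetical placeholder, since the target dataset repository is not named in this commit view, and only path_in_repo and the commit message are taken from the commit itself.

# Minimal sketch of the upload the commit message describes.
# The repo_id is a hypothetical placeholder: the target dataset repository
# is not named in this commit view.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via HF_TOKEN or `huggingface-cli login`
api.upload_file(
    path_or_fileobj="benchmark.json",  # local optimum-benchmark result file
    path_in_repo="cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json",
    repo_id="<user-or-org>/<benchmark-results-repo>",  # hypothetical placeholder
    repo_type="dataset",  # assumption: results are stored in a dataset repo
    commit_message=(
        "Upload cuda_training_transformers_token-classification_"
        "microsoft/deberta-v3-base/benchmark.json with huggingface_hub"
    ),
)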
cuda_training_transformers_token-classification_microsoft/deberta-v3-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
     "name": "cuda_training_transformers_token-classification_microsoft/deberta-v3-base",
     "backend": {
         "name": "pytorch",
-        "version": "2.4.0+rocm6.1",
+        "version": "2.2.2+rocm5.7",
         "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
         "task": "token-classification",
         "library": "transformers",
@@ -110,33 +110,33 @@
     "overall": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1637.429248,
-            "max_global_vram": 0.0,
-            "max_process_vram": 0.0,
+            "max_ram": 1156.796416,
+            "max_global_vram": 4373.233664,
+            "max_process_vram": 472834.469888,
             "max_reserved": 3919.577088,
             "max_allocated": 3695.353344
         },
         "latency": {
             "unit": "s",
             "count": 5,
-            "total": 1.0212988204956055,
-            "mean": 0.2042597640991211,
-            "stdev": 0.23972354994993678,
-            "p50": 0.08431379699707031,
-            "p90": 0.44495632934570317,
-            "p95": 0.5643278961181639,
-            "p99": 0.6598251495361328,
+            "total": 0.88155233001709,
+            "mean": 0.176310466003418,
+            "stdev": 0.20769205971333674,
+            "p50": 0.07223963165283204,
+            "p90": 0.384371078491211,
+            "p95": 0.48803227996826165,
+            "p99": 0.5709612411499023,
             "values": [
-                0.683699462890625,
-                0.08684162902832031,
-                0.08431379699707031,
-                0.08347348022460938,
-                0.08297045135498046
+                0.5916934814453125,
+                0.07338747406005859,
+                0.0721372299194336,
+                0.07209451293945313,
+                0.07223963165283204
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 48.957267938228405
+            "value": 56.718130390547195
         },
         "energy": null,
         "efficiency": null
@@ -144,30 +144,30 @@
     "warmup": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1637.429248,
-            "max_global_vram": 0.0,
-            "max_process_vram": 0.0,
+            "max_ram": 1156.796416,
+            "max_global_vram": 4373.233664,
+            "max_process_vram": 472834.469888,
             "max_reserved": 3919.577088,
             "max_allocated": 3695.353344
         },
         "latency": {
             "unit": "s",
             "count": 2,
-            "total": 0.7705410919189453,
-            "mean": 0.38527054595947263,
-            "stdev": 0.29842891693115237,
-            "p50": 0.38527054595947263,
-            "p90": 0.6240136795043946,
-            "p95": 0.6538565711975097,
-            "p99": 0.6777308845520019,
+            "total": 0.6650809555053712,
+            "mean": 0.3325404777526856,
+            "stdev": 0.25915300369262695,
+            "p50": 0.3325404777526856,
+            "p90": 0.5398628807067871,
+            "p95": 0.5657781810760498,
+            "p99": 0.58651042137146,
             "values": [
-                0.683699462890625,
-                0.08684162902832031
+                0.5916934814453125,
+                0.07338747406005859
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 10.382314562974061
+            "value": 12.028610853728456
         },
         "energy": null,
         "efficiency": null
@@ -175,31 +175,31 @@
     "train": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1637.429248,
-            "max_global_vram": 0.0,
-            "max_process_vram": 0.0,
+            "max_ram": 1156.796416,
+            "max_global_vram": 4373.233664,
+            "max_process_vram": 472834.469888,
             "max_reserved": 3919.577088,
             "max_allocated": 3695.353344
         },
         "latency": {
             "unit": "s",
             "count": 3,
-            "total": 0.2507577285766601,
-            "mean": 0.08358590952555338,
-            "stdev": 0.0005541507853244254,
-            "p50": 0.08347348022460938,
-            "p90": 0.08414573364257813,
-            "p95": 0.08422976531982422,
-            "p99": 0.0842969906616211,
+            "total": 0.21647137451171877,
+            "mean": 0.07215712483723959,
+            "stdev": 6.089179506341817e-05,
+            "p50": 0.0721372299194336,
+            "p90": 0.07221915130615235,
+            "p95": 0.07222939147949219,
+            "p99": 0.07223758361816407,
             "values": [
-                0.08431379699707031,
-                0.08347348022460938,
-                0.08297045135498046
+                0.0721372299194336,
+                0.07209451293945313,
+                0.07223963165283204
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 71.78243359505129
+            "value": 83.15187188422256
         },
         "energy": null,
         "efficiency": null