IlyasMoutawwakil HF staff committed on
Commit
34dbf79
·
verified ·
1 Parent(s): 5fae926

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

Browse files
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.4.0+rocm6.1",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
@@ -110,33 +110,33 @@
110
  "overall": {
111
  "memory": {
112
  "unit": "MB",
113
- "max_ram": 1636.39296,
114
- "max_global_vram": 0.0,
115
- "max_process_vram": 0.0,
116
- "max_reserved": 2889.875456,
117
  "max_allocated": 2506.73664
118
  },
119
  "latency": {
120
  "unit": "s",
121
  "count": 5,
122
- "total": 0.7833391952514649,
123
- "mean": 0.15666783905029297,
124
- "stdev": 0.21666670614723896,
125
- "p50": 0.04828705978393555,
126
- "p90": 0.3734940689086914,
127
- "p95": 0.4817475532531737,
128
- "p99": 0.5683503407287597,
129
  "values": [
130
- 0.5900010375976562,
131
- 0.04873361587524414,
132
- 0.04828705978393555,
133
- 0.048222900390625,
134
- 0.04809458160400391
135
  ]
136
  },
137
  "throughput": {
138
  "unit": "samples/s",
139
- "value": 63.82930958018662
140
  },
141
  "energy": null,
142
  "efficiency": null
@@ -144,30 +144,30 @@
144
  "warmup": {
145
  "memory": {
146
  "unit": "MB",
147
- "max_ram": 1636.39296,
148
- "max_global_vram": 0.0,
149
- "max_process_vram": 0.0,
150
- "max_reserved": 2889.875456,
151
  "max_allocated": 2506.73664
152
  },
153
  "latency": {
154
  "unit": "s",
155
  "count": 2,
156
- "total": 0.6387346534729004,
157
- "mean": 0.3193673267364502,
158
- "stdev": 0.270633710861206,
159
- "p50": 0.3193673267364502,
160
- "p90": 0.535874295425415,
161
- "p95": 0.5629376665115355,
162
- "p99": 0.5845883633804321,
163
  "values": [
164
- 0.5900010375976562,
165
- 0.04873361587524414
166
  ]
167
  },
168
  "throughput": {
169
  "unit": "samples/s",
170
- "value": 12.524762757903844
171
  },
172
  "energy": null,
173
  "efficiency": null
@@ -175,31 +175,31 @@
175
  "train": {
176
  "memory": {
177
  "unit": "MB",
178
- "max_ram": 1636.39296,
179
- "max_global_vram": 0.0,
180
- "max_process_vram": 0.0,
181
- "max_reserved": 2889.875456,
182
  "max_allocated": 2506.73664
183
  },
184
  "latency": {
185
  "unit": "s",
186
  "count": 3,
187
- "total": 0.14460454177856444,
188
- "mean": 0.04820151392618815,
189
- "stdev": 8.002082263711423e-05,
190
- "p50": 0.048222900390625,
191
- "p90": 0.048274227905273435,
192
- "p95": 0.048280643844604496,
193
- "p99": 0.048285776596069335,
194
  "values": [
195
- 0.04828705978393555,
196
- 0.048222900390625,
197
- 0.04809458160400391
198
  ]
199
  },
200
  "throughput": {
201
  "unit": "samples/s",
202
- "value": 124.47741805761348
203
  },
204
  "energy": null,
205
  "efficiency": null
 
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.2.2+rocm5.7",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
 
110
  "overall": {
111
  "memory": {
112
  "unit": "MB",
113
+ "max_ram": 1150.5664,
114
+ "max_global_vram": 3337.035776,
115
+ "max_process_vram": 356288.630784,
116
+ "max_reserved": 2894.06976,
117
  "max_allocated": 2506.73664
118
  },
119
  "latency": {
120
  "unit": "s",
121
  "count": 5,
122
+ "total": 0.7056295127868654,
123
+ "mean": 0.14112590255737306,
124
+ "stdev": 0.2014162817173391,
125
+ "p50": 0.04014517593383789,
126
+ "p90": 0.34296517944335947,
127
+ "p95": 0.4434610748291015,
128
+ "p99": 0.5238577911376954,
129
  "values": [
130
+ 0.5439569702148438,
131
+ 0.04147749328613281,
132
+ 0.04014517593383789,
133
+ 0.040039257049560545,
134
+ 0.04001061630249023
135
  ]
136
  },
137
  "throughput": {
138
  "unit": "samples/s",
139
+ "value": 70.85871423167423
140
  },
141
  "energy": null,
142
  "efficiency": null
 
144
  "warmup": {
145
  "memory": {
146
  "unit": "MB",
147
+ "max_ram": 1150.5664,
148
+ "max_global_vram": 3337.035776,
149
+ "max_process_vram": 356288.630784,
150
+ "max_reserved": 2894.06976,
151
  "max_allocated": 2506.73664
152
  },
153
  "latency": {
154
  "unit": "s",
155
  "count": 2,
156
+ "total": 0.5854344635009766,
157
+ "mean": 0.2927172317504883,
158
+ "stdev": 0.2512397384643555,
159
+ "p50": 0.2927172317504883,
160
+ "p90": 0.4937090225219727,
161
+ "p95": 0.5188329963684082,
162
+ "p99": 0.5389321754455567,
163
  "values": [
164
+ 0.5439569702148438,
165
+ 0.04147749328613281
166
  ]
167
  },
168
  "throughput": {
169
  "unit": "samples/s",
170
+ "value": 13.665065005156901
171
  },
172
  "energy": null,
173
  "efficiency": null
 
175
  "train": {
176
  "memory": {
177
  "unit": "MB",
178
+ "max_ram": 1150.5664,
179
+ "max_global_vram": 3337.035776,
180
+ "max_process_vram": 356288.630784,
181
+ "max_reserved": 2894.06976,
182
  "max_allocated": 2506.73664
183
  },
184
  "latency": {
185
  "unit": "s",
186
  "count": 3,
187
+ "total": 0.12019504928588867,
188
+ "mean": 0.04006501642862956,
189
+ "stdev": 5.787476597285938e-05,
190
+ "p50": 0.040039257049560545,
191
+ "p90": 0.04012399215698242,
192
+ "p95": 0.040134584045410156,
193
+ "p99": 0.04014305755615234,
194
  "values": [
195
+ 0.04014517593383789,
196
+ 0.040039257049560545,
197
+ 0.04001061630249023
198
  ]
199
  },
200
  "throughput": {
201
  "unit": "samples/s",
202
+ "value": 149.75658404354317
203
  },
204
  "energy": null,
205
  "efficiency": null