IlyasMoutawwakil committed (verified) · Commit 901f7b3 · Parent(s): ef2fe55

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

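For context, a minimal sketch of how a file like this can be pushed with the huggingface_hub client. The repo_id and local path below are hypothetical illustrations, not taken from this commit; only the in-repo path and commit message mirror what the commit shows:

```python
# Minimal sketch: uploading a benchmark.json with huggingface_hub.
# repo_id and the local path are assumptions for illustration; adjust to your setup.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="benchmark.json",  # local file produced by the benchmark run
    path_in_repo="cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="optimum-benchmark/cuda-training-benchmarks",  # hypothetical repo id
    repo_type="dataset",
    commit_message="Upload benchmark.json with huggingface_hub",
)
```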
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -6,19 +6,17 @@
  "version": "2.2.2",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
- "model": "openai-community/gpt2",
  "library": "transformers",
+ "model": "openai-community/gpt2",
+ "processor": "openai-community/gpt2",
  "device": "cuda",
  "device_ids": "0",
  "seed": 42,
  "inter_op_num_threads": null,
  "intra_op_num_threads": null,
- "hub_kwargs": {
-   "revision": "main",
-   "force_download": false,
-   "local_files_only": false,
-   "trust_remote_code": false
- },
+ "model_kwargs": {},
+ "processor_kwargs": {},
+ "hub_kwargs": {},
  "no_weights": true,
  "device_map": null,
  "torch_dtype": null,
@@ -107,7 +105,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1106.71872,
+ "max_ram": 1107.120128,
  "max_global_vram": 3563.585536,
  "max_process_vram": 0.0,
  "max_reserved": 2915.04128,
@@ -116,24 +114,24 @@
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 0.8164259529113769,
- "mean": 0.16328519058227536,
- "stdev": 0.22830199141373853,
- "p50": 0.04896051025390625,
- "p90": 0.3918630737304688,
- "p95": 0.5058758422851561,
- "p99": 0.5970860571289062,
+ "total": 0.7884809455871582,
+ "mean": 0.15769618911743163,
+ "stdev": 0.2183143474662541,
+ "p50": 0.04857753753662109,
+ "p90": 0.37621984252929697,
+ "p95": 0.4852721527099609,
+ "p99": 0.5725140008544922,
  "values": [
- 0.6198886108398437,
- 0.04982476806640625,
- 0.04896051025390625,
- 0.0488458251953125,
- 0.048906238555908206
+ 0.594324462890625,
+ 0.04906291198730469,
+ 0.04857753753662109,
+ 0.04837171173095703,
+ 0.048144321441650394
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 61.24254112905143
+ "value": 63.41307330231867
  },
  "energy": null,
  "efficiency": null
@@ -141,7 +139,7 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1106.71872,
+ "max_ram": 1107.120128,
  "max_global_vram": 3563.585536,
  "max_process_vram": 0.0,
  "max_reserved": 2915.04128,
@@ -150,21 +148,21 @@
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 0.6697133789062499,
- "mean": 0.33485668945312497,
- "stdev": 0.28503192138671873,
- "p50": 0.33485668945312497,
- "p90": 0.5628822265625,
- "p95": 0.5913854187011718,
- "p99": 0.6141879724121093,
+ "total": 0.6433873748779297,
+ "mean": 0.32169368743896487,
+ "stdev": 0.2726307754516602,
+ "p50": 0.32169368743896487,
+ "p90": 0.539798307800293,
+ "p95": 0.567061385345459,
+ "p99": 0.5888718473815918,
  "values": [
- 0.6198886108398437,
- 0.04982476806640625
+ 0.594324462890625,
+ 0.04906291198730469
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 11.945408665816549
+ "value": 12.434188658920831
  },
  "energy": null,
  "efficiency": null
@@ -172,7 +170,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1106.71872,
+ "max_ram": 1107.120128,
  "max_global_vram": 3563.585536,
  "max_process_vram": 0.0,
  "max_reserved": 2915.04128,
@@ -181,22 +179,22 @@
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 0.14671257400512694,
- "mean": 0.04890419133504231,
- "stdev": 4.6842352638883904e-05,
- "p50": 0.048906238555908206,
- "p90": 0.04894965591430664,
- "p95": 0.04895508308410645,
- "p99": 0.048959424819946286,
+ "total": 0.14509357070922851,
+ "mean": 0.048364523569742836,
+ "stdev": 0.00017693275272759448,
+ "p50": 0.04837171173095703,
+ "p90": 0.04853637237548828,
+ "p95": 0.048556954956054686,
+ "p99": 0.04857342102050781,
  "values": [
- 0.04896051025390625,
- 0.0488458251953125,
- 0.048906238555908206
+ 0.04857753753662109,
+ 0.04837171173095703,
+ 0.048144321441650394
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 122.6888705488255
+ "value": 124.05787459785171
  },
  "energy": null,
  "efficiency": null