IlyasMoutawwakil HF staff committed on
Commit
8457705
1 Parent(s): 72a42d6

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

Browse files
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.2.2+rocm5.7",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
@@ -77,12 +77,12 @@
77
  "start_method": "spawn"
78
  },
79
  "environment": {
80
- "cpu": " AMD EPYC 7643 48-Core Processor",
81
- "cpu_count": 96,
82
- "cpu_ram_mb": 1082028.982272,
83
  "system": "Linux",
84
  "machine": "x86_64",
85
- "platform": "Linux-5.15.0-84-generic-x86_64-with-glibc2.35",
86
  "processor": "x86_64",
87
  "python_version": "3.10.12",
88
  "gpu": [
@@ -91,16 +91,16 @@
91
  "gpu_count": 1,
92
  "gpu_vram_mb": 68702699520,
93
  "optimum_benchmark_version": "0.4.0",
94
- "optimum_benchmark_commit": "65fa416fd503cfe9a2be7637ee30c70a4a1f96f1",
95
- "transformers_version": "4.43.3",
96
  "transformers_commit": null,
97
  "accelerate_version": "0.33.0",
98
  "accelerate_commit": null,
99
- "diffusers_version": "0.29.2",
100
  "diffusers_commit": null,
101
  "optimum_version": null,
102
  "optimum_commit": null,
103
- "timm_version": "1.0.8",
104
  "timm_commit": null,
105
  "peft_version": null,
106
  "peft_commit": null
@@ -110,33 +110,33 @@
110
  "overall": {
111
  "memory": {
112
  "unit": "MB",
113
- "max_ram": 1149.067264,
114
- "max_global_vram": 3337.035776,
115
- "max_process_vram": 331613.26592,
116
- "max_reserved": 2894.06976,
117
  "max_allocated": 2506.73664
118
  },
119
  "latency": {
120
  "unit": "s",
121
  "count": 5,
122
- "total": 0.6431888923645019,
123
- "mean": 0.1286377784729004,
124
- "stdev": 0.17756446487443686,
125
- "p50": 0.03973424911499023,
126
- "p90": 0.30655107879638677,
127
- "p95": 0.39515829086303705,
128
- "p99": 0.46604406051635744,
129
  "values": [
130
- 0.4837655029296875,
131
- 0.040729442596435544,
132
- 0.03973424911499023,
133
- 0.03941136932373047,
134
- 0.0395483283996582
135
  ]
136
  },
137
  "throughput": {
138
  "unit": "samples/s",
139
- "value": 77.73766088557461
140
  },
141
  "energy": null,
142
  "efficiency": null
@@ -144,30 +144,30 @@
144
  "warmup": {
145
  "memory": {
146
  "unit": "MB",
147
- "max_ram": 1149.067264,
148
- "max_global_vram": 3337.035776,
149
- "max_process_vram": 331613.26592,
150
- "max_reserved": 2894.06976,
151
  "max_allocated": 2506.73664
152
  },
153
  "latency": {
154
  "unit": "s",
155
  "count": 2,
156
- "total": 0.5244949455261231,
157
- "mean": 0.26224747276306154,
158
- "stdev": 0.22151803016662597,
159
- "p50": 0.26224747276306154,
160
- "p90": 0.43946189689636234,
161
- "p95": 0.46161369991302487,
162
- "p99": 0.479335142326355,
163
  "values": [
164
- 0.4837655029296875,
165
- 0.040729442596435544
166
  ]
167
  },
168
  "throughput": {
169
  "unit": "samples/s",
170
- "value": 15.252768531401511
171
  },
172
  "energy": null,
173
  "efficiency": null
@@ -175,31 +175,31 @@
175
  "train": {
176
  "memory": {
177
  "unit": "MB",
178
- "max_ram": 1149.067264,
179
- "max_global_vram": 3337.035776,
180
- "max_process_vram": 331613.26592,
181
- "max_reserved": 2894.06976,
182
  "max_allocated": 2506.73664
183
  },
184
  "latency": {
185
  "unit": "s",
186
  "count": 3,
187
- "total": 0.1186939468383789,
188
- "mean": 0.0395646489461263,
189
- "stdev": 0.00013231933614996267,
190
- "p50": 0.0395483283996582,
191
- "p90": 0.03969706497192382,
192
- "p95": 0.03971565704345703,
193
- "p99": 0.039730530700683594,
194
  "values": [
195
- 0.03973424911499023,
196
- 0.03941136932373047,
197
- 0.0395483283996582
198
  ]
199
  },
200
  "throughput": {
201
  "unit": "samples/s",
202
- "value": 151.6505304563671
203
  },
204
  "energy": null,
205
  "efficiency": null
 
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.4.0+rocm6.1",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
 
77
  "start_method": "spawn"
78
  },
79
  "environment": {
80
+ "cpu": " AMD EPYC 7763 64-Core Processor",
81
+ "cpu_count": 128,
82
+ "cpu_ram_mb": 1082015.256576,
83
  "system": "Linux",
84
  "machine": "x86_64",
85
+ "platform": "Linux-5.15.0-101-generic-x86_64-with-glibc2.35",
86
  "processor": "x86_64",
87
  "python_version": "3.10.12",
88
  "gpu": [
 
91
  "gpu_count": 1,
92
  "gpu_vram_mb": 68702699520,
93
  "optimum_benchmark_version": "0.4.0",
94
+ "optimum_benchmark_commit": null,
95
+ "transformers_version": "4.44.2",
96
  "transformers_commit": null,
97
  "accelerate_version": "0.33.0",
98
  "accelerate_commit": null,
99
+ "diffusers_version": "0.30.1",
100
  "diffusers_commit": null,
101
  "optimum_version": null,
102
  "optimum_commit": null,
103
+ "timm_version": "1.0.9",
104
  "timm_commit": null,
105
  "peft_version": null,
106
  "peft_commit": null
 
110
  "overall": {
111
  "memory": {
112
  "unit": "MB",
113
+ "max_ram": 1636.184064,
114
+ "max_global_vram": 0.0,
115
+ "max_process_vram": 0.0,
116
+ "max_reserved": 2889.875456,
117
  "max_allocated": 2506.73664
118
  },
119
  "latency": {
120
  "unit": "s",
121
  "count": 5,
122
+ "total": 0.7932766151428222,
123
+ "mean": 0.15865532302856444,
124
+ "stdev": 0.2205882731889155,
125
+ "p50": 0.04830768585205078,
126
+ "p90": 0.37946298522949223,
127
+ "p95": 0.48964723358154283,
128
+ "p99": 0.5777946322631835,
129
  "values": [
130
+ 0.5998314819335937,
131
+ 0.04891024017333984,
132
+ 0.04830768585205078,
133
+ 0.04812848281860352,
134
+ 0.04809872436523437
135
  ]
136
  },
137
  "throughput": {
138
  "unit": "samples/s",
139
+ "value": 63.02971630015585
140
  },
141
  "energy": null,
142
  "efficiency": null
 
144
  "warmup": {
145
  "memory": {
146
  "unit": "MB",
147
+ "max_ram": 1636.184064,
148
+ "max_global_vram": 0.0,
149
+ "max_process_vram": 0.0,
150
+ "max_reserved": 2889.875456,
151
  "max_allocated": 2506.73664
152
  },
153
  "latency": {
154
  "unit": "s",
155
  "count": 2,
156
+ "total": 0.6487417221069336,
157
+ "mean": 0.3243708610534668,
158
+ "stdev": 0.27546062088012696,
159
+ "p50": 0.3243708610534668,
160
+ "p90": 0.5447393577575683,
161
+ "p95": 0.572285419845581,
162
+ "p99": 0.5943222695159912,
163
  "values": [
164
+ 0.5998314819335937,
165
+ 0.04891024017333984
166
  ]
167
  },
168
  "throughput": {
169
  "unit": "samples/s",
170
+ "value": 12.331563898832055
171
  },
172
  "energy": null,
173
  "efficiency": null
 
175
  "train": {
176
  "memory": {
177
  "unit": "MB",
178
+ "max_ram": 1636.184064,
179
+ "max_global_vram": 0.0,
180
+ "max_process_vram": 0.0,
181
+ "max_reserved": 2889.875456,
182
  "max_allocated": 2506.73664
183
  },
184
  "latency": {
185
  "unit": "s",
186
  "count": 3,
187
+ "total": 0.14453489303588868,
188
+ "mean": 0.04817829767862956,
189
+ "stdev": 9.229433334967446e-05,
190
+ "p50": 0.04812848281860352,
191
+ "p90": 0.04827184524536133,
192
+ "p95": 0.04828976554870605,
193
+ "p99": 0.04830410179138184,
194
  "values": [
195
+ 0.04830768585205078,
196
+ 0.04812848281860352,
197
+ 0.04809872436523437
198
  ]
199
  },
200
  "throughput": {
201
  "unit": "samples/s",
202
+ "value": 124.53740146699744
203
  },
204
  "energy": null,
205
  "efficiency": null