IlyasMoutawwakil (HF staff) committed verified commit 014a13b (1 parent: 5ab3970)

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

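The commit message indicates the result file was pushed with huggingface_hub. Below is a minimal sketch of how such an upload could be reproduced; the local path and repo_id are placeholders (the target repository is not named in this commit), and the repo is assumed to be a dataset repository.

```python
from huggingface_hub import HfApi

api = HfApi()

# Placeholder local path and repo_id; only path_in_repo mirrors the commit message.
api.upload_file(
    path_or_fileobj="benchmark.json",  # local benchmark result (placeholder)
    path_in_repo="cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="<owner>/<benchmark-results>",      # assumption: not taken from this commit
    repo_type="dataset",                        # assumption: results stored in a dataset repo
    commit_message="Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub",
)
```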
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -11,7 +11,7 @@
     "model": "openai-community/gpt2",
     "processor": "openai-community/gpt2",
     "device": "cuda",
-    "device_ids": "6",
+    "device_ids": "4",
     "seed": 42,
     "inter_op_num_threads": null,
     "intra_op_num_threads": null,
@@ -117,33 +117,33 @@
     "overall": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1303.412736,
+            "max_ram": 1303.289856,
             "max_global_vram": 68702.69952,
-            "max_process_vram": 319627.329536,
+            "max_process_vram": 364232.323072,
             "max_reserved": 2894.06976,
             "max_allocated": 2506.73664
         },
         "latency": {
             "unit": "s",
             "count": 5,
-            "total": 0.7641195831298828,
-            "mean": 0.15282391662597655,
-            "stdev": 0.2228617624438391,
-            "p50": 0.0415645523071289,
-            "p90": 0.3759261337280274,
-            "p95": 0.4872364738464354,
-            "p99": 0.5762847459411621,
+            "total": 0.7360632476806641,
+            "mean": 0.14721264953613283,
+            "stdev": 0.2113408385716852,
+            "p50": 0.04165745544433594,
+            "p90": 0.3589484725952149,
+            "p95": 0.4644204643249511,
+            "p99": 0.5487980577087402,
             "values": [
-                0.5985468139648438,
-                0.041995113372802734,
-                0.0415645523071289,
-                0.04107223129272461,
-                0.040940872192382814
+                0.5698924560546875,
+                0.042532497406005856,
+                0.04165745544433594,
+                0.04142273712158203,
+                0.040558101654052735
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 65.43478416715456
+            "value": 67.92894517903189
         },
         "energy": null,
         "efficiency": null
@@ -151,30 +151,30 @@
     "warmup": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1303.412736,
+            "max_ram": 1303.289856,
             "max_global_vram": 68702.69952,
-            "max_process_vram": 319627.329536,
+            "max_process_vram": 364232.323072,
             "max_reserved": 2894.06976,
             "max_allocated": 2506.73664
         },
         "latency": {
             "unit": "s",
             "count": 2,
-            "total": 0.6405419273376465,
-            "mean": 0.32027096366882324,
-            "stdev": 0.27827585029602053,
-            "p50": 0.32027096366882324,
-            "p90": 0.5428916439056397,
-            "p95": 0.5707192289352417,
-            "p99": 0.5929812969589233,
+            "total": 0.6124249534606934,
+            "mean": 0.3062124767303467,
+            "stdev": 0.26367997932434084,
+            "p50": 0.3062124767303467,
+            "p90": 0.5171564601898193,
+            "p95": 0.5435244581222534,
+            "p99": 0.5646188564682006,
             "values": [
-                0.5985468139648438,
-                0.041995113372802734
+                0.5698924560546875,
+                0.042532497406005856
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 12.489424436666095
+            "value": 13.062825011935859
         },
         "energy": null,
         "efficiency": null
@@ -182,31 +182,31 @@
     "train": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1303.412736,
+            "max_ram": 1303.289856,
             "max_global_vram": 68702.69952,
-            "max_process_vram": 319627.329536,
+            "max_process_vram": 364232.323072,
            "max_reserved": 2894.06976,
             "max_allocated": 2506.73664
         },
         "latency": {
             "unit": "s",
             "count": 3,
-            "total": 0.12357765579223634,
-            "mean": 0.04119255193074545,
-            "stdev": 0.0002684548544699573,
-            "p50": 0.04107223129272461,
-            "p90": 0.041466088104248046,
-            "p95": 0.041515320205688475,
-            "p99": 0.04155470588684082,
+            "total": 0.12363829421997072,
+            "mean": 0.04121276473999024,
+            "stdev": 0.00047273035994881894,
+            "p50": 0.04142273712158203,
+            "p90": 0.041610511779785156,
+            "p95": 0.04163398361206055,
+            "p99": 0.04165276107788086,
             "values": [
-                0.0415645523071289,
-                0.04107223129272461,
-                0.040940872192382814
+                0.04165745544433594,
+                0.04142273712158203,
+                0.040558101654052735
            ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 145.65739966990728
+            "value": 145.58596196721504
         },
         "energy": null,
         "efficiency": null