Spaces: BenchmarkBot
Commit 97058d0 • Parent(s): 89517bf
BenchmarkBot committed: added optimizations to control panel

app.py
CHANGED
@@ -31,8 +31,8 @@ COLUMNS_MAPPING = {
     "backend.torch_dtype": "Load Dtype 🔥",
     "optimizations": "Optimizations 🛠️",
     #
-    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
     "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
+    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
     "average": "Average Open LLM Score ⬆️",
     #
     "num_parameters": "#️⃣ Parameters",
@@ -67,11 +67,7 @@ def get_benchmark_df(benchmark="1xA100-80GB"):
     bench_df["optimizations"] = bench_df[
         ["backend.bettertransformer", "backend.load_in_8bit", "backend.load_in_4bit"]
     ].apply(
-        lambda x: "
-        if x[0] == True
-        else (
-            "LLM.int8" if x[1] == True else ("NF4" if x[2] == True else "")
-        ),
+        lambda x: ", ".join([opt for opt in x.index if x[opt] == True]),
         axis=1,
     )
 
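As a side note, here is a minimal sketch (with made-up rows, not real benchmark data) of what the new row-wise join above produces: the "optimizations" column ends up holding the comma-separated names of whichever boolean backend columns are true for that row.

import pandas as pd

# Hypothetical rows standing in for bench_df; only the three boolean columns matter here.
bench_df = pd.DataFrame(
    {
        "backend.bettertransformer": [True, False, False],
        "backend.load_in_8bit": [False, True, False],
        "backend.load_in_4bit": [False, False, False],
    }
)

# Same expression as the new lambda: join the names of the columns that are True in each row.
bench_df["optimizations"] = bench_df[
    ["backend.bettertransformer", "backend.load_in_8bit", "backend.load_in_4bit"]
].apply(lambda x: ", ".join([opt for opt in x.index if x[opt] == True]), axis=1)

print(bench_df["optimizations"].tolist())
# ['backend.bettertransformer', 'backend.load_in_8bit', '']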
@@ -151,13 +147,22 @@ def get_benchmark_plot(bench_df):
     return fig
 
 
-def filter_query(text, backends, datatypes, threshold, benchmark="1xA100-80GB"):
+def filter_query(
+    text, backends, datatypes, optimizations, threshold, benchmark="1xA100-80GB"
+):
     raw_df = get_benchmark_df(benchmark=benchmark)
 
     filtered_df = raw_df[
         raw_df["model"].str.lower().str.contains(text.lower())
         & raw_df["backend.name"].isin(backends)
         & raw_df["backend.torch_dtype"].isin(datatypes)
+        & pd.concat(
+            [
+                raw_df["optimizations"].str.contains(optimization)
+                for optimization in optimizations
+            ],
+            axis=1,
+        ).any(axis=1)
+        & (raw_df["average"] >= threshold)
     ]
 
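For reference, a small self-contained sketch of the mask built by the new pd.concat(...).any(axis=1) clause above: one boolean Series per selected optimization, stacked column-wise and OR-reduced per row, so a row passes if its optimizations string contains at least one of the selected substrings. The values below are illustrative, not the app's actual choices.

import pandas as pd

# Toy stand-in for raw_df; only the column used by the new mask is included.
raw_df = pd.DataFrame(
    {"optimizations": ["backend.load_in_8bit", "", "backend.bettertransformer"]}
)
optimizations = ["load_in_8bit", "bettertransformer"]  # illustrative selection

# One boolean Series per selection, concatenated as columns, then any(axis=1) per row.
mask = pd.concat(
    [raw_df["optimizations"].str.contains(optimization) for optimization in optimizations],
    axis=1,
).any(axis=1)

print(mask.tolist())  # [True, False, True]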
@@ -191,6 +196,8 @@ with demo:
                 info="🔍 Search for a model name",
                 elem_id="search-bar",
             )
+
+        with gr.Row():
             backend_checkboxes = gr.CheckboxGroup(
                 label="Backends",
                 choices=["pytorch", "onnxruntime"],
@@ -205,7 +212,16 @@ with demo:
                 info="⚙️ Select the load datatypes",
                 elem_id="datatype-checkboxes",
             )
-            score_slider = gr.Slider(
+            optimizations_checkboxes = gr.CheckboxGroup(
+                label="Optimizations 🛠️",
+                choices=["BetterTransformer", "LLM.int8", "NF4"],
+                value=[],
+                info="⚙️ Select the optimizations",
+                elem_id="optimizations-checkboxes",
+            )
+
+        with gr.Row():
+            score_slider = gr.Slider(
                 label="Average Open LLM Score",
                 info="🎚️ Slide to minimum Average Open LLM score",
                 value=0.0,
@@ -213,9 +229,9 @@ with demo:
             )
 
         with gr.Row():
-
+            filter_button = gr.Button(
                 value="Filter",
-                elem_id="
+                elem_id="filter-button",
             )
 
         # leaderboard tabs
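A minimal sketch (toy components, not the leaderboard layout) of how the new gr.CheckboxGroup feeds a callback: its value arrives as the list of ticked choices, which is the optimizations argument filter_query receives.

import gradio as gr

def show_selection(optimizations):
    # `optimizations` is the list of ticked choices, e.g. ["LLM.int8", "NF4"].
    return ", ".join(optimizations) or "(none selected)"

with gr.Blocks() as demo:
    optimizations_checkboxes = gr.CheckboxGroup(
        label="Optimizations",
        choices=["BetterTransformer", "LLM.int8", "NF4"],
        value=[],
    )
    selected = gr.Textbox(label="Selected")
    optimizations_checkboxes.change(show_selection, [optimizations_checkboxes], [selected])

if __name__ == "__main__":
    demo.launch()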
@@ -242,9 +258,15 @@ with demo:
                 show_label=False,
             )
 
-
+        filter_button.click(
             filter_query,
-            [
+            [
+                search_bar,
+                backend_checkboxes,
+                datatype_checkboxes,
+                optimizations_checkboxes,
+                score_slider,
+            ],
             [single_A100_leaderboard, single_A100_plotly],
         )
 
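Finally, a minimal sketch of the filter_button.click wiring pattern used above, with hypothetical components and a stand-in callback (nothing here is taken from the rest of app.py): clicking the button calls the function with the listed inputs' values and writes its return values into the listed outputs.

import gradio as gr

def filter_demo(text, threshold):
    # Hypothetical callback standing in for filter_query.
    return f"query={text!r}, minimum score={threshold}"

with gr.Blocks() as demo:
    search_bar = gr.Textbox(label="Search")
    score_slider = gr.Slider(minimum=0, maximum=100, value=0.0, label="Minimum score")
    filter_button = gr.Button(value="Filter")
    result = gr.Textbox(label="Result")

    # Same pattern as the new wiring above: fn, inputs list, outputs list.
    filter_button.click(filter_demo, [search_bar, score_slider], [result])

if __name__ == "__main__":
    demo.launch()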