BenchmarkBot committed
Commit 5aba478 • Parent: 6203f23

add latency filter

Files changed (1)
app.py +6 -3
app.py CHANGED
@@ -126,10 +126,13 @@ def get_benchmark_table(bench_df):
 
 
 def get_benchmark_plot(bench_df):
+    # filter latency bigger than 150s
+    bench_df = bench_df[bench_df["generate.latency(s)"] <= 150]
+
     fig = px.scatter(
         bench_df,
         y="best_score",
-        x="generate.throughput(tokens/s)",
+        x="generate.latency(s)",
         size="generate.peak_memory(MB)",
         color="model_type",
         custom_data=list(ALL_COLUMNS_MAPPING.keys()),
@@ -137,7 +140,7 @@ def get_benchmark_plot(bench_df):
     )
     fig.update_layout(
         title={
-            "text": "Model Score vs. Latency vs. Memory",
+            "text": "Latency vs. Score vs. Memory",
             "y": 0.95,
             "x": 0.5,
             "xanchor": "center",
@@ -145,7 +148,7 @@ def get_benchmark_plot(bench_df):
         },
         xaxis_title="Generation Throughput (tokens/s)",
         yaxis_title="Open LLM Score (%)",
-        legend_title="Model Type",
+        legend_title="LLM Type",
         width=1200,
         height=600,
     )
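
For reference, below is a minimal, self-contained sketch of get_benchmark_plot as this commit leaves it. px (plotly.express), the column names, and ALL_COLUMNS_MAPPING come from the diff context; the contents of the mapping, the demo DataFrame, and the return statement are assumptions, and the diff elides at least one px.scatter argument after custom_data. Note also that the sketch relabels the x-axis to "Generation Latency (s)" to match the new latency axis; the commit itself still sets it to "Generation Throughput (tokens/s)".

import pandas as pd
import plotly.express as px

# Assumed stand-in for the app's column mapping; the real app.py defines it
# elsewhere with the full set of leaderboard columns.
ALL_COLUMNS_MAPPING = {
    "best_score": "Best Score (%)",
    "generate.latency(s)": "Latency (s)",
    "generate.peak_memory(MB)": "Peak Memory (MB)",
    "model_type": "Model Type",
}


def get_benchmark_plot(bench_df):
    # The filter added by this commit: keep only rows whose generation
    # latency is at most 150s, dropping slow outliers.
    bench_df = bench_df[bench_df["generate.latency(s)"] <= 150]

    fig = px.scatter(
        bench_df,
        y="best_score",
        x="generate.latency(s)",
        size="generate.peak_memory(MB)",
        color="model_type",
        custom_data=list(ALL_COLUMNS_MAPPING.keys()),
    )
    fig.update_layout(
        title={
            "text": "Latency vs. Score vs. Memory",
            "y": 0.95,
            "x": 0.5,
            "xanchor": "center",
        },
        # Relabeled here to match the new x-axis; the commit itself still
        # carries the old "Generation Throughput (tokens/s)" label.
        xaxis_title="Generation Latency (s)",
        yaxis_title="Open LLM Score (%)",
        legend_title="LLM Type",
        width=1200,
        height=600,
    )
    return fig  # assumed; the diff does not show the function's tail


# Toy usage on a hypothetical frame: the 500s row is filtered out before plotting.
demo_df = pd.DataFrame(
    {
        "best_score": [62.0, 55.5, 48.2],
        "generate.latency(s)": [12.3, 500.0, 88.0],
        "generate.peak_memory(MB)": [16000, 80000, 24000],
        "model_type": ["llama", "falcon", "mpt"],
    }
)
fig = get_benchmark_plot(demo_df)
fig.show()

The boolean-mask filter presumably exists so that a few very slow models do not stretch the latency axis and squeeze the remaining points against the left edge of the scatter plot.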