try different layout
app.py (CHANGED)

@@ -149,7 +149,6 @@ def get_plot(model_name, plot_eager, generate_type):
     if plot_eager == "No":
         df = df[df["framework"] != "TF (Eager Execition)"]
 
-    plt.figure(figsize=(200 / FIG_DPI, 200 / FIG_DPI))
     g = sns.catplot(
         data=df,
         kind="bar",
@@ -205,34 +204,35 @@ with demo:
                     interactive=True
                 )
             plot_fn = functools.partial(get_plot, generate_type="Greedy Search")
-            plot = gr.Image(value=plot_fn("T5 Small", "Yes")
+            plot = gr.Image(value=plot_fn("T5 Small", "Yes")) # Show plot when the gradio app is initialized
             model_selector.change(fn=plot_fn, inputs=[model_selector, eager_enabler], outputs=plot)
             eager_enabler.change(fn=plot_fn, inputs=[model_selector, eager_enabler], outputs=plot)
         with gr.TabItem("Sample"):
-            gr.Markdown(
-                """
-                ### Sample benchmark parameters
-                - `max_new_tokens = 128`;
-                - `temperature = 2.0`;
-                - `top_k = 50`;
-                - `pad_to_multiple_of = 64` for Tensorflow XLA models. Others do not pad (input prompts between 2 and 33 tokens).
-                """
-            )
-            with gr.Row():
-                model_selector = gr.Dropdown(
-                    choices=["DistilGPT2", "GPT2", "OPT-1.3B", "GPTJ-6B", "T5 Small", "T5 Base", "T5 Large", "T5 3B"],
-                    value="T5 Small",
-                    label="Model",
-                    interactive=True,
-                )
-                eager_enabler = gr.Radio(
-                    ["Yes", "No"],
-                    value="Yes",
-                    label="Plot TF Eager Execution?",
-                    interactive=True
-                )
             plot_fn = functools.partial(get_plot, generate_type="Sample")
-            plot = gr.Image(value=plot_fn("T5 Small", "Yes")
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown(
+                        """
+                        ### Sample benchmark parameters
+                        - `max_new_tokens = 128`;
+                        - `temperature = 2.0`;
+                        - `top_k = 50`;
+                        - `pad_to_multiple_of = 64` for Tensorflow XLA models. Others do not pad (input prompts between 2 and 33 tokens).
+                        """
+                    )
+                model_selector = gr.Dropdown(
+                    choices=["DistilGPT2", "GPT2", "OPT-1.3B", "GPTJ-6B", "T5 Small", "T5 Base", "T5 Large", "T5 3B"],
+                    value="T5 Small",
+                    label="Model",
+                    interactive=True,
+                )
+                eager_enabler = gr.Radio(
+                    ["Yes", "No"],
+                    value="Yes",
+                    label="Plot TF Eager Execution?",
+                    interactive=True
+                )
+            plot = gr.Image(value=plot_fn("T5 Small", "Yes")) # Show plot when the gradio app is initialized
             model_selector.change(fn=plot_fn, inputs=[model_selector, eager_enabler], outputs=plot)
             eager_enabler.change(fn=plot_fn, inputs=[model_selector, eager_enabler], outputs=plot)
         with gr.TabItem("Beam Search"):
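For readers skimming the diff, the layout the "Sample" tab moves to is: a `gr.Row` holding a `gr.Column` with the benchmark-parameter Markdown next to the model and eager-execution controls, followed by a `gr.Image` whose value is pre-computed so a plot is visible as soon as the app loads. The sketch below illustrates that pattern in isolation; it is not the Space's actual app.py. `make_plot` is a hypothetical stand-in for the real `get_plot`/seaborn code, and the component arguments are trimmed down.

```python
import functools

import gradio as gr
import matplotlib
matplotlib.use("Agg")  # headless backend, as on a hosted Space
import matplotlib.pyplot as plt


def make_plot(model_name, plot_eager, generate_type):
    """Hypothetical stand-in for get_plot(): draws a placeholder bar chart and returns
    the saved PNG path, which gr.Image accepts both as an initial value and as the
    return value of a .change() callback."""
    fig, ax = plt.subplots(figsize=(4, 3), dpi=100)
    ax.bar(["TF (XLA)", "PyTorch"], [1.0, 1.4])  # placeholder numbers, not benchmark data
    ax.set_title(f"{generate_type}: {model_name} (TF eager plotted: {plot_eager})")
    path = "benchmark_plot.png"
    fig.savefig(path, bbox_inches="tight")
    plt.close(fig)
    return path


demo = gr.Blocks()
with demo:
    with gr.Tabs():
        with gr.TabItem("Sample"):
            plot_fn = functools.partial(make_plot, generate_type="Sample")
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### Sample benchmark parameters\n- `max_new_tokens = 128`")
                model_selector = gr.Dropdown(
                    choices=["T5 Small", "T5 Base"], value="T5 Small", label="Model", interactive=True
                )
                eager_enabler = gr.Radio(
                    ["Yes", "No"], value="Yes", label="Plot TF Eager Execution?", interactive=True
                )
            # Pre-compute the plot so the image is populated when the app first loads.
            plot = gr.Image(value=plot_fn("T5 Small", "Yes"))
            model_selector.change(fn=plot_fn, inputs=[model_selector, eager_enabler], outputs=plot)
            eager_enabler.change(fn=plot_fn, inputs=[model_selector, eager_enabler], outputs=plot)

if __name__ == "__main__":
    demo.launch()
```

Computing the initial value at build time, as the commit's added comment notes, avoids showing an empty image until the user first touches a control; the two `.change()` handlers then re-render the plot whenever the model or the eager-execution toggle changes.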