# dummyLLM / app.py
# Author: eduardo-alvarez
# Commit: 7eaaffa (verified) — "Update app.py"
import gradio as gr
import time
def simulate_response(prompt, expected_output, words_per_second, history):
    """Stream a canned reply into the chat history, one word at a time.

    Args:
        prompt: The user's message to add to the conversation.
        expected_output: The pre-written "LLM" reply to stream back.
        words_per_second: Streaming speed; values <= 0 fall back to a
            near-instant 100 words/sec so the UI never stalls.
        history: Chatbot history as a list of (user, assistant) tuples;
            mutated in place, as Gradio streaming callbacks expect.

    Yields:
        The updated history — once right after the prompt is added, then
        again after each additional word of the simulated reply.
    """
    delay = 1.0 / words_per_second if words_per_second > 0 else 0.01
    # Add the user prompt with an empty reply slot.
    history.append((prompt, ""))
    # Yield immediately so the prompt is shown even when the simulated
    # reply is empty (the loop below would otherwise never yield).
    yield history
    # Stream the simulated response word by word, joining instead of
    # repeated "+="/strip() so the partial text is always well-formed.
    words = []
    for word in expected_output.split():
        words.append(word)
        history[-1] = (prompt, " ".join(words))
        time.sleep(delay)
        yield history
# Assemble the demo UI: a chat pane on top, then the scripted-exchange
# inputs and a speed control, wired to the streaming simulator.
with gr.Blocks(theme="default") as demo:
    gr.Markdown("## 🧠 Simulated Chatbot (LLM Style)", elem_id="title")

    # Transcript pane that the generator updates incrementally.
    chat_window = gr.Chatbot(show_label=False, height=800)
    run_button = gr.Button("Simulate Chat")

    # The scripted user prompt and the canned reply, side by side.
    with gr.Row():
        user_box = gr.Textbox(label="User Prompt", placeholder="Type your prompt here")
        reply_box = gr.Textbox(label="Expected Output", placeholder="Type simulated response here")

    speed_slider = gr.Slider(
        minimum=1,
        maximum=5000,
        value=5,
        step=1,
        label="Simulated Words per Second",
    )

    run_button.click(
        fn=simulate_response,
        inputs=[user_box, reply_box, speed_slider, chat_window],
        outputs=chat_window,
    )

demo.launch()