better chat interface
app.py CHANGED
@@ -22,7 +22,7 @@ phi2 = pipeline(
 )

 # Function that accepts a prompt and generates text using the phi2 pipeline
-def generate(prompt, chat_history, max_new_tokens):
+def generate(message, chat_history, max_new_tokens):

     instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
     final_prompt = f"Instruction: {instruction}\n"
@@ -31,7 +31,7 @@ def generate(prompt, chat_history, max_new_tokens):
         final_prompt += "User: " + sent + "\n"
         final_prompt += "Assistant: " + received + "\n"

-    final_prompt += "User: " + prompt + "\n"
+    final_prompt += "User: " + message + "\n"
     final_prompt += "Output:"

     # Streamer
@@ -40,7 +40,6 @@ def generate(prompt, chat_history, max_new_tokens):
     thread.start()

     generated_text = ""
-    chat_history.append((prompt, ""))
     for word in streamer:
         generated_text += word
         response = generated_text.strip()
@@ -51,10 +50,7 @@ def generate(prompt, chat_history, max_new_tokens):
         if "Assistant:" in response:
             response = response.split("Assistant:")[1].strip()

-
-        chat_history.append((prompt, response))
-
-        yield "", chat_history
+        yield response

 # Chat interface with gradio
 with gr.Blocks() as demo:
@@ -67,15 +63,11 @@ with gr.Blocks() as demo:

     tokens_slider = gr.Slider(8, 128, value=21, label="Maximum new tokens", info="A larger `max_new_tokens` parameter value gives you longer text responses but at the cost of a slower response time.")

-    chatbot = gr.
-
-
-
-
-
-    clear = gr.ClearButton([msg, chatbot])
-
-    btn.click(fn=generate, inputs=[msg, chatbot, tokens_slider], outputs=[msg, chatbot])
-    examples = gr.Examples(examples=["Who is Leonhard Euler?"], inputs=[msg])
+    chatbot = gr.ChatInterface(
+        fn=generate,
+        additional_inputs=[tokens_slider],
+        stop_btn=None,
+        examples=[["Who is Leonhard Euler?"]]
+    )

 demo.queue().launch()
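
Because the hunks show only the changed lines plus a few lines of context, the streamer construction (between the "# Streamer" comment and thread.start()), the history loop header, and a handful of lines between the two middle hunks never appear in the diff. Below is a minimal sketch of how the updated generate() plausibly reads end to end, assuming the standard transformers TextIteratorStreamer pattern; the microsoft/phi-2 model id, the tokenizer handling, and the streamer arguments are assumptions, not lines from this commit.

from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, pipeline

# Assumed setup; the commit's actual pipeline call sits above the first hunk.
model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
phi2 = pipeline("text-generation", model=model_id, tokenizer=tokenizer)

def generate(message, chat_history, max_new_tokens):
    instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
    final_prompt = f"Instruction: {instruction}\n"

    # Replay the conversation so far (loop header assumed from the body shown in the diff).
    for sent, received in chat_history:
        final_prompt += "User: " + sent + "\n"
        final_prompt += "Assistant: " + received + "\n"

    final_prompt += "User: " + message + "\n"
    final_prompt += "Output:"

    # Streamer: generation runs on a worker thread while this generator
    # drains decoded tokens as they arrive (construction assumed; the diff
    # elides these lines).
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    thread = Thread(
        target=phi2,
        args=(final_prompt,),
        kwargs={"max_new_tokens": max_new_tokens, "streamer": streamer},
    )
    thread.start()

    generated_text = ""
    for word in streamer:
        generated_text += word
        response = generated_text.strip()

        if "Assistant:" in response:
            response = response.split("Assistant:")[1].strip()

        yield response

For a one-turn history such as [("Hi", "Hello! How can I help?")] and the new message "Who is Leonhard Euler?", final_prompt is the instruction line followed by "User: Hi", "Assistant: Hello! How can I help?", "User: Who is Leonhard Euler?", and the trailing "Output:" cue for the model.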
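
The UI hunk works because gr.ChatInterface owns the conversation state that the old Blocks wiring managed by hand: it calls fn(message, history, *additional_inputs), and when fn is a generator it treats each yielded string as a longer version of the current reply, streaming it into the chat window. That is why generate() can drop chat_history.append(...) and the old yield "", chat_history, which had to clear the msg textbox and push the updated history into the Chatbot component via outputs=[msg, chatbot]. Here is a self-contained toy of the same wiring, with a hypothetical fake_generate standing in for the phi-2 pipeline:

import time

import gradio as gr

def fake_generate(message, chat_history, max_new_tokens):
    # Stand-in for the model: yield progressively longer strings and
    # ChatInterface streams them as the assistant's reply.
    reply = f"(echo, max_new_tokens={max_new_tokens}) {message}"
    partial = ""
    for ch in reply:
        partial += ch
        time.sleep(0.01)
        yield partial

with gr.Blocks() as demo:
    tokens_slider = gr.Slider(8, 128, value=21, label="Maximum new tokens")
    gr.ChatInterface(
        fn=fake_generate,
        additional_inputs=[tokens_slider],  # passed to fn after (message, history)
        stop_btn=None,
        examples=[["Who is Leonhard Euler?"]],
    )

demo.queue().launch()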