Using Gradio's exception handling instead of custom error textarea.
chat.py
CHANGED
@@ -4,6 +4,7 @@
 import traceback
 import gradio as gr
 import chat_client
+import json

 CHAT_URL='ws://chat.petals.ml/api/v2/generate'
 #CHAT_URL='ws://localhost:8000/api/v2/generate'
@@ -20,9 +21,8 @@ def generate(state, prompt, model, context, output, *args):
     state['generate'] = True

     try:
-
-
-    except BrokenPipeError:
+        yield from _generate(state, prompt, model, context, output, *args)
+    except (json.decoder.JSONDecodeError, BrokenPipeError):
         # Broken session, try to renew
         # TODO This is a bit fragile because of recursive call...
         print("Retrying session...")
@@ -47,9 +47,7 @@ def _generate(state, prompt, model, context, output, endseq, max_length,
             state['model'] = model
         except Exception:
             print(traceback.format_exc())
-
-                gr.update(visible=True, value=traceback.format_exc())
-            return
+            raise gr.Error(traceback.format_exc())
     else:
         context = ''

@@ -99,7 +97,7 @@ def _generate(state, prompt, model, context, output, endseq, max_length,
     output += prompt2

     # Update widgets even before we get the first response
-    yield state, state['history'] + [[prompt, '']], None, output
+    yield state, state['history'] + [[prompt, '']], None, output

     orig_history = state['history']
     new_line = ''
@@ -115,7 +113,7 @@ def _generate(state, prompt, model, context, output, endseq, max_length,

         if not state['generate']:
             client.close_session()
-            yield state, [], None, ''
+            yield state, [], None, ''
             # Stopping generation
             return

@@ -129,16 +127,16 @@ def _generate(state, prompt, model, context, output, endseq, max_length,
             if len(spl) > 1:
                 state['history'] = orig_history + [[prompt, new_line]]
                 output += new_line
-                yield state, state['history'], None, output
+                yield state, state['history'], None, output
                 # Stopping generation
                 return

             # Keep original history untouched as we're adding just
             # a chunks at one moment.
             state['history'] = orig_history + [[prompt, new_line]]
-            yield state, state['history'], None, output
+            yield state, state['history'], None, output

-    except BrokenPipeError:
+    except (json.decoder.JSONDecodeError, BrokenPipeError):
         # Session was interrupted
         # Handled in upstream func
         client.close_session()
@@ -153,15 +151,12 @@ def _generate(state, prompt, model, context, output, endseq, max_length,
         state['model'] = None

         print(traceback.format_exc())
-
-        yield state, state['history'], prompt, output, \
-            gr.update(visible=True, value=traceback.format_exc())
-        return
+        raise gr.Error(traceback.format_exc())

 def reset(state):
     """Resets the session and clears the chat window."""
     state.update(EMPTY_STATE)
-    return state, [], ''
+    return state, [], ''

 with gr.Blocks() as iface_chat:
     gr.Markdown("""**Let's talk to Bloom in a chat!**""")
@@ -198,7 +193,6 @@ with gr.Blocks() as iface_chat:
     chat = gr.Chatbot(label='Chat window')
     prompt = gr.Textbox(show_label=False, label='Prompt',
                         placeholder="Prompt Here and press Enter...").style(container=False)
-    error = gr.Textbox(label="Error log", visible=False, elem_id="error")

     with gr.Row():
         button_generate = gr.Button("Generate")
@@ -212,11 +206,11 @@ with gr.Blocks() as iface_chat:

     inputs = [state, prompt, model, context, output, endseq,
               max_length, do_sample, top_k, top_p, temperature]
-    outputs=[state, chat, prompt, output, error]
+    outputs=[state, chat, prompt, output]

     prompt.submit(generate, inputs=inputs, outputs=outputs)
     button_generate.click(generate, inputs=inputs, outputs=outputs)
-    button_reset.click(reset, inputs=[state], outputs=[state, chat, output, error])
+    button_reset.click(reset, inputs=[state], outputs=[state, chat, output])

     examples = gr.Examples(inputs=[context, prompt, model, do_sample, top_k, top_p, temperature],
                            examples=[
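For reference, a minimal sketch of the pattern this commit adopts. Raising gr.Error inside an event handler makes Gradio abort the event and surface the message in its built-in error popup, so no hidden error Textbox has to be threaded through every outputs list (which is why the final hunks drop the error widget and shrink the outputs lists). The handler and component names below (echo, demo) are illustrative, not from chat.py:

import traceback

import gradio as gr

def echo(prompt):
    # Illustrative handler: any failure is reported through Gradio's
    # built-in error popup instead of a dedicated error widget.
    try:
        if not prompt:
            raise ValueError("empty prompt")
        return prompt.upper()
    except Exception:
        # Keep the full traceback in the server log...
        print(traceback.format_exc())
        # ...and let Gradio display it to the user.
        raise gr.Error(traceback.format_exc())

with gr.Blocks() as demo:
    box = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Echo")
    box.submit(echo, inputs=[box], outputs=[out])

if __name__ == '__main__':
    demo.launch()

Showing the raw traceback via gr.Error mirrors what the commit does; in a polished UI you would more likely raise gr.Error with a short human-readable message and keep the traceback in the server log.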