Update app.py
app.py CHANGED
@@ -29,12 +29,16 @@ def models(text, model="Mixtral 8x7B"):
         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
-        if not response.token.text == "</s>":
-            output += response.token.text
+        if "Phi" in model:
+            if not response.token.text == "<|assistant|>":
+                output += response.token.text
+        else:
+            if not response.token.text == "</s>":
+                output += response.token.text
     return output
 
 description="""# Chat GO
-### Inspired from
+### Inspired from Google Go"""
 
 demo = gr.Interface(description=description,fn=models, inputs=["text", gr.Dropdown([ 'Mixtral 8x7B','Llama 3 8B','Mistral 7B v0.3','Phi 3 mini', ], value="Mistral 7B v0.3", label="Select Model") ], outputs="text", live=True, batch=True, max_batch_size=1000)
 demo.launch()
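
For context on what the hunk changes: models streams tokens from a text-generation endpoint, and the new branch drops each model family's own special token (<|assistant|> for Phi 3, </s> for the other models) instead of filtering </s> unconditionally. Below is a minimal sketch of how the updated loop might sit in the full function, assuming the usual huggingface_hub InferenceClient streaming pattern; the model-ID mapping, prompt handling, and generation settings are illustrative assumptions, not taken from the repo.

# Hypothetical surrounding code; only the filtering loop mirrors the diff above.
from huggingface_hub import InferenceClient

# Assumed mapping from the dropdown labels to hosted model repos.
MODEL_IDS = {
    "Mixtral 8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Llama 3 8B": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Mistral 7B v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Phi 3 mini": "microsoft/Phi-3-mini-4k-instruct",
}

def models(text, model="Mixtral 8x7B"):
    client = InferenceClient(MODEL_IDS[model])
    generate_kwargs = dict(max_new_tokens=512, temperature=0.7)  # assumed values
    formatted_prompt = text  # the real app presumably applies a chat template here

    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        # Phi 3 emits its role marker as a literal token, so skip it;
        # the other models are filtered on the </s> end-of-sequence token as before.
        if "Phi" in model:
            if not response.token.text == "<|assistant|>":
                output += response.token.text
        else:
            if not response.token.text == "</s>":
                output += response.token.text
    return output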