Spaces:
Sleeping
Commit · 1c3cdd1
1 Parent(s): 2707100
update
app.py
CHANGED
@@ -20,34 +20,6 @@ system_instructions = ("[SYSTEM] You are an assistant ."
     "Begin with a greeting if the user initiates the conversation. "
     "Here is the user's query:[QUESTION] ")
 
-def models(text, model="Mixtral 8x7B"):
-
-    client = client_fn(model)
-
-    generate_kwargs = dict(
-        max_new_tokens=100,
-        do_sample=True,
-    )
-
-    formatted_prompt = system_instructions1 + text + "[ANSWER]"
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-    for response in stream:
-        output+=response.token.text
-    if output.endswith("<|assistant|>"):
-        output = output[:-13]
-    elif output.endswith("</s>"):
-        output = output[:-4]
-    return output
-
-description="""# Chat GO
-### Inspired from Google Go"""
-
-demo = gr.Interface(description=description, fn=models, inputs=["text", gr.Dropdown(['Mixtral 8x7B', 'Nous Hermes Mixtral 8x7B DPO', 'StarChat2 15b', 'Mistral 7B v0.3', 'Phi 3 mini'], value="Mistral 7B v0.3", label="Select Model")], outputs="text", live=True, batch=True, max_batch_size=10000)
-demo.queue(max_size=300000)
-demo.launch()
-
-
 # Function to generate model responses
 def models(text, model="Mixtral 8x7B"):
     client = client_fn(model)
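The hunk deletes an earlier, duplicate definition of models (along with the description and launch code), keeping the copy that starts at the trailing context lines. The deleted body follows the standard huggingface_hub streaming pattern: format a prompt, stream tokens, concatenate, then trim stop strings. Here is a minimal self-contained sketch of that pattern, assuming client_fn simply maps a dropdown label to an InferenceClient; the MODEL_IDS table and repo IDs are illustrative assumptions, not taken from this Space.

from huggingface_hub import InferenceClient

# Illustrative label -> repo-ID table; the real client_fn lives elsewhere in app.py.
MODEL_IDS = {
    "Mixtral 8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Mistral 7B v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
}

def client_fn(model: str) -> InferenceClient:
    return InferenceClient(MODEL_IDS[model])

def generate(text: str, model: str = "Mixtral 8x7B") -> str:
    client = client_fn(model)
    prompt = "[SYSTEM] You are an assistant. Here is the user's query:[QUESTION] " + text + "[ANSWER]"
    # stream=True makes text_generation return an iterator; details=True makes
    # each chunk a TextGenerationStreamOutput whose .token.text holds the new text.
    stream = client.text_generation(
        prompt,
        max_new_tokens=100,
        do_sample=True,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = "".join(chunk.token.text for chunk in stream)
    # Trim chat-template stop strings the endpoint may echo back, rather than
    # using the hard-coded [:-13] / [:-4] slices from the deleted code.
    for stop in ("<|assistant|>", "</s>"):
        if output.endswith(stop):
            output = output[: -len(stop)]
    return output

Note that details=True is what exposes the token-level objects; with stream=True and details=False the iterator yields plain strings instead.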
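The deleted launch code also enabled request batching (batch=True, max_batch_size=10000) on top of a large queue. Under Gradio's batching contract, the function receives one list per input component and must return a list of lists, one inner list per output component, which the scalar models signature did not do. A sketch of batch-correct wiring, reusing the generate helper above; the smaller max_batch_size is an illustrative choice, not the Space's setting.

import gradio as gr

# With batch=True, Gradio groups up to max_batch_size queued requests and
# calls the function once with a list of values per input component.
def models_batched(texts, model_names):
    results = [generate(t, m) for t, m in zip(texts, model_names)]
    return [results]  # one inner list per output component

demo = gr.Interface(
    fn=models_batched,
    inputs=["text",
            gr.Dropdown(["Mixtral 8x7B", "Mistral 7B v0.3"],
                        value="Mistral 7B v0.3", label="Select Model")],
    outputs="text",
    batch=True,
    max_batch_size=16,  # illustrative; the Space set 10000
)
demo.queue(max_size=300000)
demo.launch()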