Update app.py
app.py CHANGED
@@ -5,25 +5,25 @@ client = InferenceClient("google/gemma-1.1-2b-it")
 
 system_instructions = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"
 
-def models(
+def models(Query):
 
     messages = []
 
-    messages.append({"role": "user", "content":
+    messages.append({"role": "user", "content": Query})
 
-
+    Response = ""
 
     for message in client.chat_completion(
         messages,
-        max_tokens=
+        max_tokens=2048,
         stream=True
     ):
         token = message.choices[0].delta.content
 
-
-        yield
+        Response += token
+        yield Response
 
-description="# Chat GO\n###Enter your query and Press enter and get response faster than groq"
+description="# Chat GO\n### Enter your query and Press enter and get response faster than groq"
 
 demo = gr.Interface(description=description,fn=models, inputs=["text"], outputs="text")
 demo.queue(max_size=300000)
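For context, below is a minimal sketch of the complete app.py as it stands after this commit. The two imports and the closing demo.launch() are assumptions, since the hunk only covers lines 5-29 and its header shows the InferenceClient setup; the None-guard on the last stream chunk is likewise a defensive addition, not part of the commit. Note that system_instructions is defined but, as far as the diff shows, never prepended to the query.

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-1.1-2b-it")

system_instructions = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"

def models(Query):
    messages = []
    messages.append({"role": "user", "content": Query})
    Response = ""
    # stream=True makes chat_completion yield chunks; each chunk carries
    # the newest piece of generated text in choices[0].delta.content.
    for message in client.chat_completion(
        messages,
        max_tokens=2048,
        stream=True
    ):
        token = message.choices[0].delta.content
        # Guard: delta.content can arrive as None on the final chunk.
        # This is a defensive assumption, not part of the original diff.
        Response += token or ""
        yield Response  # yield the running string so the UI updates live

description="# Chat GO\n### Enter your query and Press enter and get response faster than groq"

demo = gr.Interface(description=description, fn=models, inputs=["text"], outputs="text")
demo.queue(max_size=300000)
demo.launch()  # assumption: not visible in the hunk

Because models is a generator, gr.Interface re-renders the text output on every yield, which is what produces the streaming effect; demo.queue(max_size=300000) only sets an (extremely generous) cap on how many requests may wait in Gradio's queue.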