Update app.py
app.py CHANGED
@@ -3,28 +3,35 @@ from huggingface_hub import InferenceClient
 
 client = InferenceClient("google/gemma-1.1-2b-it")
 
-
-
-
-
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    max_tokens
+):
     messages = []
-
-messages.append({"role": "user", "content": Query})
 
-
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
 
     for message in client.chat_completion(
         messages,
-        max_tokens=
+        max_tokens=1024,
         stream=True
     ):
         token = message.choices[0].delta.content
+        response += token
+        yield response
 
-
-
-
-description="# Chat GO\n### Enter your query and Press enter and get response faster than groq"
+with gr.Blocks() as demo:
+    gr.Markdown("# CHAT with AI faster Than Groq")
+    gr.ChatInterface(respond)
 
-
-demo.
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
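Pieced together, the post-commit app.py looks roughly like the sketch below. This is a reconstruction from the diff, not the verbatim file: the `import gradio as gr` line is assumed to sit above the hunk shown, and three deviations from the committed code are deliberate and marked in comments. The loop variable is renamed to `chunk` so it no longer shadows the `message` argument, an `if token:` guard handles stream chunks whose `delta.content` is None, and `max_tokens` gets a default since `gr.ChatInterface` only passes (message, history) when no additional inputs are configured.

# Sketch of the new app.py implied by the diff above, under the assumptions
# stated in the lead-in; `chunk`, the `if token:` guard, and the max_tokens
# default are editorial additions, not part of the commit.
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-1.1-2b-it")

def respond(message, history: list[tuple[str, str]], max_tokens=1024):
    # Rebuild the conversation from Gradio's (user, assistant) tuple history.
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream the completion; yielding the growing string re-renders the chat
    # bubble on every token, which is what makes the UI feel fast.
    response = ""
    for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True):
        token = chunk.choices[0].delta.content
        if token:  # delta.content can be None on some stream chunks
            response += token
        yield response

with gr.Blocks() as demo:
    gr.Markdown("# CHAT with AI faster Than Groq")
    gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()

One quirk of the committed version: respond accepts max_tokens as a parameter, but the chat_completion call hardcodes max_tokens=1024. Wiring the parameter through, as in the sketch, behaves identically here, since ChatInterface never supplies a value for it without an additional-inputs control.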