KingNish committed on
Commit
cfea855
1 Parent(s): 42047b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -15
app.py CHANGED
@@ -3,28 +3,35 @@ from huggingface_hub import InferenceClient
3
 
4
  client = InferenceClient("google/gemma-1.1-2b-it")
5
 
6
- system_instructions = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"
7
-
8
- def models(Query):
9
-
 
10
  messages = []
11
-
12
- messages.append({"role": "user", "content": Query})
13
 
14
- Response = ""
 
 
 
 
 
 
 
 
15
 
16
  for message in client.chat_completion(
17
  messages,
18
- max_tokens=2048,
19
  stream=True
20
  ):
21
  token = message.choices[0].delta.content
 
 
22
 
23
- Response += token
24
- yield Response
25
-
26
- description="# Chat GO\n### Enter your query and Press enter and get response faster than groq"
27
 
28
- demo = gr.Interface(description=description,fn=models, inputs=["text"], outputs="text")
29
- demo.queue(max_size=300000)
30
- demo.launch()
 
3
 
4
  client = InferenceClient("google/gemma-1.1-2b-it")
5
 
6
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens: int = 1024,
):
    """Stream a chat reply from the Gemma model via the Inference API.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turn pairs; empty/None entries are skipped.
    max_tokens : int
        Generation cap forwarded to the API. Bug fix: the original accepted
        this parameter but ignored it (hard-coded 1024 in the call); the
        default keeps old behavior while letting Gradio call
        ``respond(message, history)`` without extra inputs.

    Yields
    ------
    str
        The accumulated response text, re-yielded after each new token.
    """
    messages = []

    # Rebuild the full conversation so the model sees prior context.
    for user_turn, bot_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if bot_turn:
            messages.append({"role": "assistant", "content": bot_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Loop variable renamed from `message` — the original shadowed the
    # function parameter of the same name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on terminal/empty stream chunks;
        # the original `response += token` would raise TypeError there.
        if token:
            response += token
            yield response
31
 
32
# Build the Gradio UI.
# Bug fix: `with gr.Blocks as demo:` used the class object itself, which is
# not a context manager — it must be instantiated with `gr.Blocks()`;
# the original raised at import time.
with gr.Blocks() as demo:
    gr.Markdown("# CHAT with AI faster Than Groq")
    gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()