KingNish committed on
Commit
42047b7
1 Parent(s): b19dbe6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -5,25 +5,25 @@ client = InferenceClient("google/gemma-1.1-2b-it")
5
 
6
# NOTE(review): this prompt string is not referenced anywhere in the visible
# code (models() builds its own inline system text) — confirm it is used
# elsewhere in the file, otherwise it is dead and can be removed.
system_instructions = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"
7
 
8
def models(message):
    """Stream a short answer to *message* from the chat model.

    The user's text is wrapped with an inline system prompt and sent as a
    single user message; the reply is streamed token by token.

    Args:
        message: The user's question.

    Yields:
        str: The response accumulated so far, updated once per streamed token.
    """
    messages = []
    messages.append({"role": "user", "content": f"[SYSTEM] You are ASSISTANT who answer question asked by user in short and concise manner. [USER] {message}"})

    response = ""
    # Loop variable renamed: the original reused `message`, shadowing the
    # function parameter after the first iteration.
    for chunk in client.chat_completion(
        messages,
        max_tokens=200,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # Terminal/keep-alive stream chunks can carry delta.content == None;
        # guard so the concatenation does not raise TypeError mid-stream.
        if token is not None:
            response += token
            yield response
25
 
26
# Markdown shown above the interface. Fix: "###Enter" (no space after the
# hashes) is not a valid Markdown heading and renders as literal text.
description = "# Chat GO\n### Enter your query and Press enter and get response faster than groq"

demo = gr.Interface(description=description, fn=models, inputs=["text"], outputs="text")
demo.queue(max_size=300000)
# NOTE(review): no demo.launch() is visible in this chunk — confirm the app is
# launched elsewhere, otherwise the interface never starts serving.
 
5
 
6
# NOTE(review): this prompt string is not referenced by the visible code
# (models() sends the raw query only) — confirm it is used elsewhere in the
# file, otherwise it is dead and can be removed.
system_instructions = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"
7
 
8
def models(Query):
    """Stream a chat completion for *Query*, yielding the partial response.

    Args:
        Query: The user's question, sent verbatim as a single user message.

    Yields:
        str: The response accumulated so far, updated once per streamed token.
    """
    messages = [{"role": "user", "content": Query}]

    Response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=2048,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # Terminal/keep-alive stream chunks can carry delta.content == None;
        # guard so the concatenation does not raise TypeError mid-stream.
        if token is not None:
            Response += token
            yield Response
25
 
26
# Markdown blurb rendered above the interface.
description = "# Chat GO\n### Enter your query and Press enter and get response faster than groq"

# Simple one-text-in / one-text-out UI around the streaming generator.
demo = gr.Interface(
    fn=models,
    inputs=["text"],
    outputs="text",
    description=description,
)
demo.queue(max_size=300000)