HarshanaLF committed on
Commit
70ab5a1
1 Parent(s): a434a54
Files changed (1)
  1. app.py +32 -2
app.py CHANGED
@@ -12,12 +12,42 @@ def client_fn(model):
     }
     return InferenceClient(model_map.get(model, "mistralai/Mixtral-8x7B-Instruct-v0.1"))
 
-system_instructions = ("[SYSTEM] You are an assistant designed to provide information, answer questions, and offer helpful advice. "
+system_instructions = ("[SYSTEM] You are an assistant. "
+                       "Your task is to answer the question. "
+                       "Keep the conversation very short, clear, and concise. "
                        "Respond naturally and concisely to the user's queries. "
-                       "Answer questions directly and avoid including unnecessary tags or information. "
+                       "Avoid introductions and answer the query directly; answer only the question asked by the user and do not add unnecessary things. "
                        "Begin with a greeting if the user initiates the conversation. "
                        "Here is the user's query: ")
 
+def models(text, model="Mixtral 8x7B"):
+
+    client = client_fn(model)
+
+    generate_kwargs = dict(
+        max_new_tokens=100,
+        do_sample=True,
+    )
+
+    formatted_prompt = system_instructions + text + "[ANSWER]"
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+    for response in stream:
+        output += response.token.text
+    if output.endswith("<|assistant|>"):
+        output = output[:-13]
+    elif output.endswith("</s>"):
+        output = output[:-4]
+    return output
+
+description = """# Chat GO
+### Inspired from Google Go"""
+
+demo = gr.Interface(description=description, fn=models, inputs=["text", gr.Dropdown(['Mixtral 8x7B', 'Nous Hermes Mixtral 8x7B DPO', 'StarChat2 15b', 'Mistral 7B v0.3', 'Phi 3 mini'], value="Mistral 7B v0.3", label="Select Model")], outputs="text", live=True, batch=True, max_batch_size=10000)
+demo.queue(max_size=300000)
+demo.launch()
+
+
 # Function to generate model responses
 def models(text, model="Mixtral 8x7B"):
     client = client_fn(model)
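
The hunk shows only the tail of `client_fn`: the closing brace of a `model_map` dict and a lookup that falls back to Mixtral. For orientation, a minimal sketch of what the surrounding function plausibly looks like, assuming the Dropdown display names key a dict of Hub repo ids; every entry below except the Mixtral fallback is an assumption, not taken from the diff:

```python
from huggingface_hub import InferenceClient

def client_fn(model):
    # Only the closing brace and the fallback are visible in the hunk;
    # the mapping entries here are illustrative placeholders.
    model_map = {
        "Mixtral 8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
        # ... one entry per Dropdown choice, mapping to its Hub repo id
    }
    # Unknown names fall back to the Mixtral instruct model
    return InferenceClient(model_map.get(model, "mistralai/Mixtral-8x7B-Instruct-v0.1"))
```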
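The new `models` function relies on `huggingface_hub`'s streaming API: with `stream=True` and `details=True`, `InferenceClient.text_generation` yields per-token stream objects whose text lives in `.token.text`. A self-contained sketch of the same pattern; the prompt and model id here are arbitrary examples:

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

output = ""
# stream=True + details=True yields TextGenerationStreamOutput objects;
# each one carries the newly generated token in .token.text
for chunk in client.text_generation(
    "What is the capital of France?[ANSWER]",
    max_new_tokens=100,
    do_sample=True,
    stream=True,
    details=True,
    return_full_text=False,
):
    output += chunk.token.text
print(output)
```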
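One thing to watch in the new `gr.Interface` call: with `batch=True`, Gradio invokes `fn` once with a list per input component (up to `max_batch_size` queued requests) and expects a list of outputs of the same length, while the committed `models` takes scalars. A batch-compatible wrapper would look like this sketch; `models_batched` is a hypothetical helper, not part of the commit:

```python
import gradio as gr

# With batch=True, Gradio passes lists and expects a list back;
# `models` is the scalar function added in this commit.
def models_batched(texts, model_names):
    return [models(text, model) for text, model in zip(texts, model_names)]

demo = gr.Interface(
    fn=models_batched,
    inputs=["text", gr.Dropdown(['Mixtral 8x7B', 'Mistral 7B v0.3'], value="Mistral 7B v0.3", label="Select Model")],
    outputs="text",
    live=True,
    batch=True,
    max_batch_size=10000,
)
demo.queue(max_size=300000)  # queue() must be configured before launch()
demo.launch()
```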