Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -126,17 +126,15 @@ def search(term, num_results=1, lang="en", advanced=True, sleep_interval=0, time
|
|
126 |
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
|
127 |
|
128 |
def model(message,history: list[tuple[str, str]],system_message,max_tokens,temperature,top_p):
|
129 |
-
|
|
|
130 |
"""Performs a web search, feeds the results to a language model, and returns the answer."""
|
131 |
web_results = search(message)
|
132 |
web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
|
133 |
formatted_prompt = system_instructions1 + message + "[WEB]" + str(web2) + "[OpenGPT 4o]"
|
134 |
stream = client.text_generation(formatted_prompt, max_new_tokens=max_tokens, stream=True, temperature=temperature, details=True, return_full_text=False, top_p=top_p)
|
135 |
-
response = ""
|
136 |
token = "".join([response.token.text for response in stream if response.token.text != "</s>"])
|
137 |
-
|
138 |
-
response += token
|
139 |
-
yield response
|
140 |
|
141 |
examples = [
|
142 |
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
|
|
|
126 |
# Hugging Face Inference API client used by model() below for streamed text generation.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
|
127 |
|
128 |
def model(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    """Perform a web search, feed the results to the language model, and stream the answer.

    Args:
        message: The user's query.
        history: Prior (user, assistant) turns; part of the chat-UI callback
            signature but not read by this function.
        system_message: Extra system text prepended to the fixed instruction block.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling cutoff forwarded to the model.

    Yields:
        The accumulated response text, growing as each token arrives, so a
        streaming chat UI can update incrementally. The final yielded value
        equals the complete response.
    """
    # NOTE: this text is part of the model prompt at runtime — kept byte-identical.
    system_instructions1 = (
        "<s>[SYSTEM]"
        + system_message
        + "And your name is Chatchat.Answer as Real OpenGPT 4o, Made by 'peterpeter8585', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses. The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant.the instructions: [USER]"
    )
    web_results = search(message)
    # Flatten the search hits into one "Link/Text" context string for the prompt.
    web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
    formatted_prompt = system_instructions1 + message + "[WEB]" + str(web2) + "[OpenGPT 4o]"
    stream = client.text_generation(
        formatted_prompt,
        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        details=True,
        return_full_text=False,
        top_p=top_p,
    )
    # BUG FIX: the original joined the entire stream into one string and yielded
    # exactly once, defeating stream=True — the UI never saw incremental output.
    # Yield the growing response per token instead, skipping the end-of-sequence
    # marker, so the final yielded value matches the original's single result.
    response = ""
    for chunk in stream:
        token_text = chunk.token.text
        if token_text != "</s>":
            response += token_text
            yield response
    if not response:
        # Preserve the original's behavior of yielding at least once (empty string)
        # when the stream produced no usable tokens.
        yield response
|
|
|
|
|
138 |
|
139 |
examples = [
|
140 |
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
|