#########################################################################################
# Title: German AI-Interface with advanced RAG
# Author: Andreas Fischer
# Date: January 31st, 2023
# Last update: May 27th, 2024
##########################################################################################
#https://github.com/abetlen/llama-cpp-python/issues/306
#sudo apt install libclblast-dev
#CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 pip install llama-cpp-python --force-reinstall --upgrade --no-cache-dir -v
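#
# Overview: this script (1) builds a Chroma vector database holding intent anchors, tool
# descriptions and episodic dialog memory, (2) serves a Mixtral-style instruct model either
# via the Hugging Face InferenceClient or a local llama-cpp-python server, and (3) wraps
# everything in a Gradio ChatInterface whose response() function routes each message through
# intention detection (RAG-layer 0) and retrieval (RAG-layer 1).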
# Prepare resources
#-------------------
import torch
import gc
torch.cuda.empty_cache()
gc.collect()
import os
from datetime import datetime
global filename
filename=f"./{datetime.now().strftime('%Y%m%d')}_history.json" # where to store the history as json-file
if os.path.exists(filename): os.remove(filename) # start each run with a fresh history file
# Chroma-DB
#-----------
import os
import chromadb
dbPath = "/home/af/Schreibtisch/Code/gradio/Chroma/db"
onPrem = True if(os.path.exists(dbPath)) else False
if(onPrem==False): dbPath="/home/user/app/db"
#onPrem=True # uncomment to override automatic detection
print(dbPath)
#client = chromadb.Client()
path=dbPath
client = chromadb.PersistentClient(path=path)
print(client.heartbeat())
print(client.get_version())
print(client.list_collections())
from chromadb.utils import embedding_functions
default_ef = embedding_functions.DefaultEmbeddingFunction()
#sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="T-Systems-onsite/cross-en-de-roberta-sentence-transformer")
#instructor_ef = embedding_functions.InstructorEmbeddingFunction(model_name="hkunlp/instructor-large", device="cuda")
embeddingModel = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="T-Systems-onsite/cross-en-de-roberta-sentence-transformer", device="cuda" if(onPrem) else "cpu")
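# Note: the chosen embedding model is a cross-lingual EN/DE sentence transformer, so German user
# queries can be matched against the English tool descriptions in txts1a below.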
print(str(client.list_collections()))
global collection
dbName="myDB"
if("name="+dbName in str(client.list_collections())): client.delete_collection(name=dbName)
if("name="+dbName in str(client.list_collections())):
print(dbName+" found!")
collection = client.get_collection(name=dbName, embedding_function=embeddingModel )
else:
print(dbName+" created!")
collection = client.create_collection(
dbName,
embedding_function=embeddingModel,
metadata={"hnsw:space": "cosine"})
# txts0: Intentions
#------------------
txts0=[
"Ich suche ein KI-Programm mit bestimmten Fähigkeiten.", # 1a
#"Ich suche kein KI-Programm mit bestimmten Fähigkeiten.", # !1a
"Ich habe ein KI-Programm und habe Fragen zur Benutzung.", # !1a (besser, um 1a und 1b abzugrenzen)
"Ich habe ein KI-Programm und habe Fragen zur Benutzung.", # 1b
#"Ich habe kein KI-Programm und habe keine Fragen zur Benutzung.", # !1b
"Ich habe eine allgemeine Frage ohne KI-Bezug." # !1b (greift besser bei Alltagsfragen)
]
# txts1a: RAG-Infos for first intention:
#---------------------------------------
txts1a=[
"Text generating AI model mistralai/Mixtral-8x7B-Instruct-v0.1: Suitable for text generation, e.g., social media content, marketing copy, blog posts, short stories, etc.",
"Image generating AI model stabilityai/sdxl-turbo: Suitable for image generation, e.g., illustrations, graphics, AI art, etc.",
"Audio transcribing AI model openai/whisper-large-v3: Suitable for audio-transcription in different languages",
"Speech synthesizing AI model coqui/XTTS-v2: Suitable for generating audio from text and for voice-cloning",
"Code generating AI model deepseek-ai/deepseek-coder-6.7b-instruct: Suitable for programming in Python, JavaScript, PHP, Bash and many other programming languages.",
"Translation AI model Helsinki-NLP/opus-mt: Suitable for translating text, e.g., from English to German or vice versa",
"Search result-integrating AI model phind/phind-v9-model: Suitable for researching current topics and for obtaining precise and up-to-date answers to questions based on web search results"
]
# txts1b: RAG-Infos for second intention
#----------------------------------------
txts1b=[
"Für Fragen zur Umsetzung von KI-Verfahren ist das KI-basierte Assistenzsystem nicht geeignet. Möglicherweise empfiehlt sich ein KI-Modell mit Internetzugriff, wie beispielsweise phind.com, oder das Kontaktieren eines Experten wie Dr. Andreas Fischer ([email protected])."
]
#meta=[{"type":"0", "type2":"0","source":"AF"}]*len(txts0)+[{"type":"1a","type2":"0","source":"AF"}]*len(txts1a)+[{"type":"1b","type2":"0","source":"AF"}]*len(txts1b)
meta = []
for _ in range(len(txts0)):
meta.append({"type":"0", "type2":"0","source":"AF"})
for _ in range(len(txts1a)):
meta.append({"type":"1a","type2":"0","source":"AF"})
for _ in range(len(txts1b)):
meta.append({"type":"1b","type2":"0","source":"AF"})
#Change type2 for txt0-entries
#-----------------------------
meta[0]["type2"]="1a" # RAG mit txts1a
meta[1]["type2"]="!1a" # else
meta[2]["type2"]="1b" # RAG mit txts1b
meta[3]["type2"]="!1b" # else
txts=txts0+txts1a+txts1b
collection.add(
documents=txts,
ids=[str(i) for i in list(range(len(txts)))],
metadatas=meta
)
# Add entry to episodic memory
x=collection.get(include=[])["ids"]
if(True): # always seed an example episode; replace True with len(x)==0 to seed only when the database is empty
message="Ich bin der User."
response="Hallo User, wie kann ich dienen?"
x=collection.get(include=[])["ids"]
collection.add(
documents=[message,response],
metadatas=[
{"source": "ICH", "dialog": f"ICH: {message}\nDU: {response}", "type":"episode"},
{"source": "DU", "dialog": f"ICH: {message}\nDU: {response}", "type":"episode"}
],
ids=[str(len(x)+1),str(len(x)+2)]
)
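    # The pair above seeds the episodic memory: both turns carry the same "dialog" string and are
    # tagged type="episode", the filter used by the Memory-RAG branch in response() below.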
RAGResults=collection.query(
query_texts=[message],
n_results=1,
#where={"source": "USER"}
)
RAGResults["metadatas"][0][0]["dialog"]
x=collection.get(include=[])["ids"]
x
collection.get() # Inspect db-entries
print("Database ready!")
print(collection.count())
rag0=collection.query(
query_texts=[message],
n_results=4,
where={"type": "0"}
)
x=rag0["metadatas"][0][0]["type2"]
x=[x["type2"] for x in rag0["metadatas"][0]]
x.index("1c") if "1c" in x else len(x)+1
# Model
#-------
#onPrem=False
if(onPrem==False):
modelPath="mistralai/Mixtral-8x7B-Instruct-v0.1"
from huggingface_hub import InferenceClient
import gradio as gr
client = InferenceClient(
modelPath
#"mistralai/Mixtral-8x7B-Instruct-v0.1"
#"mistralai/Mistral-7B-Instruct-v0.1"
)
else:
import os
import requests
import subprocess
#modelPath="/home/af/gguf/models/Discolm_german_7b_v1.Q4_0.gguf"
modelPath="/home/af/gguf/models/Mixtral-8x7b-instruct-v0.1.Q4_0.gguf"
if(os.path.exists(modelPath)==False):
#url="https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF/resolve/main/discolm_german_7b_v1.Q4_0.gguf?download=true"
url="https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
        response = requests.get(url, stream=True) # stream the multi-GB file to disk instead of holding it in memory
        with open("./Mixtral-8x7b-instruct.gguf", mode="wb") as file:
            for chunk in response.iter_content(chunk_size=1024*1024):
                file.write(chunk)
print("Model downloaded")
modelPath="./Mixtral-8x7b-instruct.gguf"
print(modelPath)
n="20"
if("Mixtral-8x7b-instruct" in modelPath): n="0" # mixtral seems to cause problems here...
command = ["python3", "-m", "llama_cpp.server", "--model", modelPath, "--host", "0.0.0.0", "--port", "2600", "--n_threads", "8", "--n_gpu_layers", n]
subprocess.Popen(command)
print("Server ready!")
#import llama_cpp
#llama_cpp.llama_backend_init(numa=False)
#params=llama_cpp.llama_context_default_params()
#params.n_ctx
# Gradio-GUI
#------------
import re
def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=None, zeichenlimit=None,historylimit=4, removeHTML=True):
startOfString=""
    if zeichenlimit is None: zeichenlimit=1000000000 # effectively no limit on message length
template0=" [INST]{system}\n [/INST] </s>"
template1=" [INST] {message} [/INST]"
template2=" {response}</s>"
if("command-r" in modelPath): #https://huggingface.co/CohereForAI/c4ai-command-r-v01
## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
template0="<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|> {system}<|END_OF_TURN_TOKEN|>"
template1="<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{message}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
template2="{response}<|END_OF_TURN_TOKEN|>"
if("Gemma-" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
template0="<start_of_turn>user{system}</end_of_turn>"
template1="<start_of_turn>user{message}</end_of_turn><start_of_turn>model"
template2="{response}</end_of_turn>"
if("Mixtral-8x22B-Instruct" in modelPath): # AutoTokenizer: <s>[INST] U1[/INST] A1</s>[INST] U2[/INST] A2</s>
startOfString="<s>"
template0="[INST]{system}\n [/INST] </s>"
template1="[INST] {message}[/INST]"
template2=" {response}</s>"
if("Mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
startOfString="<s>" # AutoTokenzizer: <s> [INST] U1 [/INST]A1</s> [INST] U2 [/INST]A2</s>
template0=" [INST]{system}\n [/INST] </s>"
template1=" [INST] {message} [/INST]"
template2=" {response}</s>"
if("Mistral-7B-Instruct" in modelPath): #https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
startOfString="<s>"
template0="[INST]{system}\n [/INST]</s>"
template1="[INST] {message} [/INST]"
template2=" {response}</s>"
if("Openchat-3.5" in modelPath): #https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
template0="GPT4 Correct User: {system}<|end_of_turn|>GPT4 Correct Assistant: Okay.<|end_of_turn|>"
template1="GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant: "
template2="{response}<|end_of_turn|>"
if(("Discolm_german_7b" in modelPath) or ("SauerkrautLM-7b-HerO" in modelPath)): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
template0="<|im_start|>system\n{system}<|im_end|>\n"
template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
template2="{response}<|im_end|>\n"
if("Llama-3-SauerkrautLM-8b-Instruct" in modelPath): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
template0="<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|>"
template1="<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
template2="{response}<|eot_id|>\n"
if("WizardLM-13B-V1.2" in modelPath): #https://huggingface.co/WizardLM/WizardLM-13B-V1.2
template0="{system} " #<s>
template1="USER: {message} ASSISTANT: "
template2="{response}</s>"
if("Phi-2" in modelPath): #https://huggingface.co/TheBloke/phi-2-GGUF
template0="Instruct: {system}\nOutput: Okay.\n"
template1="Instruct: {message}\nOutput:"
template2="{response}\n"
prompt = ""
if RAGAddon is not None:
system += RAGAddon
if system is not None:
prompt += template0.format(system=system) #"<s>"
if history is not None:
for user_message, bot_response in history[-historylimit:]:
if user_message is None: user_message = ""
if bot_response is None: bot_response = ""
            bot_response = re.sub("\n\n<details(| open)>.*?</details>","", bot_response, flags=re.DOTALL) # remove RAG components
if removeHTML==True: bot_response = re.sub("<(.*?)>","\n", bot_response) # remove HTML-components in general (may cause bugs with markdown-rendering)
if user_message is not None: prompt += template1.format(message=user_message[:zeichenlimit])
if bot_response is not None: prompt += template2.format(response=bot_response[:zeichenlimit])
if message is not None: prompt += template1.format(message=message[:zeichenlimit])
if system2 is not None:
prompt += system2
return startOfString+prompt
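# Illustrative (commented-out) sketch of what extend_prompt() returns, assuming modelPath contains
# "Mixtral-8x7b-instruct" and using a made-up mini-history; run manually if needed:
#   demo_history=[("Hallo","Hallo User, wie kann ich dienen?")]
#   print(extend_prompt("Welche Bildmodelle gibt es?", demo_history, system="Du bist ein Assistent."))
#   # -> roughly: <s> [INST]Du bist ein Assistent.\n [/INST] </s> [INST] Hallo [/INST] Hallo User, wie kann ich dienen?</s> [INST] Welche Bildmodelle gibt es? [/INST]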
import gradio as gr
import requests
import json
from datetime import datetime
import os
import re
def response(message, history):
settings="Memory Off"
removeHTML=True
    # Preprocessing to prevent simple forms of prompt injection:
#----------------------------------------------------------
message=message.replace("[INST]","")
message=message.replace("[/INST]","")
message=re.sub("<[|](im_start|im_end|end_of_turn)[|]>", '', message)
# Load Memory if memory is turned on
#-------------------------------------
if (settings=="Memory On"):
        if((len(history)==0) and (os.path.isfile(filename))): history=json.load(open(filename,'r',encoding="utf-8")) # retrieve history (if available)
system="Du bist ein deutschsprachiges wortkarges KI-basiertes Assistenzsystem. Antworte kurz, in deutsche Sprache und verzichte auf HTML und Code jeder Art."
#RAG-layer 0: Intention-RAG
#---------------------------
typeResults=collection.query(
query_texts=[message],
n_results=4,
where={"type": "0"}
)
    myType=typeResults["metadatas"][0][0]["type2"] # simplest variant: type2 of the single best match
    x=[x["type2"] for x in typeResults["metadatas"][0]] # list the type2 entries of all matches, ordered by similarity
    myType="1a" if ((x.index("1a") if "1a" in x else len(x)+1) < (x.index("!1a") if "!1a" in x else len(x)+1)) else "else" # choose 1a if it ranks higher than !1a
    if ((x.index("1b") if "1b" in x else len(x)+1) < (x.index("1a") if "1a" in x else len(x)+1)): # check 1b if it ranks higher than 1a
        if ((x.index("1b") if "1b" in x else len(x)+1) < (x.index("!1b") if "!1b" in x else len(x)+1)): myType="1b" # choose 1b if it ranks higher than !1b (otherwise keep 1a/else)
print("Message:"+message+"\n\nIntention-Type: "+myType+"\n\n"+str(typeResults))
#RAG-layer 1: Respond with CustomDB-RAG (1a, 1b) or Memory-RAG
#--------------------------------------------------------------
rag=None
historylimit=4
combination=None
## RAG 1a: Respond with CustomDB-RAG
#-----------------------------------
if(myType=="1a"):
RAGResults=collection.query(
query_texts=[message],
n_results=2,
where={"type": myType}
#where_document={"$contains":"search_string"}
)
dists=["<br><small>(relevance: "+str(round((1-d)*100)/100)+";" for d in RAGResults['distances'][0]]
sources=["source: "+s["source"]+")</small>" for s in RAGResults['metadatas'][0]]
texts=RAGResults['documents'][0]
combination = zip(texts,dists,sources)
combination = [' '.join(triplets) for triplets in combination]
#print(combination)
rag="\n\n"
rag += "Mit Blick auf die aktuelle Äußerung des Users empfiehlst du AUSSCHLIEßLICH folgende KI-Verfahren aus unserer Datenbank:\n"
rag += str(texts)
rag += "\n\nIm Folgenden siehst du den jüngsten Dialog-Verlauf:"
else:
        ## RAG 1b: Respond with CustomDB-RAG
#-----------------------------------
if(myType=="1b"):
RAGResults=collection.query(
query_texts=[message],
n_results=2,
where={"type": myType}
#where_document={"$contains":"search_string"}
)
dists=["<br><small>(relevance: "+str(round((1-d)*100)/100)+";" for d in RAGResults['distances'][0]]
sources=["source: "+s["source"]+")</small>" for s in RAGResults['metadatas'][0]]
texts=RAGResults['documents'][0]
combination = zip(texts,dists,sources)
combination = [' '.join(triplets) for triplets in combination]
#print(combination)
rag="\n\n"
rag += "Beziehe dich in deiner Fortsetzung des Dialogs AUSSCHLIEßLICH auf die folgenden Informationen und gebe keine weiteren Informationen heraus:\n"
rag += str(texts)
rag += "\n\nIm Folgenden siehst du den jüngsten Dialog-Verlauf:"
## Else: Respond with Memory-RAG
#--------------------------------
else:
x=collection.get(include=[])["ids"]
if(len(x)>(historylimit*2)): # turn on RAG when the database contains entries that are not shown within historylimit
RAGResults=collection.query(
query_texts=[message],
n_results=1,
where={"type": "episode"}
)
texts=RAGResults["metadatas"][0][0]["dialog"] #str()
#print("Message: "+message+"\n\nBest Match: "+texts)
rag="\n\n"
rag += "Mit Blick auf die aktuelle Äußerung des Users erinnerst du dich insb. an folgende Episode aus eurem Dialog:\n"
rag += str(texts)
rag += "\n\nIm Folgenden siehst du den jüngsten Dialog-Verlauf:"
# Request Response from LLM:
system2=None # system2 can be used as fictive first words of the AI, which are not displayed or stored
#print("RAG: "+rag)
#print("System: "+system+"\n\nMessage: "+message)
prompt=extend_prompt(
message, # current message of the user
history, # complete history
system, # system prompt
rag, # RAG-component added to the system prompt
system2, # fictive first words of the AI (neither displayed nor stored)
historylimit=historylimit,# number of past messages to consider for response to current message
removeHTML=removeHTML # remove HTML-components from History (to prevent bugs with Markdown)
)
#print("\n\nMESSAGE:"+str(message))
#print("\n\nHISTORY:"+str(history))
#print("\n\nSYSTEM:"+str(system))
#print("\n\nRAG:"+str(rag))
#print("\n\nSYSTEM2:"+str(system2))
print("\n\n*** Prompt:\n"+prompt+"\n***\n\n")
## Request response from model
#------------------------------
print("AI running on prem!" if(onPrem) else "AI running HFHub!")
if(onPrem==False):
temperature=float(0.9)
max_new_tokens=500
top_p=0.95
repetition_penalty=1.0
if temperature < 1e-2: temperature = 1e-2
top_p = float(top_p)
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
response = ""
#print("User: "+message+"\nAI: ")
for text in stream:
part=text.token.text
#print(part, end="", flush=True)
response += part
if removeHTML==True: response = re.sub("<(.*?)>","\n", response) # remove HTML-components in general (may cause bugs with markdown-rendering)
yield response
if((myType=="1a")): #add RAG-results to chat-output if appropriate
response=response+"\n\n<details><summary><strong>Sources</strong></summary><br><ul>"+ "".join(["<li>" + s + "</li>" for s in combination])+"</ul></details>"
yield response
history.append((message, response)) # add current dialog to history
# Store current state in DB if memory is turned on
if (settings=="Memory On"):
x=collection.get(include=[])["ids"] # add current dialog to db
collection.add(
documents=[message,response],
metadatas=[
{ "source": "ICH", "dialog": f"ICH: {message.strip()}\n DU: {response.strip()}", "type":"episode"},
{ "source": "DU", "dialog": f"ICH: {message.strip()}\n DU: {response.strip()}", "type":"episode"}
],
ids=[str(len(x)+1),str(len(x)+2)]
)
json.dump(history,open(filename,'w',encoding="utf-8"),ensure_ascii=False)
if(onPrem==True):
# url="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
url="http://0.0.0.0:2600/v1/completions"
body={"prompt":prompt,"max_tokens":None, "echo":"False","stream":"True"} # e.g. Mixtral-Instruct
if("Discolm_german_7b" in modelPath): body.update({"stop": ["<|im_end|>"]}) # fix stop-token of DiscoLM
if("Gemma-" in modelPath): body.update({"stop": ["<|im_end|>","</end_of_turn>"]}) # fix stop-token of Gemma
response="" #+"("+myType+")\n"
buffer=""
#print("URL: "+url)
#print("User: "+message+"\nAI: ")
for text in requests.post(url, json=body, stream=True): #-H 'accept: application/json' -H 'Content-Type: application/json'
if buffer is None: buffer=""
buffer=str("".join(buffer))
# print("*** Raw String: "+str(text)+"\n***\n")
text=text.decode('utf-8')
if((text.startswith(": ping -")==False) & (len(text.strip("\n\r"))>0)): buffer=buffer+str(text)
# print("\n*** Buffer: "+str(buffer)+"\n***\n")
buffer=buffer.split('"finish_reason": null}]}')
if(len(buffer)==1):
buffer="".join(buffer)
pass
if(len(buffer)==2):
part=buffer[0]+'"finish_reason": null}]}'
if(part.lstrip('\n\r').startswith("data: ")): part=part.lstrip('\n\r').replace("data: ", "")
try:
part = str(json.loads(part)["choices"][0]["text"])
#print(part, end="", flush=True)
response=response+part
buffer="" # reset buffer
except Exception as e:
print("Exception:"+str(e))
pass
if removeHTML==True: response = re.sub("<(.*?)>","\n", response) # remove HTML-components in general (may cause bugs with markdown-rendering)
yield response
if((myType=="1a")): #add RAG-results to chat-output if appropriate
response=response+"\n\n<details><summary><strong>Sources</strong></summary><br><ul>"+ "".join(["<li>" + s + "</li>" for s in combination])+"</ul></details>"
yield response
# Store current state in DB if memory is turned on
if (settings=="Memory On"):
x=collection.get(include=[])["ids"] # add current dialog to db
collection.add(
documents=[message,response],
metadatas=[
{ "source": "ICH", "dialog": f"ICH: {message.strip()}\n DU: {response.strip()}", "type":"episode"},
{ "source": "DU", "dialog": f"ICH: {message.strip()}\n DU: {response.strip()}", "type":"episode"}
],
ids=[str(len(x)+1),str(len(x)+2)]
)
json.dump(history,open(filename,'w',encoding="utf-8"),ensure_ascii=False)
gr.ChatInterface(
response,
    chatbot=gr.Chatbot(value=[[None,"Herzlich willkommen! Ich bin ein KI-basiertes Assistenzsystem, das für jede Anfrage die am besten geeigneten KI-Tools empfiehlt.\nAktuell bin ich wenig mehr als eine Tech-Demo und kenne nur 7 KI-Modelle - also sei bitte nicht zu streng mit mir.<ul><li>Wenn du ein KI-Modell suchst, antworte ich auf Basis der Liste</li><li>Wenn du Fragen zur Benutzung eines KI-Modells hast, verweise ich an andere Stellen</li><li>Wenn du andere Fragen hast, antworte ich frei und berücksichtige dabei Relevantes aus dem gesamten bisherigen Dialog.</li></ul>\nWas ist dein Anliegen?"]],render_markdown=True),
title="German AI-Interface with advanced RAG (on prem)" if onPrem else "German AI-Interface with advanced RAG (HFHub)",
#additional_inputs=[gr.Dropdown(["Memory On","Memory Off"],value="Memory Off",label="Memory")]
).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)
print("Interface up and running!")