Update app.py
app.py CHANGED
@@ -424,6 +424,7 @@ def generate_text_zu_doc(file, prompt, k, rag_option, chatbot, history, db):
 #possible with or without RAG
 def generate_text (prompt, chatbot, history, rag_option, model_option, openai_api_key, db, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3, top_k=35):
     global splittet
+    hugchat=False
     suche_im_Netz="Antwort der KI ..."
     print("Text pur..............................")
     if (openai_api_key == "" or openai_api_key == "sk-"):
@@ -465,6 +466,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
         #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
         #llm via HuggingChat
         llm = hugchat.ChatBot(cookies=cookies.get_dict())
+        hugchat=True #because this model is handled differently in llm_chain and rag_chain

         print("HF")
     #append the prompt to the history and turn it into a single text
@@ -473,16 +475,11 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
     #add additional document splits from the DB to the prompt (from the vector DB - Chroma or MongoDB)
     if (rag_option == "An"):
         print("LLM aufrufen mit RAG: ...........")
-        result = rag_chain(llm, history_text_und_prompt, db)
-        #elif (rag_option == "MongoDB"):
-            #splits = document_loading_splitting()
-            #document_storage_mongodb(splits)
-            #db = document_retrieval_mongodb(llm, history_text_und_prompt)
-            #result = rag_chain(llm, history_text_und_prompt, db)
+        result = rag_chain(llm, history_text_und_prompt, db, hugchat)
     else:
         #splittet = False
         print("LLM aufrufen ohne RAG: ...........")
-        resulti = llm_chain(llm, history_text_und_prompt)
+        resulti = llm_chain(llm, history_text_und_prompt, hugchat)
         result = resulti.strip()
     """
     #alternatively via API_URL - but the model needs 93 B space!!!