Update app.py
app.py CHANGED
@@ -140,6 +140,11 @@ def clear_all(history):
     # Select "content", since that is where the AI's answer is contained
     result = data['choices'][0]['message']['content']
 
+    # Alternative model to find keywords
+    """
+    llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
+    result = process_chatverlauf_hf(prompt, llm)
+    """
     # add the chat to the chat histories and display it in the GUI
     id_neu = str(len(chats)+1) + "_" + result
     # chats is a dictionary
@@ -438,7 +443,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
     else:
         # or to Hugging Face --------------------------
         print("HF Anfrage.......................")
-        llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length":
+        llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
         #llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
         #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
         #llm = HuggingFaceTextGenInference(inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens, top_k=10, top_p=top_p, typical_p=0.95, temperature=temperature, repetition_penalty=repetition_penalty)
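For context, a minimal sketch of how the HuggingFaceHub call fixed in the second hunk is typically wired up with LangChain. The repo_id value and the API-token environment variable below are assumed placeholders, and process_chatverlauf_hf does not appear in this diff, so the helper shown is a hypothetical reconstruction of a keyword-extraction step, not the repository's actual implementation.

    # Sketch only; repo_id, the env-var name, and process_chatverlauf_hf
    # are illustrative assumptions, not taken from this repository.
    import os
    from langchain.llms import HuggingFaceHub

    repo_id = "google/flan-t5-xxl"  # assumed placeholder; the diff receives repo_id from elsewhere
    llm = HuggingFaceHub(
        repo_id=repo_id,
        model_kwargs={"temperature": 0.5, "max_length": 128},
        huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    )

    def process_chatverlauf_hf(prompt, llm):
        # Hypothetical helper: ask the model for a short keyword for the chat,
        # since the result is spliced into the chat id via
        # id_neu = str(len(chats)+1) + "_" + result in the first hunk.
        return llm("Find a short keyword for the following text: " + prompt)

    result = process_chatverlauf_hf("How does retrieval-augmented generation work?", llm)
    id_neu = "1_" + result

The commented-out block added in the first hunk keeps this alternative path disabled behind a docstring, so the keyword model can be switched on later without touching the surrounding chat-history logic.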