Runtime error
Update app.py
app.py CHANGED
@@ -81,11 +81,13 @@ YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
 MODEL_NAME ="gpt-4"
 
 #HuggingFace
-repo_id = "meta-llama/Llama-2-13b-chat-hf"
+#repo_id = "meta-llama/Llama-2-13b-chat-hf"
 #repo_id = "HuggingFaceH4/zephyr-7b-alpha"
 #repo_id = "meta-llama/Llama-2-70b-chat-hf"
 #repo_id = "tiiuae/falcon-180B-chat"
 #repo_id = "Vicuna-33b"
+repo_id = "alexkueck/ChatBotLI2Klein"
+#repo_id ="mistralai/Mistral-7B-v0.1"
 
 
 ################################################
@@ -288,7 +290,7 @@ def invoke (prompt, history, rag_option, openai_api_key, temperature=0.9, max_n
 #Anfrage an OpenAI
 #llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
 #oder an Hugging Face
-
+llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 64})
 #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
 #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
 
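
The commit switches the LLM backend from the gated meta-llama/Llama-2-13b-chat-hf repo to alexkueck/ChatBotLI2Klein and instantiates it through LangChain's HuggingFaceHub wrapper. Below is a minimal sketch of how that wiring could be exercised outside the Space; it assumes the LangChain-era HuggingFaceHub API shown in the diff and a HUGGINGFACEHUB_API_TOKEN secret, and the test prompt is purely illustrative. If the chosen repo is not servable by the hosted Inference API, construction or the first call fails, which would be consistent with the "Runtime error" status shown above.

# Minimal sketch, assuming langchain's HuggingFaceHub wrapper (as used in app.py)
# and a HUGGINGFACEHUB_API_TOKEN secret configured on the Space.
import os

from langchain.llms import HuggingFaceHub

repo_id = "alexkueck/ChatBotLI2Klein"

llm = HuggingFaceHub(
    repo_id=repo_id,
    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
    model_kwargs={"temperature": 0.5, "max_length": 64},
)

# Simple smoke test: if the Inference API cannot serve this repo,
# the call raises instead of returning generated text.
print(llm("Hello, who are you?"))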