clementsan committed
Commit: 0ea29a9
Parent(s): fa7cc51
Raise error for mpt-7b-instruct LLM model
app.py
CHANGED
@@ -111,8 +111,8 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             top_k = top_k,
             load_in_8bit = True,
         )
-    elif llm_model
-        raise gr.Error("
+    elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
+        raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
         llm = HuggingFaceEndpoint(
             repo_id=llm_model,
             temperature = temperature,
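For context, a minimal sketch of the guard this commit adds, assuming the surrounding initialize_llmchain logic in app.py; the helper name build_llm, the OVERSIZED_MODELS constant, and the max_new_tokens parameter are illustrative assumptions, not taken from the diff:

import gradio as gr
from langchain_community.llms import HuggingFaceEndpoint  # assumed import path used by app.py

# Models considered too large to load on the free Hugging Face inference endpoint
OVERSIZED_MODELS = ["HuggingFaceH4/zephyr-7b-gemma-v0.1", "mosaicml/mpt-7b-instruct"]

def build_llm(llm_model, temperature, max_tokens, top_k):
    # Raising gr.Error surfaces the message in the Gradio UI and aborts the callback
    if llm_model in OVERSIZED_MODELS:
        raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
    # Requires a Hugging Face API token in the environment (e.g. HUGGINGFACEHUB_API_TOKEN)
    return HuggingFaceEndpoint(
        repo_id=llm_model,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )

With this guard in place, selecting either oversized model in the UI reports the error immediately instead of attempting (and failing) to spin up the endpoint.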