clementsan
committed on
Commit
•
b1ec9ac
1
Parent(s):
d2a45d8
Add new LLM Mistral-7B-Instruct-v0.2, and re-order LLM list
Browse files
app.py
CHANGED
@@ -19,13 +19,15 @@ import accelerate
|
|
19 |
|
20 |
|
21 |
default_persist_directory = './chroma_HF/'
|
22 |
-
default_llm_name1 = "tiiuae/falcon-7b-instruct"
|
23 |
-
default_llm_name2 = "google/flan-t5-xxl"
|
24 |
-
default_llm_name3 = "mosaicml/mpt-7b-instruct"
|
25 |
-
default_llm_name4 = "meta-llama/Llama-2-7b-chat-hf"
|
26 |
-
default_llm_name5 = "mistralai/Mistral-7B-Instruct-v0.1"
|
27 |
-
list_llm = [default_llm_name1, default_llm_name2, default_llm_name3, default_llm_name4, default_llm_name5]
|
28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
# Load PDF document and create doc splits
|
31 |
def load_doc(list_file_path, chunk_size, chunk_overlap):
|
@@ -203,8 +205,8 @@ def demo():
|
|
203 |
|
204 |
with gr.Tab("Step 2 - Initializing QA chain"):
|
205 |
with gr.Row():
|
206 |
-
llm_btn = gr.Radio(
|
207 |
-
label="LLM", value =
|
208 |
with gr.Accordion("Advanced options - LLM", open=False):
|
209 |
slider_temperature = gr.Slider(minimum = 0.0, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
|
210 |
slider_maxtokens = gr.Slider(minimum = 256, maximum = 4096, value=1024, step=24, label="Max Tokens", info="Model max tokens", interactive=True)
|
|
|
19 |
|
20 |
|
21 |
default_persist_directory = './chroma_HF/'
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
+
llm_name1 = "mistralai/Mistral-7B-Instruct-v0.2"
|
24 |
+
llm_name2 = "mistralai/Mistral-7B-Instruct-v0.1"
|
25 |
+
llm_name3 = "meta-llama/Llama-2-7b-chat-hf"
|
26 |
+
llm_name4 = "mosaicml/mpt-7b-instruct"
|
27 |
+
llm_name5 = "tiiuae/falcon-7b-instruct"
|
28 |
+
llm_name6 = "google/flan-t5-xxl"
|
29 |
+
list_llm = [llm_name1, llm_name2, llm_name3, llm_name4, llm_name5, llm_name6]
|
30 |
+
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
|
31 |
|
32 |
# Load PDF document and create doc splits
|
33 |
def load_doc(list_file_path, chunk_size, chunk_overlap):
|
|
|
205 |
|
206 |
with gr.Tab("Step 2 - Initializing QA chain"):
|
207 |
with gr.Row():
|
208 |
+
llm_btn = gr.Radio(list_llm_simple, \
|
209 |
+
label="LLM", value = list_llm_simple[0], type="index", info="Choose your LLM model")
|
210 |
with gr.Accordion("Advanced options - LLM", open=False):
|
211 |
slider_temperature = gr.Slider(minimum = 0.0, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
|
212 |
slider_maxtokens = gr.Slider(minimum = 256, maximum = 4096, value=1024, step=24, label="Max Tokens", info="Model max tokens", interactive=True)
|