Update app.py
app.py
CHANGED
@@ -110,55 +110,100 @@ def conversation(qa_chain, message, history):
[Removed side of the hunk (old lines 110-164). The extracted page preserved only fragments of the removed code; "…" marks text lost in extraction.]

110        return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page
111
112    def upload_file(file_obj):
113 -      list_file_path = [ …
114        return list_file_path
115
116
117 -  def …
118        with gr.Blocks(theme="base") as demo:
119-124 - …
125 -          with …
126-127 - …
128                    with gr.Row():
129 - …
130                    with gr.Row():
131-132 - …
133                    with gr.Row():
134-135 - …
136 -              # Tab 2: QA Chain Initialization
137 -          with gr.Tab("Step 2 - QA Chain Initialization"):
138 -              with gr.Row():
139 -                  llm_selection = gr.Radio(list_llm_simple, label="Choose LLM Model", value=list_llm_simple[0])
140                    with gr.Row():
141 - …
142 -                  max_tokens = gr.Slider(minimum=64, maximum=1024, value=256, step=64, label="Max Tokens", interactive=True)
143 -                  top_k = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top K", interactive=True)
144                    with gr.Row():
145-147 - …
148 -          with gr. …
149 - …
150                    with gr.Row():
151 - …
152                    with gr.Row():
153-159 - …
160
161 -      return demo
162
163    if __name__ == "__main__":
164 - …
110        return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page
111
112    def upload_file(file_obj):
113 +      list_file_path = []
114 +      for file in file_obj:
115 +          file_path = file.name
116 +          list_file_path.append(file_path)
117        return list_file_path
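For reference, gr.Files with file_count="multiple" hands the callback a list of temp-file wrappers, each exposing its on-disk path as .name; that is why the loop above reads file.name per element rather than .name on the list itself. A minimal standalone sketch of the same pattern (component names below are illustrative, not taken from the app):

import gradio as gr

def upload_file(file_obj):
    # file_obj arrives as a list of tempfile wrappers; collect each path
    return [file.name for file in file_obj]

with gr.Blocks() as sketch:
    files = gr.Files(file_count="multiple", file_types=["pdf"])
    paths = gr.JSON(label="Resolved paths")
    files.upload(upload_file, inputs=[files], outputs=[paths])

sketch.launch()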
118
119
120 +  def demo():
121        with gr.Blocks(theme="base") as demo:
122 +          vector_db = gr.State()
123 +          qa_chain = gr.State()
124 +          collection_name = gr.State()
125 +
126 +          gr.Markdown(
127 +              """<center><h2>PDF-based chatbot (powered by LangChain and open-source LLMs)</h2></center>
128 +              <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
129 +              <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
130 +              When generating answers, it takes past questions into account (via conversational memory) and includes document references for clarity.
131 +              <br><b>Warning:</b> This Space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models below (free inference endpoints) can take some time to generate an output.<br>
132 +              """)
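The three gr.State() holders keep per-session Python objects (the vector store, the QA chain, the collection name) alive between event callbacks without rendering anything. A minimal sketch of that round-trip pattern, independent of this app (all names below are illustrative):

import gradio as gr

def make_resource():
    # stand-in for initialize_database / initialize_LLM: build the object once
    return {"calls": 0}, "Ready"

def use_resource(resource, text):
    # stand-in for conversation(): read and update the per-session object
    resource["calls"] += 1
    return resource, f"call #{resource['calls']}: {text}"

with gr.Blocks() as sketch:
    state = gr.State()                  # plays the role of vector_db / qa_chain
    status = gr.Textbox(label="Status")
    box_in = gr.Textbox(label="Input")
    box_out = gr.Textbox(label="Output")
    init_btn = gr.Button("Init")
    init_btn.click(make_resource, inputs=None, outputs=[state, status])
    box_in.submit(use_resource, inputs=[state, box_in], outputs=[state, box_out])

sketch.launch()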
133 +          with gr.Tab("Step 1 - Document pre-processing"):
134 +              with gr.Row():
135 +                  document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
136 +                  # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
137 +              with gr.Row():
138 +                  db_type = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
139 +              with gr.Accordion("Advanced options - Document text splitter", open=False):
140                    with gr.Row():
141 +                      slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
142                    with gr.Row():
143 +                      slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
144 +              with gr.Row():
145 +                  db_progress = gr.Textbox(label="Vector database initialization", value="None")
146 +              with gr.Row():
147 +                  db_btn = gr.Button("Generate vector database...")
148 +
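The two sliders are later passed to initialize_database (outside this hunk). With the LangChain stack the app is built on, they would typically drive a text splitter along these lines; the splitter class is an assumption, since the diff does not show it:

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Assumed wiring: slider_chunk_size and slider_chunk_overlap map onto
# the splitter's chunk_size and chunk_overlap (defaults 600 / 40 above).
splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=40)
chunks = splitter.split_text("some long document text " * 200)
print(len(chunks), "chunks")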
149 +          with gr.Tab("Step 2 - QA chain initialization"):
150 +              with gr.Row():
151 +                  llm_btn = gr.Radio(list_llm_simple,
152 +                      label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
153 +              with gr.Accordion("Advanced options - LLM model", open=False):
154                    with gr.Row():
155 +                      slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
156                    with gr.Row():
157 +                      slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max tokens", info="Model max tokens", interactive=True)
158                    with gr.Row():
159 +                      slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top-k samples", info="Model top-k samples", interactive=True)
160 +              with gr.Row():
161 +                  llm_progress = gr.Textbox(value="None", label="QA chain initialization")
162 +              with gr.Row():
163 +                  qachain_btn = gr.Button("Initialize question-answering chain...")
164 +
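Likewise, these three sliders feed initialize_LLM (also outside this hunk) together with vector_db. With the LangChain + Hugging Face inference endpoints the header text mentions, they would plausibly become sampling parameters of the endpoint LLM, roughly as below; the class, import path, and repo_id are assumptions about code the diff does not show:

from langchain_community.llms import HuggingFaceEndpoint

# Hypothetical core of initialize_LLM: sliders become sampling parameters.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # placeholder; the real choice comes from list_llm
    temperature=0.7,      # slider_temperature default
    max_new_tokens=1024,  # slider_maxtokens default
    top_k=3,              # slider_topk default
)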
165 +          with gr.Tab("Step 3 - Conversation with chatbot"):
166 +              chatbot = gr.Chatbot(height=300)
167 +              with gr.Accordion("Advanced - Document references", open=False):
168                    with gr.Row():
169 +                      doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
170 +                      source1_page = gr.Number(label="Page", scale=1)
171                    with gr.Row():
172 +                      doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
173 +                      source2_page = gr.Number(label="Page", scale=1)
174 +              with gr.Row():
175 +                  msg = gr.Textbox(placeholder="Type message", container=True)
176 +              with gr.Row():
177 +                  submit_btn = gr.Button("Submit")
178 +                  clear_btn = gr.ClearButton([msg, chatbot])
179 +
180 +          # Preprocessing events
181 +          # upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
182 +          db_btn.click(initialize_database,
183 +              inputs=[document, slider_chunk_size, slider_chunk_overlap],
184 +              outputs=[vector_db, collection_name, db_progress])
185 +          qachain_btn.click(initialize_LLM,
186 +              inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
187 +              outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0],
188 +              inputs=None,
189 +              outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page],
190 +              queue=False)
191 +
192 +          # Chatbot events
193 +          msg.submit(conversation,
194 +              inputs=[qa_chain, msg, chatbot],
195 +              outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page],
196 +              queue=False)
197 +          submit_btn.click(conversation,
198 +              inputs=[qa_chain, msg, chatbot],
199 +              outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page],
200 +              queue=False)
201 +          clear_btn.click(lambda: [None, "", 0, "", 0],
202 +              inputs=None,
203 +              outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page],
204 +              queue=False)
205 +      demo.queue().launch(debug=True)
206
207
208    if __name__ == "__main__":
209 +      demo()
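One wiring detail worth noting: qachain_btn.click(...).then(...) chains a second callback that clears the chat panel only after initialization finishes, and queue=False keeps these lightweight UI-reset callbacks out of the request queue. A minimal standalone sketch of the same chaining pattern (names are illustrative):

import gradio as gr

def slow_init():
    # stand-in for initialize_LLM: the heavyweight step
    return "initialized"

with gr.Blocks() as sketch:
    status = gr.Textbox(label="Status")
    chat = gr.Chatbot()
    btn = gr.Button("Init")
    # .then() fires after the click handler returns; the reset runs unqueued
    btn.click(slow_init, inputs=None, outputs=[status]).then(
        lambda: None, inputs=None, outputs=[chat], queue=False)

sketch.launch()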