clementsan committed
Commit • 51d2a09 • 1 parent: 6e8daa8
Update UI with new step
app.py CHANGED

@@ -285,16 +285,21 @@ def demo():
 collection_name = gr.State()
 
 gr.Markdown(
-"""<center><h2>PDF-based chatbot
-<h3>Ask any questions about your PDF documents, along with follow-ups</h3>
-
-
-
+"""<center><h2>PDF-based chatbot</center></h2>
+<h3>Ask any questions about your PDF documents, along with follow-ups</h3>""")
+gr.Markdown(
+"""<b>Note:</b> This AI assistant, using Langchain and open-source LLMs, performs retrieval-augmented generation (RAG) from your PDF documents. \
+The user interface explicitely shows multiple steps to help understand the RAG workflow.
+This chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity purposes.<br>
+<br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate a reply.
 """)
-
+
+with gr.Tab("Step 1 - Upload PDF"):
 with gr.Row():
 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
 # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+
+with gr.Tab("Step 2 - Process document"):
 with gr.Row():
 db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
 with gr.Accordion("Advanced options - Document text splitter", open=False):
@@ -305,9 +310,9 @@ def demo():
 with gr.Row():
 db_progress = gr.Textbox(label="Vector database initialization", value="None")
 with gr.Row():
-db_btn = gr.Button("Generate vector database
+db_btn = gr.Button("Generate vector database")
 
-with gr.Tab("Step
+with gr.Tab("Step 3 - Initialize QA chain"):
 with gr.Row():
 llm_btn = gr.Radio(list_llm_simple, \
 label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
@@ -321,9 +326,9 @@ def demo():
 with gr.Row():
 llm_progress = gr.Textbox(value="None",label="QA chain initialization")
 with gr.Row():
-qachain_btn = gr.Button("Initialize
+qachain_btn = gr.Button("Initialize Question Answering chain")
 
-with gr.Tab("Step
+with gr.Tab("Step 4 - Chatbot"):
 chatbot = gr.Chatbot(height=300)
 with gr.Accordion("Advanced - Document references", open=False):
 with gr.Row():
@@ -336,10 +341,10 @@ def demo():
 doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
 source3_page = gr.Number(label="Page", scale=1)
 with gr.Row():
-msg = gr.Textbox(placeholder="Type message", container=True)
+msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True)
 with gr.Row():
-submit_btn = gr.Button("Submit")
-clear_btn = gr.ClearButton([msg, chatbot])
+submit_btn = gr.Button("Submit message")
+clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
 
 # Preprocessing events
 #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
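The hunks end right where the "# Preprocessing events" section begins, which is where these components get connected to callbacks. As a rough, self-contained illustration of how such a four-step tab layout is typically wired in gr.Blocks, here is a minimal sketch; the handler functions and their inputs/outputs are placeholders, not the ones defined elsewhere in app.py.

import gradio as gr

def generate_db(files):
    # Placeholder for the vector-database builder (Step 2)
    return f"Indexed {len(files or [])} file(s)"

def init_chain(llm_index):
    # Placeholder for the QA-chain initializer (Step 3)
    return f"Initialized LLM #{llm_index}"

def respond(message, history):
    # Placeholder for the chat handler (Step 4)
    history = history + [(message, "(answer would come from the QA chain)")]
    return "", history

with gr.Blocks() as demo:
    with gr.Tab("Step 1 - Upload PDF"):
        document = gr.Files(file_count="multiple", file_types=["pdf"], label="Upload your PDF documents")
    with gr.Tab("Step 2 - Process document"):
        db_progress = gr.Textbox(label="Vector database initialization", value="None")
        db_btn = gr.Button("Generate vector database")
    with gr.Tab("Step 3 - Initialize QA chain"):
        llm_btn = gr.Radio(["Model A", "Model B"], value="Model A", type="index", label="LLM models")
        llm_progress = gr.Textbox(label="QA chain initialization", value="None")
        qachain_btn = gr.Button("Initialize Question Answering chain")
    with gr.Tab("Step 4 - Chatbot"):
        chatbot = gr.Chatbot(height=300)
        msg = gr.Textbox(placeholder="Type message")
        submit_btn = gr.Button("Submit message")
        clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")

    # Event wiring: each button click or textbox submission passes component values to its callback
    db_btn.click(generate_db, inputs=[document], outputs=[db_progress])
    qachain_btn.click(init_chain, inputs=[llm_btn], outputs=[llm_progress])
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    submit_btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

demo.launch()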
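The note added in this commit describes the pipeline behind the UI: LangChain with open-source LLMs performing RAG over the uploaded PDFs, with conversational memory and document references. That implementation is not part of this diff; the following is only a minimal sketch of such a pipeline, and the loader, embedding model, chunk sizes, and Hugging Face model id are illustrative assumptions.

from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import HuggingFaceHub
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

def build_qa_chain(pdf_paths, chunk_size=600, chunk_overlap=40):
    # Steps 1-2: load the PDFs, split them, and index the chunks in ChromaDB
    pages = []
    for path in pdf_paths:
        pages.extend(PyPDFLoader(path).load())
    splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    vector_db = Chroma.from_documents(splitter.split_documents(pages), embedding=HuggingFaceEmbeddings())

    # Step 3: QA chain over a free inference endpoint, with conversational memory
    llm = HuggingFaceHub(
        repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # assumed model, not taken from this diff
        model_kwargs={"temperature": 0.7, "max_new_tokens": 1024},
    )
    memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer", return_messages=True)
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vector_db.as_retriever(),
        memory=memory,
        return_source_documents=True,  # exposes the "Document references" shown in Step 4
    )

# Step 4: each chat turn sends the question; memory supplies the running chat history
# result = qa_chain({"question": "What is this document about?"})
# result["answer"], result["source_documents"]

In the app, steps 2 and 3 of this sketch would roughly correspond to the "Generate vector database" and "Initialize Question Answering chain" buttons above.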