Create app.py
app.py
ADDED
@@ -0,0 +1,126 @@
import os
import time

import dotenv
import gradio as gr
import qdrant_client
from llama_index.core import Settings, StorageContext, VectorStoreIndex
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from llama_index.llms.gemini import Gemini
from llama_index.readers.web import FireCrawlWebReader
from llama_index.vector_stores.qdrant import QdrantVectorStore

dotenv.load_dotenv()
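
# Environment expected by this app (loaded from .env above):
#   QDRANT_URL        - URL of the Qdrant instance
#   QDRANT_API_KEY    - API key for Qdrant
#   FIRECRAWL_API_KEY - API key for Firecrawl
# The Gemini LLM also needs a Google API key; the llama-index Gemini
# integration typically reads GOOGLE_API_KEY from the environment
# (an assumption; it is not set explicitly anywhere in this file).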

# Global variables shared by the Gradio handlers below
index = None
chat_engine = None
collection_name = ""


def embed_setup():
    # Local embeddings via FastEmbed; Gemini as the chat LLM.
    Settings.embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")
    Settings.llm = Gemini(temperature=0.1, model_name="models/gemini-pro")
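
# Minimal sanity check for the settings above (illustrative, not called by
# the app): bge-small-en-v1.5 yields 384-dimensional vectors, so
#
#     embed_setup()
#     vec = Settings.embed_model.get_text_embedding("hello world")
#     assert len(vec) == 384
#
# should pass once the model has downloaded.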


def qdrant_setup():
    client = qdrant_client.QdrantClient(
        os.getenv("QDRANT_URL"),
        api_key=os.getenv("QDRANT_API_KEY"),
    )
    return client
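
# Illustrative connectivity check (not used by the app): listing collections
# fails fast if QDRANT_URL or QDRANT_API_KEY is wrong.
#
#     client = qdrant_setup()
#     print(client.get_collections())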


def ingest_documents(url):
    firecrawl_reader = FireCrawlWebReader(
        api_key=os.getenv("FIRECRAWL_API_KEY"),
        mode="scrape",
    )
    documents = firecrawl_reader.load_data(url=url)
    return documents
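
# Example call (hypothetical URL). In "scrape" mode Firecrawl fetches just the
# one page at `url`, so point it at the documentation page to be indexed:
#
#     docs = ingest_documents("https://docs.example.com/quickstart")
#     print(len(docs), docs[0].text[:200])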


def setup_query_engine(url, coll_name):
    global index, chat_engine, collection_name
    collection_name = coll_name

    embed_setup()
    client = qdrant_setup()
    vector_store = QdrantVectorStore(client=client, collection_name=collection_name)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    if url:
        # Crawl the URL and build a fresh index in the Qdrant collection.
        documents = ingest_documents(url)
        index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
    else:
        # No URL given: attach to an existing, already-populated collection.
        index = VectorStoreIndex.from_vector_store(vector_store=vector_store)

    memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
    chat_engine = index.as_chat_engine(
        chat_mode="context",
        memory=memory,
        system_prompt=(
            """You are an AI assistant for developers, specializing in technical documentation. Your task is to provide accurate, concise, and helpful responses based on the given documentation context.

Context information is below:
{context_str}

Always answer based on the information in the context and your general knowledge, and be precise.

Given this context, please respond to the following user query:
{query_str}

Your response should:
- Directly address the query using information from the context
- Include relevant code examples or direct quotes if applicable
- Mention specific sections or pages of the documentation
- Highlight any best practices or potential pitfalls related to the query

After your response, suggest 3 follow-up questions based on the context that the user might find helpful for deeper understanding.
ALWAYS SUGGEST FOLLOW-UP QUESTIONS.

Your response:"""
        ),
    )
    return "Query engine setup completed successfully!"
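
# Usage sketch (hypothetical values): pass a URL to crawl and index new
# content, or leave it empty to reuse an already-populated collection.
#
#     setup_query_engine("https://docs.example.com", "example-docs")  # ingest + index
#     setup_query_engine("", "example-docs")                          # reuse existing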


def query_documentation(query):
    global chat_engine
    if not chat_engine:
        return "Please set up the query engine first."

    try:
        response = chat_engine.chat(query)
        return str(response.response)
    except Exception:
        # Back off and retry once (e.g. after a rate-limit error), then give up.
        time.sleep(120)
        try:
            response = chat_engine.chat(query)
            return str(response.response)
        except Exception as e:
            return f"Retry failed. Error: {str(e)}"
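
# Example, assuming setup_query_engine() has already run:
#
#     print(query_documentation("How do I authenticate API requests?"))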


# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# Talk to Software Documentation")

    with gr.Tab("Setup"):
        url_input = gr.Textbox(label="Enter URL to crawl and ingest documents (optional)")
        collection_input = gr.Textbox(label="Enter collection name for vector store (required)")
        setup_button = gr.Button("Setup Query Engine")
        setup_output = gr.Textbox(label="Setup Output")

        setup_button.click(setup_query_engine, inputs=[url_input, collection_input], outputs=setup_output)

    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Enter your query")
        clear = gr.Button("Clear")

        def user(user_message, history):
            # Append the user turn and clear the input box.
            return "", history + [[user_message, None]]

        def bot(history):
            # Fill in the assistant reply for the latest turn.
            user_message = history[-1][0]
            bot_message = query_documentation(user_message)
            history[-1][1] = bot_message
            return history

        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        clear.click(lambda: None, None, chatbot, queue=False)


if __name__ == "__main__":
    app.launch()
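
# To run locally (assuming the environment variables listed at the top are
# set), something along these lines should work; package names are
# best-effort, so check each integration's docs for exact requirements:
#
#     pip install gradio qdrant-client python-dotenv llama-index \
#         llama-index-vector-stores-qdrant llama-index-embeddings-fastembed \
#         llama-index-llms-gemini llama-index-readers-web
#     python app.py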