ajalisatgi committed on
Commit
7e1dfd1
·
verified ·
1 Parent(s): c534f6f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr
import openai
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

# Set API key. Read it from the environment instead of hard-coding a secret
# in source; fall back to the original placeholder so behavior is unchanged
# when OPENAI_API_KEY is unset.
openai.api_key = os.getenv("OPENAI_API_KEY", "your-openai-api-key")

# Load the embedding model used to vectorize queries (and, presumably, the
# documents already stored in the vector DB — must match the model used at
# index-build time; TODO confirm).
model_name = "intfloat/e5-small"
embedding_model = HuggingFaceEmbeddings(model_name=model_name)

# Re-open the persisted ChromaDB collection from disk.
persist_directory = "./docs/chroma/"
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_model)
# Define RAG function
def rag_pipeline(question):
    """Retrieve relevant documents for *question* and generate an AI response.

    Returns a tuple ``(answer_text, retrieved_docs)`` where ``answer_text``
    is the stripped model reply and ``retrieved_docs`` is the list of
    documents returned by the vector store.
    """
    # Top-5 nearest neighbours from the persisted Chroma store.
    retrieved_docs = vectordb.similarity_search(question, k=5)
    context = " ".join(doc.page_content for doc in retrieved_docs)

    # Generate AI response.
    # BUG FIX: the original used "\\n\\n", which inserts the two literal
    # characters backslash + n rather than real blank-line separation
    # between the context and the question.
    full_prompt = f"Context: {context}\n\nQuestion: {question}"
    # NOTE(review): openai.ChatCompletion is the legacy (<1.0) API surface;
    # consider migrating to openai.OpenAI().chat.completions.create.
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": full_prompt}],
        max_tokens=300,
        temperature=0.7,
    )

    return response['choices'][0]['message']['content'].strip(), retrieved_docs
# --- Gradio front end -------------------------------------------------------
# Single-function UI: one question box in, the generated answer plus the raw
# retrieved documents out.
iface = gr.Interface(
    fn=rag_pipeline,
    inputs=gr.Textbox(label="Enter your question"),
    outputs=[
        gr.Textbox(label="Generated Response"),
        gr.Textbox(label="Retrieved Documents"),
    ],
    title="RAG-Based Question Answering System",
    description="Enter a question and retrieve relevant documents with AI-generated response.",
)

# Start the web server only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()