sathyrajan committed on
Commit
423975e
·
verified ·
1 Parent(s): deb43f3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -0
app.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from autogen import AssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
from dotenv import load_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Fail fast with an actionable message instead of a bare KeyError deep
# inside the config lookup when the API key is absent.
_api_key = os.environ.get("OPENAI_API_KEY")
if not _api_key:
    raise RuntimeError(
        "OPENAI_API_KEY is not set; add it to the environment or a .env file."
    )

# LLM configuration shared by the agents below.
gpt4 = {
    "config_list": [{"model": "gpt-4", "api_key": _api_key}],
}
12
+
13
# Initialize agents
# Assistant agent: answers the user with step-by-step guidance, drawing on
# the retrieved context forwarded by ragproxyagent below.
assistant = AssistantAgent(
    name="assistant",
    system_message="You are a helpful assistant. Give the user information with a set of logical steps to follow. In addition provide helpful url resources from ragproxyagent.",
    llm_config=gpt4,
)

# Retrieval-augmented user proxy: indexes the local ./scraped_results/
# corpus and supplies matching context to the assistant.
# NOTE(review): human_input_mode="ALWAYS" prompts for console input on each
# turn; in a hosted Gradio web app this likely needs to be "NEVER" — confirm.
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="ALWAYS",
    retrieve_config={
        "task": "qa",  # question-answering retrieval task
        "docs_path": "./scraped_results/",  # local document corpus to index
        "context_max_tokens": 5000,  # cap on injected retrieval context
        "overwrite": False,  # keep an existing vector store if present
        "get_or_create": True,  # build the store on first run
        "return_source": True,  # include source references in replies
    },
    code_execution_config=False,  # never execute model-generated code
)
33
+
34
def _reset_agents():
    """Clear conversation state on both agents so a new chat starts fresh."""
    for agent in (ragproxyagent, assistant):
        agent.reset()
38
+
39
import time  # NOTE(review): unused in this file — candidate for removal
40
+
41
+
42
def chat_with_agents(user_input, history):
    """Gradio chat callback: answer ``user_input`` via the RAG pipeline.

    Parameters
    ----------
    user_input : str
        The latest message typed by the user.
    history : list
        Gradio-managed chat history. Unused here — the agents are reset
        every turn, so each exchange is independent.

    Returns
    -------
    str
        The assistant's summarized reply, or a fallback apology when the
        chat produced no summary.
    """
    _reset_agents()  # Reset agents before each new conversation

    # Step 1: the RAG proxy retrieves document context and asks the assistant.
    chat_result = ragproxyagent.initiate_chat(
        assistant,
        message=user_input,
        problem=user_input,
        n_results=3,  # retrieve the top-3 matching chunks
        max_turns=1,  # single question/answer exchange
    )

    print("Raw RAG Response:", chat_result)  # Debugging

    # Step 2: extract the assistant's final summarized response; fall back
    # to an apology if the result carries no (non-empty) summary.
    summary = getattr(chat_result, "summary", None)
    if summary:
        return summary
    return "Sorry, I couldn't generate a response."
71
+
72
# Build the chat UI; type="messages" selects the role/content chat format.
chat_interface = gr.ChatInterface(fn=chat_with_agents, type="messages")

if __name__ == "__main__":
    # Launch the Gradio server when executed as a script.
    chat_interface.launch()
80
+