cmagganas committed on
Commit
e7f75f1
1 Parent(s): 18b3349

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -16
app.py CHANGED
@@ -2,7 +2,7 @@ from langchain.agents import AgentExecutor, AgentType, initialize_agent
2
  from langchain.agents.structured_chat.prompt import SUFFIX
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.memory import ConversationBufferMemory
5
- from tools import generate_image_tool
6
 
7
  import chainlit as cl
8
  from chainlit.action import Action
@@ -19,7 +19,7 @@ def rename(orig_author):
19
  mapping = {
20
  "AgentExecutor": "The LLM Brain",
21
  "LLMChain": "The Assistant",
22
- "GenerateImage": "DALL-E 3",
23
  "ChatOpenAI": "GPT-4 Turbo",
24
  "Chatbot": "Coolest App",
25
  }
@@ -81,13 +81,12 @@ async def setup_agent(settings):
81
  # This suffix is used to provide the chat history to the prompt.
82
  _SUFFIX = "Chat history:\n{chat_history}\n\n" + SUFFIX
83
 
84
- # We initialize our agent here, which is simply being used to decide between responding with text
85
- # or an image
86
  agent = initialize_agent(
87
  llm=llm, # our LLM (default is GPT-4 Turbo)
88
  tools=[
89
- generate_image_tool
90
- ], # our custom tool used to generate images with DALL-E 3
91
  agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, # the agent type we're using today
92
  memory=memory, # our memory!
93
  agent_kwargs={
@@ -114,7 +113,7 @@ async def main(message: cl.Message):
114
  it back to the user.
115
  """
116
  agent = cl.user_session.get("agent")
117
- cl.user_session.set("generated_image", None)
118
 
119
  res = await cl.make_async(agent.run)(
120
  input=message.content, callbacks=[cl.LangchainCallbackHandler()]
@@ -123,15 +122,10 @@ async def main(message: cl.Message):
123
  elements = []
124
  actions = []
125
 
126
- generated_image_name = cl.user_session.get("generated_image")
127
- generated_image = cl.user_session.get(generated_image_name)
128
- if generated_image:
129
  elements = [
130
- cl.Image(
131
- content=generated_image,
132
- name=generated_image_name,
133
- display="inline",
134
- )
135
  ]
136
 
137
- await cl.Message(content=res, elements=elements, actions=actions).send()
 
2
  from langchain.agents.structured_chat.prompt import SUFFIX
3
  from langchain.chat_models import ChatOpenAI
4
  from langchain.memory import ConversationBufferMemory
5
+ from tools import rag
6
 
7
  import chainlit as cl
8
  from chainlit.action import Action
 
19
  mapping = {
20
  "AgentExecutor": "The LLM Brain",
21
  "LLMChain": "The Assistant",
22
+ "RAG": "Jonah",
23
  "ChatOpenAI": "GPT-4 Turbo",
24
  "Chatbot": "Coolest App",
25
  }
 
81
  # This suffix is used to provide the chat history to the prompt.
82
  _SUFFIX = "Chat history:\n{chat_history}\n\n" + SUFFIX
83
 
84
+ # We initialize our agent here, which is simply being used to decide between responding with llm or tool
 
85
  agent = initialize_agent(
86
  llm=llm, # our LLM (default is GPT-4 Turbo)
87
  tools=[
88
+ rag
89
+ ], # our custom tool used to retrieve context
90
  agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, # the agent type we're using today
91
  memory=memory, # our memory!
92
  agent_kwargs={
 
113
  it back to the user.
114
  """
115
  agent = cl.user_session.get("agent")
116
+ cl.user_session.set("rag", None)
117
 
118
  res = await cl.make_async(agent.run)(
119
  input=message.content, callbacks=[cl.LangchainCallbackHandler()]
 
122
  elements = []
123
  actions = []
124
 
125
+ tool_res = cl.user_session.get("rag")
126
+ if tool_res:
 
127
  elements = [
128
+ tool_res
 
 
 
 
129
  ]
130
 
131
+ await cl.Message(content=res, elements=elements, actions=actions).send()