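# Streamlit chat app that answers inventory questions with a two-agent CrewAI crew:
# one agent searches a CSV/Excel knowledge base, the other falls back to a
# DuckDuckGo web search.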
import os
from textwrap import dedent

import nest_asyncio
import streamlit as st
from crewai import Agent, Task, Crew, Process
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource
from crewai.tools import BaseTool
from crewai_tools import CSVSearchTool
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_openai import ChatOpenAI
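# Allow nested asyncio event loops, which some CrewAI/LangChain tooling needs
# when running inside Streamlit's script runner.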
nest_asyncio.apply()
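
# CrewAI-compatible tool wrapper around LangChain's DuckDuckGoSearchRun.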
class MyCustomDuckDuckGoTool(BaseTool):
    name: str = "DuckDuckGo Search Tool"
    description: str = "Search the web for a given query."

    def _run(self, query: str) -> str:
        # Invoke DuckDuckGoSearchRun and return the raw search results.
        duckduckgo_tool = DuckDuckGoSearchRun()
        response = duckduckgo_tool.invoke(query)
        return response

    def _get_tool(self):
        # Factory helper that returns a fresh instance of this tool.
        return MyCustomDuckDuckGoTool()
# Fail fast with a clear error if the API key is missing; otherwise assigning None
# to os.environ would raise a confusing TypeError later.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("The OPENAI_API_KEY environment variable is not set.")
os.environ["OPENAI_API_KEY"] = api_key

st.set_page_config(
    page_title="CrewAI Inventory Query Demo!", page_icon=":flag-ca:")
st.title("CrewAI Inventory Query Demo!")
st.header("Let's chat :star2:")
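
# Build the retrieval pieces: a RAG search tool over the CSV knowledge base,
# the web-search tool, and an Excel knowledge source for the crew.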
def prepare_search_tool_and_kb():
    tool = CSVSearchTool(csv='KB.csv')
    excel_source = ExcelKnowledgeSource(file_paths=["Master List 1-2-25.xlsx"])
    Duck_search = MyCustomDuckDuckGoTool()
    return tool, Duck_search, excel_source
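
# Assemble a two-agent crew: agent_1 answers from the CSV/Excel knowledge base,
# agent_2 searches the web; their tasks run sequentially on the same question.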
def prepare_crew():
    tool, Duck_search, excel_source = prepare_search_tool_and_kb()
    agent_1 = Agent(
        role=dedent("""
            Data Knowledge Agent.
            """),
        backstory=dedent("""
            An agent with the ability to search the database and return the relevant answer for the question.
            """),
        goal=dedent("""
            Get a relevant answer to the question.
            """),
        allow_delegation=True,
        verbose=True,  # whether the agent execution should be in verbose mode
        max_iter=5,    # maximum number of iterations before the agent is forced to give its best answer
        llm=ChatOpenAI(model_name="gpt-4o-mini", temperature=0),
        tools=[tool],
    )
    agent_2 = Agent(
        role=dedent("""
            Web Search Agent.
            """),
        backstory=dedent("""
            An agent with the ability to search the web for relevant information based on the asked question.
            """),
        goal=dedent("""
            Get a relevant answer to the question.
            """),
        allow_delegation=False,
        verbose=False,  # whether the agent execution should be in verbose mode
        max_iter=5,     # maximum number of iterations before the agent is forced to give its best answer
        llm=ChatOpenAI(model_name="gpt-4o-mini", temperature=0),
        tools=[Duck_search],  # was `tool=`; Agent expects the `tools` parameter
    )
    task_1 = Task(
        description=dedent("""
            Analyze the CSV file and gather all the relevant information for the following question.
            Question: {question}
            Make sure to collect all the relevant data if there is more than one result.
            Aggregate the results into a single output.
            """),
        expected_output=dedent("""
            A detailed, data-backed answer to the question.
            """),
        agent=agent_1,
    )
    task_2 = Task(
        description=dedent("""
            Search the web for the following question.
            Question: {question}
            Make sure to collect all the relevant data if there is more than one result.
            Aggregate the results into a single output.
            """),
        expected_output=dedent("""
            A detailed, data-backed answer to the question.
            """),
        agent=agent_2,
    )
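    # Run the two tasks in order (Process.sequential) and attach the Excel file
    # as a crew-level knowledge source available to the agents.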
    crew = Crew(
        agents=[agent_1, agent_2],
        tasks=[task_1, task_2],
        verbose=True,                 # verbosity level for logging during execution
        process=Process.sequential,   # the process flow the crew follows (e.g., sequential, hierarchical)
        knowledge_sources=[excel_source],
    )
    return crew
crew = prepare_crew()

WELCOME_MESSAGE = "Hello there! Please ask a question about the inventory."
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": WELCOME_MESSAGE}
    ]
for message in st.session_state.messages:  # display the prior chat messages
    with st.chat_message(message["role"]):
        st.write(message["content"])
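
# On each new prompt, kick off the crew; kickoff() interpolates `inputs` into the
# {question} placeholders in the task descriptions, and the returned CrewOutput's
# `.raw` attribute holds the final text answer.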
if prompt := st.chat_input("Your question"):  # prompt for user input and save it to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)
    with st.chat_message("assistant"):
        with st.spinner("Thinking... please be patient"):
            inputs = {"question": prompt}
            response = crew.kickoff(inputs=inputs)
            response_str = response.raw
            st.write(response_str)
    message = {"role": "assistant", "content": response_str}
    st.session_state.messages.append(message)