# llama-agents / app.py — Hugging Face Space by kneau007
# (scraped page header converted to comments; last commit: "Update app.py", 2e70e10, verified)
import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
import os
import re
from datetime import datetime
import pandas as pd
import requests
from crewai import Agent, Crew, Process, Task
from crewai_tools import tool
from langchain.agents import load_tools
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
import gradio as gr
from huggingface_hub import InferenceClient
def format_response(response: str) -> list[str]:
    """Split a bracketed, comma-separated agent response into entries.

    Input looks like ``"[entry one], [entry two]"``; the regex splits on the
    ``], [`` boundaries and the surrounding brackets are stripped from each
    piece.

    Fix: the original annotation claimed a ``str`` return, but the function
    has always returned a list of strings.
    """
    entries = re.split(r"(?<=]), (?=\[)", response)
    return [entry.strip("[]") for entry in entries]
# The "human" tool lets an agent pause and ask the operator questions
# directly on the console.
human_tools = load_tools(["human"])

# SECURITY: a live Groq API key used to be hard-coded on the next line and
# committed to source. It has been removed — rotate the leaked key and
# supply a fresh one via the GROQ_API_KEY environment variable (e.g. a
# Space secret) before running.
if not os.environ.get("GROQ_API_KEY"):
    raise RuntimeError("GROQ_API_KEY environment variable is not set")

# ChatGroq reads GROQ_API_KEY from the environment.
llm = ChatGroq(temperature=0, model_name="llama3-70b-8192")
# Interviewer agent: gathers the user's financial details. Equipped with the
# "human" tool so it can prompt the operator for answers; delegation is off
# because it should do the questioning itself.
customer_communicator=Agent(
    role="financial communicator",
    goal="Gather the customer financial situation.",
    backstory="""You are experienced in communicating with customer and asking about their financial details""",
    verbose=True,
    allow_delegation=False,
    llm=llm,
    max_iter=5,  # cap reasoning iterations to bound runtime/cost
    memory=True,
    tools=human_tools,
)
# Advisor agent: turns the gathered details into personalized financial
# advice (Indian-market focus per the backstory). No tools — it only reasons
# over the context produced by the communicator's task.
comprehensive_financial_advisor = Agent(
    role="Senior Financial Advisor",
    goal="Provide user with personalized financial advice and management tools, including investment recommendations, budget tracking, financial goal setting, and market analysis",
    backstory="""You are an expert in comprehensive financial planning, investment strategies, budgeting, and market analysis,
helping users achieve their financial goals through tailored advice and insights in indian market""",
    verbose=True,
    allow_delegation=False,
    llm=llm,
    max_iter=5,  # cap reasoning iterations to bound runtime/cost
    memory=True,
)
# Task 1: interview the user (income, expenses, savings, loans). Runs first
# in the sequential process; its output feeds the planning task via context.
financial_communicator= Task(
    description="Ask customer financial income per month basis and gather the information about their monthly expenses and their saving and is there any loan",
    expected_output="Gather all the financial details of user.",
    agent=customer_communicator,
)
# Task 2: produce the personalized plan from the interview results.
# NOTE(review): `name` and `steps` are not standard crewai Task fields in
# some library versions — confirm the installed crewai accepts them (they
# may be silently ignored or rejected by pydantic validation).
personalized_financial_planning_task = Task(
    name="Personalized Financial Planning Session",
    description="Customize the financial planning of customer",
    steps=[
        "Assess the user's current financial situation, including income, expenses, assets, and liabilities",
        "Discuss the user's short-term and long-term financial goals",
        "Provide personalized investment recommendations based on the user's risk tolerance and financial goals",
        "Create a realistic budget and tracking plan to help the user manage their expenses",
        "Set actionable financial goals and milestones to monitor progress",
        "Offer ongoing market analysis and updates to keep the user informed of relevant financial trends"
    ],
    expected_output="Give a 1-paragraph report for the user, providing advice by following the steps",
    agent=comprehensive_financial_advisor ,
    # context wires Task 1's output in as input for this task
    context =[financial_communicator],
)
def financial_advisor_response(message, history):
    """Assemble the two-agent crew and run it, returning the final output.

    ``message`` and ``history`` are accepted to match the chat-callback
    contract but are not fed into the crew: the communicator agent gathers
    the user's details itself via its human tool.
    """
    advisor_crew = Crew(
        process=Process.sequential,  # interview first, then planning
        agents=[customer_communicator, comprehensive_financial_advisor],
        tasks=[financial_communicator, personalized_financial_planning_task],
        manager_llm=llm,
        verbose=2,
        full_output=True,
        share_crew=False,
        max_iter=15,
    )
    return advisor_crew.kickoff()
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Gradio ChatInterface callback: yield the crew's reply for *message*.

    Parameters
    ----------
    message : str
        Latest user message from the chat box.
    history : list[tuple[str, str]]
        Prior (user, assistant) exchanges, supplied by Gradio.
    system_message, max_tokens, temperature, top_p
        Required by the ChatInterface ``additional_inputs`` wiring but
        currently unused — the reply comes from the CrewAI pipeline, not
        from a direct chat-completion call.
    """
    # Cleanup: the previous version built an OpenAI-style `messages` list
    # that was never used, set `response = ""` after the yield (unreachable
    # dead state), and carried a commented-out InferenceClient streaming
    # loop. All of that dead code has been removed; behavior is unchanged.
    yield financial_advisor_response(message, history)
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Chat UI. The sliders/textbox are passed to `respond` as additional inputs,
# though the current `respond` implementation ignores them (the crew
# generates the reply instead of a direct model call).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

# Launch the app only when executed as a script.
if __name__ == "__main__":
    demo.launch()