# Italian Law Chatbot — Streamlit app.
# (Upload metadata: Pouya, "Upload 8 files", commit 4e76cdd, 9.58 kB — the
# original file-viewer header lines were not valid Python and are kept here
# as a comment only.)
import streamlit as st
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA, LLMChain
from langchain import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.text_splitter import CharacterTextSplitter
import chromadb
# API
# Read the OpenAI API key from Streamlit's secrets store.
# NOTE(review): the key is never passed explicitly to the langchain
# constructors below — presumably it reaches them via the OPENAI_API_KEY
# environment variable that the deployment exports; confirm.
openai_api_key = st.secrets["OPENAI_API_KEY"]
# Define the path to your document files (plain-text law corpora shipped
# alongside the app).
file1 = "./DIVISION OF ASSETS AFTER DIVORCE.txt"
file2 = "./INHERITANCE.txt"
# Function to initialize the OpenAI embeddings and model
def openai_setting():
    """Build the OpenAI components used throughout the app.

    Returns:
        tuple: (OpenAIEmbeddings instance, ChatOpenAI instance configured
        for gpt-3.5-turbo with temperature 0 for deterministic answers).
    """
    embedder = OpenAIEmbeddings()
    chat_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
    return embedder, chat_llm
# Function to split the law content
def law_content_splitter(path, splitter="CIVIL CODE"):
    """Read a law text file and split it into LangChain Documents.

    The file content is divided on occurrences of *splitter* (the heading
    that precedes each article block). Everything before the first
    occurrence (title page / preamble) is discarded; each remaining chunk
    becomes one Document via CharacterTextSplitter.

    Args:
        path: Path to the plain-text law file.
        splitter: Heading marker to split the text on.

    Returns:
        A list of langchain Document objects, one per article block.
    """
    # Explicit encoding: the Italian law texts contain accented characters,
    # and the platform-default codec is not guaranteed to be UTF-8.
    with open(path, encoding="utf-8") as f:
        law_content = f.read()
    # [1:] drops the leading segment that precedes the first marker.
    law_content_by_article = law_content.split(splitter)[1:]
    text_splitter = CharacterTextSplitter()
    return text_splitter.create_documents(law_content_by_article)
# Splitting the content of law documents
divorce_splitted = law_content_splitter(file1)
inheritance_splitted = law_content_splitter(file2)
# Initializing embedding and language model
embedding, llm = openai_setting()
# Define the prompts
# Define the prompts.
# System-style instructions for the divorce QA chain; {context} receives the
# retrieved article chunks and {question} the user's query (the variables the
# "stuff" chain fills in).
divorce_prompt = """As a specialized bot in divorce law, you should offer accurate insights on Italian divorce regulations.
You should always cite the article numbers you reference.
Ensure you provide detailed and exact data.
If a query doesn't pertain to the legal documents, you should remind the user that it falls outside your expertise.
You should be adept at discussing the various Italian divorce categories, including fault-based divorce, mutual-consent divorce, and divorce due to infidelity.
You should guide users through the prerequisites and procedures of each divorce type, detailing the essential paperwork, expected duration, and potential legal repercussions.
You should capably address queries regarding asset allocation, child custody, spousal support, and other financial concerns related to divorce, all while staying true to Italian legislation.
{context}
Question: {question}"""
DIVORCE_BOT_PROMPT = PromptTemplate(
    template=divorce_prompt, input_variables=["context", "question"]
)
# define inheritance prompt
# Same template structure as the divorce prompt: {context} receives retrieved
# article chunks, {question} the user's query.
inheritance_prompt = """As a specialist in Italian inheritance law, you should deliver detailed and accurate insights about inheritance regulations in Italy.
You should always cite the article numbers you reference.
When responding to user queries, you should always base your answers on the provided context.
Always cite the specific article numbers you mention and refrain from speculating.
Maintain precision in all your responses.
If a user's question doesn't align with the legal documents, you should point out that it's beyond your domain of expertise.
You should elucidate Italian inheritance law comprehensively, touching on topics such as testamentary inheritance, intestate inheritance, and other pertinent subjects.
Make sure to elaborate on the obligations and rights of inheritors, the methodology of estate distribution, asset assessment, and settling debts, all while adhering to Italian law specifics.
You should adeptly tackle questions about various will forms like holographic or notarial wills, ensuring you clarify their legitimacy within Italian jurisdiction.
Offer advice on creating a will, naming heirs, and managing potential conflicts.
You should provide detailed information on tax nuances associated with inheritance in Italy, inclusive of exemptions, tax rates, and mandatory disclosures.
{context}
Question: {question}"""
INHERITANCE_BOT_PROMPT = PromptTemplate(
    template=inheritance_prompt, input_variables=["context", "question"]
)
# Setup for Chroma databases and RetrievalQA.
# NOTE(review): both vector stores persist into the SAME directory; with
# Chroma this may put both corpora into one collection so each retriever
# could surface chunks from the other corpus — confirm the stores are
# actually kept separate (e.g. via distinct collection names/directories).
chroma_directory = "./docs/chroma/"

# Vector store + QA chain for the inheritance corpus. "stuff" packs all
# retrieved chunks into a single prompt ({context} in the template above).
inheritance_db = Chroma.from_documents(
    documents=inheritance_splitted,
    embedding=embedding,
    persist_directory=chroma_directory,
)
inheritance = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=inheritance_db.as_retriever(),
    chain_type_kwargs={"prompt": INHERITANCE_BOT_PROMPT},
)

# Vector store + QA chain for the divorce corpus.
divorce_db = Chroma.from_documents(
    documents=divorce_splitted, embedding=embedding, persist_directory=chroma_directory
)
divorce = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=divorce_db.as_retriever(),
    chain_type_kwargs={"prompt": DIVORCE_BOT_PROMPT},
)
# Define the tools for the chatbot.
# Each RetrievalQA chain is exposed to the agent as a named Tool; the
# description text is what the ReAct agent reads when deciding which tool
# to invoke, so it is part of runtime behavior.
tools = [
    Tool(
        name="Divorce Italian law QA System",
        func=divorce.run,
        description="useful for when you need to answer questions about divorce laws in Italy.Give also the number of article you use for it.",
    ),
    Tool(
        name="Inheritance Italian law QA System",
        func=inheritance.run,
        description="useful for when you need to answer questions about inheritance laws in Italy.Give also the number of article you use for it.",
    ),
]
# Initialize conversation memory.
# NOTE(review): the memory is attached to the executor below, but the
# "zero-shot-react-description" agent's prompt has no {chat_history}
# placeholder, so past turns are likely never injected into the LLM
# prompt — confirm whether multi-turn memory actually works here.
memory = ConversationBufferMemory(
    memory_key="chat_history", input_key="input", output_key="output"
)
# initialize ReAct agent.
# initialize_agent builds a complete executor; only its inner .agent is
# reused so a new executor can be constructed with memory attached.
react = initialize_agent(tools, llm, agent="zero-shot-react-description")
agent = AgentExecutor.from_agent_and_tools(
    tools=tools, agent=react.agent, memory=memory, verbose=False
)
# Define the chatbot function.
def questions(question):
    """Forward *question* to the ReAct agent executor and return its answer."""
    answer = agent.run(question)
    return answer
def chatbot1(question):
    """Run the agent on *question*, returning a fallback message on failure.

    Any error raised by the agent chain (API failures, output-parsing
    errors, etc.) is swallowed and replaced with a polite retry prompt so
    the UI never surfaces a traceback.
    """
    try:
        return questions(question)
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the deliberate best-effort
    # behavior without masking interpreter-level signals.
    except Exception:
        return "I'm sorry, I'm having trouble understanding your question. Could you please rephrase it or provide more context"
def is_greeting(input_str):
    """Return True if *input_str* contains a greeting as a whole word/phrase.

    Matching is case-insensitive and anchored on word boundaries. The
    previous implementation used plain substring containment, which
    misclassified ordinary questions as greetings — e.g. "this" matched
    because it contains "hi", "you" matched "yo", and "children" matched
    "hi" — sending real legal questions to the canned greeting reply.
    """
    import re  # local import keeps this fix self-contained

    greetings = [
        "hello",
        "hi",
        "hey",
        "greetings",
        "good morning",
        "good afternoon",
        "good evening",
        "hi there",
        "hello there",
        "hey there",
        "howdy",
        "sup",
        "what's up",
        "how's it going",
        "how are you",
        "good day",
        "salutations",
        "hiya",
        "yo",
        "hola",
        "bonjour",
        "g'day",
        "how do you do",
        "what’s new",
        "what’s up",
        "how’s everything",
        "how are things",
        "how’s life",
        "how’s your day",
        "how’s your day going",
        "good to see you",
        "nice to see you",
        "great to see you",
        "lovely to see you",
        "how have you been",
        "what’s going on",
        "what’s happening",
        "what’s new",
        "long time no see",
        # Italian greetings
        "ciao",
        "salve",
        "buongiorno",
        "buona sera",
        "buonasera",
        "buon pomeriggio",
        "buonpomeriggio",
        "come stai",
        "comestai",
        "come va",
        "comeva",
        "come sta",
        "comesta",
        "piacere di conoscerti",
        "piacere",
        "benvenuto",
        "ben trovato",
    ]
    text = input_str.lower()
    # \b anchors each greeting phrase at word boundaries so greetings
    # embedded inside longer words no longer trigger a match.
    return any(
        re.search(r"\b" + re.escape(greet) + r"\b", text) is not None
        for greet in greetings
    )
def chatbot(input_str):
    """Top-level dispatcher: greet greetings, otherwise answer via the agent.

    Returns either the canned greeting reply, the agent's answer, or a
    fallback apology when the agent responds with "N/A".
    """
    # Greetings short-circuit the expensive agent call.
    if is_greeting(input_str):
        return "Hello! Ask me your question about Italian Divorce or Inheritance Law?"

    answer = chatbot1(input_str)
    if answer == "N/A":
        return "I'm sorry, I'm having trouble understanding your question. Could you please rephrase it or provide more context"
    return answer
# Streamlit Chat UI — page chrome (must run before any other st.* output).
st.set_page_config(
    page_title="Italian Law Chatbot",
    page_icon="⚖️",
    layout="centered",
    initial_sidebar_state="auto",
)
st.title("Italian Law Chatbot 🏛️")
# NOTE(review): the "blog post" / "Prompt Examples List" links both point at
# a generic Streamlit tutorial and "GitHub Repository" at sattari.org —
# verify these are the intended destinations.
st.info(
    "Check out the full tutorial to build this app in our [📝 blog post](https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/) — "
    "[GitHub Repository](https://sattari.org)",
    icon="ℹ️",
)
st.success(
    "Check out [Prompt Examples List](https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/) to learn how to interact with this ChatBot 🤗 ",
    icon="✅",
)
# Initialize session state for conversation history.
# st.session_state survives Streamlit's script reruns, so the transcript
# persists across user interactions; seed it with a welcome message once.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": "Hello! I'm here to help you with Italian Divorce or Inheritance Law. How can I assist you today?",
        }
    ]
# Display previous messages (the whole script reruns on each interaction,
# so the full transcript is re-rendered every time).
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Handle new user input; chat_input returns None (falsy) when the user has
# not submitted anything on this rerun.
if user_input := st.chat_input(
    "Ask a question about Italian Divorce or Inheritance Law:"
):
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    # Generate and display chatbot response.
    with st.chat_message("assistant"):
        response_placeholder = st.empty()
        response = chatbot(user_input)  # Your existing chatbot function
        response_placeholder.markdown(response)
    # Append the response to the conversation history so it is re-rendered
    # on subsequent reruns.
    st.session_state.messages.append({"role": "assistant", "content": response})