import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chains.question_answering import load_qa_chain
from langchain.memory import ConversationBufferMemory
from langchain_chroma import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.prompts import PromptTemplate
from langchain_groq import ChatGroq

st.title("Chatbot")

# Load environment variables
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
assert GROQ_API_KEY, "GROQ_API_KEY environment variable not set."

# One-time setup in session state
if 'initialized' not in st.session_state:
    st.session_state.initialized = False
    try:
        with st.spinner("Initializing..."):
            # Initialize embeddings model (small, fast model that runs on CPU)
            model_path = "sentence-transformers/all-MiniLM-L12-v2"
            st.session_state.embedding_function = HuggingFaceEmbeddings(
                model_name=model_path,
                model_kwargs={'device': 'cpu'},
                encode_kwargs={'normalize_embeddings': False}
            )

            # Set up document search over the persisted Chroma store
            persist_directory = "doc_db"
            st.session_state.docsearch = Chroma(
                persist_directory=persist_directory,
                embedding_function=st.session_state.embedding_function
            )

            # Initialize ChatGroq model
            st.session_state.chat_model = ChatGroq(
                model="llama-3.1-8b-instant",
                temperature=0,
                api_key=GROQ_API_KEY
            )

            # Define prompt template and conversation memory
            template = """You are a chatbot having a conversation with a human. Your name is Devrim.
Given the following extracted parts of a long document and a question, create a final answer.
If the answer is not in the document or irrelevant, just say that you don't know, don't try to make up an answer.

{context}

{chat_history}
Human: {human_input}
Chatbot:"""
            prompt = PromptTemplate(
                input_variables=["chat_history", "human_input", "context"],
                template=template
            )
            st.session_state.memory = ConversationBufferMemory(
                memory_key="chat_history",
                input_key="human_input"
            )

            # Load QA chain
            st.session_state.qa_chain = load_qa_chain(
                llm=st.session_state.chat_model,
                chain_type="stuff",
                memory=st.session_state.memory,
                prompt=prompt
            )

            st.session_state.initialized = True
            st.success("Initialization successful.")
    except Exception as e:
        st.session_state.initialized = False
        st.error(f"Initialization failed: {e}")

# Clear chat history button
if st.button("Clear Chat History"):
    if 'memory' in st.session_state:
        st.session_state.memory.clear()
    st.rerun()  # Refresh the app to reflect the cleared history

# Display chat history if initialized
if st.session_state.initialized and 'memory' in st.session_state:
    if st.session_state.memory.buffer_as_messages:
        for message in st.session_state.memory.buffer_as_messages:
            if message.type == "ai":
                st.chat_message(name="ai", avatar="🤖").write(message.content)
            else:
                st.chat_message(name="human", avatar="👤").write(message.content)

# Input for new query
query = st.chat_input("Ask something")
if query:
    try:
        with st.spinner("Answering..."):
            # Perform similarity search and get response (k=1 keeps retrieval fast)
            docs = st.session_state.docsearch.similarity_search(query, k=1)
            response = st.session_state.qa_chain(
                {"input_documents": docs, "human_input": query},
                return_only_outputs=True
            )["output_text"]

        # Display new messages
        st.chat_message(name="human", avatar="👤").write(query)
        st.chat_message(name="ai", avatar="🤖").write(response)
    except Exception as e:
        st.error(f"An error occurred: {e}")
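
# --- Ingestion sketch (not part of the app above) ---------------------------
# The app reads an existing Chroma store from persist_directory="doc_db" but
# never builds it. The minimal sketch below shows one way such a store could be
# created with a separate one-off script (hypothetical name: ingest.py), assuming
# the source material is plain-text files in a local docs/ folder; the folder
# path, file glob, and chunking parameters are illustrative assumptions, not
# taken from the original code.
#
# import glob
#
# from langchain_chroma import Chroma
# from langchain_community.document_loaders import TextLoader
# from langchain_community.embeddings import HuggingFaceEmbeddings
# from langchain_text_splitters import RecursiveCharacterTextSplitter
#
# # Use the same embedding model as the app so query and index vectors match.
# embedding_function = HuggingFaceEmbeddings(
#     model_name="sentence-transformers/all-MiniLM-L12-v2",
#     model_kwargs={'device': 'cpu'},
#     encode_kwargs={'normalize_embeddings': False}
# )
#
# # Load every .txt file from the assumed docs/ folder.
# documents = []
# for path in glob.glob("docs/*.txt"):
#     documents.extend(TextLoader(path, encoding="utf-8").load())
#
# # Split into overlapping chunks sized for the "stuff" chain's prompt.
# splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
# chunks = splitter.split_documents(documents)
#
# # Build and persist the vector store the app later opens as "doc_db".
# Chroma.from_documents(
#     documents=chunks,
#     embedding=embedding_function,
#     persist_directory="doc_db"
# )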