"""Streamlit chatbot backed by a Hugging Face Inference Endpoint via LangChain.

Run with:  streamlit run app.py   (requires HUGGINGFACE_API_TOKEN in the env)
"""

import os

import streamlit as st
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Hugging Face model served through the Inference API.
REPO_ID = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Prompt contract: the chain's memory fills {history}; the user fills {user_input}.
TEMPLATE = """The following is a conversation between a user and an AI assistant.
history:{history}
Final Message by Human: {user_input}
Final Message by AI: """


def _build_chain() -> LLMChain:
    """Build the LLM chain once; halt the app with an error if the token is missing.

    Returns:
        An LLMChain wired to a ConversationBufferMemory. Because the chain owns
        the memory, it injects {history} into the prompt AND records each
        exchange itself — callers must not pass "history" or call
        memory.save_context manually (doing both duplicates every turn).
    """
    sec_key = os.getenv("HUGGINGFACE_API_TOKEN")
    if not sec_key:
        # The original wrote os.environ[...] = None here, which raises
        # TypeError when the variable is unset. Fail loudly and clearly instead.
        st.error("HUGGINGFACE_API_TOKEN is not set; export it before starting the app.")
        st.stop()

    # Pass the token explicitly rather than round-tripping it through os.environ.
    llm = HuggingFaceEndpoint(
        repo_id=REPO_ID,
        temperature=0.7,
        huggingfacehub_api_token=sec_key,
    )
    prompt = PromptTemplate(
        template=TEMPLATE,
        input_variables=["history", "user_input"],
    )
    return LLMChain(prompt=prompt, llm=llm, memory=ConversationBufferMemory())


# Streamlit re-executes this script on every widget interaction. Keep the chain
# (and therefore its conversation memory) in session_state so history survives
# reruns instead of being rebuilt — and wiped — each time.
if "llm_chain" not in st.session_state:
    st.session_state.llm_chain = _build_chain()

st.title("AI Chatbot")
st.write("Welcome to the AI Chatbot! Ask anything you like.")

# User input
user_input = st.text_input("You:", key="input")

if st.button("Send"):
    if user_input:
        # Only "user_input" is supplied: the chain's memory provides {history}
        # and persists the new exchange automatically after the call.
        response = st.session_state.llm_chain.invoke({"user_input": user_input})
        st.text_area("ChatBot:", response["text"], height=100)