"""Streamlit chat UI for Mistral-7B-Instruct via the Hugging Face inference API.

Uses the OpenAI-compatible v1 client pointed at the HF endpoint; conversation
history is kept in ``st.session_state.messages`` across reruns.
"""

import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()

# Initialize the OpenAI-compatible client against the Hugging Face endpoint.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)

# Model link for Mistral.
MODEL_LINK = "mistralai/Mistral-7B-Instruct-v0.2"

# Sampling temperature, fixed in code (no UI control).
TEMPERATURE = 0.5


def reset_conversation():
    """Reset the conversation by clearing the stored message history."""
    st.session_state.messages = []


# Add reset button to clear conversation.
st.button("Reset Chat", on_click=reset_conversation)

# Initialize chat history on first run.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay chat messages from history on every app rerun (Streamlit reruns
# the whole script on each interaction).
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input.
if prompt := st.chat_input("Type your message here..."):
    # Display the user message in a chat message container.
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add the user message to chat history.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Interact with the model.
    try:
        # Prepend the system prompt to the stored history for the API call.
        messages_for_api = [
            {"role": "system", "content": "You are a helpful assistant."}
        ] + st.session_state.messages

        response = client.chat.completions.create(
            model=MODEL_LINK,
            messages=messages_for_api,
            temperature=TEMPERATURE,
            max_tokens=150,  # Adjust the max tokens according to your needs.
        )

        # BUG FIX: the v1 OpenAI client returns a ChatCompletion object,
        # not a dict — attribute access is required; subscripting raised
        # TypeError before this fix.
        assistant_response = response.choices[0].message.content

        # Display the assistant response in a chat message container.
        with st.chat_message("assistant"):
            st.markdown(assistant_response)

        # Append the assistant's response to the chat history.
        st.session_state.messages.append(
            {"role": "assistant", "content": assistant_response}
        )

    except Exception as e:
        # Surface a friendly message to the user; the raw error is written
        # below it for debugging. NOTE(review): consider logging instead of
        # exposing the exception to end users.
        with st.chat_message("assistant"):
            st.markdown(
                "Sorry, I couldn't process your request. Please try again later."
            )
            st.write(e)