Spaces:
Sleeping
Sleeping
File size: 2,139 Bytes
a9c7401 9f54a3b 9892e5a 9f54a3b 9892e5a 9f54a3b 7986268 a9c7401 9f54a3b 7986268 002b092 142827c a9c7401 142827c a9c7401 142827c 7986268 9f54a3b a9c7401 ed8bd19 9f54a3b a9c7401 d36f2e1 9f54a3b a9c7401 ed8bd19 a9c7401 9f54a3b a9c7401 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
"""
Simple Chatbot
@author: Nigel Gebodh
@email: [email protected]
"""
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
load_dotenv()  # load HUGGINGFACEHUB_API_TOKEN (and any other vars) from a local .env file

# Initialize the client.
# Hugging Face's Inference API exposes an OpenAI-compatible endpoint, so the
# official OpenAI SDK can be used by overriding `base_url`.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
)

# Hugging Face model id sent with every completion request.
model_link = "mistralai/Mistral-7B-Instruct-v0.2"
def reset_conversation():
    """Wipe the stored chat state so the next rerun starts a fresh conversation."""
    for key in ("conversation", "messages"):
        st.session_state[key] = []
# Set the temperature value directly in the code
# (not exposed in the UI; 0.5 = moderately creative sampling)
temperature = 0.5

# Add a button to clear conversation
if st.button('Reset Chat'):
    reset_conversation()

# Initialize chat history on first load; session_state persists across
# Streamlit's script reruns, so this only fires once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []

st.title("Mistral-7B Chatbot")
st.subheader("Ask me anything!")
# Replay the stored transcript on every rerun so the chat history stays
# visible (Streamlit re-executes the whole script on each interaction).
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
# Accept user input
prompt = st.chat_input("Type your message here...")
if prompt:
    # Echo the user's message immediately.
    with st.chat_message("user"):
        st.markdown(prompt)
    # Persist it so the full history is sent to the model on every turn.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Query the model and render the assistant's reply.
    with st.chat_message("assistant"):
        try:
            completion = client.chat.completions.create(
                model=model_link,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temperature,
                max_tokens=3000,
            )
            # BUG FIX: openai>=1.0 (the `OpenAI()` client used here) returns a
            # pydantic ChatCompletion object, not a dict. Subscripting it as
            # ['choices'][0]['message']['content'] raises TypeError on every
            # successful call; use attribute access instead.
            response = completion.choices[0].message.content
            st.markdown(response)
            st.session_state.messages.append(
                {"role": "assistant", "content": response}
            )
        except Exception as e:  # top-level boundary: surface API/network errors to the user
            st.markdown("An error occurred. Please try again later.")
            st.markdown(f"Error details: {e}")
|