"""
Simple Chatbot
"""

import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
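# A minimal .env file picked up by load_dotenv() would contain a single line
# (hypothetical token value shown):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx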

# Initialize the client against Hugging Face's OpenAI-compatible Inference API.
# The base_url below assumes the serverless endpoint; adjust it if the model is served elsewhere.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

model_link = "mistralai/Mistral-7B-Instruct-v0.2"

def reset_conversation():
    """Reset the stored conversation and chat history."""
    st.session_state.conversation = []
    st.session_state.messages = []

# Set the temperature value directly in the code
temperature = 0.5

# Add a button to clear conversation
if st.button('Reset Chat'):
    reset_conversation()

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

st.title("Mistral-7B Chatbot")
st.subheader("Ask me anything!")

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
prompt = st.chat_input("Type your message here...")

if prompt:
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            # Send the full conversation so the model sees prior turns
            response = client.chat.completions.create(
                model=model_link,
                messages=st.session_state.messages,
                max_tokens=3000,
                temperature=temperature,
            )

            response_content = response.choices[0].message.content.strip()
            st.markdown(response_content)
            st.session_state.messages.append({"role": "assistant", "content": response_content})

        except Exception as e:
            st.markdown("An error occurred. Please try again later.")
            st.markdown(f"Error details: {e}")