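# Streamlit chat UI for Mistral-7B-Instruct, served through the Hugging Face
# Inference API via the OpenAI-compatible client.
#
# A minimal way to run it (assuming this file is saved as app.py and a .env
# file in the same directory defines HUGGINGFACEHUB_API_TOKEN):
#
#   streamlit run app.py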
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

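# Load HUGGINGFACEHUB_API_TOKEN (and any other variables) from a local .env file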
load_dotenv()

# Initialize the client
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.getenv("HUGGINGFACEHUB_API_TOKEN")
)

# Hugging Face model ID for Mistral-7B-Instruct
model_link = "mistralai/Mistral-7B-Instruct-v0.2"

def reset_conversation():
    """Clear the chat history stored in session state."""
    st.session_state.messages = []

# Set the temperature value directly in the code
temperature = 0.5

# Add reset button to clear conversation
st.button("Reset Chat", on_click=reset_conversation)

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
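# st.chat_input returns None until the user submits text, so the walrus
# assignment only runs this block when a new message arrives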
if prompt := st.chat_input("Type your message here..."):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Interact with the model
    try:
        # Send the user and system messages to the API
        messages_for_api = [{"role": "system", "content": "You are a helpful assistant."}] + st.session_state.messages

        response = client.chat.completions.create(
            model=model_link,
            messages=messages_for_api,
            temperature=temperature,
            max_tokens=150  # Adjust the max tokens according to your needs
        )

        # The OpenAI v1 client returns a response object, not a dict
        assistant_response = response.choices[0].message.content

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            st.markdown(assistant_response)

        # Append the assistant's response to the chat history
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})

    except Exception as e:
        # Show a friendly message, then surface the raw exception for debugging
        with st.chat_message("assistant"):
            st.markdown("Sorry, I couldn't process your request. Please try again later.")
        st.write(e)