Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 from openai import OpenAI  # Assuming Nvidia client is available in the same library, adjust if necessary
 import streamlit as st
-
 import os
+from datetime import datetime
 
 # Initialize Nvidia client
 client = OpenAI(
@@ -11,6 +11,13 @@ client = OpenAI(
 
 st.title("ChatGPT-like clone with Nvidia Model")
 
+# Sidebar with instructions and Clear Session button
+with st.sidebar:
+    st.markdown("### Instructions 🤖\nThis is a basic chatbot. Ask anything, and the AI will try to help you!")
+    if st.button("Clear Session"):
+        st.session_state.clear()
+    st.text(f"Copyright © 2010-{datetime.now().year} Present Yiqiao Yin")
+
 # Initialize session state variables if not already present
 if "openai_model" not in st.session_state:
     st.session_state["openai_model"] = "nvidia/llama-3.1-nemotron-70b-instruct"
@@ -33,21 +40,22 @@ if prompt := st.chat_input("What is up?"):
 
     # Display assistant's message while waiting for the response
     with st.chat_message("assistant"):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        with st.spinner("The assistant is thinking... Please wait."):
+            # Create Nvidia completion request with full conversation history
+            stream = client.chat.completions.create(
+                model=st.session_state["openai_model"],
+                messages=st.session_state.messages,  # Include all previous messages in the API call
+                temperature=0.5,
+                top_p=0.7,
+                max_tokens=1024,
+                stream=True,
+            )
+            response_chunks = []
+            for chunk in stream:
+                if chunk.choices[0].delta.content is not None:
+                    response_chunks.append(chunk.choices[0].delta.content)
+            response = "".join(response_chunks)
+            st.markdown(response)
 
     # Store the assistant response in the session state
    st.session_state.messages.append({"role": "assistant", "content": response})
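The committed block above buffers every streamed chunk and renders the reply only after the stream has finished. A minimal sketch of an incremental alternative is shown below; it assumes the same `client`, model id, and `st.session_state.messages` history as app.py above and updates a Streamlit placeholder as each chunk arrives, so the reply appears token by token.

# Sketch only, not part of this commit: render the streamed reply incrementally.
# Assumes the same `client`, model id, and `st.session_state.messages` as app.py above.
with st.chat_message("assistant"):
    stream = client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=st.session_state.messages,
        temperature=0.5,
        top_p=0.7,
        max_tokens=1024,
        stream=True,
    )
    placeholder = st.empty()  # container that is redrawn as tokens arrive
    response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            response += delta
            placeholder.markdown(response)  # progressively update the visible reply

st.session_state.messages.append({"role": "assistant", "content": response})

Both variants end up with the same `response` string stored in the session state; the placeholder version simply shows the text while it is still being generated, which also makes the spinner largely unnecessary.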