Update app.py
app.py CHANGED
@@ -1,6 +1,9 @@
 import streamlit as st
 from openai import OpenAI
 import os
+import base64
+import io
+

 # Set your API key and base URL
 API_KEY = os.getenv("API_KEY")
@@ -13,15 +16,13 @@ client = OpenAI(
 )

 # App Title and Sidebar Configuration
-st.title("
+st.title("Advance Reasoning!!!")

 # Sidebar: Model Selection and Advanced Settings
 st.sidebar.header("Configuration")
 model_options = [
     "deepseek/deepseek-v3-base:free",
-    "
-    "gpt-4",
-    # Add more models as needed
+    "google/gemini-2.5-pro-exp-03-25:free",  # Gemini model with image support
 ]
 selected_model = st.sidebar.selectbox("Select Model", model_options)

@@ -43,12 +44,42 @@ for msg in st.session_state.messages:
     else:
         st.chat_message("user").write(msg["content"])

-# Input for New Message
-
+# Input for New Message (text input)
+user_text = st.text_input("Type your message here...", key="input")
+
+# If the Gemini model is selected, show an image uploader option
+uploaded_image = None
+if selected_model == "google/gemini-2.5-pro-exp-03-25:free":
+    uploaded_image = st.file_uploader("Upload an image (optional)", type=["png", "jpg", "jpeg"])

-
+# When the user clicks "Send", process the input
+if st.button("Send") and (user_text or uploaded_image):
     # Append the user's message to the conversation history
-
+    # For Gemini, if an image is uploaded, create a message with both text and image
+    if selected_model == "google/gemini-2.5-pro-exp-03-25:free" and uploaded_image is not None:
+        # Read image bytes and encode as a base64 data URL
+        image_bytes = uploaded_image.read()
+        encoded_image = base64.b64encode(image_bytes).decode('utf-8')
+        # Attempt to determine image type from the file name extension
+        ext = uploaded_image.name.split('.')[-1]
+        data_url = f"data:image/{ext};base64,{encoded_image}"
+
+        # Construct a combined message payload as a list of objects
+        user_message = {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_text},
+                {"type": "image_url", "image_url": {"url": data_url}}
+            ]
+        }
+    else:
+        # Regular text message payload
+        user_message = {
+            "role": "user",
+            "content": user_text
+        }
+
+    st.session_state.messages.append(user_message)

     # Use a spinner while waiting for the API response
     with st.spinner("Waiting for response..."):
@@ -62,6 +93,7 @@ if st.button("Send") and user_input:
                 model=selected_model,
                 messages=st.session_state.messages,
             )
+            # Assuming the API returns a similar structure for assistant messages
             assistant_reply = response.choices[0].message.content
         except Exception as e:
             assistant_reply = f"An error occurred: {str(e)}"
@@ -69,5 +101,5 @@ if st.button("Send") and user_input:
     # Append the assistant's reply to the conversation history
     st.session_state.messages.append({"role": "assistant", "content": assistant_reply})

-    #
+    # Refresh the app to update the chat window with new messages
     st.rerun()
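For reference, a minimal standalone sketch of the same multimodal request outside Streamlit, assuming an OpenAI-compatible endpoint read from the environment; BASE_URL, its OpenRouter-style default, the image_to_data_url helper, and the photo.jpg path are illustrative assumptions rather than part of app.py. It builds the same base64 data-URL content part that the updated Send handler constructs:

import base64
import mimetypes
import os

from openai import OpenAI

# Same env-based configuration pattern as app.py; the default base URL is an assumption.
client = OpenAI(
    api_key=os.getenv("API_KEY"),
    base_url=os.getenv("BASE_URL", "https://openrouter.ai/api/v1"),
)

def image_to_data_url(path: str) -> str:
    # Guess the MIME type from the file extension; fall back to PNG if unknown.
    mime = mimetypes.guess_type(path)[0] or "image/png"
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return f"data:{mime};base64,{encoded}"

# Text part plus an image_url part carrying the base64 data URL, as in the diff above.
response = client.chat.completions.create(
    model="google/gemini-2.5-pro-exp-03-25:free",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image."},
                {"type": "image_url", "image_url": {"url": image_to_data_url("photo.jpg")}},
            ],
        }
    ],
)
print(response.choices[0].message.content)

Using mimetypes.guess_type here is slightly more robust than splitting on the file-name extension, since it maps "jpg" to the standard "image/jpeg" type.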