# deepseek-V3 / app.py
import streamlit as st
from openai import OpenAI
import os
import base64
import io
# Set your API key and base URL
API_KEY = os.getenv("API_KEY")
BASE_URL = "https://openrouter.ai/api/v1"
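# Note: API_KEY must be supplied via the environment (e.g. a host/Space secret);
# the OpenAI client errors if no key is available.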
# Initialize the OpenAI client with OpenRouter settings
client = OpenAI(
    base_url=BASE_URL,
    api_key=API_KEY,
)
# App Title and Sidebar Configuration
st.title("Advanced Reasoning!!!")
# Sidebar: Model Selection and Advanced Settings
st.sidebar.header("Configuration")
model_options = [
    "deepseek/deepseek-v3-base:free",
    "google/gemini-2.5-pro-exp-03-25:free",  # Gemini model with image support
]
selected_model = st.sidebar.selectbox("Select Model", model_options)
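# OpenRouter reads the optional HTTP-Referer and X-Title headers to attribute
# requests to your app on openrouter.ai; the placeholder defaults below work fine.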
with st.sidebar.expander("Advanced Settings"):
    referer = st.text_input("HTTP-Referer", value="http://your-site-url.com")
    title = st.text_input("X-Title", value="Your Site Title")
# Initialize session state for storing the conversation history
if 'messages' not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Hello! How can I help you today?"}
    ]
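# st.session_state persists across reruns, so the history survives the st.rerun() call below.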
# Chat Window: Display conversation using chat message components
st.subheader("Chat Window")
for msg in st.session_state.messages:
    if msg["role"] == "assistant":
        st.chat_message("assistant").write(msg["content"])
    else:
        st.chat_message("user").write(msg["content"])
# Input for New Message (text input)
user_text = st.text_input("Type your message here...", key="input")
# If the Gemini model is selected, show an image uploader option
uploaded_image = None
if selected_model == "google/gemini-2.5-pro-exp-03-25:free":
    uploaded_image = st.file_uploader("Upload an image (optional)", type=["png", "jpg", "jpeg"])
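# st.file_uploader returns an UploadedFile (file-like) object, or None if nothing was uploaded.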
# When the user clicks "Send", process the input
if st.button("Send") and (user_text or uploaded_image):
    # Append the user's message to the conversation history
    # For Gemini, if an image is uploaded, create a message with both text and image
    if selected_model == "google/gemini-2.5-pro-exp-03-25:free" and uploaded_image is not None:
        # Read image bytes and encode as a base64 data URL
        image_bytes = uploaded_image.read()
        encoded_image = base64.b64encode(image_bytes).decode('utf-8')
        # Determine the image MIME subtype from the file name extension
        # (the standard subtype is "jpeg", not "jpg")
        ext = uploaded_image.name.split('.')[-1].lower()
        mime_type = "jpeg" if ext == "jpg" else ext
        data_url = f"data:image/{mime_type};base64,{encoded_image}"
        # Construct a combined message payload as a list of content parts
        user_message = {
            "role": "user",
            "content": [
                {"type": "text", "text": user_text},
                {"type": "image_url", "image_url": {"url": data_url}}
            ]
        }
    else:
        # Regular text message payload
        user_message = {
            "role": "user",
            "content": user_text
        }
    st.session_state.messages.append(user_message)
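    # The full message history (including any image content parts) is re-sent on every
    # request, so the model sees the whole conversation; very long chats will eventually
    # exceed the selected model's context window.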
    # Use a spinner while waiting for the API response
    with st.spinner("Waiting for response..."):
        try:
            response = client.chat.completions.create(
                extra_headers={
                    "HTTP-Referer": referer,
                    "X-Title": title
                },
                extra_body={},
                model=selected_model,
                messages=st.session_state.messages,
            )
            # Assuming the API returns a similar structure for assistant messages
            assistant_reply = response.choices[0].message.content
        except Exception as e:
            assistant_reply = f"An error occurred: {str(e)}"
    # Append the assistant's reply to the conversation history
    st.session_state.messages.append({"role": "assistant", "content": assistant_reply})
    # Refresh the app to update the chat window with new messages
    st.rerun()
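# Note: because the text_input is keyed ("input"), its last value persists across reruns,
# so the sent message remains in the box until the user clears or replaces it.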