import streamlit as st
from PIL import Image
import google.generativeai as genai
import os

MODEL_ID = "gemini-2.0-flash-exp"

api_key = os.getenv("GEMINI_API_KEY")
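
# Not in the original app: a minimal sketch of a startup guard, assuming the key
# is supplied via the GEMINI_API_KEY environment variable (e.g. a Space secret).
# It surfaces a readable message up front instead of a later authentication error.
if not api_key:
    st.error("GEMINI_API_KEY is not set. Add it as an environment variable or Space secret.")
    st.stop()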

model_id = MODEL_ID
genai.configure(api_key=api_key)

enable_stream = False

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
if "model" not in st.session_state:
    st.session_state.model = genai.GenerativeModel(MODEL_ID)
if "chat" not in st.session_state:
    st.session_state.chat = st.session_state.model.start_chat()
if "is_new_file" not in st.session_state:
    st.session_state.is_new_file = True

# Function to reset chat history and start a fresh chat session
def reset_chat():
    st.session_state.messages = []
    # Reassign the chat so the old conversation is actually discarded
    st.session_state.chat = st.session_state.model.start_chat()

def main():
    # Streamlit app
    st.title("Gemini Image Chat")

    # File uploader with allowed types
    uploaded_file = st.file_uploader("Choose an image or PDF...", type=["jpg", "jpeg", "png", "pdf"])

    if uploaded_file is not None:
        # Determine file type
        file_type = uploaded_file.type

        if file_type.startswith('image'):
            # Display the uploaded image
            image = Image.open(uploaded_file)
            st.image(image, caption="Uploaded Image.", use_container_width=True)
            mime_type = "image/jpeg"  # Use a consistent MIME type for images
        elif file_type == 'application/pdf':
            # Display a message for PDF upload
            st.write("PDF file uploaded. You can ask questions about its content.")
            mime_type = "application/pdf"
        else:
            st.error("Unsupported file type. Please upload an image or PDF.")
            st.stop()

        # Reset chat history only when a different file is uploaded
        # (guarding on the file name keeps Streamlit reruns from wiping the chat)
        if st.session_state.get("last_file_name") != uploaded_file.name:
            reset_chat()
            st.session_state.is_new_file = True
            st.session_state.last_file_name = uploaded_file.name

    # Text input for user prompt
    user_input = st.text_area("Enter your prompt:", height=200)

    # Send button
    if st.button("Send"):
        if not uploaded_file or not user_input:
            st.warning("Please upload an image or PDF and enter a prompt.")
            st.stop()

        if user_input:
            # Add user message to chat history
            st.session_state.messages.append({"role": "user", "content": user_input})

            with st.spinner("Processing..."):
                if st.session_state.is_new_file:
                    # Upload the file with the correct MIME type
                    file_data = genai.upload_file(uploaded_file, mime_type=mime_type)

                    # Send file and prompt to Gemini API
                    chat = st.session_state.chat
                    response = chat.send_message(
                        [
                            user_input,
                            file_data
                        ],
                        stream=enable_stream
                    )
                    st.session_state.is_new_file = False
                else:
                    # Continue the chat without re-sending the file:
                    # send a text-only prompt to the Gemini API
                    chat = st.session_state.chat
                    response = chat.send_message(
                        [
                            user_input
                        ],
                        stream=enable_stream
                    )

                # Display the Gemini response (streamed chunk-by-chunk or all at once)
                full_response = ""
                if enable_stream:
                    for chunk in response:
                        with st.chat_message("assistant"):
                            st.write(chunk.text)
                        full_response += chunk.text
                else:
                    full_response = response.text
                    with st.chat_message("assistant"):
                        st.write(full_response)

                # Add Gemini response to chat history
                st.session_state.messages.append({"role": "assistant", "content": full_response})

    st.subheader("Chat History")
    # Display chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

if __name__ == "__main__":
    main()
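
# Running locally (assumed filename app.py; the packages are the standard pip
# distributions for the imports above):
#   pip install streamlit pillow google-generativeai
#   export GEMINI_API_KEY="your-key"
#   streamlit run app.py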