import os
import sqlite3
import time

import google.generativeai as genai
import streamlit as st
from PIL import Image
# Database setup
conn = sqlite3.connect('chat_history.db')
c = conn.cursor()
c.execute('''
    CREATE TABLE IF NOT EXISTS history
    (role TEXT, message TEXT)
''')
conn.commit()
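
# Note: the history table above is never written to in this script. A minimal
# persistence helper (a sketch; save_message is not part of the original code)
# might look like this:
def save_message(role: str, message: str) -> None:
    """Insert one chat message into the local SQLite history table."""
    c.execute("INSERT INTO history (role, message) VALUES (?, ?)", (role, message))
    conn.commit()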
# Generative AI setup
api_key = os.getenv("GOOGLE_API_KEY")  # read the key from the environment rather than hardcoding it
genai.configure(api_key=api_key)
# Default generation settings; kept in sync with the sidebar sliders below
generation_config = {
    "temperature": 0.9,
    "max_output_tokens": 2048
}
safety_settings = []
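# An empty list keeps the API's default safety thresholds. Individual
# categories can be adjusted with entries of the form (an illustration only,
# not used by this app):
# safety_settings = [{"category": "HARM_CATEGORY_HARASSMENT",
#                     "threshold": "BLOCK_ONLY_HIGH"}]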
# Streamlit UI
st.set_page_config(page_title="Chatbot", page_icon="🤖")
# Header
st.markdown("""
<style>
.container {
display: flex;
}
.logo-text {
font-weight:700 !important;
font-size:50px !important;
color: #f9a01b !important;
padding-top: 75px !important;
}
.logo-img {
float:right;
}
</style>
<div class="container">
<p class="logo-text">Chatbot</p>
<img class="logo-img" src="https://media.roboflow.com/spaces/gemini-icon.png" width=120 height=120>
</div>
""", unsafe_allow_html=True)
# Sidebar
st.sidebar.title("Parameters")
temperature = st.sidebar.slider(
"Temperature",
min_value=0.0,
max_value=1.0,
value=0.9,
step=0.01,
help="Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results."
)
max_output_tokens = st.sidebar.slider(
    "Token limit",
    min_value=1,
    max_value=2048,
    value=2048,
    step=1,
    help="Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters. The default value is 2048."
)
# Apply the slider values so the sidebar controls actually take effect
generation_config["temperature"] = temperature
generation_config["max_output_tokens"] = max_output_tokens
st.sidebar.title("Model")
model_name = st.sidebar.selectbox(
"Select a model",
options=["gemini-pro", "gemini-pro-vision"],
index=0,
help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
)
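# Note: uploaded images require the multimodal gemini-pro-vision model; the
# run handler below switches to it automatically when images are attached.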
model_info = st.sidebar.expander("Model info", expanded=False)
with model_info:
    info = genai.get_model(f"models/{model_name}")
    st.markdown(f"""
    - Model name: {model_name}
    - Input token limit: {info.input_token_limit}
    - Output token limit: {info.output_token_limit}
    - Description: {info.description}
    """)
# Chat history
st.title("Chatbot")
chat_history = st.session_state.get("chat_history", [])
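# Gemini's chat format expects strictly alternating "user"/"model" turns, so
# the next expected role is derived from the current history length.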
if len(chat_history) % 2 == 0:
    role = "user"
else:
    role = "model"
for message in chat_history:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")
# User input
user_input = st.text_area("Your message", height=100, key="user_input")
# Image uploader
uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
# Run button
run_button = st.button("Run", key="run_button")
# Clear button
clear_button = st.button("Clear", key="clear_button")
# Download button
download_button = st.button("Download", key="download_button")
# Progress bar
progress_bar = st.progress(0)
# Footer
st.markdown("""
<style>
.footer {
position: fixed;
left: 0;
bottom: 0;
width: 100%;
background-color: #f9a01b;
color: white;
text-align: center;
}
</style>
<div class="footer">
<p>Made with Streamlit and Google Generative AI</p>
</div>
""", unsafe_allow_html=True)
# Clear chat history and image uploader
if clear_button:
    chat_history.clear()
    st.session_state["chat_history"] = chat_history
    # Widget-backed keys ("user_input", "uploaded_files") cannot be reassigned
    # via session_state once their widgets exist, so only the history is cleared.
    st.experimental_rerun()
# Save chat history to a text file
if download_button:
    chat_text = "\n".join(
        f"{m['role'].title()}: {m['parts'][0]['text']}" for m in chat_history
    )
    st.download_button(
        label="Download chat history",
        data=chat_text,
        file_name="chat_history.txt",
        mime="text/plain"
    )
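# st.download_button renders its own download control, so saving the chat
# takes two clicks: "Download" above, then the button rendered by this block.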
# Generate model response
if run_button or user_input:
    if user_input:
        chat_history.append({"role": role, "parts": [{"text": user_input}]})
    if role == "user":
        # Images require the multimodal model, whatever the sidebar selection
        active_model = "gemini-pro-vision" if uploaded_files else model_name
        model = genai.GenerativeModel(
            model_name=active_model,
            generation_config=generation_config,
            safety_settings=safety_settings
        )
        if uploaded_files:
            # Preprocess the uploaded images and convert them to image_parts
            image_parts = []
            for uploaded_file in uploaded_files:
                image = Image.open(uploaded_file).convert('RGB')
                image_parts.append({
                    "mime_type": uploaded_file.type,
                    # getvalue() re-reads the buffer; read() would return empty
                    # bytes here because PIL has already consumed the stream
                    "data": uploaded_file.getvalue()
                })
                # Display each uploaded image
                st.image(image)
            # Put the user input ahead of the image parts in the prompt
            prompt_parts = [user_input] + image_parts
            # Multimodal request: text plus images in a single prompt
            response = model.generate_content(prompt_parts, stream=True)
        else:
            # Text-only request over the whole chat history
            response = model.generate_content(chat_history, stream=True)
        # Streaming effect: append an empty model message and grow it chunk by chunk
        chat_history.append({"role": "model", "parts": [{"text": ""}]})
        progress_bar.progress(0)
        placeholder = st.empty()
        for chunk in response:
            for i in range(0, len(chunk.text), 10):
                section = chunk.text[i:i + 10]
                chat_history[-1]["parts"][0]["text"] += section
                progress = min((i + 10) / len(chunk.text), 1.0)
                progress_bar.progress(progress)
                # Render the partial answer in place; rerunning here would
                # abort the stream after the first few characters
                placeholder.markdown(f"**Model:** {chat_history[-1]['parts'][0]['text']}")
                time.sleep(0.01)
        progress_bar.progress(1.0)
        st.session_state["chat_history"] = chat_history
        st.experimental_rerun()
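
# To run locally (assuming this file is saved as app.py and GOOGLE_API_KEY is
# set in the environment):
#   pip install streamlit google-generativeai pillow
#   streamlit run app.py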