ziyadsuper2017 committed on
Commit d1f86df · 1 Parent(s): b83b86b

Major changes

Files changed (1)
  1. app.py +173 -70
app.py CHANGED
@@ -1,7 +1,11 @@
- import streamlit as st
+ import os
+ import sqlite3
+ import time
+ import uuid
+ from typing import List, Tuple, Optional, Dict, Union
+
  import google.generativeai as genai
- import sqlite3
- from streamlit import file_uploader
+ import streamlit as st
+ from PIL import Image

  # Database setup
  conn = sqlite3.connect('chat_history.db')
@@ -24,8 +28,65 @@ generation_config = {
  safety_settings = []

  # Streamlit UI
- st.title("Chatbot")
+ st.set_page_config(page_title="Chatbot", page_icon="🤖")
+
+ # Header
+ st.markdown("""
+ <style>
+ .container {
+     display: flex;
+ }
+ .logo-text {
+     font-weight:700 !important;
+     font-size:50px !important;
+     color: #f9a01b !important;
+     padding-top: 75px !important;
+ }
+ .logo-img {
+     float:right;
+ }
+ </style>
+ <div class="container">
+     <p class="logo-text">Chatbot</p>
+     <img class="logo-img" src="https://media.roboflow.com/spaces/gemini-icon.png" width=120 height=120>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Sidebar
+ st.sidebar.title("Parameters")
+ temperature = st.sidebar.slider(
+     "Temperature",
+     min_value=0.0,
+     max_value=1.0,
+     value=0.9,
+     step=0.01,
+     help="Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results."
+ )
+ max_output_tokens = st.sidebar.slider(
+     "Token limit",
+     min_value=1,
+     max_value=2048,
+     value=2048,
+     step=1,
+     help="Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters. The default value is 2048."
+ )
+ st.sidebar.title("Model")
+ model_name = st.sidebar.selectbox(
+     "Select a model",
+     options=["gemini-pro", "gemini-pro-vision"],
+     index=0,
+     help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
+ )
+ model_info = st.sidebar.expander("Model info", expanded=False)
+ with model_info:
+     meta = genai.get_model(f"models/{model_name}")
+     st.markdown(f"""
+     - Model name: {model_name}
+     - Input token limit: {meta.input_token_limit}
+     - Model description: {meta.description}
+     """)
+
+ # Chat history
+ st.title("Chatbot")
  chat_history = st.session_state.get("chat_history", [])

  if len(chat_history) % 2 == 0:
@@ -37,69 +98,111 @@ for message in chat_history:
      r, t = message["role"], message["parts"][0]["text"]
      st.markdown(f"**{r.title()}:** {t}")

- # Use text_area for multiline input
- user_input = st.text_area("", height=5)
- if user_input:
-     chat_history.append({"role": role, "parts": [{"text": user_input}]})
-     if role == "user":
-
-         # Model code
-         model_name = "gemini-pro"
-         model = genai.GenerativeModel(
-             model_name=model_name,
-             generation_config=generation_config,
-             safety_settings=safety_settings
-         )
-
-         response = model.generate_content(chat_history)
-         response_text = response.text
-         chat_history.append({"role": "model", "parts": [{"text": response_text}]})
-
-         st.session_state["chat_history"] = chat_history
-
-         for message in chat_history:
-             r, t = message["role"], message["parts"][0]["text"]
-             st.markdown(f"**{r.title()}:** {t}")
- if st.button("Display History"):
-     c.execute("SELECT * FROM history")
-     rows = c.fetchall()
-
-     for row in rows:
-         st.markdown(f"**{row[0].title()}:** {row[1]}")
-
- # Save chat history to database
- for message in chat_history:
-     c.execute("INSERT INTO history VALUES (?, ?)",
-               (message["role"], message["parts"][0]["text"]))
- conn.commit()
-
- conn.close()
-
- # Separate section for image uploading and description generation
- st.title("Image Description Generator")
-
- uploaded_file = st.image_uploader("Upload an image here or paste a screenshot", type=["png", "jpg", "jpeg"])
-
- image_question = st.text_input("Ask something about the image:")
-
- if uploaded_file and image_question:
-     image_parts = [
-         {
-             "mime_type": uploaded_file.type,
-             "data": uploaded_file.read()
-         },
-     ]
-
-     prompt_parts = [
-         image_question,
-         image_parts[0],
-     ]
-
-     model = genai.GenerativeModel(
-         model_name="gemini-pro-vision",
-         generation_config=generation_config,
-         safety_settings=safety_settings
-     )
-
-     response = model.generate_content(prompt_parts)
-     st.markdown(f"**Model's answer:** {response.text}")
+ # User input
+ user_input = st.text_area("", height=5, key="user_input")
+
+ # Image uploader
+ uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
+
+ # Run button
+ run_button = st.button("Run", key="run_button")
+
+ # Clear button
+ clear_button = st.button("Clear", key="clear_button")
+
+ # Download button
+ download_button = st.button("Download", key="download_button")
+
+ # Progress bar
+ progress_bar = st.progress(0)
+
+ # Footer
+ st.markdown("""
+ <style>
+ .footer {
+     position: fixed;
+     left: 0;
+     bottom: 0;
+     width: 100%;
+     background-color: #f9a01b;
+     color: white;
+     text-align: center;
+ }
+ </style>
+ <div class="footer">
+     <p>Made with Streamlit and Google Generative AI</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Clear chat history and image uploader
+ if clear_button:
+     chat_history.clear()
+     st.session_state["chat_history"] = chat_history
+     st.session_state["user_input"] = ""
+     st.session_state["uploaded_files"] = None
+     st.experimental_rerun()
+
+ # Save chat history to a text file
+ if download_button:
+     chat_text = "\n".join([f"{m['role'].title()}: {m['parts'][0]['text']}" for m in chat_history])
+     st.download_button(
+         label="Download chat history",
+         data=chat_text,
+         file_name="chat_history.txt",
+         mime="text/plain"
+     )
+
+ # Generate model response
+ if run_button or user_input:
+     if user_input:
+         chat_history.append({"role": role, "parts": [{"text": user_input}]})
+         st.session_state["user_input"] = ""
+     if role == "user":
+
+         # Model code
+         model = genai.GenerativeModel(
+             model_name=model_name,
+             generation_config=generation_config,
+             safety_settings=safety_settings
+         )
+
+         if uploaded_files:
+             # Preprocess the uploaded images and convert them to image_parts
+             image_parts = []
+             for uploaded_file in uploaded_files:
+                 image = Image.open(uploaded_file).convert('RGB')
+                 uploaded_file.seek(0)  # rewind so read() returns the full bytes after PIL has consumed the stream
+                 image_parts.append({
+                     "mime_type": uploaded_file.type,
+                     "data": uploaded_file.read()
+                 })
+                 # Display the uploaded images
+                 st.image(image)
+
+             # Add the user input to the prompt_parts
+             prompt_parts = [
+                 user_input,
+             ] + image_parts
+
+             # Use gemini-pro-vision model to generate the response
+             response = model.generate_content(prompt_parts, stream=True)
+         else:
+             # Use gemini-pro model to generate the response
+             response = model.generate_content(chat_history, stream=True)
+
+         # Streaming effect
+         chat_history.append({"role": "model", "parts": [{"text": ""}]})
+         progress_bar.progress(0)
+         for chunk in response:
+             for i in range(0, len(chunk.text), 10):
+                 section = chunk.text[i:i + 10]
+                 chat_history[-1]["parts"][0]["text"] += section
+                 progress = min((i + 10) / len(chunk.text), 1.0)
+                 progress_bar.progress(progress)
+                 time.sleep(0.01)
+             st.experimental_rerun()
+         progress_bar.progress(1.0)
+
+         st.session_state["chat_history"] = chat_history
+         st.session_state["uploaded_files"] = None
+
+         st.experimental_rerun()
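
Two caveats on the new input-handling block, left as committed: the `st.experimental_rerun()` inside the chunk loop restarts the script as soon as the first chunk arrives, so the "streaming effect" never plays out, and assigning to `st.session_state["user_input"]` or `st.session_state["uploaded_files"]` after those widgets are instantiated raises a StreamlitAPIException. A minimal sketch of incremental display that avoids per-chunk reruns, using `st.empty()`; the helper name `stream_reply` is ours, not from the commit:

import streamlit as st

def stream_reply(model, chat_history):
    # Assumes model is a configured genai.GenerativeModel, as in app.py above.
    placeholder = st.empty()  # one slot, rewritten in place as chunks arrive
    reply = ""
    response = model.generate_content(chat_history, stream=True)
    for chunk in response:
        reply += chunk.text
        placeholder.markdown(f"**Model:** {reply}")
    return reply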
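
One more note: the sidebar sliders are created after `generation_config` (built near the top of app.py, per the `@@ -24,8 +28,65 @@` hunk), so the code shown never feeds `temperature` or `max_output_tokens` into the model. A sketch, assuming the slider variables defined in app.py, of rebuilding the config dict after the sliders so the request actually uses them:

# Hypothetical rewiring, not from the commit: temperature, max_output_tokens,
# model_name, and safety_settings come from app.py's sidebar code above.
generation_config = {
    "temperature": temperature,              # sidebar "Temperature" slider
    "max_output_tokens": max_output_tokens,  # sidebar "Token limit" slider
}
model = genai.GenerativeModel(
    model_name=model_name,
    generation_config=generation_config,
    safety_settings=safety_settings
)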