aiqcamp committed on
Commit
9cb71c2
·
verified ·
1 Parent(s): 5a1d31c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -26
app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
3
  from gradio import ChatMessage
4
  from typing import Iterator
5
  import google.generativeai as genai
 
6
 
7
  # get Gemini API Key from the environ variable
8
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
@@ -26,27 +27,62 @@ def format_chat_history(messages: list) -> list:
26
  })
27
  return formatted_history
28
 
29
- def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
30
  """
31
- Streams thoughts and response with conversation history support.
32
  """
33
- try:
34
- print(f"\n=== New Request ===")
 
 
 
 
35
  print(f"User message: {user_message}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
 
37
  # Format chat history for Gemini
38
  chat_history = format_chat_history(messages)
39
 
40
  # Initialize Gemini chat
41
  chat = model.start_chat(history=chat_history)
42
- response = chat.send_message(user_message, stream=True)
43
 
44
- # Initialize buffers and flags
45
  thought_buffer = ""
46
  response_buffer = ""
47
  thinking_complete = False
48
 
49
- # Add initial thinking message
50
  messages.append(
51
  ChatMessage(
52
  role="assistant",
@@ -55,7 +91,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
55
  )
56
  )
57
 
58
- for chunk in response:
59
  parts = chunk.candidates[0].content.parts
60
  current_chunk = parts[0].text
61
 
@@ -103,6 +139,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
103
  content=thought_buffer,
104
  metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
105
  )
 
106
 
107
  yield messages
108
 
@@ -118,11 +155,11 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
118
  )
119
  yield messages
120
 
121
- def user_message(msg: str, history: list) -> tuple[str, list]:
122
  """Adds user message to chat history"""
123
- history.append(ChatMessage(role="user", content=msg))
124
- return "", history
125
-
126
 
127
  # Create the Gradio interface
128
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
@@ -130,7 +167,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
130
 
131
  chatbot = gr.Chatbot(
132
  type="messages",
133
- label="Gemini2.0 'Thinking' Chatbot",
134
  render_markdown=True,
135
  scale=1,
136
  avatar_images=(None,"https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
@@ -141,8 +178,9 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
141
  lines=1,
142
  label="Chat Message",
143
  placeholder="Type your message here...",
144
- scale=4
145
  )
 
146
 
147
  clear_button = gr.Button("Clear Chat", scale=1)
148
 
@@ -157,7 +195,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
157
 
158
  gr.Examples(
159
  examples=example_prompts,
160
- inputs=input_box,
161
  label="Examples: Get Gemini to show its thinking process with these prompts!",
162
  examples_per_page=5 # Adjust as needed
163
  )
@@ -166,22 +204,30 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
166
  # Set up event handlers
167
  msg_store = gr.State("") # Store for preserving user message
168
 
 
169
  input_box.submit(
170
- lambda msg: (msg, msg, ""), # Store message and clear input
171
- inputs=[input_box],
172
- outputs=[msg_store, input_box, input_box],
173
  queue=False
174
  ).then(
175
- user_message, # Add user message to chat
176
- inputs=[msg_store, chatbot],
177
- outputs=[input_box, chatbot],
 
 
 
 
 
 
178
  queue=False
179
  ).then(
180
- stream_gemini_response, # Generate and stream response
181
- inputs=[msg_store, chatbot],
182
  outputs=chatbot
183
  )
184
 
 
185
  clear_button.click(
186
  lambda: ([], "", ""),
187
  outputs=[chatbot, input_box, msg_store],
@@ -202,14 +248,15 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
202
  * Powered by Google's **Gemini 2.0 Flash** model.
203
  * Shows the model's **thoughts** before the final answer (experimental feature).
204
  * Supports **conversation history** for multi-turn chats.
 
205
  * Uses **streaming** for a more interactive experience.
206
  **Instructions:**
207
- 1. Type your message in the input box below or select an example.
208
- 2. Press Enter or click Submit to send.
209
  3. Observe the chatbot's "Thinking" process followed by the final response.
210
  4. Use the "Clear Chat" button to start a new conversation.
211
 
212
- *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
213
  """
214
  )
215
 
 
3
  from gradio import ChatMessage
4
  from typing import Iterator
5
  import google.generativeai as genai
6
+ import time # Import time module for potential debugging/delay
7
 
8
  # get Gemini API Key from the environ variable
9
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 
27
  })
28
  return formatted_history
29
 
30
+ def stream_gemini_response(message_input: str|gr.File, messages: list) -> Iterator[list]:
31
  """
32
+ Streams thoughts and response with conversation history support, handling text or file input.
33
  """
34
+ user_message = ""
35
+ input_file = None
36
+
37
+ if isinstance(message_input, str):
38
+ user_message = message_input
39
+ print(f"\n=== New Request (Text) ===")
40
  print(f"User message: {user_message}")
41
+ if not user_message.strip(): # Robust check: if text message is empty or whitespace
42
+ messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message or upload a file.")) # More specific message
43
+ yield messages
44
+ return
45
+
46
+ elif isinstance(message_input, gr.File): #gr.File directly should be used with newer gradio versions (v4+)
47
+ input_file = message_input.name # Access the temporary file path
48
+ file_type = message_input.original_name.split('.')[-1].lower() #Get original filename's extension
49
+ print(f"\n=== New Request (File) ===")
50
+ print(f"File uploaded: {input_file}, type: {file_type}")
51
+
52
+ try:
53
+ with open(input_file, "rb") as f: #Open file in binary mode for universal handling
54
+ file_data = f.read()
55
+
56
+ if file_type in ['png', 'jpg', 'jpeg', 'gif']: #Example Image Types - expand as needed
57
+ user_message = {"inline_data": {"mime_type": f"image/{file_type}", "data": file_data}} #Prepare image part for Gemini
58
+ elif file_type == 'csv':
59
+ user_message = {"inline_data": {"mime_type": "text/csv", "data": file_data}} #Prepare csv part
60
+
61
+ except Exception as e:
62
+ print(f"Error reading file: {e}")
63
+ messages.append(ChatMessage(role="assistant", content=f"Error reading file: {e}"))
64
+ yield messages
65
+ return
66
+ else:
67
+ messages.append(ChatMessage(role="assistant", content="Sorry, I cannot understand this input format. Please use text or upload a valid file.")) # More informative error
68
+ yield messages
69
+ return
70
+
71
 
72
+ try:
73
  # Format chat history for Gemini
74
  chat_history = format_chat_history(messages)
75
 
76
  # Initialize Gemini chat
77
  chat = model.start_chat(history=chat_history)
78
+ response = chat.send_message(user_message, stream=True) #Send the message part as is
79
 
80
+ # Initialize buffers and flags - same as before
81
  thought_buffer = ""
82
  response_buffer = ""
83
  thinking_complete = False
84
 
85
+ # Add initial thinking message - same as before
86
  messages.append(
87
  ChatMessage(
88
  role="assistant",
 
91
  )
92
  )
93
 
94
+ for chunk in response: #streaming logic - same as before
95
  parts = chunk.candidates[0].content.parts
96
  current_chunk = parts[0].text
97
 
 
139
  content=thought_buffer,
140
  metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
141
  )
142
+ #time.sleep(0.05) #Optional: Uncomment this line to add a slight delay for debugging/visualization of streaming. Remove for final version
143
 
144
  yield messages
145
 
 
155
  )
156
  yield messages
157
 
158
+ def user_message(message_text, file_upload, history: list) -> tuple[str, None, list]:
159
  """Adds user message to chat history"""
160
+ msg = message_text if message_text else file_upload
161
+ history.append(ChatMessage(role="user", content=msg if isinstance(msg, str) else msg.name)) #Store message or filename in history.
162
+ return "", None, history #clear both input fields
163
 
164
  # Create the Gradio interface
165
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
 
167
 
168
  chatbot = gr.Chatbot(
169
  type="messages",
170
+ label="Gemini2.0 'Thinking' Chatbot (Streaming Output)", #Label now indicates streaming
171
  render_markdown=True,
172
  scale=1,
173
  avatar_images=(None,"https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
 
178
  lines=1,
179
  label="Chat Message",
180
  placeholder="Type your message here...",
181
+ scale=3
182
  )
183
+ file_upload = gr.File(label="Upload File", file_types=["image", ".csv"], scale=2) # Allow image and CSV files
184
 
185
  clear_button = gr.Button("Clear Chat", scale=1)
186
 
 
195
 
196
  gr.Examples(
197
  examples=example_prompts,
198
+ inputs=[input_box],
199
  label="Examples: Get Gemini to show its thinking process with these prompts!",
200
  examples_per_page=5 # Adjust as needed
201
  )
 
204
  # Set up event handlers
205
  msg_store = gr.State("") # Store for preserving user message
206
 
207
+
208
  input_box.submit(
209
+ user_message,
210
+ inputs=[input_box, file_upload, chatbot],
211
+ outputs=[input_box, file_upload, chatbot],
212
  queue=False
213
  ).then(
214
+ stream_gemini_response,
215
+ inputs=[input_box, chatbot], # Input either from text box or file, logic inside stream_gemini_response
216
+ outputs=chatbot
217
+ )
218
+
219
+ file_upload.upload(
220
+ user_message,
221
+ inputs=[input_box, file_upload, chatbot], # even textbox is input here so clearing both will work
222
+ outputs=[input_box, file_upload, chatbot],
223
  queue=False
224
  ).then(
225
+ stream_gemini_response,
226
+ inputs=[file_upload, chatbot], # Input is now the uploaded file.
227
  outputs=chatbot
228
  )
229
 
230
+
231
  clear_button.click(
232
  lambda: ([], "", ""),
233
  outputs=[chatbot, input_box, msg_store],
 
248
  * Powered by Google's **Gemini 2.0 Flash** model.
249
  * Shows the model's **thoughts** before the final answer (experimental feature).
250
  * Supports **conversation history** for multi-turn chats.
251
+ * Supports **Image and CSV file uploads** for analysis.
252
  * Uses **streaming** for a more interactive experience.
253
  **Instructions:**
254
+ 1. Type your message in the input box or Upload a file below.
255
+ 2. Press Enter/Submit or Upload to send.
256
  3. Observe the chatbot's "Thinking" process followed by the final response.
257
  4. Use the "Clear Chat" button to start a new conversation.
258
 
259
+ *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary. File analysis capabilities may be limited depending on the model's experimental features.
260
  """
261
  )
262