haydenk committed
Commit 1b4d595 · verified · 1 Parent(s): 86aba3c

Update app.py

Files changed (1)
  1. app.py +79 -89
app.py CHANGED
@@ -1,4 +1,3 @@
-
 # import required packages
 import google.generativeai as genai
 import os
@@ -8,14 +7,8 @@ from gradio_multimodalchatbot import MultimodalChatbot
 from gradio.data_classes import FileData

 # For better security practices, retrieve sensitive information like API keys from environment variables.
-import google.generativeai as genai
-import os
-import PIL.Image
-import gradio as gr
-from gradio_multimodalchatbot import MultimodalChatbot
-from gradio.data_classes import FileData

-# Retrieve API key from environment variable for security
+# Fetch an environment variable.
 GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
 genai.configure(api_key=GOOGLE_API_KEY)

@@ -31,87 +24,84 @@ model = genai.GenerativeModel('gemini-pro')
 modelvis = genai.GenerativeModel('gemini-pro-vision')

 def gemini(input, file, chatbot=[]):
-    """
-    Function to handle gemini model and gemini vision model interactions.
-
-    Parameters:
-    input (str): The input text.
-    file (File): An optional file object for image processing.
-    chatbot (list): A list to keep track of chatbot interactions.
-
-    Returns:
-    tuple: Updated chatbot interaction list, an empty string, and None.
-    """
-
-    messages = []
-    print(chatbot)
-
-    # Process previous chatbot messages if present
-    if len(chatbot) != 0:
-        for user, bot in chatbot:
-            user, bot = user.text, bot.text
-            messages.extend([
-                {'role': 'user', 'parts': [user]},
-                {'role': 'model', 'parts': [bot]}
-            ])
-        messages.append({'role': 'user', 'parts': [input]})
-    else:
-        messages.append({'role': 'user', 'parts': [input]})
-
-    try:
-        # Process image if file is provided
-        if file is not None:
-            with PIL.Image.open(file.name) as img:
-                message = [{'role': 'user', 'parts': [input, img]}]
-                response = modelvis.generate_content(message)
-                gemini_video_resp = response.text
-                messages.append({'role': 'model', 'parts': [gemini_video_resp]})
-
-                # Construct list of messages in the required format
-                user_msg = {"text": input, "files": [{"file": FileData(path=file.name)}]}
-                bot_msg = {"text": gemini_video_resp, "files": []}
-                chatbot.append([user_msg, bot_msg])
-        else:
-            response = model.generate_content(messages)
-            gemini_resp = response.text
-
-            # Construct list of messages in the required format
-            user_msg = {"text": input, "files": []}
-            bot_msg = {"text": gemini_resp, "files": []}
-            chatbot.append([user_msg, bot_msg])
-    except Exception as e:
-        # Handling exceptions and raising error to the modal
-        print(f"An error occurred: {e}")
-        raise gr.Error(e)
-
-    return chatbot, "", None
-
-# Custom theme with flexible height for the embedded Gradio component
-custom_theme = gr.themes.Default().add_class(
-    "gradio-app",
-    "height: 100vh; /* or vh for viewport height */"
-)
-
-# Define the Gradio Blocks interface with flexible component heights
-with gr.Blocks(theme=custom_theme) as demo:
-
-    # Initialize the MultimodalChatbot component
-    multi = MultimodalChatbot(value=[], height=250) # Adjust height as needed
-
-    with gr.Row():
-        # Textbox for user input with increased scale for better visibility
-        tb = gr.Textbox(scale=4, placeholder='Message CortexChatV...', height=60) # Adjust height
-        # Upload button for image files
-        up = gr.UploadButton("Attach File", file_types=["image"], scale=1, height=40) # Adjust height
-
-    # Define the behavior on text submission
-    tb.submit(gemini, [tb, up, multi], [multi, tb, up])
-
-    # Define the behavior on image upload
-    # Using chained then() calls to update the upload button's state
-    up.upload(lambda: gr.UploadButton("Uploading Image..."), [], up) \
-        .then(lambda: gr.UploadButton("Image Uploaded"), [], up) \
-        .then(lambda: gr.UploadButton("Upload Image"), [], up)
+    """
+    Function to handle gemini model and gemini vision model interactions.
+
+    Parameters:
+    input (str): The input text.
+    file (File): An optional file object for image processing.
+    chatbot (list): A list to keep track of chatbot interactions.
+
+    Returns:
+    tuple: Updated chatbot interaction list, an empty string, and None.
+    """
+
+    messages = []
+    print(chatbot)
+
+    # Process previous chatbot messages if present
+    if len(chatbot) != 0:
+        for user, bot in chatbot:
+            user, bot = user.text, bot.text
+            messages.extend([
+                {'role': 'user', 'parts': [user]},
+                {'role': 'model', 'parts': [bot]}
+            ])
+        messages.append({'role': 'user', 'parts': [input]})
+    else:
+        messages.append({'role': 'user', 'parts': [input]})
+
+    try:
+        # Process image if file is provided
+        if file is not None:
+            with PIL.Image.open(file.name) as img:
+                message = [{'role': 'user', 'parts': [input, img]}]
+                response = modelvis.generate_content(message)
+                gemini_video_resp = response.text
+                messages.append({'role': 'model', 'parts': [gemini_video_resp]})
+
+                # Construct list of messages in the required format
+                user_msg = {"text": input, "files": [{"file": FileData(path=file.name)}]}
+                bot_msg = {"text": gemini_video_resp, "files": []}
+                chatbot.append([user_msg, bot_msg])
+        else:
+            response = model.generate_content(messages)
+            gemini_resp = response.text
+
+            # Construct list of messages in the required format
+            user_msg = {"text": input, "files": []}
+            bot_msg = {"text": gemini_resp, "files": []}
+            chatbot.append([user_msg, bot_msg])
+    except Exception as e:
+        # Handling exceptions and raising error to the modal
+        print(f"An error occurred: {e}")
+        raise gr.Error(e)
+
+    return chatbot, "", None
+
+# Define the Gradio Blocks interface
+with gr.Blocks() as demo:
+    # # Add a centered header using HTML
+    # gr.HTML("<center><h1>Gemini-PRO & Gemini-PRO-Vision API</h1></center>")
+
+    # Initialize the MultimodalChatbot component
+    multi = MultimodalChatbot(value=[], height=800)
+
+    with gr.Row():
+        # Textbox for user input with increased scale for better visibility
+        tb = gr.Textbox(scale=4, placeholder='Message CortexChatV...')
+
+        # Upload button for image files
+        up = gr.UploadButton("Attach File", file_types=["image"], scale=1)
+
+    # Define the behavior on text submission
+    tb.submit(gemini, [tb, up, multi], [multi, tb, up])
+
+    # Define the behavior on image upload
+    # Using chained then() calls to update the upload button's state
+    up.upload(lambda: gr.UploadButton("Uploading Image..."), [], up) \
+        .then(lambda: gr.UploadButton("Image Uploaded"), [], up) \
+        .then(lambda: gr.UploadButton("Upload Image"), [], up)

 # Launch the demo with a queue to handle multiple users
-demo.queue().launch()
+demo.queue().launch()
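
For context only, not part of the commit: the gemini() handler above accumulates a role/parts history and passes it to generate_content. Below is a minimal standalone sketch of that text-only path. It assumes the google-generativeai package is installed and a valid GOOGLE_API_KEY is exported; the prompt strings are placeholders.

# Minimal sketch of the text-only path used by gemini() (illustration only).
# Assumes google-generativeai is installed and GOOGLE_API_KEY is set in the environment.
import os

import google.generativeai as genai

genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
model = genai.GenerativeModel('gemini-pro')

# The same role/parts history format that gemini() builds up turn by turn.
messages = [
    {'role': 'user', 'parts': ['Hello!']},
    {'role': 'model', 'parts': ['Hi! How can I help?']},
    {'role': 'user', 'parts': ['Summarise our conversation so far.']},
]

response = model.generate_content(messages)
print(response.text)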