hsienchen committed on
Commit 2240977 · verified · 1 Parent(s): 7eb0c33

Delete app.py

Files changed (1)
  1. app.py +0 -234
app.py DELETED
@@ -1,234 +0,0 @@
- import os
- import time
- import uuid
- from typing import List, Tuple, Optional, Dict, Union
-
- import google.generativeai as genai
- import gradio as gr
- from PIL import Image
-
- print("google-generativeai:", genai.__version__)
-
- GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
-
- TITLE = """<h1 align="center">My Own Playground 💬</h1>"""
- SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision API</h2>"""
- DUPLICATE = """
- <div style="text-align: center; display: flex; justify-content: center; align-items: center;">
-     <a href="https://huggingface.co/spaces/SkalskiP/ChatGemini?duplicate=true">
-         <img src="https://bit.ly/3gLdBN6" alt="Duplicate Space" style="margin-right: 10px;">
-     </a>
-     <span>Duplicate the Space and run securely with your
-         <a href="https://makersuite.google.com/app/apikey">GOOGLE API KEY</a>.
-     </span>
- </div>
- """
-
- AVATAR_IMAGES = (
-     None,
-     "https://media.roboflow.com/spaces/gemini-icon.png"
- )
-
- IMAGE_CACHE_DIRECTORY = "/tmp"
- IMAGE_WIDTH = 512
- CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
-
-
- def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
-     if not stop_sequences:
-         return None
-     return [sequence.strip() for sequence in stop_sequences.split(",")]
-
-
- def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
-     image_height = int(image.height * IMAGE_WIDTH / image.width)
-     return image.resize((IMAGE_WIDTH, image_height))
-
-
- def cache_pil_image(image: Image.Image) -> str:
-     image_filename = f"{uuid.uuid4()}.jpeg"
-     os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
-     image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
-     image.save(image_path, "JPEG")
-     return image_path
-
-
- def preprocess_chat_history(
-     history: CHAT_HISTORY
- ) -> List[Dict[str, Union[str, List[str]]]]:
-     messages = []
-     for user_message, model_message in history:
-         if isinstance(user_message, tuple):
-             pass
-         elif user_message is not None:
-             messages.append({'role': 'user', 'parts': [user_message]})
-         if model_message is not None:
-             messages.append({'role': 'model', 'parts': [model_message]})
-     return messages
-
-
- def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
-     for file in files:
-         image = Image.open(file).convert('RGB')
-         image = preprocess_image(image)
-         image_path = cache_pil_image(image)
-         chatbot.append(((image_path,), None))
-     return chatbot
-
-
- def user(text_prompt: str, chatbot: CHAT_HISTORY):
-     if text_prompt:
-         chatbot.append((text_prompt, None))
-     return "", chatbot
-
-
- def bot(
-     google_key: str,
-     files: Optional[List[str]],
-     temperature: float,
-     max_output_tokens: int,
-     stop_sequences: str,
-     top_k: int,
-     top_p: float,
-     chatbot: CHAT_HISTORY
- ):
-     if len(chatbot) == 0:
-         return chatbot
-
-     google_key = google_key if google_key else GOOGLE_API_KEY
-     if not google_key:
-         raise ValueError(
-             "GOOGLE_API_KEY is not set. "
-             "Please follow the instructions in the README to set it up.")
-
-     genai.configure(api_key=google_key)
-     generation_config = genai.types.GenerationConfig(
-         temperature=temperature,
-         max_output_tokens=max_output_tokens,
-         stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
-         top_k=top_k,
-         top_p=top_p)
-
-     if files:
-         text_prompt = [chatbot[-1][0]] \
-             if chatbot[-1][0] and isinstance(chatbot[-1][0], str) \
-             else []
-         image_prompt = [Image.open(file).convert('RGB') for file in files]
-         model = genai.GenerativeModel('gemini-pro-vision')
-         response = model.generate_content(
-             text_prompt + image_prompt,
-             stream=True,
-             generation_config=generation_config)
-     else:
-         messages = preprocess_chat_history(chatbot)
-         model = genai.GenerativeModel('gemini-pro')
-         response = model.generate_content(
-             messages,
-             stream=True,
-             generation_config=generation_config)
-
-     # streaming effect
-     chatbot[-1][1] = ""
-     for chunk in response:
-         for i in range(0, len(chunk.text), 10):
-             section = chunk.text[i:i + 10]
-             chatbot[-1][1] += section
-             time.sleep(0.01)
-             yield chatbot
-
-
- google_key_component = gr.Textbox(
-     label="GOOGLE API KEY",
-     value="",
-     type="password",
-     placeholder="...",
-     info="You have to provide your own GOOGLE_API_KEY for this app to function properly",
-     visible=GOOGLE_API_KEY is None
- )
- chatbot_component = gr.Chatbot(
-     label='Gemini',
-     bubble_full_width=False,
-     avatar_images=AVATAR_IMAGES,
-     scale=2,
-     height=400
- )
- text_prompt_component = gr.Textbox(
-     placeholder="Hi there! [press Enter]", show_label=False, autofocus=True, scale=8
- )
- upload_button_component = gr.UploadButton(
-     label="Upload Images", file_count="multiple", file_types=["image"], scale=1
- )
- run_button_component = gr.Button(value="Run", variant="primary", scale=1)
- temperature_component = gr.Slider(
- )
- max_output_tokens_component = gr.Slider(
- )
- stop_sequences_component = gr.Textbox(
-     label="Add stop sequence",
-     value="",
-     type="text",
-     placeholder="STOP, END",
-     info=(
-         "A stop sequence is a series of characters (including spaces) that stops "
-         "response generation if the model encounters it. The sequence is not included "
-         "as part of the response. You can add up to five stop sequences."
-     ))
- top_k_component = gr.Slider(
- )
- top_p_component = gr.Slider(
- )
-
- user_inputs = [
-     text_prompt_component,
-     chatbot_component
- ]
-
- bot_inputs = [
-     google_key_component,
-     upload_button_component,
-     temperature_component,
-     max_output_tokens_component,
-     stop_sequences_component,
-     top_k_component,
-     top_p_component,
-     chatbot_component
- ]
-
- with gr.Blocks() as demo:
-     gr.HTML(TITLE)
-     gr.HTML(SUBTITLE)
-     gr.HTML(DUPLICATE)
-     with gr.Column():
-         google_key_component.render()
-         chatbot_component.render()
-         with gr.Row():
-             text_prompt_component.render()
-             upload_button_component.render()
-             run_button_component.render()
-
-     run_button_component.click(
-         fn=user,
-         inputs=user_inputs,
-         outputs=[text_prompt_component, chatbot_component],
-         queue=False
-     ).then(
-         fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
-     )
-
-     text_prompt_component.submit(
-         fn=user,
-         inputs=user_inputs,
-         outputs=[text_prompt_component, chatbot_component],
-         queue=False
-     ).then(
-         fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
-     )
-
-     upload_button_component.upload(
-         fn=upload,
-         inputs=[upload_button_component, chatbot_component],
-         outputs=[chatbot_component],
-         queue=False
-     )
-
- demo.queue(max_size=99).launch(debug=False, show_error=True)