Liu Hong Yuan Tom committed
Commit e54f8ed
Parent: 9c1ce00

Update app.py

Files changed (1):
  app.py  +74 -5
app.py CHANGED
@@ -85,6 +85,63 @@ def user(text_prompt: str, chatbot: CHAT_HISTORY):
     return "", chatbot
 
 
+# def bot(
+#     google_key: str,
+#     model_name: str,
+#     files: Optional[List[str]],
+#     temperature: float,
+#     max_output_tokens: int,
+#     stop_sequences: str,
+#     top_k: int,
+#     top_p: float,
+#     chatbot: CHAT_HISTORY
+# ):
+#     if len(chatbot) == 0:
+#         return chatbot
+
+#     google_key = google_key if google_key else GOOGLE_API_KEY
+#     if not google_key:
+#         raise ValueError(
+#             "GOOGLE_API_KEY is not set. "
+#             "Please follow the instructions in the README to set it up.")
+
+#     genai.configure(api_key=google_key)
+#     generation_config = genai.types.GenerationConfig(
+#         temperature=temperature,
+#         max_output_tokens=max_output_tokens,
+#         stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
+#         top_k=top_k,
+#         top_p=top_p)
+
+#     if files:
+#         text_prompt = [chatbot[-1][0]] \
+#             if chatbot[-1][0] and isinstance(chatbot[-1][0], str) \
+#             else []
+#         image_prompt = [Image.open(file).convert('RGB') for file in files]
+#         model = genai.GenerativeModel(model_name)
+#         response = model.generate_content(
+#             text_prompt + image_prompt,
+#             stream=True,
+#             generation_config=generation_config)
+#     else:
+#         messages = preprocess_chat_history(chatbot)
+#         model = genai.GenerativeModel(model_name)
+#         response = model.generate_content(
+#             messages,
+#             stream=True,
+#             generation_config=generation_config)
+
+#     # streaming effect
+#     chatbot[-1][1] = ""
+#     for chunk in response:
+#         for i in range(0, len(chunk.text), 10):
+#             section = chunk.text[i:i + 10]
+#             chatbot[-1][1] += section
+#             time.sleep(0.01)
+#             yield chatbot
+
+# -------------------------------------------------------------------
+
 def bot(
     google_key: str,
     model_name: str,
@@ -134,11 +191,23 @@ def bot(
     # streaming effect
     chatbot[-1][1] = ""
     for chunk in response:
-        for i in range(0, len(chunk.text), 10):
-            section = chunk.text[i:i + 10]
-            chatbot[-1][1] += section
-            time.sleep(0.01)
-            yield chatbot
+        if not chunk.text:
+            print("chunk.text is empty")
+            continue
+
+        print(f"chunk.text: {chunk.text}")
+
+        try:
+            for i in range(0, len(chunk.text), 10):
+                section = chunk.text[i:i + 10]
+                chatbot[-1][1] += section
+                time.sleep(0.01)
+                yield chatbot
+        except IndexError as e:
+            print(f"IndexError: {e}")
+            # Handle the error appropriately
+
+# -------------------------------------------------------------------
 
 model_selection = gr.Dropdown(
     ["gemini-1.5-flash",