Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,3 +1,4 @@
+
 import os
 import gradio as gr
 import random
@@ -6,7 +7,6 @@ import logging
 from typing import Iterator
 
 import google.generativeai as genai
-from gradio import ChatMessage  # uses the ChatMessage structure (Thinking/Response can be distinguished)
 
 logging.basicConfig(
     level=logging.INFO,
@@ -18,17 +18,14 @@ logging.basicConfig(
 )
 logger = logging.getLogger("idea_generator")
 
-# Gemini API
+# ====== Gemini API setup ======
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 
-# Gemini to use
+# ====== Gemini model to use ======
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
-
-##############################################################################
-# choose one of the two options separated by a slash ("/") in a transformation string
-##############################################################################
+# pick just one option from a slash ("/")-separated transformation string
 def choose_alternative(transformation):
     if "/" not in transformation:
         return transformation
@@ -51,6 +48,7 @@ def choose_alternative(transformation):
     return random.choice([left, right])
 
 
+
 ##############################################################################
 # Category dictionary
 # (the example below includes every category; adjust the scope as needed.)
@@ -197,16 +195,14 @@ physical_transformation_categories = {
 }
 
 
-
-#
-# - composed of a 'Thinking' stage (the idea's internal reasoning) and a final 'Response' stage
-##############################################################################
+
+# ====== Gemini streaming API function ======
 def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     """
-    Gemini 2.0 Flash
-
+    Receive Thinking (the reasoning process) + Response (the final answer)
+    from the Gemini 2.0 Flash model with stream=True, yielding chunk by chunk.
     """
-    #
+    # a simple call with no chat history
     chat = model.start_chat(history=[])
     response = chat.send_message(prompt, stream=True)
 
@@ -217,33 +213,32 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     for chunk in response:
         parts = chunk.candidates[0].content.parts
 
-        # when parts has 2 items (0: Thinking, 1: start of Response)
         if len(parts) == 2 and not thinking_complete:
+            # first part: Thinking
            thought_buffer += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"
-
+
+            # second part: Response
            response_buffer = parts[1].text
            yield f"[Response Start] {parts[1].text}"
-
+
            thinking_complete = True
         elif thinking_complete:
-            #
+            # already in the response phase
            current_chunk = parts[0].text
            response_buffer += current_chunk
            yield current_chunk
         else:
-            #
+            # still in the Thinking phase
            current_chunk = parts[0].text
            thought_buffer += current_chunk
            yield f"[Thinking Chunk] {current_chunk}"
 
-    #
+    # finally, emit the full final response
     yield f"\n[Final Response]\n{response_buffer}"
 
 
-
-# expand each category's short description into 'Thinking' + 'Response' (streaming)
-##############################################################################
+# ====== description expansion function (streaming) ======
 def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
     prompt = f"""
 The following is a short description of '{obj_name}' related to '{category}':
@@ -257,9 +252,7 @@ def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
         yield chunk
 
 
-
-# generate base ideas (per category) for a single keyword (object)
-##############################################################################
+# ====== idea generation for a single keyword (object) ======
 def generate_single_object_transformations(obj):
     results = {}
     for category, transformations in physical_transformation_categories.items():
@@ -268,9 +261,7 @@ def generate_single_object_transformations(obj):
         results[category] = {"base": base_description, "enhanced": ""}
     return results
 
-
-# two-keyword interaction
-##############################################################################
+# ====== interaction between two objects ======
 def generate_two_objects_interaction(obj1, obj2):
     results = {}
     for category, transformations in physical_transformation_categories.items():
@@ -283,9 +274,7 @@ def generate_two_objects_interaction(obj1, obj2):
         results[category] = {"base": base_description, "enhanced": ""}
     return results
 
-
-# three-keyword interaction
-##############################################################################
+# ====== interaction between three objects ======
 def generate_three_objects_interaction(obj1, obj2, obj3):
     results = {}
     for category, transformations in physical_transformation_categories.items():
@@ -299,9 +288,6 @@ def generate_three_objects_interaction(obj1, obj2, obj3):
     return results
 
 
-##############################################################################
-# the actual transformation-generation logic
-##############################################################################
 def generate_transformations(text1, text2=None, text3=None):
     if text2 and text3:
         results = generate_three_objects_interaction(text1, text2, text3)
@@ -314,85 +300,88 @@ def generate_transformations(text1, text2=None, text3=None):
         objects = [text1]
     return results, objects
 
-
-#
-##############################################################################
+
+# ====== stream through each category, showing Thinking + Response ======
 def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
     """
-    Gradio
-    [
+    Real-time updates via stream=True require Gradio 3.27+.
+    Messages are returned as [{'role': 'assistant', 'content': ...}, ...].
     """
-    # 1) input values
-    yield [
+    # 1) validate the inputs
+    yield [{"role": "assistant", "content": "Checking inputs..."}]
     time.sleep(0.3)
 
     text1 = text1.strip() if text1 else None
     text2 = text2.strip() if text2 else None
     text3 = text3.strip() if text3 else None
     if not text1:
-        yield [
+        yield [{"role": "assistant", "content": "Error: please enter at least one keyword."}]
         return
 
     # 2) generate ideas
-    yield [
+    yield [{"role": "assistant", "content": "Generating creative model/concept/shape-change ideas... (analyzing per category)"}]
     time.sleep(0.3)
     results, objects = generate_transformations(text1, text2, text3)
+
     obj_name = " and ".join([obj for obj in objects if obj])
 
-    # per-category streaming
-    for i, (category,
-        base_desc =
-        yield [
+    # per-category streaming
+    for i, (category, info) in enumerate(results.items(), start=1):
+        base_desc = info["base"]
+        yield [{
+            "role": "assistant",
+            "content": f"**[{i}/{len(results)}] Category:** {category}\n\nBase idea: {base_desc}\n\nStarting Thinking + Response streaming..."
+        }]
         time.sleep(0.5)
 
         thinking_text = ""
         response_text = ""
-
+        thinking_done = False
 
         for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
             if chunk.startswith("[Thinking Chunk]"):
-                #
+                # Thinking phase
                thinking_text += chunk.replace("[Thinking Chunk]", "")
-                yield [
+                yield [{"role": "assistant", "content": f"**[Thinking]**\n{thinking_text}"}]
+
             elif chunk.startswith("[Response Start]"):
-                #
-
+                # the Response begins
+                thinking_done = True
                partial = chunk.replace("[Response Start]", "")
                response_text += partial
-                yield [
+                yield [{"role": "assistant", "content": f"**[Response started]**\n{partial}"}]
+
             elif chunk.startswith("[Final Response]"):
-                # response
+                # final response
                final = chunk.replace("[Final Response]", "")
                response_text += f"\n{final}"
-                yield [
+                yield [{"role": "assistant", "content": f"**[Final Response]**\n{response_text.strip()}"}]
+
             else:
                 # ordinary response streaming
-                if
+                if thinking_done:
                    response_text += chunk
-                    yield [
+                    yield [{"role": "assistant", "content": f"**[Response in progress]**\n{response_text}"}]
                 else:
                    thinking_text += chunk
-                    yield [
+                    yield [{"role": "assistant", "content": f"**[Thinking]**\n{thinking_text}"}]
 
-
+        info["enhanced"] = response_text
 
-    #
-    yield [
+    # completion notice
+    yield [{"role": "assistant", "content": "**Streaming for every category is complete!**"}]
 
 
 ##############################################################################
-# Gradio UI
+# Gradio UI (Chatbot: type='messages')
 ##############################################################################
-with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
-
-
-    gr.Markdown("# Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, Streaming)")
-    gr.Markdown("Enter one to three keywords and 'Thinking' and 'Response' stream in real time **per category**.")
+with gr.Blocks(title="Gemini Flash Thinking (Stream)", theme=gr.themes.Soft(primary_hue="teal")) as demo:
+    gr.Markdown("# Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, Streaming)\n" +
+                "Enter one to three keywords and 'Thinking' and 'Response' stream in real time **per category**.")
 
-    # Chatbot fixed to type="tuples"
     chatbot = gr.Chatbot(
-        label="Per-category streaming",
-        type="tuples",
+        label="Per-category streaming",
+        type="messages",  # OpenAI-style {"role": "assistant", "content": ...} format
        render_markdown=True
     )
 
@@ -402,29 +391,28 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
             text_input2 = gr.Textbox(label="Keyword 2 (optional)", placeholder="e.g., robot")
             text_input3 = gr.Textbox(label="Keyword 3 (optional)", placeholder="e.g., AI")
             submit_button = gr.Button("Generate ideas")
-
             clear_button = gr.Button("Clear chat")
-
+
         with gr.Column(scale=2):
-            pass
+            pass
 
     def clear_chat():
         return []
 
+    # examples
     examples = [
         ["Car", "", ""],
         ["Smartphone", "AI", ""],
         ["Drone", "AI", ""],
-        ["Sneakers", "Wearable", "Health"],
+        ["Sneakers", "Wearable", "Health"],
     ]
     gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
 
-    # streaming handler
     submit_button.click(
         fn=process_inputs_stream,
         inputs=[text_input1, text_input2, text_input3],
         outputs=chatbot,
-        stream=True  #
+        stream=True  # supported only on recent Gradio (3.27+)
     )
 
     clear_button.click(
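
For reference, the streaming call shape this commit builds on can be exercised in isolation. The following is a minimal sketch, not part of the commit: it assumes GEMINI_API_KEY is exported, the google-generativeai package is installed, and the experimental model name above is still being served; the helper name stream_text is invented.

import os
from typing import Iterator

import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

def stream_text(prompt: str) -> Iterator[str]:  # hypothetical helper, mirrors query_gemini_api_stream
    chat = model.start_chat(history=[])  # one-off chat with no history
    for chunk in chat.send_message(prompt, stream=True):
        # thinking-capable models may return two parts in one chunk:
        # parts[0] is the reasoning, parts[1] the start of the visible answer
        for part in chunk.candidates[0].content.parts:
            yield part.text

if __name__ == "__main__":
    for piece in stream_text("Suggest one creative transformation of a drone."):
        print(piece, end="", flush=True)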
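
On the UI side, recent Gradio releases (4.x/5.x) stream whenever the event handler is a generator, and gr.Chatbot(type="messages") accepts the same [{"role": ..., "content": ...}] lists process_inputs_stream yields. A minimal self-contained sketch follows, independent of the commit (component names invented). As far as I know click() takes no stream kwarg on these versions, so the stream=True argument added above likely needs the 3.27-era release the docstring mentions.

import time
from typing import Iterator

import gradio as gr

def stream_demo(keyword: str) -> Iterator[list]:
    text = ""
    for word in f"Streaming ideas for {keyword} ...".split():
        text += word + " "
        time.sleep(0.2)
        # each yield replaces the chat history with the partial assistant message
        yield [{"role": "assistant", "content": text}]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", render_markdown=True)
    keyword_box = gr.Textbox(label="Keyword")
    run_button = gr.Button("Go")
    run_button.click(fn=stream_demo, inputs=keyword_box, outputs=chatbot)

demo.launch()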