aiqcamp committed on
Commit
9be8a74
·
verified ·
1 Parent(s): 1c3714b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -77
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import gradio as gr
3
  import random
@@ -6,7 +7,6 @@ import logging
6
  from typing import Iterator
7
 
8
  import google.generativeai as genai
9
- from gradio import ChatMessage # ChatMessage ꡬ쑰 μ‚¬μš© (Thinking/Response ꡬ뢄 κ°€λŠ₯)
10
 
11
  logging.basicConfig(
12
  level=logging.INFO,
@@ -18,17 +18,14 @@ logging.basicConfig(
18
  )
19
  logger = logging.getLogger("idea_generator")
20
 
21
- # Gemini API ν‚€ μ„€μ •
22
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
23
  genai.configure(api_key=GEMINI_API_KEY)
24
 
25
- # μ‚¬μš©ν•  Gemini 2.0 Flash λͺ¨λΈ (Thinking κΈ°λŠ₯ 포함)
26
  model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
27
 
28
-
29
- ##############################################################################
30
- # λ³€ν™˜ λ¬Έμžμ—΄μ—μ„œ μŠ¬λž˜μ‹œ("/")둜 κ΅¬λΆ„λœ 두 μ˜΅μ…˜ 쀑 ν•˜λ‚˜ 선택
31
- ##############################################################################
32
  def choose_alternative(transformation):
33
  if "/" not in transformation:
34
  return transformation
@@ -51,6 +48,7 @@ def choose_alternative(transformation):
51
  return random.choice([left, right])
52
 
53
 
 
54
  ##############################################################################
55
  # μΉ΄ν…Œκ³ λ¦¬ 사전
56
  # (μ•„λž˜ μ˜ˆμ‹œμ—μ„œλŠ” λͺ¨λ“  μΉ΄ν…Œκ³ λ¦¬λ₯Ό ν¬ν•¨μ‹œμΌ°μ§€λ§Œ, ν•„μš”μ— 따라 λ²”μœ„λ₯Ό μ‘°μ •ν•˜μ„Έμš”.)
@@ -197,16 +195,14 @@ physical_transformation_categories = {
197
  }
198
 
199
 
200
- ##############################################################################
201
- # 슀트리밍용 Gemini API ν•¨μˆ˜:
202
- # - 'Thinking' 단계(아이디어 λ‚΄λΆ€ μΆ”λ‘ )와 μ΅œμ’… 'Response' λ‹¨κ³„λ‘œ ꡬ성
203
- ##############################################################################
204
  def query_gemini_api_stream(prompt: str) -> Iterator[str]:
205
  """
206
- Gemini 2.0 Flash with 'Thinking' λΆ€λΆ„κ³Ό 'Response' 뢀뢄을
207
- λΆ„λ¦¬ν•˜μ—¬ 슀트리밍(Chunk)으둜 μ œκ³΅ν•œλ‹€.
208
  """
209
- # chat μ΄ˆκΈ°ν™” (history 없이 λ‹¨λ°œμ„± 호좜)
210
  chat = model.start_chat(history=[])
211
  response = chat.send_message(prompt, stream=True)
212
 
@@ -217,33 +213,32 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
217
  for chunk in response:
218
  parts = chunk.candidates[0].content.parts
219
 
220
- # partsκ°€ 2개이면 (0: Thinking, 1: Response μ‹œμž‘)
221
  if len(parts) == 2 and not thinking_complete:
 
222
  thought_buffer += parts[0].text
223
  yield f"[Thinking Chunk] {parts[0].text}"
224
-
 
225
  response_buffer = parts[1].text
226
  yield f"[Response Start] {parts[1].text}"
227
-
228
  thinking_complete = True
229
  elif thinking_complete:
230
- # Already in response phase
231
  current_chunk = parts[0].text
232
  response_buffer += current_chunk
233
  yield current_chunk
234
  else:
235
- # Still in thinking phase
236
  current_chunk = parts[0].text
237
  thought_buffer += current_chunk
238
  yield f"[Thinking Chunk] {current_chunk}"
239
 
240
- # 슀트리밍 μ™„λ£Œ ν›„ μ΅œμ’… κ²°κ³Ό ν•œλ²ˆμ— 제곡
241
  yield f"\n[Final Response]\n{response_buffer}"
242
 
243
 
244
- ##############################################################################
245
- # μΉ΄ν…Œκ³ λ¦¬λ³„ 간단 μ„€λͺ…을 'Thinking' + 'Response'둜 ν™•μž₯ (슀트리밍)
246
- ##############################################################################
247
  def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
248
  prompt = f"""
249
  λ‹€μŒμ€ '{obj_name}'의 '{category}' κ΄€λ ¨ κ°„λ‹¨ν•œ μ„€λͺ…μž…λ‹ˆλ‹€:
@@ -257,9 +252,7 @@ def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[st
257
  yield chunk
258
 
259
 
260
- ##############################################################################
261
- # ν•œ ν‚€μ›Œλ“œ(였브젝트)에 λŒ€ν•œ κΈ°λ³Έ 아이디어(μΉ΄ν…Œκ³ λ¦¬λ³„) 생성
262
- ##############################################################################
263
  def generate_single_object_transformations(obj):
264
  results = {}
265
  for category, transformations in physical_transformation_categories.items():
@@ -268,9 +261,7 @@ def generate_single_object_transformations(obj):
268
  results[category] = {"base": base_description, "enhanced": ""}
269
  return results
270
 
271
- ##############################################################################
272
- # 2개 ν‚€μ›Œλ“œ μƒν˜Έμž‘μš©
273
- ##############################################################################
274
  def generate_two_objects_interaction(obj1, obj2):
275
  results = {}
276
  for category, transformations in physical_transformation_categories.items():
@@ -283,9 +274,7 @@ def generate_two_objects_interaction(obj1, obj2):
283
  results[category] = {"base": base_description, "enhanced": ""}
284
  return results
285
 
286
- ##############################################################################
287
- # 3개 ν‚€μ›Œλ“œ μƒν˜Έμž‘μš©
288
- ##############################################################################
289
  def generate_three_objects_interaction(obj1, obj2, obj3):
290
  results = {}
291
  for category, transformations in physical_transformation_categories.items():
@@ -299,9 +288,6 @@ def generate_three_objects_interaction(obj1, obj2, obj3):
299
  return results
300
 
301
 
302
- ##############################################################################
303
- # μ‹€μ œ λ³€ν™˜ 생성 둜직
304
- ##############################################################################
305
  def generate_transformations(text1, text2=None, text3=None):
306
  if text2 and text3:
307
  results = generate_three_objects_interaction(text1, text2, text3)
@@ -314,85 +300,88 @@ def generate_transformations(text1, text2=None, text3=None):
314
  objects = [text1]
315
  return results, objects
316
 
317
- ##############################################################################
318
- # 슀트리밍: 각 μΉ΄ν…Œκ³ λ¦¬λ³„λ‘œ 'Thinking' + 'Response' 뢀뢄을 μ‹€μ‹œκ°„ 전달
319
- ##############################################################################
320
  def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
321
  """
322
- Gradio의 Chatbot μ»΄ν¬λ„ŒνŠΈμ— λ§žμΆ°μ„œ
323
- [(role='assistant', content=...)] ν˜•νƒœμ˜ 리슀트λ₯Ό yield.
324
  """
325
- # 1) μž…λ ₯κ°’ 확인
326
- yield [("assistant", "μž…λ ₯κ°’ 확인 쀑...")]
327
  time.sleep(0.3)
328
 
329
  text1 = text1.strip() if text1 else None
330
  text2 = text2.strip() if text2 else None
331
  text3 = text3.strip() if text3 else None
332
  if not text1:
333
- yield [("assistant", "였λ₯˜: μ΅œμ†Œ ν•˜λ‚˜μ˜ ν‚€μ›Œλ“œλ₯Ό μž…λ ₯ν•΄μ£Όμ„Έμš”.")]
334
  return
335
 
336
  # 2) 아이디어 생성
337
- yield [("assistant", "창의적인 λͺ¨λΈ/컨셉/ν˜•μƒ λ³€ν™” 아이디어 생성 쀑... (μΉ΄ν…Œκ³ λ¦¬λ³„ 뢄석)") ]
338
  time.sleep(0.3)
339
  results, objects = generate_transformations(text1, text2, text3)
 
340
  obj_name = " 및 ".join([obj for obj in objects if obj])
341
 
342
- # μΉ΄ν…Œκ³ λ¦¬λ³„ 슀트리밍 처리
343
- for i, (category, result_dict) in enumerate(results.items(), start=1):
344
- base_desc = result_dict["base"]
345
- yield [("assistant", f"**[{i}/{len(results)}] μΉ΄ν…Œκ³ λ¦¬:** {category}\n\nκΈ°λ³Έ 아이디어: {base_desc}\n\nμ§€κΈˆλΆ€ν„° Thinking + Responseλ₯Ό λ‹¨κ³„μ μœΌλ‘œ μŠ€νŠΈλ¦¬λ°ν•©λ‹ˆλ‹€...")]
 
 
 
346
  time.sleep(0.5)
347
 
348
  thinking_text = ""
349
  response_text = ""
350
- is_thinking_done = False
351
 
352
  for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
353
  if chunk.startswith("[Thinking Chunk]"):
354
- # 생각 단계
355
  thinking_text += chunk.replace("[Thinking Chunk]", "")
356
- yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
 
357
  elif chunk.startswith("[Response Start]"):
358
- # 응닡 μ‹œμž‘
359
- is_thinking_done = True
360
  partial = chunk.replace("[Response Start]", "")
361
  response_text += partial
362
- yield [("assistant", f"**[Final Response μ‹œμž‘]**\n{partial}")]
 
363
  elif chunk.startswith("[Final Response]"):
364
- # 응닡 μ΅œμ’…
365
  final = chunk.replace("[Final Response]", "")
366
  response_text += f"\n{final}"
367
- yield [("assistant", f"**[μ΅œμ’… Response]**\n{response_text.strip()}")]
 
368
  else:
369
  # 일반 응닡 슀트리밍
370
- if is_thinking_done:
371
  response_text += chunk
372
- yield [("assistant", f"**[응닡 μ§„ν–‰]**\n{response_text}")]
373
  else:
374
  thinking_text += chunk
375
- yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
376
 
377
- result_dict["enhanced"] = response_text
378
 
379
- # 3) μ™„λ£Œ λ©”μ‹œμ§€
380
- yield [("assistant", "**λͺ¨λ“  μΉ΄ν…Œκ³ λ¦¬μ— λŒ€ν•œ 슀트리밍이 μ™„λ£Œλ˜μ—ˆμŠ΅λ‹ˆλ‹€!**")]
381
 
382
 
383
  ##############################################################################
384
- # Gradio UI
385
  ##############################################################################
386
- with gr.Blocks(title="슀트리밍 예제: Gemini 2.0 Flash Thinking",
387
- theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
388
-
389
- gr.Markdown("# πŸš€ ν‚€μ›Œλ“œ 기반 창의적 λ³€ν™” 아이디어 (Gemini 2.0 Flash Thinking, Streaming)")
390
- gr.Markdown("ν‚€μ›Œλ“œ 1~3개λ₯Ό μž…λ ₯ν•˜λ©΄, **μΉ΄ν…Œκ³ λ¦¬λ³„λ‘œ** 'Thinking'κ³Ό 'Response'κ°€ μ‹€μ‹œκ°„ μŠ€νŠΈλ¦¬λ°λ©λ‹ˆλ‹€.")
391
 
392
- # Chatbotμ—μ„œ type="tuples"둜 μˆ˜μ •
393
  chatbot = gr.Chatbot(
394
- label="μΉ΄ν…Œκ³ λ¦¬λ³„ 아이디어(Thinking + Response) 슀트리밍",
395
- type="tuples", # <-- "tuple"이 μ•„λ‹ˆλΌ "tuples"둜 μˆ˜μ •
396
  render_markdown=True
397
  )
398
 
@@ -402,29 +391,28 @@ with gr.Blocks(title="슀트리밍 예제: Gemini 2.0 Flash Thinking",
402
  text_input2 = gr.Textbox(label="ν‚€μ›Œλ“œ 2 (선택)", placeholder="예: λ‘œλ΄‡")
403
  text_input3 = gr.Textbox(label="ν‚€μ›Œλ“œ 3 (선택)", placeholder="예: 인곡지λŠ₯")
404
  submit_button = gr.Button("아이디어 μƒμ„±ν•˜κΈ°")
405
-
406
  clear_button = gr.Button("λŒ€ν™” μ§€μš°κΈ°")
407
-
408
  with gr.Column(scale=2):
409
- pass # 이미 chatbot이 우츑 μ˜μ—­μ— 할당됨
410
 
411
  def clear_chat():
412
  return []
413
 
 
414
  examples = [
415
  ["μžλ™μ°¨", "", ""],
416
  ["슀마트폰", "인곡지λŠ₯", ""],
417
  ["λ“œλ‘ ", "인곡지λŠ₯", ""],
418
- ["μš΄λ™ν™”", "μ›¨μ–΄λŸ¬λΈ”", "건강"],
419
  ]
420
  gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
421
 
422
- # 슀트리밍 처리
423
  submit_button.click(
424
  fn=process_inputs_stream,
425
  inputs=[text_input1, text_input2, text_input3],
426
  outputs=chatbot,
427
- stream=True # stream=True둜 μ‹€μ‹œκ°„ 슀트리밍
428
  )
429
 
430
  clear_button.click(
 
1
+
2
  import os
3
  import gradio as gr
4
  import random
 
7
  from typing import Iterator
8
 
9
  import google.generativeai as genai
 
10
 
11
  logging.basicConfig(
12
  level=logging.INFO,
 
18
  )
19
  logger = logging.getLogger("idea_generator")
20
 
21
+ # ====== Gemini API μ„€μ • ======
22
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
23
  genai.configure(api_key=GEMINI_API_KEY)
24
 
25
+ # ====== μ‚¬μš©ν•  Gemini λͺ¨λΈ ======
26
  model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
27
 
28
+ # μŠ¬λž˜μ‹œ("/")둜 κ΅¬λΆ„λœ λ³€ν™˜ λ¬Έμžμ—΄μ—μ„œ ν•œ μ˜΅μ…˜λ§Œ 선택
 
 
 
29
  def choose_alternative(transformation):
30
  if "/" not in transformation:
31
  return transformation
 
48
  return random.choice([left, right])
49
 
50
 
51
+
52
  ##############################################################################
53
  # μΉ΄ν…Œκ³ λ¦¬ 사전
54
  # (μ•„λž˜ μ˜ˆμ‹œμ—μ„œλŠ” λͺ¨λ“  μΉ΄ν…Œκ³ λ¦¬λ₯Ό ν¬ν•¨μ‹œμΌ°μ§€λ§Œ, ν•„μš”μ— 따라 λ²”μœ„λ₯Ό μ‘°μ •ν•˜μ„Έμš”.)
 
195
  }
196
 
197
 
198
+
199
+ # ====== Gemini 슀트리밍 API ν•¨μˆ˜ ======
 
 
200
  def query_gemini_api_stream(prompt: str) -> Iterator[str]:
201
  """
202
+ Gemini 2.0 Flash λͺ¨λΈμ—μ„œ Thinking(사고 κ³Όμ •) + Response(μ΅œμ’… λ‹΅λ³€)λ₯Ό
203
+ stream=True둜 λ°›μ•„, chunk λ‹¨μœ„λ‘œ yield.
204
  """
205
+ # λŒ€ν™” 이λ ₯ 없이 λ‹¨μˆœ 호좜
206
  chat = model.start_chat(history=[])
207
  response = chat.send_message(prompt, stream=True)
208
 
 
213
  for chunk in response:
214
  parts = chunk.candidates[0].content.parts
215
 
 
216
  if len(parts) == 2 and not thinking_complete:
217
+ # 첫 번째 part: Thinking
218
  thought_buffer += parts[0].text
219
  yield f"[Thinking Chunk] {parts[0].text}"
220
+
221
+ # 두 번째 part: Response
222
  response_buffer = parts[1].text
223
  yield f"[Response Start] {parts[1].text}"
224
+
225
  thinking_complete = True
226
  elif thinking_complete:
227
+ # 이미 응닡 단계에 있음
228
  current_chunk = parts[0].text
229
  response_buffer += current_chunk
230
  yield current_chunk
231
  else:
232
+ # 아직 Thinking 단계
233
  current_chunk = parts[0].text
234
  thought_buffer += current_chunk
235
  yield f"[Thinking Chunk] {current_chunk}"
236
 
237
+ # λ§ˆμ§€λ§‰μ— 전체 μ΅œμ’… 응닡
238
  yield f"\n[Final Response]\n{response_buffer}"
239
 
240
 
241
+ # ====== μ„€λͺ… ν™•μž₯ ν•¨μˆ˜ (슀트리밍) ======
 
 
242
  def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
243
  prompt = f"""
244
  λ‹€μŒμ€ '{obj_name}'의 '{category}' κ΄€λ ¨ κ°„λ‹¨ν•œ μ„€λͺ…μž…λ‹ˆλ‹€:
 
252
  yield chunk
253
 
254
 
255
+ # ====== 단일 ν‚€μ›Œλ“œ(였브젝트)에 λŒ€ν•œ 아이디어 생성 ======
 
 
256
  def generate_single_object_transformations(obj):
257
  results = {}
258
  for category, transformations in physical_transformation_categories.items():
 
261
  results[category] = {"base": base_description, "enhanced": ""}
262
  return results
263
 
264
+ # ====== 2개 였브젝트 μƒν˜Έμž‘μš© ======
 
 
265
  def generate_two_objects_interaction(obj1, obj2):
266
  results = {}
267
  for category, transformations in physical_transformation_categories.items():
 
274
  results[category] = {"base": base_description, "enhanced": ""}
275
  return results
276
 
277
+ # ====== 3개 였브젝트 μƒν˜Έμž‘μš© ======
 
 
278
  def generate_three_objects_interaction(obj1, obj2, obj3):
279
  results = {}
280
  for category, transformations in physical_transformation_categories.items():
 
288
  return results
289
 
290
 
 
 
 
291
  def generate_transformations(text1, text2=None, text3=None):
292
  if text2 and text3:
293
  results = generate_three_objects_interaction(text1, text2, text3)
 
300
  objects = [text1]
301
  return results, objects
302
 
303
+
304
+ # ====== 슀트리밍으둜 각 μΉ΄ν…Œκ³ λ¦¬λ₯Ό μˆœνšŒν•˜λ©° Thinking + Response ν‘œμ‹œ ======
 
305
def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
    """Stream idea generation for 1-3 keywords into a gr.Chatbot.

    Yields the FULL message history on every update, as a list of
    ``{"role": "assistant", "content": ...}`` dicts (Chatbot type="messages").

    Fix: the previous version yielded a single-message list each time, which
    replaced the Chatbot's entire content and erased every previously
    streamed category; we now accumulate messages and yield a copy of the
    whole history so all categories stay visible.

    Parameters:
        text1: required keyword (error message is emitted if empty).
        text2, text3: optional additional keywords.
    """
    messages: list = []

    def _append(content: str) -> list:
        # Start a new assistant bubble; return a snapshot of the history.
        messages.append({"role": "assistant", "content": content})
        return list(messages)

    def _update(content: str) -> list:
        # Rewrite the most recent bubble in place (token-streaming update).
        messages[-1]["content"] = content
        return list(messages)

    # 1) Validate inputs
    yield _append("입력값 확인 중...")
    time.sleep(0.3)  # brief pause so the status message is visible

    text1 = text1.strip() if text1 else None
    text2 = text2.strip() if text2 else None
    text3 = text3.strip() if text3 else None
    if not text1:
        yield _append("오류: 최소 하나의 키워드를 입력해주세요.")
        return

    # 2) Generate base ideas (category -> {"base": ..., "enhanced": ""})
    yield _append("창의적인 모델/컨셉/형상 변화 아이디어 생성 중... (카테고리별 분석)")
    time.sleep(0.3)
    results, objects = generate_transformations(text1, text2, text3)

    obj_name = " 및 ".join([obj for obj in objects if obj])

    # 3) Stream Thinking + Response per category
    for i, (category, info) in enumerate(results.items(), start=1):
        base_desc = info["base"]
        yield _append(
            f"**[{i}/{len(results)}] 카테고리:** {category}\n\n"
            f"기본 아이디어: {base_desc}\n\n"
            "Thinking + Response 스트리밍 시작..."
        )
        time.sleep(0.5)

        thinking_text = ""
        response_text = ""
        thinking_done = False
        phase = None  # which bubble is currently being streamed into

        for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
            if chunk.startswith("[Thinking Chunk]"):
                # Thinking phase: grow the current Thinking bubble.
                thinking_text += chunk.replace("[Thinking Chunk]", "")
                content = f"**[Thinking]**\n{thinking_text}"
                yield _update(content) if phase == "thinking" else _append(content)
                phase = "thinking"
            elif chunk.startswith("[Response Start]"):
                # Response begins: open a fresh bubble.
                thinking_done = True
                partial = chunk.replace("[Response Start]", "")
                response_text += partial
                yield _append(f"**[Response 시작]**\n{partial}")
                phase = "response"
            elif chunk.startswith("[Final Response]"):
                # Final aggregated answer for this category.
                final = chunk.replace("[Final Response]", "")
                response_text += f"\n{final}"
                yield _append(f"**[최종 Response]**\n{response_text.strip()}")
                phase = "final"
            else:
                # Untagged chunk: route by current phase.
                if thinking_done:
                    response_text += chunk
                    content = f"**[응답 진행]**\n{response_text}"
                    yield _update(content) if phase == "progress" else _append(content)
                    phase = "progress"
                else:
                    thinking_text += chunk
                    content = f"**[Thinking]**\n{thinking_text}"
                    yield _update(content) if phase == "thinking" else _append(content)
                    phase = "thinking"

        # Persist the enhanced text back into the results dict.
        info["enhanced"] = response_text

    # 4) Done
    yield _append("**모든 카테고리에 대한 스트리밍이 완료되었습니다!**")
373
 
374
 
375
  ##############################################################################
376
+ # Gradio UI (Chatbot: type='messages')
377
  ##############################################################################
378
+ with gr.Blocks(title="Gemini Flash Thinking (Stream)", theme=gr.themes.Soft(primary_hue="teal")) as demo:
379
+ gr.Markdown("# πŸš€ ν‚€μ›Œλ“œ 기반 창의적 λ³€ν™” 아이디어 (Gemini 2.0 Flash Thinking, Streaming)\n"+
380
+ "ν‚€μ›Œλ“œ 1~3개λ₯Ό μž…λ ₯ν•˜λ©΄, **μΉ΄ν…Œκ³ λ¦¬λ³„**둜 'Thinking'κ³Ό 'Response'κ°€ μ‹€μ‹œκ°„ μŠ€νŠΈλ¦¬λ°λ©λ‹ˆλ‹€.")
 
 
381
 
 
382
  chatbot = gr.Chatbot(
383
+ label="μΉ΄ν…Œκ³ λ¦¬λ³„ 슀트리밍",
384
+ type="messages", # OpenAI μŠ€νƒ€μΌ {"role":"assistant", "content":...} 포맷
385
  render_markdown=True
386
  )
387
 
 
391
  text_input2 = gr.Textbox(label="ν‚€μ›Œλ“œ 2 (선택)", placeholder="예: λ‘œλ΄‡")
392
  text_input3 = gr.Textbox(label="ν‚€μ›Œλ“œ 3 (선택)", placeholder="예: 인곡지λŠ₯")
393
  submit_button = gr.Button("아이디어 μƒμ„±ν•˜κΈ°")
 
394
  clear_button = gr.Button("λŒ€ν™” μ§€μš°κΈ°")
395
+
396
  with gr.Column(scale=2):
397
+ pass
398
 
399
  def clear_chat():
400
  return []
401
 
402
+ # μ˜ˆμ‹œ
403
  examples = [
404
  ["μžλ™μ°¨", "", ""],
405
  ["슀마트폰", "인곡지λŠ₯", ""],
406
  ["λ“œλ‘ ", "인곡지λŠ₯", ""],
407
+ ["μš΄λ™ν™”", "μ›¨μ–΄λŸ¬λΈ”", "건강"],
408
  ]
409
  gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
410
 
 
411
  submit_button.click(
412
  fn=process_inputs_stream,
413
  inputs=[text_input1, text_input2, text_input3],
414
  outputs=chatbot,
415
+ stream=True # μ΅œμ‹  Gradio(3.27+)μ—μ„œλ§Œ 지원
416
  )
417
 
418
  clear_button.click(