Docfile committed on
Commit
e79be93
·
verified ·
1 Parent(s): f9726ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -9
app.py CHANGED
@@ -5,7 +5,6 @@ import os
5
  from PIL import Image
6
  import io
7
  import base64
8
- import time
9
  import json
10
 
11
  app = Flask(__name__)
@@ -32,24 +31,30 @@ def solve():
32
  img_str = base64.b64encode(buffered.getvalue()).decode()
33
 
34
  def generate():
 
35
  try:
36
- response_stream = client.models.generate_content_stream(
37
  model="gemini-2.0-flash-thinking-exp-01-21",
38
  config={'thinking_config': {'include_thoughts': True}},
39
  contents=[
40
  {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
41
- "Résous ce problème?"
42
  ]
43
  )
44
 
45
- for chunk in response_stream:
46
  for part in chunk.candidates[0].content.parts:
47
- if hasattr(part, 'thought') and part.thought:
48
- yield f'data: {json.dumps({"thought": part.text})}\n\n'
 
 
49
  else:
50
- yield f'data: {json.dumps({"answer": part.text})}\n\n'
51
- time.sleep(0.1) # Légère pause pour contrôler le flux
52
-
 
 
 
53
  except Exception as e:
54
  print(f"Error during generation: {e}")
55
  yield f'data: {json.dumps({"error": str(e)})}\n\n'
 
5
  from PIL import Image
6
  import io
7
  import base64
 
8
  import json
9
 
10
  app = Flask(__name__)
 
31
  img_str = base64.b64encode(buffered.getvalue()).decode()
32
 
33
def generate():
    """Stream Gemini "thinking" and "answer" phases as Server-Sent Events.

    Reads ``client`` and ``img_str`` from the enclosing request handler's
    scope. Emits a ``{"mode": ...}`` frame each time the stream switches
    between thinking and answering, then one ``{"content": ...}`` frame per
    text part; on failure emits a single ``{"error": ...}`` frame.
    """
    mode = 'starting'
    try:
        response = client.models.generate_content_stream(
            model="gemini-2.0-flash-thinking-exp-01-21",
            # include_thoughts=True asks the model to emit reasoning parts
            # alongside the answer parts.
            config={'thinking_config': {'include_thoughts': True}},
            contents=[
                {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                "Résous ce problème mathématique étape par étape."
            ]
        )

        for chunk in response:
            # Guard: a streamed chunk may arrive with no candidates or an
            # empty content; indexing blindly would raise and kill the
            # whole SSE stream mid-response.
            if not chunk.candidates or chunk.candidates[0].content is None:
                continue
            for part in chunk.candidates[0].content.parts:
                # Parts without text (e.g. inline data) would serialize
                # as {"content": null}; skip them.
                if not part.text:
                    continue
                if part.thought:
                    if mode != "thinking":
                        yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                        mode = "thinking"
                else:
                    if mode != "answering":
                        yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                        mode = "answering"

                yield f'data: {json.dumps({"content": part.text})}\n\n'

    except Exception as e:
        # Surface the failure to the SSE client instead of dropping the
        # connection silently.
        print(f"Error during generation: {e}")
        yield f'data: {json.dumps({"error": str(e)})}\n\n'