Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -20,6 +20,13 @@ client = genai.Client(
|
|
20 |
def index():
|
21 |
return render_template('index.html')
|
22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
@app.route('/solve', methods=['POST'])
|
24 |
def solve():
|
25 |
try:
|
@@ -71,5 +78,60 @@ def solve():
|
|
71 |
except Exception as e:
|
72 |
return jsonify({'error': str(e)}), 500
|
73 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
74 |
if __name__ == '__main__':
|
75 |
app.run(debug=True)
|
|
|
20 |
def index():
|
21 |
return render_template('index.html')
|
22 |
|
23 |
+
|
24 |
+
@app.route('/free')
def free():
    """Serve the free-tier page.

    Returns:
        The rendered ``maj.html`` template.

    NOTE: renamed from ``index`` — Flask derives the endpoint name from the
    view function's name, and a second ``def index`` collides with the
    existing ``index`` view for ``/``, raising
    ``AssertionError: View function mapping is overwriting an existing
    endpoint function: index`` at import time.
    """
    return render_template('maj.html')
|
30 |
@app.route('/solve', methods=['POST'])
|
31 |
def solve():
|
32 |
try:
|
|
|
78 |
except Exception as e:
|
79 |
return jsonify({'error': str(e)}), 500
|
80 |
|
81 |
+
|
82 |
+
|
83 |
+
@app.route('/solved', methods=['POST'])
def solved():
    """Accept an uploaded image and stream a Gemini solution back as SSE.

    Reads the ``image`` file from the multipart form, re-encodes it as PNG,
    and streams the model output as ``text/event-stream`` events. Each event
    is a JSON payload carrying either a ``mode`` transition ("thinking" /
    "answering"), a ``content`` text chunk, or an ``error`` message.

    Returns:
        A streaming :class:`Response` on success, or ``(JSON error, 500)``
        if the upload/setup fails.

    NOTE: renamed from ``solve`` — a second ``def solve`` collides with the
    existing ``/solve`` view function and makes Flask raise
    ``AssertionError: View function mapping is overwriting an existing
    endpoint function: solve`` at import time.
    """
    try:
        image_data = request.files['image'].read()
        img = Image.open(io.BytesIO(image_data))

        # Re-encode as PNG so the inline_data mime type below is always valid,
        # regardless of the uploaded format.
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            # Tracks the current stream phase so a "mode" event is emitted
            # only on transitions, not on every chunk.
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.0-flash-exp",
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        " Résous ça. Write you answer with rendering Latex."
                    ]
                )
                # Solve this exercise; the output should be well presented
                # and nicely spaced (translated from the original comment).
                for chunk in response:
                    for part in chunk.candidates[0].content.parts:
                        if part.thought:
                            if mode != "thinking":
                                yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                                mode = "thinking"
                        else:
                            if mode != "answering":
                                yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                                mode = "answering"

                            yield f'data: {json.dumps({"content": part.text})}\n\n'

            except Exception as e:
                # Errors mid-stream can no longer become an HTTP 500; report
                # them in-band as an SSE error event instead.
                print(f"Error during generation: {e}")
                yield f'data: {json.dumps({"error": str(e)})}\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                # Disable proxy buffering so events reach the client promptly.
                'X-Accel-Buffering': 'no'
            }
        )

    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
132 |
+
|
133 |
+
|
134 |
+
|
135 |
+
|
136 |
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive Werkzeug debugger and
# auto-reload — confirm this is disabled for any production deployment.
if __name__ == '__main__':
    app.run(debug=True)
|