from flask import Flask, request, render_template, jsonify
import PIL.Image
import google.generativeai as genai
import os
from tempfile import NamedTemporaryFile
from gradio_client import Client, handle_file  # Importez gradio_client

app = Flask(__name__)

# Gemini generation settings shared by every request.
generation_config = {
    "temperature": 1,
    "max_output_tokens": 8192,
}

# Every harm category is set to BLOCK_NONE, i.e. Gemini's content filters
# are fully disabled for this app's requests.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

# API key comes from the TOKEN environment variable.
# NOTE(review): if TOKEN is unset this is None and Gemini calls will fail
# at request time — confirm the deployment always defines TOKEN.
GOOGLE_API_KEY = os.environ.get("TOKEN")

genai.configure(api_key=GOOGLE_API_KEY)

# Query the Gemini model with an image.
def query_gemini(image_path, prompt="Résous ce problème mathématiques. Je veux qu'en réponse tu me donnes un rendu complet en utilisant du Latex."):
    """Send an image plus a prompt to Gemini and return its text answer.

    Args:
        image_path: Path of the image file to analyze.
        prompt: Instruction sent with the image (defaults to a French
            "solve this math problem, answer in LaTeX" request).

    Returns:
        The model's response text, or the stringified exception on any
        failure (callers display whatever string comes back).
    """
    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro-002",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    try:
        # Open the image inside the try, and with a context manager: the
        # original leaked the file handle and let PIL errors escape instead
        # of following this function's string-error contract.
        with PIL.Image.open(image_path) as img:
            response = model.generate_content(
                [prompt, img], request_options={"timeout": 600}
            )
        return response.text
    except Exception as e:
        # Errors are returned as plain strings, matching the original contract.
        return str(e)

# Query the hosted Qwen2 math demo.
def query_qwen2(image_path, question="Résous ce problème mathématiques. Donne la réponse en utilisant LaTeX."):
    """Ask the Qwen2-Math Gradio Space to solve the problem in the image.

    Returns the Space's answer, or the stringified exception on failure.
    """
    try:
        return Client("Qwen/Qwen2-Math-Demo").predict(
            image=handle_file(image_path),
            sketchpad=None,
            question=question,
            api_name="/math_chat_bot",
        )
    except Exception as err:
        # Same contract as query_gemini: errors come back as plain strings.
        return str(err)

@app.route('/')
def index():
    """Serve the main page containing the upload form."""
    return render_template('math.html')

@app.route('/upload', methods=['POST'])
def upload_image():
    """Handle an image upload and return the chosen model's answer as JSON.

    Expects a multipart form with an 'image' file and an optional
    'model_choice' field (defaults to 'gemini'; the literal value
    "mariam's" dispatches to Gemini, anything else to Qwen2).

    Returns:
        200 with {'result', 'model'} on success,
        400 when no image or an empty filename is provided,
        500 with {'error'} if dispatching to a model raises.
    """
    if 'image' not in request.files:
        return jsonify({'error': 'Aucune image fournie'}), 400

    file = request.files['image']
    model_choice = request.form.get('model_choice', 'gemini')  # which backend to use

    if file.filename == '':
        return jsonify({'error': 'Aucun fichier sélectionné'}), 400

    # Persist the upload to a named temp file; close the handle before the
    # model call so the image can be re-opened by path (needed on Windows).
    with NamedTemporaryFile(delete=False) as temp_file:
        file.save(temp_file.name)
        temp_path = temp_file.name

    try:
        if model_choice == "mariam's":
            result = query_gemini(temp_path)
        else:
            result = query_qwen2(temp_path)
        return jsonify({'result': result, 'model': model_choice})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
    finally:
        # Always remove the temp file: the original only unlinked it on the
        # success path, leaking a file per request whenever dispatch raised.
        os.unlink(temp_path)