Update app.py
app.py
CHANGED
@@ -3,22 +3,18 @@
 # === Imports ===
 import gradio as gr
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, pipeline
 from datetime import datetime
 import os
 import json
 import logging
 from huggingface_hub import login
-
-# --- Imports specific to AgentResearcher ---
 import requests
 from bs4 import BeautifulSoup
-import logging
 from concurrent.futures import ThreadPoolExecutor
+import re
 
-
-
-# === Logger configuration ===
+# --- Logger configuration ---
 logging.basicConfig(
     level=logging.INFO,
     format="%(asctime)s - %(levelname)s - %(message)s",
@@ -28,8 +24,14 @@ logging.basicConfig(
 ]
 )
 
+# --- Hugging Face authentication ---
+# Make sure the HF_TOKEN environment variable is set to your Hugging Face token.
+# Otherwise, you can set it directly here:
+# os.environ["HF_TOKEN"] = "your_huggingface_token"
+login(token=os.environ["HF_TOKEN"])
+
 # === Loading the models ===
-#
+# AgentManager
 manager_model_name = "meta-llama/Llama-3.1-8B-Instruct"
 manager_model = AutoModelForCausalLM.from_pretrained(
     manager_model_name,
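Note: `login(token=os.environ["HF_TOKEN"])` fails with a bare `KeyError` when the variable is missing. A minimal, more defensive sketch (the error message and check are illustrative, not part of the commit):

import os
from huggingface_hub import login

token = os.environ.get("HF_TOKEN")
if not token:
    raise RuntimeError("HF_TOKEN is not set; export it or add it as a Space secret.")
login(token=token)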
@@ -56,7 +58,6 @@ analyzer_model = AutoModelForCausalLM.from_pretrained(
 )
 analyzer_tokenizer = AutoTokenizer.from_pretrained(analyzer_model_name)
 
-# AgentCoder
 # AgentCoder
 coder_model_name = "Qwen/Qwen2.5-Coder-14B-Instruct"
 coder_model = AutoModelForCausalLM.from_pretrained(
@@ -113,18 +114,7 @@ Vous êtes un assistant d'analyse. Vos tâches sont :
 5. Votre réponse doit commencer par 'Validité: Oui' ou 'Validité: Non', suivi du rapport d'analyse.
 """
 
-
-System: Vous êtes un assistant de codage. Votre tâche est de :
-1. Générer du code basé sur le résumé structuré validé suivant :
-{structured_summary}
-2. Incorporer les résultats de recherche suivants :
-{search_results}
-"""
-
-# === Definition of the functions for each agent ===
-
-
-# === Utility functions for the agentManager ===
+# === Utility functions for the AgentManager ===
 def get_variables_context():
     variables = {}
     for agent, data in project_state.items():
@@ -143,10 +133,8 @@ def update_project_state(modifications):
     target[keys[-1]] = value
 
 def extract_modifications(user_input):
-    # Simplified extraction for the example
    modifications = {}
    if "modifie" in user_input.lower():
-        import re
        matches = re.findall(r"modifie la variable (\w+(?:\.\w+)*) à (.+)", user_input, re.IGNORECASE)
        for match in matches:
            var_name, var_value = match
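For reference, a quick self-contained check of the command-parsing regex this hunk now relies on at module scope (the sample sentence is made up):

import re

user_input = "Modifie la variable AgentCoder.final_code à print('ok')"
matches = re.findall(r"modifie la variable (\w+(?:\.\w+)*) à (.+)", user_input, re.IGNORECASE)
print(matches)  # [('AgentCoder.final_code', "print('ok')")]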
@@ -189,17 +177,15 @@ def agent_manager(chat_history, user_input):
         return response, chat_history, False
 
     # Generate the response
-    prompt = ""
-    for msg in conversation:
-        prompt += f"{msg['role']}: {msg['content']}\n"
+    prompt = manager_tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
 
-    input_ids = manager_tokenizer(prompt, return_tensors="pt")
+    input_ids = manager_tokenizer(prompt, return_tensors="pt").to(manager_model.device)
     output_ids = manager_model.generate(
-        input_ids,
+        input_ids["input_ids"],
         max_new_tokens=256,
         eos_token_id=manager_tokenizer.eos_token_id,
         pad_token_id=manager_tokenizer.pad_token_id,
-        attention_mask=input_ids
+        attention_mask=input_ids["attention_mask"]
     )
     response = manager_tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
@@ -214,12 +200,7 @@ def agent_manager(chat_history, user_input):
     return response, chat_history, False
 
 # --- AgentResearcher ---
-# Helper functions for the dynamic searches
-
 def fetch_webpage(url: str) -> str:
-    """
-    Downloads the HTML content of a given URL.
-    """
     try:
         response = requests.get(url, timeout=10)
         response.raise_for_status()
@@ -230,9 +211,6 @@ def fetch_webpage(url: str) -> str:
         return ""
 
 def extract_information_from_html(html: str, keyword: str) -> list:
-    """
-    Extracts relevant information from the HTML based on a keyword.
-    """
     try:
         soup = BeautifulSoup(html, "html.parser")
         results = []
@@ -246,9 +224,6 @@ def extract_information_from_html(html: str, keyword: str) -> list:
         return []
 
 def search_gradio_docs(query: str) -> dict:
-    """
-    Searches the Gradio documentation for the sections relevant to a given query.
-    """
     url = "https://gradio.app/docs/"
     logging.info(f"Lancement de la recherche pour la requête : {query}")
     html_content = fetch_webpage(url)
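The bodies of these scraping helpers sit mostly outside the hunks; a self-contained sketch of the fetch-and-parse pattern they implement (the heading selector is illustrative):

import requests
from bs4 import BeautifulSoup

def fetch_webpage(url: str) -> str:
    # Same contract as the commit's helper: HTML on success, "" on failure.
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.RequestException:
        return ""

html = fetch_webpage("https://gradio.app/docs/")
soup = BeautifulSoup(html, "html.parser")
# Illustrative extraction: the first few section headings.
print([h.get_text(strip=True) for h in soup.find_all("h2")[:5]])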
@@ -282,14 +257,14 @@ def agent_researcher():
     ]
 
     output_ids = researcher_model.generate(
-        input_ids,
+        input_ids["input_ids"],
         max_new_tokens=512,
         eos_token_id=terminators,
         do_sample=True,
         temperature=0.6,
         top_p=0.9,
     )
-    response_ids = output_ids[0][input_ids.shape[-1]:]
+    response_ids = output_ids[0][input_ids["input_ids"].shape[-1]:]
     response = researcher_tokenizer.decode(response_ids, skip_special_tokens=True)
 
     # Parse the JSON response
@@ -331,7 +306,7 @@ def agent_analyzer():
     prompt = analyzer_tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
 
     # Create the pipeline
-    analyzer_pipeline =
+    analyzer_pipeline = pipeline(
         "text-generation",
         model=analyzer_model,
         tokenizer=analyzer_tokenizer,
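A runnable sketch of the `pipeline(...)` call this hunk completes, using a small stand-in model so it works without gated access; `analyzer_model` and `analyzer_tokenizer` in the commit are the preloaded equivalents:

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

analyzer_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
result = analyzer_pipeline("Validité: Oui.", max_new_tokens=20, return_full_text=False)
print(result[0]["generated_text"])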
@@ -435,20 +410,128 @@ def user_interaction(message, chat_history):
 
 # === Interface Gradio ===
 with gr.Blocks() as interface:
-    [… most of the old interface body was lost in the page extraction …]
+    with gr.Tabs():
+        # "Chat" tab
+        with gr.Tab("Chat"):
+            with gr.Row():
+                # Left column: main chat
+                with gr.Column(scale=3):
+                    chatbot = gr.Chatbot(label="Chat Principal")
+                    state = gr.State([])  # Message history
+                    msg = gr.Textbox(placeholder="Entrez votre message ici...")
+                    send_btn = gr.Button("Envoyer")
+
+                # Right column: agent status and logs
+                with gr.Column(scale=2):
+                    agent_status_chat = gr.Chatbot(label="Suivi des Agents")
+                    logs_box = gr.Textbox(
+                        value="",
+                        lines=10,
+                        interactive=False,
+                        placeholder="Logs d'exécution",
+                        label="Logs",
+                    )
+
+        # "Output" tab
+        with gr.Tab("Output"):
+            output_code = gr.Code(
+                value="# Le code généré sera affiché ici.\n",
+                language="python",
+                label="Code Final",
+            )
 
+    # === Status and log update functions ===
+    def update_agent_status_and_logs(chat_history):
+        """
+        Update the agent messages and the execution logs.
+        """
+        # Initialise the status messages
+        agent_status_messages = []
+
+        # AgentManager
+        structured_summary = project_state["AgentManager"]["structured_summary"]
+        if structured_summary:
+            manager_message = f"AgentManager : Résumé structuré disponible.\n{structured_summary}"
+        else:
+            manager_message = "AgentManager : En attente d'informations de l'utilisateur."
+        agent_status_messages.append(("AgentManager", manager_message))
+
+        # AgentResearcher
+        researcher_result = project_state["AgentResearcher"]["search_results"]
+        if researcher_result:
+            researcher_message = (
+                f"AgentResearcher : Résultats obtenus\n"
+                f"Documentation : {researcher_result.get('documentation', 'N/A')}\n"
+                f"Extraits de code : {researcher_result.get('extraits_code', 'N/A')}"
+            )
+        else:
+            researcher_message = "AgentResearcher : Recherche en cours..."
+        agent_status_messages.append(("AgentResearcher", researcher_message))
+
+        # AgentAnalyzer
+        analysis_report = project_state["AgentAnalyzer"]["analysis_report"]
+        if analysis_report:
+            analyzer_message = (
+                f"AgentAnalyzer : Analyse terminée\n"
+                f"{analysis_report}"
+            )
+        else:
+            analyzer_message = "AgentAnalyzer : Analyse en cours..."
+        agent_status_messages.append(("AgentAnalyzer", analyzer_message))
 
+        # AgentCoder
+        final_code = project_state["AgentCoder"]["final_code"]
+        if final_code:
+            coder_message = "AgentCoder : Code généré avec succès ✔️"
+        else:
+            coder_message = "AgentCoder : En attente des instructions."
+        agent_status_messages.append(("AgentCoder", coder_message))
+
+        # Logs
+        logs = ""
+        with open("project.log", "r") as log_file:
+            logs = log_file.read()
+
+        return agent_status_messages, logs
+
+    # === Main response function ===
+    def respond(message, chat_history, agent_chat):
+        """
+        Handle the main interaction and refresh the statuses/logs.
+        """
+        # Update the main chat
         updated_chat_history, _ = user_interaction(message, chat_history)
-        bot_message = updated_chat_history[-1][…]
+        bot_message = updated_chat_history[-1]["assistant"]
+
+        # Update the agent statuses and the logs
+        agent_status, logs = update_agent_status_and_logs(updated_chat_history)
 
+        # Update the agents' chatbot
+        agent_chat.clear()
+        for agent_name, msg_content in agent_status:
+            agent_chat.append((agent_name, msg_content))
+
+        # Produce the final code if available
+        generated_code = project_state["AgentCoder"].get("final_code", "")
+        if not generated_code:
+            generated_code = "# Aucun code n'a encore été généré."
+        else:
+            generated_code = f"{generated_code}"
+
+        return chatbot.update([(message, bot_message)]), updated_chat_history, agent_chat.update(), logs, generated_code
+
+    # === Button and submit actions ===
+    send_btn.click(
+        respond,
+        inputs=[msg, state, agent_status_chat],
+        outputs=[chatbot, state, agent_status_chat, logs_box, output_code],
+    )
+    msg.submit(
+        respond,
+        inputs=[msg, state, agent_status_chat],
+        outputs=[chatbot, state, agent_status_chat, logs_box, output_code],
+    )
 
+# Launch the interface
 if __name__ == "__main__":
-    interface.launch()
+    interface.launch()
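One caveat on the new `respond` handler: under current Gradio semantics (assuming 4.x), event callbacks return new component values; `gr.Chatbot` instances expose no `.clear()` or `.append()` inside a handler, and `chatbot.update(...)` / `agent_chat.update()` is pre-4.x style. Reading `project.log` also assumes a `FileHandler` pointing at that file is among the `logging.basicConfig` handlers elided above. A minimal sketch of equivalent wiring that returns plain values (the echo logic and status list are stand-ins for `user_interaction` and `update_agent_status_and_logs`):

import gradio as gr

def respond(message, chat_history):
    chat_history = chat_history + [(message, f"Echo: {message}")]  # stand-in for user_interaction
    agent_status = [("AgentManager", "En attente d'informations de l'utilisateur.")]  # stand-in statuses
    logs = "..."  # stand-in for the contents of project.log
    code = "# Aucun code n'a encore été généré."
    return chat_history, chat_history, agent_status, logs, code

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Chat Principal")
    state = gr.State([])
    msg = gr.Textbox(placeholder="Entrez votre message ici...")
    agent_chat = gr.Chatbot(label="Suivi des Agents")
    logs_box = gr.Textbox(lines=10, interactive=False, label="Logs")
    output_code = gr.Code(language="python", label="Code Final")
    msg.submit(respond, inputs=[msg, state],
               outputs=[chatbot, state, agent_chat, logs_box, output_code])

if __name__ == "__main__":
    demo.launch()

Returning the updated history for both `chatbot` and `state` keeps the displayed chat and the stored history in sync.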