from dotenv import load_dotenv
import gradio as gr
import logging
import openai
import os

# Load environment variables (AGENT_ENDPOINT, AGENT_KEY) from a local .env file
load_dotenv()
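
# Expected .env layout — the values below are illustrative placeholders, not real
# credentials; AGENT_ENDPOINT should point at an OpenAI-compatible endpoint:
#   AGENT_ENDPOINT=https://your-agent-endpoint.example.com/v1
#   AGENT_KEY=your-secret-key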

MAX_HISTORY_LENGTH = 10  # maximum number of past exchanges sent back to the model


def load_css():
    """Return the contents of custom.css, or an empty string if the file is missing."""
    try:
        with open('custom.css', 'r', encoding='utf-8') as file:
            return file.read()
    except FileNotFoundError:
        logging.warning("⚠️ custom.css not found, skipping.")
        return ""


def format_history_for_api(history):
    """
    Convert Gradio chat history ([user, assistant] pairs) into the OpenAI messages format.
    """
    formatted_messages = []
    for message in history:
        formatted_messages.extend([
            {"role": "user", "content": str(message[0])},
            {"role": "assistant", "content": str(message[1])}
        ])
    return formatted_messages
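
# Illustrative example: history = [["Bonjour", "Salut !"]] becomes
#   [{"role": "user", "content": "Bonjour"},
#    {"role": "assistant", "content": "Salut !"}]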


def rag_with_reasoner(user_query: str, history: list) -> dict:
    """
    RAG tool that takes a user query and returns an answer grounded in the conversation history.
    """
    client = openai.OpenAI(
        base_url=os.getenv("AGENT_ENDPOINT"),
        api_key=os.getenv("AGENT_KEY"),
    )

    # Format previous turns for the API, then append the new user query
    messages = format_history_for_api(history) if history else []
    messages.append({"role": "user", "content": str(user_query)})

    try:
        response = client.chat.completions.create(
            model="n/a",  # the agent endpoint selects the model; this value is a placeholder
            messages=messages
        )
        return {"role": "assistant", "content": response.choices[0].message.content}
    except Exception as e:
        print(f"Error calling OpenAI API: {e}")
        return {"role": "assistant", "content": f"Une erreur s'est produite: {str(e)}"}


def chat_function(message, history):
    """
    Chat callback wired into the Gradio interface.
    """
    try:
        # Keep only the most recent exchanges to bound the prompt size
        if len(history) > MAX_HISTORY_LENGTH:
            history = history[-MAX_HISTORY_LENGTH:]

        response = rag_with_reasoner(message, history)
        return response["content"]
    except Exception as e:
        print(f"Error in chat function: {e}")
        return f"Une erreur s'est produite: {str(e)}"


# Gradio interface configuration: inline CSS for message styling and the header banner
css = """
.message.tool-call { background-color: #f0f8ff; }
.message.tool-result { background-color: #f5f5f5; }
.message.error { background-color: #fff0f0; }

.chat-header {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 10px;
    background: linear-gradient(90deg, #0000d3, #010272);
}

.chat-header img {
    height: 40px;
}
"""

# Build the chat interface with Gradio
chat_interface = gr.ChatInterface(
    fn=chat_function,
    title="<div class='chat-header'><img src='https://www.vie-publique.sn/_nuxt/vie-publique-logo-4.BGl8Bhgm.svg' alt='Logo'><span>🤖</span></div>",
    description="Discutez avec un agent IA pour des analyses précises et des conseils sur la vie publique du Sénégal 🇸🇳",
    examples=["Sénégal 2050", "Budget 2025", "Constitution du Sénégal", "Rapport sur la situation des finances exercice 2019 au 31 mars 2024"],
    textbox=gr.Textbox(
        placeholder="Posez votre question ici...",
        submit_btn="Envoyer 📤",
        container=False,
        scale=7
    ),
    theme=gr.themes.Soft(),
    css=css + load_css()
)


def main():
    chat_interface.launch()


if __name__ == "__main__":
    main()
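
# To run locally (assuming this file is saved as app.py and the .env variables above
# are set): `python app.py`, then open the local URL Gradio prints
# (http://127.0.0.1:7860 by default).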