# app.py
import gradio as gr
from src.agent import Agent
from src.create_database import load_and_process_dataset  # Import from create_database.py
import os
import sys
import uuid
import requests
import logging
import subprocess
from llama_cpp import Llama  # Import Llama from llama_cpp
import spacy

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Function to install requirements
def install_requirements():
    try:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
        logging.info("Requirements installed successfully.")
    except subprocess.CalledProcessError as e:
        logging.error(f"Failed to install requirements: {e}")


# Function to download the spaCy model
def download_spacy_model(model_name):
    try:
        subprocess.check_call([sys.executable, '-m', 'spacy', 'download', model_name])
        logging.info(f"spaCy model {model_name} downloaded successfully.")
    except subprocess.CalledProcessError as e:
        logging.error(f"Failed to download spaCy model {model_name}: {e}")


# Install requirements
install_requirements()

# Download the spaCy model if it isn't already installed
if not spacy.util.is_package('en_core_web_lg'):
    download_spacy_model('en_core_web_lg')

# Create the models directory if it doesn't exist
local_dir = "models"
os.makedirs(local_dir, exist_ok=True)

# Specify the filename for the model
filename = "unsloth.Q4_K_M.gguf"
model_path = os.path.join(local_dir, filename)


# Function to download the model file from the Hugging Face Hub
def download_model(repo_id, filename, save_path):
    # Construct the URL for the model file
    url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"
    # Stream the download so the multi-GB GGUF file is never held in memory
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        with open(save_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        logging.info(f"Model downloaded to {save_path}")
    else:
        logging.error(f"Failed to download model: {response.status_code}")


# Download the model if it doesn't exist
if not os.path.exists(model_path):
    download_model("PurpleAILAB/Llama3.2-3B-uncensored-SQLi-Q4_K_M-GGUF", filename, model_path)


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
):
    model_path = "models/unsloth.Q4_K_M.gguf"  # Path to the downloaded model
    db_path = "agent.db"
    system_prompt = system_message

    # Check if the database exists; if not, initialize it
    if not os.path.exists(db_path):
        data_update_path = "data-update.txt"
        keyword_dir = "keyword"  # Keyword directory
        load_and_process_dataset(data_update_path, keyword_dir, db_path)

    # Load the model with the desired context window. Note that max_tokens is
    # a per-call generation parameter, not a constructor argument, so it is
    # passed when the model is invoked (see the sketch at the end of this file).
    llm = Llama(
        model_path=model_path,
        n_ctx=500,  # Set the maximum context length
    )

    agent = Agent(llm, db_path, system_prompt)
    user_id = str(uuid.uuid4())  # Generate a unique user ID for each session

    response = agent.process_query(user_id, message)
    return response


"""
For information on how to customize the ChatInterface, peruse the gradio docs:
https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value=(
                "You are the intelligent assistant for Les Chronique MTC. "
                "Your role is to help visitors by explaining the content of "
                "Michel Thomas's Chroniques, Flash Infos, and Chronique-FAQ. "
                "Use the provided context to improve your answers and make "
                "sure they are accurate and relevant."
            ),
            label="System message",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()
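

# --- Illustrative sketches below; neither function is called by the app. ---

# Alternative download sketch: the huggingface_hub client handles retries,
# resumable downloads, and caching, so it is often preferable to a raw
# requests.get. This assumes huggingface_hub is installed (gradio depends on
# it), though it is not imported elsewhere in this file.
def _download_model_via_hub(repo_id: str, filename: str, local_dir: str) -> str:
    from huggingface_hub import hf_hub_download  # imported lazily; optional here

    # Returns the local path of the downloaded file
    return hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)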
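

# Generation sketch: how a completion call with a response-length cap might
# look, assuming Agent.process_query ultimately invokes the llama_cpp
# completion API. The prompt string and the 500-token cap are illustrative.
def _example_generate(llm: Llama, prompt: str) -> str:
    # max_tokens caps the tokens generated per call; n_ctx (set when the
    # model is loaded) caps the total context window.
    output = llm(prompt, max_tokens=500)
    return output["choices"][0]["text"]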