Spaces: Running on Zero
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import uvicorn
from dotenv import load_dotenv
from difflib import SequenceMatcher
import re
from spaces import GPU
import httpx
# Load environment variables
load_dotenv()

# Initialize the FastAPI application
app = FastAPI()

# Global dictionary that stores the loaded models
global_data = {
    'models': []
}
# Model configurations (including the new ones)
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    # Other models omitted for space
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-70B Instruct"},
    {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf", "name": "Codegemma 2B"},
    {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"}
]
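
# Note: each entry above is consumed by Llama.from_pretrained in ModelManager,
# which downloads the named GGUF file from the Hugging Face Hub (this requires
# the huggingface_hub package to be installed).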
# Class that manages model loading
class ModelManager:
    def __init__(self):
        self.models = []

    def load_model(self, model_config):
        print(f"Loading model: {model_config['name']}...")
        return {"model": Llama.from_pretrained(repo_id=model_config['repo_id'], filename=model_config['filename']), "name": model_config['name']}

    @GPU(duration=0)
    def load_all_models(self):
        print("Starting model load...")
        with ThreadPoolExecutor(max_workers=len(model_configs)) as executor:
            futures = [executor.submit(self.load_model, config) for config in model_configs]
            models = []
            for future in tqdm(as_completed(futures), total=len(model_configs), desc="Loading models", unit="model"):
                try:
                    model = future.result()
                    models.append(model)
                    print(f"Model loaded successfully: {model['name']}")
                except Exception as e:
                    print(f"Error loading model: {e}")
        print("All models have been loaded.")
        return models
# Instantiate ModelManager and load the models only once
model_manager = ModelManager()
global_data['models'] = model_manager.load_all_models()
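
# At this point `global_data['models']` holds a list of dicts shaped like
# {"model": <llama_cpp.Llama>, "name": <str>}, one entry per successfully
# loaded configuration (failed loads are logged and skipped above).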
# Request model for the chat endpoint
class ChatRequest(BaseModel):
    message: str
    top_k: int = 50
    top_p: float = 0.95
    temperature: float = 0.7
# Generate a chat response from a single model
def generate_chat_response(request, model_data):
    # Normalize before the try block so `user_input` is defined in the except branch
    user_input = normalize_input(request.message)
    try:
        llm = model_data['model']
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            top_k=request.top_k,
            top_p=request.top_p,
            temperature=request.temperature
        )
        reply = response['choices'][0]['message']['content']
        return {"response": reply, "literal": user_input, "model_name": model_data['name']}
    except Exception as e:
        return {"response": f"Error: {str(e)}", "literal": user_input, "model_name": model_data['name']}
def normalize_input(input_text):
    return input_text.strip()

def remove_duplicates(text):
    # Collapse repeated prompt echoes and strip leftover [/INST] markers
    text = re.sub(r'(Hello there, how are you\? \[/INST\]){2,}', 'Hello there, how are you? [/INST]', text)
    text = re.sub(r'(How are you\? \[/INST\]){2,}', 'How are you? [/INST]', text)
    text = text.replace('[/INST]', '')
    lines = text.split('\n')
    unique_lines = list(dict.fromkeys(lines))
    return '\n'.join(unique_lines).strip()
def remove_repetitive_responses(responses):
    seen = set()
    unique_responses = []
    for response in responses:
        normalized_response = remove_duplicates(response['response'])
        if normalized_response not in seen:
            seen.add(normalized_response)
            unique_responses.append(response)
    return unique_responses
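
# Illustrative sketch (hypothetical values): two models returning the same
# normalized text collapse into a single entry, keeping the first one seen:
#   remove_repetitive_responses([
#       {"response": "Hi", "literal": "hi", "model_name": "A"},
#       {"response": "Hi", "literal": "hi", "model_name": "B"},
#   ])
#   # -> [{"response": "Hi", "literal": "hi", "model_name": "A"}]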
# Error handling for model initialization (the traceback mentioned in the error).
# The `client.allow(...)` call below mirrors the failing assertion from the Spaces
# ZeroGPU client traceback; httpx.Client itself has no `allow` method, so the
# call fails here by design and is surfaced as an HTTP 500.
def handle_initialization_error(allow_token):
    try:
        client = httpx.Client()
        pid = 0  # stands in for the current process ID
        assert client.allow(allow_token=allow_token, pid=pid) == httpx.codes.OK
    except (AssertionError, AttributeError):
        raise HTTPException(status_code=500, detail="Error initializing the Spaces client")
# Route that generates chat responses across multiple models
@app.post("/chat/")
async def chat(request: ChatRequest):
    try:
        # Simulate the `AssertionError` raised during initialization
        allow_token = "test_token"
        handle_initialization_error(allow_token)
        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(generate_chat_response, request, model) for model in global_data['models']]
            responses = [future.result() for future in as_completed(futures)]
        unique_responses = remove_repetitive_responses(responses)
        return {"responses": unique_responses}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing the request: {str(e)}")
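
# Hypothetical usage once the server is running (values are illustrative):
#   import httpx
#   r = httpx.post("http://localhost:8000/chat/",
#                  json={"message": "Hello", "top_k": 50, "top_p": 0.95, "temperature": 0.7})
#   print(r.json())  # -> {"responses": [...]}, one entry per distinct model answer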
# Use of the `chat_template.default` template
chat_template = """
User: {message}
Bot: {response}
"""

# Render the chat response template
def render_chat_template(message, response):
    return chat_template.format(message=message, response=response)
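
# Example (hypothetical values):
#   render_chat_template("Hi", "Hello!")
#   # -> "\nUser: Hi\nBot: Hello!\n"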
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)