Spaces: Running on Zero
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama
from concurrent.futures import ThreadPoolExecutor
import uvicorn
from dotenv import load_dotenv
from difflib import SequenceMatcher

load_dotenv()

app = FastAPI()
# Model initialization: GGUF checkpoints to load (all Q2_K quantizations)
models = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf"},
]
# Load every model into memory up front
llms = [Llama.from_pretrained(repo_id=model["repo_id"], filename=model["filename"]) for model in models]
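# A hedged sketch (not part of the original listing): Llama.from_pretrained
# forwards extra keyword arguments to the Llama constructor, so llama.cpp
# runtime options such as n_ctx (context window size) can be set per model.
# The values below are illustrative, not tuned.
#
# llms = [
#     Llama.from_pretrained(
#         repo_id=model["repo_id"],
#         filename=model["filename"],
#         n_ctx=2048,      # context window size
#         verbose=False,   # silence llama.cpp load logs
#     )
#     for model in models
# ]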
class ChatRequest(BaseModel):
    message: str
    top_k: int = 50
    top_p: float = 0.95
    temperature: float = 0.7
def generate_chat_response(request, llm):
    """Run a single-turn chat completion against one model."""
    try:
        user_input = request.message
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            top_k=request.top_k,
            top_p=request.top_p,
            temperature=request.temperature,
        )
        return response["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error: {str(e)}"
def select_best_response(responses, request):
    coherent_responses = filter_by_coherence(responses, request)
    return filter_by_similarity(coherent_responses)

def filter_by_coherence(responses, request):
    # Placeholder: a more sophisticated coherence filter could be implemented here if needed
    return responses
def filter_by_similarity(responses):
    # Start from the longest response; if a shorter one differs substantially
    # from it (similarity ratio below 0.9), prefer that one and stop
    responses.sort(key=len, reverse=True)
    best_response = responses[0]
    for candidate in responses[1:]:
        if SequenceMatcher(None, best_response, candidate).ratio() < 0.9:
            best_response = candidate
            break
    return best_response
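# A minimal, self-contained illustration (hypothetical strings, not model
# output) of the SequenceMatcher threshold used above: near-duplicates score
# close to 1.0, while substantially different texts score well below 0.9.
def _similarity_demo():
    a = "The quick brown fox jumps over the lazy dog."
    b = "The quick brown fox jumped over the lazy dog."  # near-duplicate of a
    c = "Completely different reply."
    print(SequenceMatcher(None, a, b).ratio())  # close to 1.0
    print(SequenceMatcher(None, a, c).ratio())  # well below 0.9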
@app.post("/chat")  # route decorator restored; the original listing omitted it, and the path "/chat" is assumed
async def generate_chat(request: ChatRequest):
    with ThreadPoolExecutor() as executor:
        # Run inference on every model in parallel threads
        futures = [executor.submit(generate_chat_response, request, llm) for llm in llms]
        responses = [future.result() for future in futures]

    if any("Error" in response for response in responses):
        error_response = next(response for response in responses if "Error" in response)
        raise HTTPException(status_code=500, detail=error_response)

    # Select the best of the candidate responses
    best_response = select_best_response(responses, request)
    return {"response": best_response}
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
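# Example client call (a sketch, not part of the original listing): assumes
# the server is running locally on port 7860 and that the endpoint is mounted
# at /chat, as in the decorator above.
#
# import requests
#
# payload = {
#     "message": "Summarize what a GGUF file is in one sentence.",
#     "top_k": 50,
#     "top_p": 0.95,
#     "temperature": 0.7,
# }
# r = requests.post("http://localhost:7860/chat", json=payload, timeout=300)
# print(r.json()["response"])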