# Text-servicegggg / appprin.py
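# FastAPI service that fans each chat message out to several quantized GGUF models
# (loaded with llama-cpp-python) and returns the longest deduplicated reply as the
# "best" response, alongside every individual model's answer.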
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import uvicorn
from dotenv import load_dotenv
from difflib import SequenceMatcher
import re
import spaces
load_dotenv()
app = FastAPI()
global_data = {
    'models': []
}
# GGUF checkpoints pulled from the Hugging Face Hub; all use low-bit quantizations (Q2_K, IQ2_XXS, IQ1_S).
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-q2_k.gguf", "name": "Meta Llama 3.1-70B"},
    {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"},
    {"repo_id": "Ffftdtd5dtft/Hermes-3-Llama-3.1-8B-IQ1_S-GGUF", "filename": "hermes-3-llama-3.1-8b-iq1_s-imat.gguf", "name": "Hermes 3 Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf", "name": "Phi 3.5 Mini Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-70B Instruct"},
    {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf", "name": "Codegemma 2B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-IQ2_XXS-GGUF", "filename": "phi-3-mini-128k-instruct-iq2_xxs-imat.gguf", "name": "Phi 3 Mini 128K Instruct XXS"},
    {"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf", "name": "TinyLlama 1.1B Chat"},
    {"repo_id": "Ffftdtd5dtft/Mistral-NeMo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf", "name": "Mistral NeMo Minitron 8B Base"},
]
class ModelManager:
    """Loads every GGUF model listed in model_configs and keeps them in memory."""

    def __init__(self):
        self.models = []
        self.loaded = False

    def load_model(self, model_config):
        print(f"Loading model: {model_config['name']}...")
        return {
            "model": Llama.from_pretrained(repo_id=model_config['repo_id'], filename=model_config['filename']),
            "name": model_config['name'],
        }

    def load_all_models(self):
        if self.loaded:
            print("Models are already loaded; skipping reload.")
            return self.models
        print("Starting model load...")
        # Download/load the models in parallel; failures are logged and skipped.
        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(self.load_model, config) for config in model_configs]
            models = []
            for future in tqdm(as_completed(futures), total=len(model_configs), desc="Loading models", unit="model"):
                try:
                    model = future.result()
                    models.append(model)
                    print(f"Model loaded successfully: {model['name']}")
                except Exception as e:
                    print(f"Error loading model: {e}")
        self.models = models
        self.loaded = True
        print("All models have been loaded.")
        return self.models
model_manager = ModelManager()
global_data['models'] = model_manager.load_all_models()
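# Note: all models are loaded eagerly at import time, so startup can take a while and
# the process needs enough memory to hold every GGUF file listed above at once.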
class ChatRequest(BaseModel):
    message: str
    top_k: int = 50
    top_p: float = 0.95
    temperature: float = 0.7
@spaces.GPU(duration=0)
def generate_chat_response(request, model_data):
    user_input = normalize_input(request.message)
    try:
        llm = model_data['model']
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            top_k=request.top_k,
            top_p=request.top_p,
            temperature=request.temperature
        )
        reply = response['choices'][0]['message']['content']
        return {"response": reply, "literal": user_input, "model_name": model_data['name']}
    except Exception as e:
        # Return the error text as the response so one failing model does not abort the whole request.
        return {"response": f"Error: {str(e)}", "literal": user_input, "model_name": model_data['name']}
def normalize_input(input_text):
    return input_text.strip()
def remove_duplicates(text):
    # Collapse repeated prompt echoes, strip leftover [/INST] markers, then drop duplicate lines.
    text = re.sub(r'(Hello there, how are you\? \[/INST\]){2,}', 'Hello there, how are you? [/INST]', text)
    text = re.sub(r'(How are you\? \[/INST\]){2,}', 'How are you? [/INST]', text)
    text = text.replace('[/INST]', '')
    lines = text.split('\n')
    unique_lines = list(dict.fromkeys(lines))
    return '\n'.join(unique_lines).strip()
def remove_repetitive_responses(responses):
    seen = set()
    unique_responses = []
    for response in responses:
        normalized_response = remove_duplicates(response['response'])
        if normalized_response not in seen:
            seen.add(normalized_response)
            unique_responses.append(response)
    return unique_responses
def select_best_response(responses):
    print("Filtering responses...")
    responses = remove_repetitive_responses(responses)
    responses = [remove_duplicates(response['response']) for response in responses]
    unique_responses = list(dict.fromkeys(responses))
    # Pick the longest deduplicated answer as the "best" response.
    sorted_responses = sorted(unique_responses, key=lambda r: len(r), reverse=True)
    return sorted_responses[0]
@app.post("/generate_chat")
async def generate_chat(request: ChatRequest):
if not request.message.strip():
raise HTTPException(status_code=400, detail="The message cannot be empty.")
print(f"Procesando solicitud: {request.message}")
responses = []
num_models = len(global_data['models'])
with ThreadPoolExecutor() as executor:
futures = [executor.submit(generate_chat_response, request, model_data) for model_data in global_data['models']]
for future in tqdm(as_completed(futures), total=num_models, desc="Generando respuestas", unit="modelo"):
try:
response = future.result()
responses.append(response)
except Exception as exc:
print(f"Error en la generación de respuesta: {exc}")
if not responses:
raise HTTPException(status_code=500, detail="Error: No se generaron respuestas.")
best_response = select_best_response(responses)
print(f"Mejor respuesta seleccionada: {best_response}")
return {
"best_response": best_response,
"all_responses": responses
}
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
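
# Example client call (a minimal sketch, not part of the service): once the server is running
# on port 7860, the /generate_chat endpoint can be exercised like this. The host, message text,
# and sampling values below are illustrative assumptions, not fixed requirements.
#
#   import requests
#   payload = {"message": "Hello there, how are you?", "top_k": 50, "top_p": 0.95, "temperature": 0.7}
#   r = requests.post("http://localhost:7860/generate_chat", json=payload)
#   data = r.json()
#   print(data["best_response"])          # longest deduplicated answer
#   print(len(data["all_responses"]))     # one entry per model that responded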