from some_base_method.bx.BaseModelManager import BaseModelManager
import json
import time

import requests


class DegptModel(BaseModelManager):
    """Concrete implementation that inherits from BaseModelManager."""

    def __init__(self):
        self.last_request_time = 0
        self.cache_duration = 14400  # cache TTL: 4 hours
        self.cached_models = None

    def reload_check(self):
        # Refresh the model cache if it has expired.
        self.get_models()

    def get_models(self):
        # Return the cached model list as a JSON string, refreshing it when stale.
        current_time = time.time()
        if self.cached_models is None or (current_time - self.last_request_time) > self.cache_duration:
            self.get_alive_models()
        return json.dumps(self.cached_models)

    def get_alive_models(self):
        # Fetch the live model list from the provider's config endpoint and cache it.
        url = 'https://www.degpt.ai/api/config'
        headers = {'Content-Type': 'application/json'}
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            data = response.json()
            default_models = data.get("default_models", "").split(",")
            timestamp_in_milliseconds = int(time.time() * 1000)
            models = {
                "object": "list",
                "version": data.get("version", ""),
                "provider": data.get("name", ""),
                "time": timestamp_in_milliseconds,
                "data": [
                    {"id": model.strip(), "object": "model", "created": 0, "owned_by": model.split("-")[0]}
                    for model in default_models
                ]
            }
            self.cached_models = models
            self.last_request_time = time.time()
        else:
            raise ValueError(f"Request failed, status code: {response.status_code}")

    def is_model_available(self, model_id):
        models_data = json.loads(self.get_models()).get("data", [])
        return any(model["id"] == model_id for model in models_data)

    def get_auto_model(self, model=None):
        # Return the requested model if it is available, otherwise the first available one.
        models_data = json.loads(self.get_models()).get("data", [])
        valid_ids = [m["id"] for m in models_data]
        if model and model in valid_ids:
            return model
        return models_data[0]["id"] if models_data else None

    def get_model_by_autoupdate(self, model_id=None):
        # Like get_auto_model, but falls back safely when the cached list is empty.
        models_data = json.loads(self.get_models()).get("data", [])
        valid_ids = [model["id"] for model in models_data]
        if model_id in valid_ids:
            return model_id
        return models_data[0]["id"] if models_data else None

    def chat_completion(self, url, headers, payload):
        try:
            response = requests.post(url, headers=headers, json=payload)
            response.encoding = 'utf-8'
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            return {"error": "Request failed; please check the network or the request parameters."}

    def chat_completion_message(self, user_prompt, **kwargs):
        # Convenience wrapper: build a system + user message pair from a single prompt.
        messages = [
            {"role": "system", "content": kwargs.get("system_prompt", "You are a helpful assistant.")},
            {"role": "user", "content": user_prompt}
        ]
        return self.chat_completion_messages(messages, **kwargs)

    def chat_completion_messages(self, messages, **kwargs):
        url = 'https://usa-chat.degpt.ai/api/v0/chat/completion/proxy'
        headers = {
            'sec-ch-ua-platform': '"macOS"',
            'Referer': 'https://www.degpt.ai/',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            'DNT': '1',
            'Content-Type': 'application/json',
            'sec-ch-ua-mobile': '?0'
        }
        payload = {
            "model": self.get_model_by_autoupdate(kwargs.get("model")),
            "messages": messages,
            "project": kwargs.get("project", "DecentralGPT"),
            "stream": kwargs.get("stream", False),
            "temperature": kwargs.get("temperature", 0.3),
            "max_tokens": kwargs.get("max_tokens", 1024),
            "top_p": kwargs.get("top_p", 0.5),
            "frequency_penalty": kwargs.get("frequency_penalty", 0),
            "presence_penalty": kwargs.get("presence_penalty", 0)
        }
        return self.chat_completion(url, headers, payload)
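

# Minimal usage sketch (assumes the BaseModelManager import path above resolves in
# your environment and that the degpt.ai endpoints are reachable). The keyword
# arguments shown are the ones consumed by chat_completion_messages; the model id
# below is hypothetical and simply falls back to the first available model.
if __name__ == "__main__":
    manager = DegptModel()
    print(manager.get_models())  # cached model list as a JSON string
    reply = manager.chat_completion_message(
        "Hello, who are you?",
        model="gpt-4o-mini",  # hypothetical id; unknown ids fall back automatically
        temperature=0.3,
        max_tokens=256,
    )
    print(reply)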