import os

from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org

# Template defaults for a fresh deployment (replace with your own org/repos).
# Don't forget to create the results and requests datasets, with the correct format!
# OWNER = "demo-leaderboard-backend"
# REPO_ID = f"{OWNER}/leaderboard"
# QUEUE_REPO = f"{OWNER}/requests"
# RESULTS_REPO = f"{OWNER}/results"
# DYNAMIC_INFO_REPO = f"{OWNER}/dynamic_model_information"
# ----------------------------------

REPO_ID = "BAAI/open_flageval_vlm_leaderboard"
QUEUE_REPO = "open-cn-llm-leaderboard/vlm_requests"
DYNAMIC_INFO_REPO = "open-cn-llm-leaderboard/vlm_dynamic_model_information"
RESULTS_REPO = "open-cn-llm-leaderboard/vlm_results"

# bool() on a non-empty string is always True (so IS_PUBLIC=False would still
# evaluate to True); compare against accepted truthy spellings instead.
IS_PUBLIC = os.environ.get("IS_PUBLIC", "True").lower() in ("true", "1")

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
DYNAMIC_INFO_PATH = os.path.join(CACHE_PATH, "dynamic-info")
DYNAMIC_INFO_FILE_PATH = os.path.join(DYNAMIC_INFO_PATH, "model_infos.json")

# Hub collection gathering the best-performing models on the leaderboard
PATH_TO_COLLECTION = "open-cn-llm-leaderboard/flageval-vlm-leaderboard-best-models-677e51cdc44f8123e02cbda1"

# Rate limit variables
RATE_LIMIT_PERIOD = 7  # days
RATE_LIMIT_QUOTA = 5  # max submissions per user within RATE_LIMIT_PERIOD
HAS_HIGHER_RATE_LIMIT = ["TheBloke"]  # users exempted from the default quota

API = HfApi(token=TOKEN)
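
# -----------------------------------------------------------------------------
# Usage sketch (illustrative; guarded so nothing runs on import): one way a
# leaderboard app can consume the constants above, by mirroring the requests,
# results, and dynamic-info datasets from the Hub into the local cache paths.
# `download_dataset` is a hypothetical helper written for this sketch;
# `snapshot_download` is the real huggingface_hub API it relies on.
# -----------------------------------------------------------------------------
from huggingface_hub import snapshot_download


def download_dataset(repo_id: str, local_dir: str) -> None:
    """Mirror a Hub dataset repository into a local directory."""
    snapshot_download(
        repo_id=repo_id,
        local_dir=local_dir,
        repo_type="dataset",
        token=TOKEN,
        etag_timeout=30,  # tolerate slow metadata responses from the Hub
    )


if __name__ == "__main__":
    # Populate the local caches defined above.
    download_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
    download_dataset(RESULTS_REPO, EVAL_RESULTS_PATH)
    download_dataset(DYNAMIC_INFO_REPO, DYNAMIC_INFO_PATH)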