|
import os |
|
|
|
from huggingface_hub import HfApi |
|
|
|
|
|
|
|
# Hugging Face access token used for all Hub API calls; None when HF_TOKEN is unset.
TOKEN = os.environ.get("HF_TOKEN")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hub repo hosting the leaderboard Space itself.
REPO_ID = "BAAI/open_flageval_vlm_leaderboard"

# Dataset repo where incoming evaluation requests are queued.
QUEUE_REPO = "open-cn-llm-leaderboard/vlm_requests"

# Dataset repo holding per-model metadata that changes over time (likes, gating, ...).
DYNAMIC_INFO_REPO = "open-cn-llm-leaderboard/vlm_dynamic_model_information"

# Dataset repo where finished evaluation results are published.
RESULTS_REPO = "open-cn-llm-leaderboard/vlm_results"
|
|
|
# Whether the leaderboard runs in public mode. Parsed from the IS_PUBLIC env var:
# unset defaults to True; "", "0", "false", "no", "off" (any case) disable it.
# NOTE: the previous `bool(os.environ.get("IS_PUBLIC", True))` treated every
# non-empty string — including "False" and "0" — as truthy, so the flag could
# never actually be turned off through the environment.
IS_PUBLIC = os.environ.get("IS_PUBLIC", "true").strip().lower() not in ("", "0", "false", "no", "off")
|
|
|
# Local cache layout: every working directory lives under the HF cache root
# (HF_HOME when set, otherwise the current directory).
CACHE_PATH = os.getenv("HF_HOME", ".")


def _cache_dir(*parts):
    """Join path components under the cache root."""
    return os.path.join(CACHE_PATH, *parts)


# Local clones of the request/result queue repos.
EVAL_REQUESTS_PATH = _cache_dir("eval-queue")
EVAL_RESULTS_PATH = _cache_dir("eval-results")
# Backend (worker-side) copies of the same queues.
EVAL_REQUESTS_PATH_BACKEND = _cache_dir("eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = _cache_dir("eval-results-bk")
# Dynamic per-model metadata snapshot and the JSON file inside it.
DYNAMIC_INFO_PATH = _cache_dir("dynamic-info")
DYNAMIC_INFO_FILE_PATH = os.path.join(DYNAMIC_INFO_PATH, "model_infos.json")
|
|
|
# Hub collection that showcases the best-performing models on the leaderboard.
PATH_TO_COLLECTION = "open-cn-llm-leaderboard/flageval-vlm-leaderboard-best-models-677e51cdc44f8123e02cbda1"



# Submission rate limiting: at most RATE_LIMIT_QUOTA submissions per user
# within a RATE_LIMIT_PERIOD window (presumably days — TODO confirm against
# the code that enforces these limits).
RATE_LIMIT_PERIOD = 7

RATE_LIMIT_QUOTA = 5

# Users exempt from (or granted a higher) submission quota.
HAS_HIGHER_RATE_LIMIT = ["TheBloke"]



# Shared Hub API client, authenticated with the token read above.
API = HfApi(token=TOKEN)
|
|