eduagarcia committed
Commit c2a925b · verified · Parent: 4b418e3

Retry 27 FAILED models

Files changed (27)
  1. AIJUUD/QWEN2_70B_JUUD_V1_eval_request_False_float16_Original.json +2 -4
  2. BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B_eval_request_False_bfloat16_Original.json +2 -4
  3. CohereForAI/c4ai-command-r-plus_eval_request_False_float16_Original.json +2 -4
  4. EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +2 -4
  5. EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code_eval_request_False_float16_Original.json +2 -4
  6. J-AI/Phi_3-CREWAI-PTBR_eval_request_False_float16_Original.json +2 -4
  7. J-LAB/AIFody_eval_request_False_float16_Original.json +2 -4
  8. J-LAB/BRisa_Mistral_7b_PTBR_eval_request_False_float16_Original.json +2 -4
  9. J-LAB/BRisa_v0.1_eval_request_False_float16_Original.json +2 -4
  10. J-LAB/FluxiIA-Small_v3_eval_request_False_float16_Original.json +2 -4
  11. JJhooww/Qwen7b_reload_v5_eval_request_False_float16_Original.json +2 -4
  12. Qwen/Qwen2.5-14B-Instruct-1M_eval_request_False_bfloat16_Original.json +2 -4
  13. Replete-AI/Replete-LLM-V2.5-Qwen-32b_eval_request_False_bfloat16_Original.json +2 -4
  14. Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R_eval_request_False_float16_Original.json +2 -4
  15. SicariusSicariiStuff/Qwen2.5-14B_Uncensored_Instruct_eval_request_9e243d0_False_float16_Original.json +2 -4
  16. WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json +2 -4
  17. WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json +2 -4
  18. chargoddard/internlm2-20b-llama_eval_request_False_bfloat16_Original.json +2 -4
  19. chargoddard/internlm2-7b-llama_eval_request_False_bfloat16_Original.json +2 -4
  20. chargoddard/internlm2-base-20b-llama_eval_request_False_bfloat16_Original.json +2 -4
  21. chargoddard/internlm2-base-7b-llama_eval_request_False_bfloat16_Original.json +2 -4
  22. kevinpro/Hydra-LLaMA3-8B-0513-preview_eval_request_False_bfloat16_Original.json +2 -4
  23. kevinpro/Hydra-LLaMA3-8B-0531-preview_eval_request_False_bfloat16_Original.json +2 -4
  24. kevinpro/Hydra-LLaMA3-8B-v0.1_eval_request_False_bfloat16_Original.json +2 -4
  25. kevinpro/Hydra-LLaMA3-8B-v0.2_eval_request_False_bfloat16_Original.json +2 -4
  26. paloalma/ECE-TW3-JRGL-V2_eval_request_False_bfloat16_Original.json +2 -4
  27. vicgalle/ConfigurableSOLAR-10.7B_eval_request_False_float16_Original.json +2 -4
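
Every diff below applies the same two-part change to an eval request file: the "status" field is reset from "FAILED" to "RERUN" so the evaluation bot picks the model up again, and the "error_msg" and "traceback" fields recorded by the failed run are dropped, while "job_id" and "job_start_time" are kept. A batch edit like this is easy to script; the sketch below is illustrative only — the glob pattern, directory layout, and in-place rewrite are assumptions, not the leaderboard's actual tooling — but the field names match the diffs in this commit.

# Illustrative sketch: reset FAILED eval requests to RERUN by removing
# the error fields, mirroring the edits in this commit. The glob pattern
# and output formatting are assumptions, not the real leaderboard bot.
import json
from pathlib import Path

REQUESTS_DIR = Path(".")  # assumption: run from the requests dataset root

for path in REQUESTS_DIR.glob("*/*_eval_request_*.json"):
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") != "FAILED":
        continue
    data["status"] = "RERUN"
    # Drop the diagnostics from the failed run; job_id/job_start_time stay.
    data.pop("error_msg", None)
    data.pop("traceback", None)
    # ensure_ascii=False keeps emoji values like the model_type field intact.
    path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n",
                    encoding="utf-8")
    print(f"reset {path}")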
AIJUUD/QWEN2_70B_JUUD_V1_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-06-24T00:34:13Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 874,
- "job_start_time": "2024-07-06T01-17-13.595586",
- "error_msg": "AIJUUD/QWEN2_70B_JUUD_V1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/AIJUUD/QWEN2_70B_JUUD_V1/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 402, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-66889b1d-27101813763cd3ea6ed4effd;5c654091-6d35-44f6-967f-60248e634102)\n\nRepository Not Found for url: https://huggingface.co/AIJUUD/QWEN2_70B_JUUD_V1/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 201, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 965, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 632, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 689, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 425, in cached_file\n raise EnvironmentError(\nOSError: AIJUUD/QWEN2_70B_JUUD_V1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
+ "job_start_time": "2024-07-06T01-17-13.595586"
  }
BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-07-18T22:23:09Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 922,
- "job_start_time": "2024-07-19T01-31-48.200936",
- "error_msg": "BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B does not appear to have a file named model-00004-of-00004.safetensors. Checkout 'https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B/tree/main' for available files.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B/resolve/main/model-00004-of-00004.safetensors\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 402, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1282, in _hf_hub_download_to_cache_dir\n (url_to_download, etag, commit_hash, expected_size, head_call_error) = _get_metadata_or_catch_error(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 315, in hf_raise_for_status\n raise EntryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6699c39d-4bf1c53f685f82de4cad1802;934e530d-550b-4916-a865-5fa828c8d37b)\n\nEntry Not Found for url: https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B/resolve/main/model-00004-of-00004.safetensors.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3593, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1079, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 456, in cached_file\n raise EnvironmentError(\nOSError: BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B does not appear to have a file named model-00004-of-00004.safetensors. Checkout 'https://huggingface.co/BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B/tree/main' for available files.\n"
+ "job_start_time": "2024-07-19T01-31-48.200936"
  }
CohereForAI/c4ai-command-r-plus_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "CohereForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-07T18:08:25Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 996,
- "job_start_time": "2024-08-10T16-28-15.102154",
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Can't load the configuration of 'CohereForAI/c4ai-command-r-plus'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'CohereForAI/c4ai-command-r-plus' is the correct path to a directory containing a config.json file",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 197, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Can't load the configuration of 'CohereForAI/c4ai-command-r-plus'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'CohereForAI/c4ai-command-r-plus' is the correct path to a directory containing a config.json file\n"
+ "job_start_time": "2024-08-10T16-28-15.102154"
  }
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 13.061,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:15:01Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 465,
  "job_start_time": "2024-04-15T22-28-42.463373",
- "main_language": "Other",
- "error_msg": "We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like EleutherAI/polyglot-ko-12.8b is not the path to a directory containing a file named model-00010-of-00028.safetensors.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 536, in _make_request\n response = conn.getresponse()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 461, in getresponse\n httplib_response = super().getresponse()\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 1390, in getresponse\n response.begin()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 325, in begin\n version, status, reason = self._read_status()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 286, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1314, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1166, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 844, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 470, in increment\n raise reraise(type(error), error, _stacktrace)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/util.py\", line 39, in reraise\n raise value\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 790, in urlopen\n response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 538, in _make_request\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 370, in _raise_timeout\n raise ReadTimeoutError(\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 392, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 68, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 532, in send\n raise ReadTimeout(e, request=request)\nrequests.exceptions.ReadTimeout: (ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: eb3144a2-8a03-478f-8f52-5d5159bcdbad)')\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1406, in hf_hub_download\n raise LocalEntryNotFoundError(\nhuggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. 
Please check your connection and try again or make sure your Internet connection is on.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/auto_factory.py\", line 563, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/modeling_utils.py\", line 3436, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 441, in cached_file\n raise EnvironmentError(\nOSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like EleutherAI/polyglot-ko-12.8b is not the path to a directory containing a file named model-00010-of-00028.safetensors.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.\n"
+ "main_language": "Other"
  }
EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-10-08T04:12:02Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 1229,
- "job_start_time": "2024-10-18T05-25-14.640504",
- "error_msg": "EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code is a gated repository. Make sure to request access at https://huggingface.co/EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code and pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code/resolve/main/model.safetensors.index.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 676, in has_file\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 321, in hf_raise_for_status\n raise GatedRepoError(message, response) from e\nhuggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: Root=1-6711f13c-25d8ad443489eb6431576873;fc9448ed-b20e-4ecb-a3eb-344e6df44d70)\n\nCannot access gated repo for url https://huggingface.co/EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code/resolve/main/model.safetensors.index.json.\nAccess to model EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code is restricted and you are not in the authorized list. Visit https://huggingface.co/EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code to ask for access.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3664, in from_pretrained\n if not has_file(pretrained_model_name_or_path, safe_weights_name, **has_file_kwargs):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 680, in has_file\n raise EnvironmentError(\nOSError: EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code is a gated repository. Make sure to request access at https://huggingface.co/EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K-code and pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`.\n"
+ "job_start_time": "2024-10-18T05-25-14.640504"
  }
J-AI/Phi_3-CREWAI-PTBR_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
  "main_language": "Portuguese",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-05-15T20:00:58Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Can't load the configuration of 'J-AI/Phi_3-CREWAI-PTBR'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'J-AI/Phi_3-CREWAI-PTBR' is the correct path to a directory containing a config.json file",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 204, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Can't load the configuration of 'J-AI/Phi_3-CREWAI-PTBR'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'J-AI/Phi_3-CREWAI-PTBR' is the correct path to a directory containing a config.json file\n"
+ "job_start_time": null
  }
J-LAB/AIFody_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-09-15T17:41:34Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1072,
- "job_start_time": "2024-09-19T06-14-37.509410",
- "error_msg": "J-LAB/AIFody is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/J-LAB/AIFody/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 402, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-66ebc14e-604270f66dff396250dda03a;ca29aa94-60d0-4626-96ac-e3571ee49571)\n\nRepository Not Found for url: https://huggingface.co/J-LAB/AIFody/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 972, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 632, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 689, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 425, in cached_file\n raise EnvironmentError(\nOSError: J-LAB/AIFody is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
+ "job_start_time": "2024-09-19T06-14-37.509410"
  }
J-LAB/BRisa_Mistral_7b_PTBR_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
  "main_language": "Portuguese",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-06-24T05:02:32Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 850,
- "job_start_time": "2024-06-29T01-29-23.034640",
- "error_msg": "J-LAB/BRisa_Mistral_7b_PTBR is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/J-LAB/BRisa_Mistral_7b_PTBR/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 399, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667f6375-6662808113b16cb963611012;4a570199-b55d-4817-a920-41560790854e)\n\nRepository Not Found for url: https://huggingface.co/J-LAB/BRisa_Mistral_7b_PTBR/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 201, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 934, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 632, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 689, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 422, in cached_file\n raise EnvironmentError(\nOSError: J-LAB/BRisa_Mistral_7b_PTBR is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
+ "job_start_time": "2024-06-29T01-29-23.034640"
  }
J-LAB/BRisa_v0.1_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-10-12T01:43:15Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1203,
- "job_start_time": "2024-10-13T08-52-34.166103",
- "error_msg": "J-LAB/BRisa_v0.1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/J-LAB/BRisa_v0.1/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 403, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-670b8a52-2d5fc4381884173c428d37dc;9255b09f-cf6c-4252-8821-1c6db7d54424)\n\nRepository Not Found for url: https://huggingface.co/J-LAB/BRisa_v0.1/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 1008, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 567, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 626, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 426, in cached_file\n raise EnvironmentError(\nOSError: J-LAB/BRisa_v0.1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-10-12T01:43:15Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1203,
+ "job_start_time": "2024-10-13T08-52-34.166103"
  }
J-LAB/FluxiIA-Small_v3_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-11-14T19:58:37Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1263,
- "job_start_time": "2024-11-16T01-35-27.458047",
- "error_msg": "J-LAB/FluxiIA-Small_v3 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/J-LAB/FluxiIA-Small_v3/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 403, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6737f6e1-69531a355808ec73798d1f4c;9016ff46-4fb2-463a-b9c3-ff3d639b0a81)\n\nRepository Not Found for url: https://huggingface.co/J-LAB/FluxiIA-Small_v3/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 1008, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 567, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 626, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 426, in cached_file\n raise EnvironmentError(\nOSError: J-LAB/FluxiIA-Small_v3 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-11-14T19:58:37Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1263,
+ "job_start_time": "2024-11-16T01-35-27.458047"
  }
JJhooww/Qwen7b_reload_v5_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-06-23T17:01:31Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 852,
- "job_start_time": "2024-06-29T02-17-21.504575",
- "error_msg": "JJhooww/Qwen7b_reload_v5 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/JJhooww/Qwen7b_reload_v5/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 399, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667f6eb2-3d9e64b53686366870f7d2d9;40324c03-afd6-477f-8fac-e7542a878c3b)\n\nRepository Not Found for url: https://huggingface.co/JJhooww/Qwen7b_reload_v5/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 201, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 934, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 632, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 689, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 422, in cached_file\n raise EnvironmentError(\nOSError: JJhooww/Qwen7b_reload_v5 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-06-23T17:01:31Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 852,
+ "job_start_time": "2024-06-29T02-17-21.504575"
  }
Qwen/Qwen2.5-14B-Instruct-1M_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2025-01-31T18:51:32Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1397,
- "job_start_time": "2025-02-01T00-49-35.041873",
- "error_msg": "We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like Qwen/Qwen2.5-14B-Instruct-1M is not the path to a directory containing a file named model-00002-of-00008.safetensors.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 467, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 3, in raise_from\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 462, in _make_request\n httplib_response = conn.getresponse()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 1386, in getresponse\n response.begin()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 325, in begin\n version, status, reason = self._read_status()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 286, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 550, in increment\n raise six.reraise(type(error), error, _stacktrace)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/packages/six.py\", line 770, in reraise\n raise value\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 469, in _make_request\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 358, in _raise_timeout\n raise ReadTimeoutError(\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. 
(read timeout=10)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1376, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1296, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 277, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 300, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 93, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 532, in send\n raise ReadTimeout(e, request=request)\nrequests.exceptions.ReadTimeout: (ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: a8cf49f2-4b57-4de7-9f69-c0e94efec767)')\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 363, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 862, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 969, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1487, in _raise_on_head_call_error\n raise LocalEntryNotFoundError(\nhuggingface_hub.errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. 
Please check your connection and try again or make sure your Internet connection is on.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 102, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 305, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 621, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3979, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1058, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 406, in cached_file\n raise EnvironmentError(\nOSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like Qwen/Qwen2.5-14B-Instruct-1M is not the path to a directory containing a file named model-00002-of-00008.safetensors.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.\n"
  }
 
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2025-01-31T18:51:32Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1397,
+ "job_start_time": "2025-02-01T00-49-35.041873"
  }
Replete-AI/Replete-LLM-V2.5-Qwen-32b_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-10-01T16:52:55Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 1296,
- "job_start_time": "2024-12-03T07-49-02.419706",
- "error_msg": "Replete-AI/Replete-LLM-V2.5-Qwen-32b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 406, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/Replete-AI/Replete-LLM-V2.5-Qwen-32b/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 403, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 862, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 969, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1484, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1376, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1296, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 277, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 301, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 454, in hf_raise_for_status\n raise _format(RepositoryNotFoundError, message, response) from e\nhuggingface_hub.errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-674eb7ef-5826a3e947727ce84dc87753;bf341e40-b880-43d0-9f89-ba522ca268c1)\n\nRepository Not Found for url: https://huggingface.co/Replete-AI/Replete-LLM-V2.5-Qwen-32b/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 1019, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 590, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 649, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 426, in cached_file\n raise EnvironmentError(\nOSError: Replete-AI/Replete-LLM-V2.5-Qwen-32b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-10-01T16:52:55Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 1296,
+ "job_start_time": "2024-12-03T07-49-02.419706"
  }
Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-05-15T22:03:08Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 648,
- "job_start_time": "2024-05-19T05-09-53.457664",
- "error_msg": "Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1403, in hf_hub_download\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-664989a2-485468881c3a36442a9771b6;b18a3488-9008-49bc-aa5a-4ca27e4d3d66)\n\nRepository Not Found for url: https://huggingface.co/Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 190, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 69, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 284, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 537, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/configuration_auto.py\", line 931, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 631, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 686, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 421, in cached_file\n raise EnvironmentError(\nOSError: Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-05-15T22:03:08Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 648,
+ "job_start_time": "2024-05-19T05-09-53.457664"
  }
SicariusSicariiStuff/Qwen2.5-14B_Uncensored_Instruct_eval_request_9e243d0_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-09-21T21:12:29Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1136,
- "job_start_time": "2024-10-02T08-58-33.768020",
- "error_msg": "SicariusSicariiStuff/Qwen2.5-14B_Uncensored_Instruct is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/SicariusSicariiStuff/Qwen2.5-14B_Uncensored_Instruct/resolve/9e243d0853bbb975e365e73c0ba4b7593f5c0d17/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 403, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-66fd0b3a-08ceb5812ce4f9326382e7d6;cfa5c9c0-adcc-4c34-bb13-4608e1e69ae6)\n\nRepository Not Found for url: https://huggingface.co/SicariusSicariiStuff/Qwen2.5-14B_Uncensored_Instruct/resolve/9e243d0853bbb975e365e73c0ba4b7593f5c0d17/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 1008, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 567, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 626, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 426, in cached_file\n raise EnvironmentError(\nOSError: SicariusSicariiStuff/Qwen2.5-14B_Uncensored_Instruct is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-09-21T21:12:29Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 1136,
+ "job_start_time": "2024-10-02T08-58-33.768020"
  }
WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 13.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-03-05T16:38:35Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 676,
  "job_start_time": "2024-05-20T22-32-40.265135",
- "main_language": "English",
- "error_msg": "WizardLM/WizardLM-13B-V1.2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/WizardLM/WizardLM-13B-V1.2/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 399, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-664bcf89-146ae2e3606106fe4c833ba6;782d6f41-569a-4239-9c50-016f5f68a527)\n\nRepository Not Found for url: https://huggingface.co/WizardLM/WizardLM-13B-V1.2/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 70, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 934, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 632, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 689, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 422, in cached_file\n raise EnvironmentError(\nOSError: WizardLM/WizardLM-13B-V1.2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "params": 13.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-03-05T16:38:35Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 676,
  "job_start_time": "2024-05-20T22-32-40.265135",
+ "main_language": "English"
  }
WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 7.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
  "submitted_time": "2024-03-05T16:38:21Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 675,
  "job_start_time": "2024-05-20T22-30-54.562709",
- "main_language": "English",
- "error_msg": "WizardLM/WizardLM-7B-V1.0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/WizardLM/WizardLM-7B-V1.0/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 399, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1823, in _raise_on_head_call_error\n raise head_call_error\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 396, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-664bcf20-7942bbf953b588f0775c3b0a;e3d2c174-c83f-47c7-89db-3b7fd0d69530)\n\nRepository Not Found for url: https://huggingface.co/WizardLM/WizardLM-7B-V1.0/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 70, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 291, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 545, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py\", line 934, in from_pretrained\n config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 632, in get_config_dict\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/configuration_utils.py\", line 689, in _get_config_dict\n resolved_config_file = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 422, in cached_file\n raise EnvironmentError(\nOSError: WizardLM/WizardLM-7B-V1.0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "params": 7.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
+ "status": "RERUN",
  "submitted_time": "2024-03-05T16:38:21Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 675,
  "job_start_time": "2024-05-20T22-30-54.562709",
+ "main_language": "English"
  }
chargoddard/internlm2-20b-llama_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
- "status": "FAILED",
  "submitted_time": "2024-04-21T18:04:05Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 537,
- "job_start_time": "2024-04-23T23-42-53.701212",
- "error_msg": "internlm/internlm2-20b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-20b/main' for available files.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 286, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/internlm/internlm2-20b/resolve/main/tokenization_internlm.py\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1238, in hf_hub_download\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1631, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 385, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 409, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 296, in hf_raise_for_status\n raise EntryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-66284b39-7c5607cb2296bfd140085e05;650dab88-e6fb-4af5-a864-50342aeec32a)\n\nEntry Not Found for url: https://huggingface.co/internlm/internlm2-20b/resolve/main/tokenization_internlm.py.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 215, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 69, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 329, in __init__\n self._create_tokenizer(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 685, in _create_tokenizer\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/tokenization_auto.py\", line 843, in from_pretrained\n tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 489, in get_class_from_dynamic_module\n final_module = get_cached_module_file(\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 294, in get_cached_module_file\n resolved_module_file = cached_file(\n ^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 452, in cached_file\n raise EnvironmentError(\nOSError: internlm/internlm2-20b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-20b/main' for available files.\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
+ "status": "RERUN",
  "submitted_time": "2024-04-21T18:04:05Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 537,
+ "job_start_time": "2024-04-23T23-42-53.701212"
  }
chargoddard/internlm2-7b-llama_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
- "status": "FAILED",
  "submitted_time": "2024-04-21T18:04:38Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 538,
- "job_start_time": "2024-04-23T00-02-56.937107",
- "error_msg": "internlm/internlm2-7b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-7b/main' for available files.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/internlm/internlm2-7b/resolve/main/tokenization_internlm.py\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 315, in hf_raise_for_status\n raise EntryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6626fc28-7f6f95253bb43df73fa17de0;0012ff82-9f5c-41b6-b8aa-efc822ea5565)\n\nEntry Not Found for url: https://huggingface.co/internlm/internlm2-7b/resolve/main/tokenization_internlm.py.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 214, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 329, in __init__\n self._create_tokenizer(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 685, in _create_tokenizer\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/tokenization_auto.py\", line 835, in from_pretrained\n tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 489, in get_class_from_dynamic_module\n final_module = get_cached_module_file(\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 294, in get_cached_module_file\n resolved_module_file = cached_file(\n ^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 452, in cached_file\n raise EnvironmentError(\nOSError: internlm/internlm2-7b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-7b/main' for available files.\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
+ "status": "RERUN",
  "submitted_time": "2024-04-21T18:04:38Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 538,
+ "job_start_time": "2024-04-23T00-02-56.937107"
  }
chargoddard/internlm2-base-20b-llama_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
- "status": "FAILED",
  "submitted_time": "2024-04-21T18:04:19Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 538,
- "job_start_time": "2024-04-23T23-59-00.639742",
- "error_msg": "internlm/internlm2-base-20b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-base-20b/main' for available files.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 286, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/internlm/internlm2-base-20b/resolve/main/tokenization_internlm.py\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1238, in hf_hub_download\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1631, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 385, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 409, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 296, in hf_raise_for_status\n raise EntryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-66284f78-214be2cc6c1f8f8c2da282cd;8071863f-b86b-4ef8-95c1-280d718dc758)\n\nEntry Not Found for url: https://huggingface.co/internlm/internlm2-base-20b/resolve/main/tokenization_internlm.py.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 215, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 69, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 329, in __init__\n self._create_tokenizer(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 685, in _create_tokenizer\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/tokenization_auto.py\", line 843, in from_pretrained\n tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 489, in get_class_from_dynamic_module\n final_module = get_cached_module_file(\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 294, in get_cached_module_file\n resolved_module_file = cached_file(\n ^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 452, in cached_file\n raise EnvironmentError(\nOSError: internlm/internlm2-base-20b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-base-20b/main' for available files.\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
+ "status": "RERUN",
  "submitted_time": "2024-04-21T18:04:19Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 538,
+ "job_start_time": "2024-04-23T23-59-00.639742"
  }
chargoddard/internlm2-base-7b-llama_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
- "status": "FAILED",
  "submitted_time": "2024-04-21T18:04:47Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 533,
- "job_start_time": "2024-04-22T11-06-43.377245",
- "error_msg": "internlm/internlm2-base-7b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-base-7b/main' for available files.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/internlm/internlm2-base-7b/resolve/main/tokenization_internlm.py\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n metadata = get_hf_file_metadata(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n hf_raise_for_status(response)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 315, in hf_raise_for_status\n raise EntryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6626464b-681e16330179656d0e1f0087;730b1acc-ab7f-4480-87ef-6b0e40d16d8f)\n\nEntry Not Found for url: https://huggingface.co/internlm/internlm2-base-7b/resolve/main/tokenization_internlm.py.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 214, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 329, in __init__\n self._create_tokenizer(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 685, in _create_tokenizer\n self.tokenizer = transformers.AutoTokenizer.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/tokenization_auto.py\", line 835, in from_pretrained\n tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 489, in get_class_from_dynamic_module\n final_module = get_cached_module_file(\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/dynamic_module_utils.py\", line 294, in get_cached_module_file\n resolved_module_file = cached_file(\n ^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 452, in cached_file\n raise EnvironmentError(\nOSError: internlm/internlm2-base-7b does not appear to have a file named tokenization_internlm.py. Checkout 'https://huggingface.co/internlm/internlm2-base-7b/main' for available files.\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "Chinese",
+ "status": "RERUN",
  "submitted_time": "2024-04-21T18:04:47Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 533,
+ "job_start_time": "2024-04-22T11-06-43.377245"
  }
kevinpro/Hydra-LLaMA3-8B-0513-preview_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-05-31T11:47:35Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 789,
- "job_start_time": "2024-06-12T18-33-51.765315",
- "error_msg": "kevinpro/Hydra-LLaMA3-8B-0513-preview is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-0513-preview/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1403, in hf_hub_download\n Method should not be called directly. Please use `hf_hub_download` instead.\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n repo_type: str,\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6669ea10-0fcdb20e297e3c1468807abd;f2540637-64ea-4862-a494-7f7d718a1807)\n\nRepository Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-0513-preview/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 284, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 537, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/configuration_auto.py\", line 931, in from_pretrained\n >>> config.output_attentions\n \n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 631, in get_config_dict\n # Get config dict associated with the base config file\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 686, in _get_config_dict\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 421, in cached_file\n \"You are trying to access a gated repo.\\nMake sure to have access to it at \"\nOSError: kevinpro/Hydra-LLaMA3-8B-0513-preview is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-05-31T11:47:35Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 789,
+ "job_start_time": "2024-06-12T18-33-51.765315"
  }
kevinpro/Hydra-LLaMA3-8B-0531-preview_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-05-31T16:20:48Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 790,
- "job_start_time": "2024-06-12T18-34-11.207900",
- "error_msg": "kevinpro/Hydra-LLaMA3-8B-0531-preview is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-0531-preview/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1403, in hf_hub_download\n Method should not be called directly. Please use `hf_hub_download` instead.\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n repo_type: str,\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6669ea23-6a8223c643e4c46151069b77;5bf39800-6e21-45a2-a24a-27588f469913)\n\nRepository Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-0531-preview/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 284, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 537, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/configuration_auto.py\", line 931, in from_pretrained\n >>> config.output_attentions\n \n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 631, in get_config_dict\n # Get config dict associated with the base config file\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 686, in _get_config_dict\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 421, in cached_file\n \"You are trying to access a gated repo.\\nMake sure to have access to it at \"\nOSError: kevinpro/Hydra-LLaMA3-8B-0531-preview is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-05-31T16:20:48Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 790,
+ "job_start_time": "2024-06-12T18-34-11.207900"
  }
kevinpro/Hydra-LLaMA3-8B-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaModel",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-05-30T18:39:34Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 786,
- "job_start_time": "2024-06-12T15-31-40.784147",
- "error_msg": "kevinpro/Hydra-LLaMA3-8B-v0.1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-v0.1/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1403, in hf_hub_download\n Method should not be called directly. Please use `hf_hub_download` instead.\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n repo_type: str,\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6669bf5d-234f87383482aefa3cafec33;550c1d56-2648-4c2a-b1b0-48a93cea00fb)\n\nRepository Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-v0.1/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 284, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 537, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/configuration_auto.py\", line 931, in from_pretrained\n >>> config.output_attentions\n \n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 631, in get_config_dict\n # Get config dict associated with the base config file\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 686, in _get_config_dict\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 421, in cached_file\n \"You are trying to access a gated repo.\\nMake sure to have access to it at \"\nOSError: kevinpro/Hydra-LLaMA3-8B-v0.1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "LlamaModel",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-05-30T18:39:34Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 786,
+ "job_start_time": "2024-06-12T15-31-40.784147"
  }
kevinpro/Hydra-LLaMA3-8B-v0.2_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaModel",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-05-31T02:21:01Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 787,
- "job_start_time": "2024-06-12T15-32-02.665662",
- "error_msg": "kevinpro/Hydra-LLaMA3-8B-v0.2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 304, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-v0.2/resolve/main/config.json\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 398, in cached_file\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1403, in hf_hub_download\n Method should not be called directly. Please use `hf_hub_download` instead.\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1261, in hf_hub_download\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 119, in _inner_fn\n def validate_repo_id(repo_id: str) -> None:\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1674, in get_hf_file_metadata\n repo_type: str,\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 369, in _request_wrapper\n \"\"\"\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 393, in _request_wrapper\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 352, in hf_raise_for_status\n raise RepositoryNotFoundError(message, response) from e\nhuggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-6669bf73-552313dc77810fda14c6cc5e;4838a74a-8954-4164-bb0a-887719c828c4)\n\nRepository Not Found for url: https://huggingface.co/kevinpro/Hydra-LLaMA3-8B-v0.2/resolve/main/config.json.\nPlease make sure you specified the correct `repo_id` and `repo_type`.\nIf you are trying to access a private or gated repo, make sure you are authenticated.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 199, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 284, in __init__\n self._get_config(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 537, in _get_config\n self._config = transformers.AutoConfig.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/auto/configuration_auto.py\", line 931, in from_pretrained\n >>> config.output_attentions\n \n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 631, in get_config_dict\n # Get config dict associated with the base config file\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/configuration_utils.py\", line 686, in _get_config_dict\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/utils/hub.py\", line 421, in cached_file\n \"You are trying to access a gated repo.\\nMake sure to have access to it at \"\nOSError: kevinpro/Hydra-LLaMA3-8B-v0.2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>`\n"
  }
 
  "architectures": "LlamaModel",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-05-31T02:21:01Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 787,
+ "job_start_time": "2024-06-12T15-32-02.665662"
  }
paloalma/ECE-TW3-JRGL-V2_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-04-26T16:50:02Z",
  "model_type": "🤝 : base merges and moerges",
  "source": "leaderboard",
  "job_id": 713,
- "job_start_time": "2024-05-24T02-00-48.001351",
- "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/41/f7/41f730c219a7b7cc08f7d453a9350771b343535fd6c2725f6aab27495cd2344d/60c9e956baba1f585a46925b08a1aedf315d80b79312dbc5c9f52a03976ab1bb?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00009-of-00082.safetensors%3B+filename%3D%22model-00009-of-00082.safetensors%22%3B&Expires=1716776042&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxNjc3NjA0Mn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQxL2Y3LzQxZjczMGMyMTlhN2I3Y2MwOGY3ZDQ1M2E5MzUwNzcxYjM0MzUzNWZkNmMyNzI1ZjZhYWIyNzQ5NWNkMjM0NGQvNjBjOWU5NTZiYWJhMWY1ODVhNDY5MjViMDhhMWFlZGYzMTVkODBiNzkzMTJkYmM1YzlmNTJhMDM5NzZhYjFiYj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=SdnAyWPvKbtBW8qpZ9EXGD49Y948FS6o8TJQQ1ldbcf2EZYm8ueYEBLb7gPbGSzn10ZB3IQzcc7AX~dd5Xz1P~1QbJRLE6rwUI2jEBccUNEwG7zPjMqZasJc4lI6hzasW3K1Qtz0AJOK0nGH5qEQtbNB0pPXjU9JTjO3NxKZPrK3UBrQNfQhrgBEWxJPc-0XLE3JKRDoTAU2MAvgnMfl8iy58naY7rGd-DA-8XabBgqDPh9jr2EZSi2qcG73PNdAkngsXjq5OVFl0u3Z80Ie7BGYMYs4JFf8WSpJ5qlZLxv8Dp4cfc9bFm-0BEWPQvx-eE8TrSqht92OUoMwHJAPLA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f90c619c090>: Failed to establish a new connection: [Errno -2] Name or service not known'))\"), '(Request ID: d51631b8-2b32-4572-86c6-6d24ef7c0663)')",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 72, in create_connection\n for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 962, in getaddrinfo\n for res in _socket.getaddrinfo(host, port, family, type, proto, flags):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nsocket.gaierror: [Errno -2] Name or service not known\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x7f90c619c090>: Failed to establish a new connection: [Errno -2] Name or service not known\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: 
/repos/41/f7/41f730c219a7b7cc08f7d453a9350771b343535fd6c2725f6aab27495cd2344d/60c9e956baba1f585a46925b08a1aedf315d80b79312dbc5c9f52a03976ab1bb?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00009-of-00082.safetensors%3B+filename%3D%22model-00009-of-00082.safetensors%22%3B&Expires=1716776042&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxNjc3NjA0Mn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQxL2Y3LzQxZjczMGMyMTlhN2I3Y2MwOGY3ZDQ1M2E5MzUwNzcxYjM0MzUzNWZkNmMyNzI1ZjZhYWIyNzQ5NWNkMjM0NGQvNjBjOWU5NTZiYWJhMWY1ODVhNDY5MjViMDhhMWFlZGYzMTVkODBiNzkzMTJkYmM1YzlmNTJhMDM5NzZhYjFiYj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=SdnAyWPvKbtBW8qpZ9EXGD49Y948FS6o8TJQQ1ldbcf2EZYm8ueYEBLb7gPbGSzn10ZB3IQzcc7AX~dd5Xz1P~1QbJRLE6rwUI2jEBccUNEwG7zPjMqZasJc4lI6hzasW3K1Qtz0AJOK0nGH5qEQtbNB0pPXjU9JTjO3NxKZPrK3UBrQNfQhrgBEWxJPc-0XLE3JKRDoTAU2MAvgnMfl8iy58naY7rGd-DA-8XabBgqDPh9jr2EZSi2qcG73PNdAkngsXjq5OVFl0u3Z80Ie7BGYMYs4JFf8WSpJ5qlZLxv8Dp4cfc9bFm-0BEWPQvx-eE8TrSqht92OUoMwHJAPLA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f90c619c090>: Failed to establish a new connection: [Errno -2] Name or service not known'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 70, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 563, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3511, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1040, in 
get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 399, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1367, in _hf_hub_download_to_cache_dir\n _download_to_tmp_and_move(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1884, in _download_to_tmp_and_move\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 459, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 395, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 66, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/41/f7/41f730c219a7b7cc08f7d453a9350771b343535fd6c2725f6aab27495cd2344d/60c9e956baba1f585a46925b08a1aedf315d80b79312dbc5c9f52a03976ab1bb?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00009-of-00082.safetensors%3B+filename%3D%22model-00009-of-00082.safetensors%22%3B&Expires=1716776042&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxNjc3NjA0Mn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQxL2Y3LzQxZjczMGMyMTlhN2I3Y2MwOGY3ZDQ1M2E5MzUwNzcxYjM0MzUzNWZkNmMyNzI1ZjZhYWIyNzQ5NWNkMjM0NGQvNjBjOWU5NTZiYWJhMWY1ODVhNDY5MjViMDhhMWFlZGYzMTVkODBiNzkzMTJkYmM1YzlmNTJhMDM5NzZhYjFiYj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=SdnAyWPvKbtBW8qpZ9EXGD49Y948FS6o8TJQQ1ldbcf2EZYm8ueYEBLb7gPbGSzn10ZB3IQzcc7AX~dd5Xz1P~1QbJRLE6rwUI2jEBccUNEwG7zPjMqZasJc4lI6hzasW3K1Qtz0AJOK0nGH5qEQtbNB0pPXjU9JTjO3NxKZPrK3UBrQNfQhrgBEWxJPc-0XLE3JKRDoTAU2MAvgnMfl8iy58naY7rGd-DA-8XabBgqDPh9jr2EZSi2qcG73PNdAkngsXjq5OVFl0u3Z80Ie7BGYMYs4JFf8WSpJ5qlZLxv8Dp4cfc9bFm-0BEWPQvx-eE8TrSqht92OUoMwHJAPLA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f90c619c090>: Failed to establish a new connection: [Errno -2] Name or service not 
known'))\"), '(Request ID: d51631b8-2b32-4572-86c6-6d24ef7c0663)')\n"
19
  }
 
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-04-26T16:50:02Z",
  "model_type": "🤝 : base merges and moerges",
  "source": "leaderboard",
  "job_id": 713,
+ "job_start_time": "2024-05-24T02-00-48.001351"
  }
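
The deleted `error_msg`/`traceback` above show that job 713 died on a transient DNS failure (`[Errno -2] Name or service not known` while resolving `cdn-lfs-us-1.huggingface.co`) partway through downloading shard `model-00009-of-00082.safetensors`, so resetting the request to `RERUN` simply retries a download that should normally succeed. A minimal sketch of the kind of retry-with-backoff wrapper that would absorb such blips, assuming `huggingface_hub.snapshot_download` as the download entry point (the helper name, attempt count, and delays are illustrative assumptions, not the eval bot's actual code):

```python
import time

from huggingface_hub import snapshot_download
from requests.exceptions import ConnectionError as HTTPConnectionError, ReadTimeout


def download_with_retries(repo_id: str, attempts: int = 5, base_delay: float = 30.0) -> str:
    """Hypothetical helper: retry transient network failures (DNS errors, read timeouts)."""
    for attempt in range(1, attempts + 1):
        try:
            # Fetches every file in the repo (config + safetensors shards) into the local HF cache.
            return snapshot_download(repo_id)
        except (HTTPConnectionError, ReadTimeout):
            if attempt == attempts:
                raise  # out of retries; the caller can then mark the request FAILED
            time.sleep(base_delay * attempt)  # linear backoff between attempts: 30s, 60s, 90s, ...


local_dir = download_with_retries("paloalma/ECE-TW3-JRGL-V2")
```
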
vicgalle/ConfigurableSOLAR-10.7B_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
  "submitted_time": "2024-06-12T19:45:05Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 825,
- "job_start_time": "2024-06-16T07-13-41.656977",
- "error_msg": "We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like vicgalle/ConfigurableSOLAR-10.7B is not the path to a directory containing a file named model-00001-of-00005.safetensors.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 467, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 3, in raise_from\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 462, in _make_request\n httplib_response = conn.getresponse()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 1386, in getresponse\n response.begin()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 325, in begin\n version, status, reason = self._read_status()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 286, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 550, in increment\n raise six.reraise(type(error), error, _stacktrace)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/packages/six.py\", line 770, in reraise\n raise value\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 469, in _make_request\n self._raise_timeout(err=e, url=url, timeout_value=read_timeout)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 358, in _raise_timeout\n raise ReadTimeoutError(\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1722, in _get_metadata_or_catch_error\n metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1645, in get_hf_file_metadata\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 372, in _request_wrapper\n response = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 395, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 66, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 532, in send\n raise ReadTimeout(e, request=request)\nrequests.exceptions.ReadTimeout: (ReadTimeoutError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Read timed out. (read timeout=10)\"), '(Request ID: e9009ca0-4ed2-4164-8ef8-564f925bf28b)')\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 399, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1221, in hf_hub_download\n return _hf_hub_download_to_cache_dir(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1325, in _hf_hub_download_to_cache_dir\n _raise_on_head_call_error(head_call_error, force_download, local_files_only)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1826, in _raise_on_head_call_error\n raise LocalEntryNotFoundError(\nhuggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on.\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 201, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 563, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3511, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1040, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 442, in cached_file\n raise EnvironmentError(\nOSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like vicgalle/ConfigurableSOLAR-10.7B is not the path to a directory containing a file named model-00001-of-00005.safetensors.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.\n"
  }
 
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
+ "status": "RERUN",
  "submitted_time": "2024-06-12T19:45:05Z",
  "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 825,
+ "job_start_time": "2024-06-16T07-13-41.656977"
  }
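
Both hunks above, like the other 25 files in this commit, apply the same mechanical edit: `status` flips from `FAILED` back to `RERUN`, and the stale `error_msg`/`traceback` fields are dropped so the queue picks the model up again, while `job_id` and the last `job_start_time` are kept for bookkeeping. A minimal sketch of that transformation as a script, assuming only the field names visible in these JSON files (illustrative; the commit itself may have been produced by other tooling):

```python
import json
from pathlib import Path


def reset_failed_request(path: Path) -> None:
    """Flip one FAILED eval request back to RERUN and drop its stale error fields."""
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") != "FAILED":
        return  # only touch requests that actually failed
    data["status"] = "RERUN"      # re-queue the model for evaluation
    data.pop("error_msg", None)   # discard the stored exception message
    data.pop("traceback", None)   # discard the stored stack trace
    path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n", encoding="utf-8")


if __name__ == "__main__":
    # Request files follow the <model>_eval_request_<...>.json naming scheme seen above.
    for request_file in Path(".").rglob("*_eval_request_*.json"):
        reset_failed_request(request_file)
```
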