Retry 1 FAILED models
deepseek-ai/DeepSeek-R1-Distill-Llama-8B_eval_request_False_bfloat16_Original.json (CHANGED)
@@ -8,7 +8,7 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2025-01-29T02:13:52Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "manual",
@@ -28,7 +28,5 @@
         "tweetsentbr": 0.0038314176245210726
     },
     "result_metrics_average": 0.18107456870871091,
-    "result_metrics_npm": -0.2224283079873639,
-    "error_msg": "'Instance' object is not subscriptable",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 577, in group\n for key, value in sorted(fn(ob).items())\n ^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 375, in <lambda>\n self._group_fn = lambda x: group_fn(x[1])\n ^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 369, in <lambda>\n group_fn: Callable = lambda x: x[1],\n ~^^^\nTypeError: 'Instance' object is not subscriptable\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n exception_msg = MODELS_DOWNLOADED_FAILED[f\"{request['model']}_{request['revision']}\"]\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 102, in run_request\n model_args += ',max_length=4098,enforce_eager=True'\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/vllm_causallms.py\", line 472, in generate_until\n re_ords = Collator(requests, _collate_gen, group_by=\"gen_kwargs\")\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 384, in __init__\n self._group_by_index()\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 388, in _group_by_index\n self._arr_with_indices = self.group(\n ^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 581, in group\n res[tuple(fn(ob))].append(ob)\n ^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 375, in <lambda>\n self._group_fn = lambda x: group_fn(x[1])\n ^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/utils.py\", line 369, in <lambda>\n group_fn: Callable = lambda x: x[1],\n ~^^^\nTypeError: 'Instance' object is not subscriptable\n"
+    "result_metrics_npm": -0.2224283079873639
 }
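For context on the failure being retried: the removed traceback comes from lm-evaluation-harness-pt, where generate_until builds Collator(requests, _collate_gen, group_by="gen_kwargs") and the default group_fn = lambda x: x[1] indexes each request as if it were a (context, gen_kwargs) tuple; the requests are Instance objects, which are not subscriptable, hence the TypeError. The sketch below is a minimal, self-contained reproduction of that failure mode using toy stand-ins (ToyInstance and the small group helper are illustrative assumptions, not the harness's real classes), followed by an attribute-based grouping that avoids the error.

from collections import defaultdict
from dataclasses import dataclass, field


@dataclass
class ToyInstance:
    """Toy stand-in for an lm_eval request Instance: a prompt plus its generation kwargs."""
    context: str
    gen_kwargs: dict = field(default_factory=dict)


def group(objects, fn):
    """Group objects by key, mirroring the grouping step named in the traceback."""
    res = defaultdict(list)
    for ob in objects:
        res[tuple(fn(ob))].append(ob)  # fn(ob) must yield something hashable
    return res


requests = [
    ToyInstance("Translate to Portuguese: hello", {"temperature": 0.0, "max_gen_toks": 128}),
    ToyInstance("Classify the tweet's sentiment: ...", {"temperature": 0.0, "max_gen_toks": 128}),
]

# Failing pattern from the traceback: the default group_fn indexes x[1],
# which only works when each request is a (context, gen_kwargs) tuple.
group_fn = lambda x: x[1]
try:
    group(requests, group_fn)
except TypeError as err:
    print(err)  # 'ToyInstance' object is not subscriptable

# Grouping on the attribute instead of an index avoids the error.
grouped = group(requests, lambda x: sorted(x.gen_kwargs.items()))
print({key: len(batch) for key, batch in grouped.items()})  # one group of 2 requests

Whether the fork resolves this by passing (context, gen_kwargs) tuples to the Collator or by changing group_fn is not visible from this diff; the snippet only illustrates why the indexing pattern fails on Instance objects.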
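The diff itself is the retry step named in the commit title: the request's status goes from FAILED back to RERUN, and the error_msg/traceback fields are dropped so the eval bot will pick the model up again, while the partial result_metrics are kept. A minimal sketch of how such a pass over the request files could be scripted is shown below; the requests/ directory, the glob pattern, and indent=4 are assumptions about the repository layout, not its actual tooling.

import json
from pathlib import Path

# Hypothetical location of the eval request JSON files; the real repo layout may differ.
REQUESTS_DIR = Path("requests")


def mark_failed_for_rerun(path: Path) -> bool:
    """Flip a FAILED eval request back to RERUN and drop its error fields."""
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") != "FAILED":
        return False
    data["status"] = "RERUN"
    data.pop("error_msg", None)
    data.pop("traceback", None)
    path.write_text(json.dumps(data, indent=4, ensure_ascii=False) + "\n", encoding="utf-8")
    return True


if __name__ == "__main__":
    changed = [p for p in REQUESTS_DIR.glob("**/*.json") if mark_failed_for_rerun(p)]
    print(f"Marked {len(changed)} request(s) for rerun")

Running such a script over a checkout of the requests dataset and committing the result would produce exactly the kind of change shown in the diff above.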