Column schema and per-column statistics for this excerpt:

| Column | Type | Statistics |
|---|---|---|
| run_id | large_string | string length 64 to 64 |
| timestamp | unknown | |
| model_name_or_path | large_string | 5 distinct values |
| unitxt_recipe | large_string | string length 326 to 371 |
| quantization_type | large_string | 1 distinct value |
| quantization_bit_count | large_string | 1 distinct value |
| inference_runtime_s | float64 | min 1.05, max 37.4 |
| generation_args | large_string | 1 distinct value |
| model_args | large_string | 5 distinct values |
| inference_engine | large_string | 1 distinct value |
| packages_versions | large_string | 1 distinct value |
| scores | large_string | string length 174 to 240 |
| num_gpu | int64 | min 1, max 1 |
| device | large_string | 1 distinct value |
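Four of the string columns (generation_args, model_args, packages_versions, scores) hold serialized dictionaries rather than plain text, so they need to be decoded before use. The sketch below shows one way to load rows with this schema and decode those columns; the Parquet loader and the file name `eval_runs.parquet` are assumptions for illustration, not something stated in this excerpt.

```python
# Minimal loading sketch. Assumption: the rows are available locally as
# "eval_runs.parquet"; adjust the loader/path to wherever the data actually lives.
import ast
import json

from datasets import load_dataset

ds = load_dataset("parquet", data_files="eval_runs.parquet", split="train")

def decode_row(row: dict) -> dict:
    """Decode the string-typed columns that actually hold structured data."""
    out = dict(row)
    # generation_args, model_args and packages_versions are JSON strings.
    for col in ("generation_args", "model_args", "packages_versions"):
        out[col] = json.loads(row[col])
    # scores is serialized with Python-dict formatting (single quotes), so
    # ast.literal_eval is the safer decoder for it.
    out["scores"] = ast.literal_eval(row["scores"])
    return out

first = decode_row(ds[0])
print(first["model_name_or_path"], first["scores"]["accuracy"])
```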
The first record in the excerpt, field by field:

- run_id: `f109e9914d10839fa973e3a9021844ac8426916169a15428d70536b404ad8526`
- timestamp: `2024-12-22T17:42:36.739000Z`
- model_name_or_path: `meta-llama/Llama-3.2-1B-Instruct`
- unitxt_recipe: `card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_capitals_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100`
- quantization_type: `None`
- quantization_bit_count: `half`
- inference_runtime_s: `1.32597`
- generation_args: `{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}`
- model_args: `{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}`
- inference_engine: `VLLM`
- packages_versions: `{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}`
- scores: `{'num_of_instances': 100, 'accuracy': 0.39, 'score': 0.39, 'score_name': 'accuracy', 'accuracy_ci_low': 0.3, 'accuracy_ci_high': 0.49, 'score_ci_low': 0.3, 'score_ci_high': 0.49}`
- num_gpu: `1`
- device: `a100_80gb`
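The record's model_args and generation_args read like keyword arguments for vLLM's offline Python API (inference_engine is VLLM and vllm 0.6.4.post1 is pinned in packages_versions). The excerpt does not include the harness code itself, so the following is only a sketch of how those recorded arguments could be replayed, assuming they are passed through to vLLM unchanged; the chat message is a placeholder, quantization_bit_count = half is interpreted as dtype="half", and temperature/top_p are left at vLLM's defaults because they are null in the record.

```python
# Hedged sketch: replaying the recorded model_args / generation_args with vLLM's
# offline API. This is not the original harness; the prompt below is a placeholder.
from vllm import LLM, SamplingParams

# model_args from the record; "half" (quantization_bit_count) is assumed to mean dtype="half".
llm = LLM(
    model="meta-llama/Llama-3.2-1B-Instruct",
    seed=0,
    device="auto",
    dtype="half",
    max_num_batched_tokens=4096,
    gpu_memory_utilization=0.7,
    max_model_len=4096,
    tensor_parallel_size=1,
)

# generation_args from the record; temperature and top_p are null there, so the
# vLLM defaults are kept rather than set explicitly.
params = SamplingParams(
    n=1,
    max_tokens=64,
    seed=42,
    top_k=-1,
    logprobs=5,
    prompt_logprobs=1,
    skip_special_tokens=False,
)

# format=formats.chat_api in the recipe points to chat-style prompting; this message
# is a placeholder, not the MMLU-nutrition prompt that unitxt would actually build.
outputs = llm.chat(
    [{"role": "user", "content": "Answer with a single letter: ..."}],
    sampling_params=params,
)
print(outputs[0].outputs[0].text)
```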
Every other record in the excerpt shares the configuration of the example above: the same model_name_or_path, quantization_type (`None`), quantization_bit_count (`half`), generation_args, model_args, inference_engine (`VLLM`), packages_versions, num_gpu (`1`) and device (`a100_80gb`); the only exception is the final record, which is cut off after its scores field and therefore shows no num_gpu or device. The unitxt_recipe strings are identical except for the template component, which follows the pattern `templates.huji_workshop.MultipleChoiceTemplatesInstructions<instructions>.enumerator_<enumerator>_choicesSeparator_<separator>_shuffleChoices_<shuffle>`. In scores, every record reports num_of_instances = 100 and score_name = accuracy, with score, score_ci_low and score_ci_high equal to accuracy, accuracy_ci_low and accuracy_ci_high. The table below lists the fields that vary, one row per record (the example record above is repeated as the first row for completeness).

| run_id | timestamp | instructions | enumerator | separator | shuffle | inference_runtime_s | accuracy | accuracy CI [low, high] |
|---|---|---|---|---|---|---|---|---|
| f109e9914d10839fa973e3a9021844ac8426916169a15428d70536b404ad8526 | 2024-12-22T17:42:36.739000Z | WithoutTopic | capitals | newline | False | 1.32597 | 0.39 | [0.3, 0.49] |
| 9620254e798e09c5d949e166b7114a0efb5708660c69760aaa9f79df4f7173da | 2024-12-22T17:42:38.911000Z | WithoutTopic | capitals | newline | True | 1.352821 | 0.41 | [0.32, 0.52] |
| ff655f7e1b00642ad71b60bb810022bd39144735b7befaf301e326b8259d0644 | 2024-12-22T17:42:42.390000Z | WithoutTopic | capitals | comma | False | 2.680885 | 0.33 | [0.24, 0.43] |
| 162b03053e92f8bc29a0f3e583feeff22099638dd9db5fa3f21f03c9515d8ef9 | 2024-12-22T17:42:45.951000Z | WithoutTopic | capitals | comma | True | 1.907731 | 0.27 | [0.19, 0.37] |
| 3a6a26d08500c49d0e90e3911274234cb352b3446a60e042edf0884660e54328 | 2024-12-22T17:42:48.629000Z | WithoutTopic | capitals | semicolon | False | 1.875733 | 0.32 | [0.24, 0.41] |
| 42de95008e7ee392939d6929ccc879951808e8aa543bd9bd41064edc9a3c796f | 2024-12-22T17:42:50.772000Z | WithoutTopic | capitals | semicolon | True | 1.343195 | 0.34 | [0.25, 0.44] |
| 509bb4dfa7f9ccf58889d79c97fd632c56697cf21e278d6953ebe3577dd3d1a5 | 2024-12-22T17:42:52.901000Z | WithoutTopic | capitals | pipe | False | 1.329925 | 0.27 | [0.19, 0.36] |
| 8d8ab3537abbe5621ea192dad7f9b1c150e915b5a1cb9cd9eb3e557ff7d23517 | 2024-12-22T17:42:55.041000Z | WithoutTopic | capitals | pipe | True | 1.3346 | 0.34 | [0.26, 0.44] |
| 68ae7bbacf1fcab0e348804eb4abd36fe259681239411cd5206abfc008c3bcee | 2024-12-22T17:42:58.094000Z | WithoutTopic | capitals | OrCapital | False | 2.248915 | 0.31 | [0.23, 0.41] |
| ec9333744335d7dea75f7daf29c4101a4f867c98f429057348432b0b55c6a841 | 2024-12-22T17:43:00.233000Z | WithoutTopic | capitals | OrCapital | True | 1.337241 | 0.29 | [0.21, 0.39] |
| 60da602be4b70fa27a802b920e398466763376649d25bfb2f69108051a45fd6a | 2024-12-22T17:42:11.469000Z | WithTopic | greek | semicolon | False | 1.362703 | 0.41 | [0.32860663885346236, 0.51] |
| 1b6ecb3f406ba502b1472940111e0e0642b2e349c9f2cae66bc7899290480f5f | 2024-12-22T17:42:13.724000Z | WithTopic | greek | semicolon | True | 1.371316 | 0.35 | [0.26, 0.44205790382953636] |
| 6798e2ab132ba7a6b21ce5062ad1a6acb8dd6d8bc38d771b22724259496edb63 | 2024-12-22T17:42:15.961000Z | WithTopic | greek | pipe | False | 1.365618 | 0.42 | [0.33, 0.52] |
| d0d0527094e31183a0c550f5f90eaadae587d2c9f871f27fef2d83fe09220bff | 2024-12-22T17:42:19.657000Z | WithTopic | greek | pipe | True | 2.877026 | 0.37 | [0.27, 0.47] |
| 5794cd4480bce65740e99a32a7b3ba8a1482e44a2264865df442a17721d557a7 | 2024-12-22T17:42:21.857000Z | WithTopic | greek | OrCapital | False | 1.354878 | 0.36 | [0.27, 0.46] |
| 275c66865773c5c19e9de503dedcf29de2d22fee4fe7b0e780e6ecf2d29dcb8c | 2024-12-22T17:42:24.516000Z | WithTopic | greek | OrCapital | True | 1.846442 | 0.36 | [0.27, 0.46] |
| 32f164353e772b5a35e7db46a037dacd1e32e3995ffa6c74dca68aa60a4846b4 | 2024-12-22T17:42:28.149000Z | WithTopic | greek | orLower | False | 1.366226 | 0.34 | [0.25, 0.43] |
| 2885bd5966bde647fcf3d4abe787388ee49fa892a8a1b0aaf9fa70073aead14a | 2024-12-22T17:42:30.371000Z | WithTopic | greek | orLower | True | 1.405214 | 0.34 | [0.26, 0.43] |
| 527c600e1114bca5b824346f22ff75b7f8df205f85885ada8ba2223e4cef52d7 | 2024-12-22T17:42:32.504000Z | WithoutTopic | capitals | space | False | 1.331116 | 0.32 | [0.23859898805902766, 0.42] |
| 7d368dee404eaca564551f50d5de6bb468d8629d93553b4b57ffb3a18435720f | 2024-12-22T17:42:34.622000Z | WithoutTopic | capitals | space | True | 1.319797 | 0.28 | [0.2, 0.38] |
| a21cd9bc6d1eb98812a098034d3612059450113be2e4b8ba6adf5765ff51c9e8 | 2024-12-22T17:41:45.851000Z | WithTopic | keyboard | OrCapital | False | 1.34396 | 0.27 | [0.19, 0.36] |
| 1aebaab9262f523af550627527029aa4d3a75a0f5bd670061d9ba7090c297022 | 2024-12-22T17:41:47.973000Z | WithTopic | keyboard | OrCapital | True | 1.331236 | 0.27 | [0.19, 0.36] |
| a8daf97540b030ddf06263bcb2a9c79dfdc09615d75b7213d46bf7e342445d5e | 2024-12-22T17:41:50.063000Z | WithTopic | keyboard | orLower | False | 1.302095 | 0.32 | [0.23, 0.42] |
| 2db41091d4406459e16c7e8a36309c1e640c059cf87416cbb414dc8f6bab1e59 | 2024-12-22T17:41:52.261000Z | WithTopic | keyboard | orLower | True | 1.402451 | 0.36 | [0.26, 0.46] |
| c5e4965b68420052a250446689977d398b6a043719712861a031e8975f787a6d | 2024-12-22T17:41:55.899000Z | WithTopic | greek | space | False | 2.832699 | 0.38 | [0.29, 0.47] |
| dbf8ab627f1941b5041e448d0ca87201153a5644210821db92f0569989f411d1 | 2024-12-22T17:41:58.065000Z | WithTopic | greek | space | True | 1.339437 | 0.33 | [0.25, 0.43] |
| 49a6658e00dea3a52364d67c17b1a61ea048c761aaf43f92541690d02005985f | 2024-12-22T17:42:01.391000Z | WithTopic | greek | newline | False | 2.524441 | 0.51 | [0.4, 0.61] |
| f2a49b4e7a26853bdc1066c7b2eae075146c40ec1420116cc54b57dd7e1a2ef3 | 2024-12-22T17:42:03.579000Z | WithTopic | greek | newline | True | 1.349083 | 0.4 | [0.3046331840789312, 0.51] |
| 7e398b92ebc560a393adb274623ab706cf32d814530159e4faa7fa2295408312 | 2024-12-22T17:42:07.087000Z | WithTopic | greek | comma | False | 2.694435 | 0.43 | [0.34, 0.53] |
| 211f4a51f09aaae05273295bc26ab47aba5f90bd0bd364a70b4aca9ff4963c13 | 2024-12-22T17:42:09.291000Z | WithTopic | greek | comma | True | 1.388419 | 0.36 | [0.27, 0.4544191276899824] |
| 6e8f3e965e206ac54ab7bb8336ed76e2989782da96dfa95aafd44586a5ae3a41 | 2024-12-22T17:41:20.758000Z | WithTopic | keyboard | space | False | 1.318868 | 0.26 | [0.18, 0.34] |
| cfe866528cc28948622bd326534d54ef21496dc49260be4060e48608ca7e1730 | 2024-12-22T17:41:23.721000Z | WithTopic | keyboard | space | True | 2.166234 | 0.38 | [0.29, 0.47] |
| 4050300571f602de0d10d9b5da9e5775fb87939dec6e5763ecee6d3cdbe6c9b5 | 2024-12-22T17:41:25.849000Z | WithTopic | keyboard | newline | False | 1.328174 | 0.33 | [0.25, 0.43] |
| 33a51ca8bf34f1adbb3729584c7512b0dbbe37e17b9367beacee47448bbfa9a1 | 2024-12-22T17:41:27.985000Z | WithTopic | keyboard | newline | True | 1.342328 | 0.29 | [0.21, 0.38] |
| a0097d5086225355a87bb4e29e617483c517af72920b101957ae0935ee04c569 | 2024-12-22T17:41:30.137000Z | WithTopic | keyboard | comma | False | 1.369982 | 0.28 | [0.19, 0.37] |
| 6560b32198ca371e8081dddb1343b6b79e255ad3111f48d0998474611fd851ae | 2024-12-22T17:41:32.917000Z | WithTopic | keyboard | comma | True | 1.987347 | 0.26 | [0.18, 0.35] |
| b34d69db72574e5d51dfb5c4e138dc4188eb8551161665d66318fce8b1d94112 | 2024-12-22T17:41:35.502000Z | WithTopic | keyboard | semicolon | False | 1.808333 | 0.25 | [0.17, 0.35] |
| 218e2d88b3f3f8c2131baa2a2c7d6f8622c4c04902abee0f39482cba494234d4 | 2024-12-22T17:41:38.125000Z | WithTopic | keyboard | semicolon | True | 1.839016 | 0.29 | [0.21, 0.39] |
| 03c56a3e6ea73cc290e0c28cc5bdf4ba95c2938677754c5581b9f1884f986bb1 | 2024-12-22T17:41:40.197000Z | WithTopic | keyboard | pipe | False | 1.289385 | 0.3 | [0.21, 0.4] |
| 0f7541d014f1538d3d8d661d177e7ed1d19597118a2a910fc59f764a73fb3c94 | 2024-12-22T17:41:43.720000Z | WithTopic | keyboard | pipe | True | 2.742782 | 0.28 | [0.19, 0.37] |
| 2f35ce60719ebb501cf23729d45c18c698021800fec20fb47e0b6cea9f420d66 | 2024-12-22T17:40:55.899000Z | WithTopic | roman | comma | False | 1.350966 | 0.34 | [0.25, 0.44] |
| f538aeab3c1fe3b61c43ff1f2c11764252cf49d446fc1bbf6a63cf2ea74c6064 | 2024-12-22T17:40:58.073000Z | WithTopic | roman | comma | True | 1.363224 | 0.28 | [0.21, 0.37] |
| 9da62d1e0e9f1f67b615f5557913ea4f48ff000d07b77ac3bbbbb6e2eabd4355 | 2024-12-22T17:41:00.290000Z | WithTopic | roman | semicolon | False | 1.342472 | 0.32 | [0.23, 0.42] |
| 851e2404b5e831fa72fbea0f61916e09ffaebe2e124c452761426e7edf4dbb20 | 2024-12-22T17:41:02.460000Z | WithTopic | roman | semicolon | True | 1.360231 | 0.33 | [0.25, 0.43] |
| a778f17b6327fc28370b7cb77185a5d5e7938a8e9a80f422ff1ba7cf3d5e93d6 | 2024-12-22T17:41:06.034000Z | WithTopic | roman | pipe | False | 2.758402 | 0.34 | [0.25, 0.44] |
| 2ca319e03cb5db55105fc563e0b62ddf99c2f42f07f2e58962ce80ed54f08b1f | 2024-12-22T17:41:08.214000Z | WithTopic | roman | pipe | True | 1.379275 | 0.31 | [0.22, 0.4] |
| 084eb35ba5f31a886b25fe45da40c8638b29e25ebed5a2fc4423a830844e96b7 | 2024-12-22T17:41:10.393000Z | WithTopic | roman | OrCapital | False | 1.347872 | 0.3 | [0.22, 0.39] |
| 34660d5572e6518b9450061e2b0ecbcafb153d990a064f156cf9368668b70f54 | 2024-12-22T17:41:13.760000Z | WithTopic | roman | OrCapital | True | 2.558326 | 0.27 | [0.19, 0.36] |
| b2cdd5fc9816682d44360abafa95c76d6d6c1008b72890dd603983914d03f4ed | 2024-12-22T17:41:16.473000Z | WithTopic | roman | orLower | False | 1.891319 | 0.3 | [0.22, 0.39] |
| 050d3c96bb4fe76072f443edce058aa37adcf808a58222655fedf8b81762e72e | 2024-12-22T17:41:18.656000Z | WithTopic | roman | orLower | True | 1.368157 | 0.28 | [0.2, 0.38] |
| eb90aa6f13ebc1881ddd90507edeac0a26205dd5f2cf2a6f848ffa2cfa0072e0 | 2024-12-22T17:40:29.115000Z | WithTopic | numbers | pipe | False | 2.499946 | 0.44 | [0.34, 0.55] |
| 5fd5a46194592ad0ef633efe264ccf6d667868a0ccf4e75700c70d22fc54ac7c | 2024-12-22T17:40:31.825000Z | WithTopic | numbers | pipe | True | 1.902901 | 0.36 | [0.28, 0.46] |
1
a100_80gb
4395ab220a2f75d5ebc1e339ed7e588f2826bc19d12967b00cd948b1c9324bcc
"2024-12-22T17:40:34.919000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.4
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.29, 'score': 0.29, 'score_name': 'accuracy', 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.38, 'score_ci_low': 0.21, 'score_ci_high': 0.38}
1
a100_80gb
cba6b850eba8986affe8f74626d954bb8b26773d227910375e37c0b013467506
"2024-12-22T17:40:37.118000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.391555
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.28, 'score': 0.28, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.2, 'score_ci_high': 0.37}
1
a100_80gb
f4569de9f8bfea6d4784dccdd480fa1a3525b2f44d0d46ef7534d06b4ae09790
"2024-12-22T17:40:39.342000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.359952
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
8b8316c76af3b3a8cc42e77adbd8d242cb8f5fff475f7aa467e52bb041ebdff4
"2024-12-22T17:40:42.192000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.37402
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.34, 'score': 0.34, 'score_name': 'accuracy', 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.44, 'score_ci_low': 0.25, 'score_ci_high': 0.44}
1
a100_80gb
434ba753a461408fce30391a12f2e992835787434bf9396a58cf562d1419d9a1
"2024-12-22T17:40:44.381000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.363246
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
b133d7e6f2893e5edf1a1b8e2a061f808ca3e3071f081a60c80a2308c85fa193
"2024-12-22T17:40:47.660000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.43859
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.28, 'score': 0.28, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_ci_low': 0.2, 'score_ci_high': 0.38}
1
a100_80gb
518e5f3b2614ec41c8a2d99c736c3d2513d2e672ee75e9c6e6da96ff423a14fd
"2024-12-22T17:40:51.585000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.123682
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.3, 'score': 0.3, 'score_name': 'accuracy', 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.4, 'score_ci_low': 0.21, 'score_ci_high': 0.4}
1
a100_80gb
21344556f3f38228080aed970aaf8b82c49daf0c73fe771548ba8edb8bb4d370
"2024-12-22T17:40:53.741000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.353358
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.28, 'score': 0.28, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.2, 'score_ci_high': 0.37}
1
a100_80gb
9d040366fb4d2b656bd9be4750c17a5c048f73c170e9654fabe6a8c5fc34a35c
"2024-12-22T17:40:04.978000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.861083
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
7bf7c581cd78e98ca9ed728c3e021049caa11365672581807cba349a87a5dbf6
"2024-12-22T17:40:07.189000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.391587
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.27, 'score': 0.27, 'score_name': 'accuracy', 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.19, 'score_ci_high': 0.37}
1
a100_80gb
8afcf8f0a9eba7858b9e89ca113af997af11c94b600f0b9a73376b0d800f87e2
"2024-12-22T17:40:09.354000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.361796
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.56, 'score_ci_low': 0.36, 'score_ci_high': 0.56}
1
a100_80gb
1e5648976bec791bff856b28eed2e4fb0eb6b097436e291656338ce1d94ffe25
"2024-12-22T17:40:11.519000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.363932
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.37, 'score': 0.37, 'score_name': 'accuracy', 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.46, 'score_ci_low': 0.28, 'score_ci_high': 0.46}
1
a100_80gb
9728a09f4e2dfd7d3f8aede47ead7b02019ae11676be6a8f4bb6f71a6e651dd4
"2024-12-22T17:40:13.666000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.349313
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.45, 'score': 0.45, 'score_name': 'accuracy', 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.5530903889991999, 'score_ci_low': 0.35, 'score_ci_high': 0.5530903889991999}
1
a100_80gb
316f84c4853936c3a3dcaf87b6ba51e1c95120350223c3ec135f99f60e7ab84f
"2024-12-22T17:40:15.810000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.34693
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.33, 'score': 0.33, 'score_name': 'accuracy', 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.42, 'score_ci_low': 0.24, 'score_ci_high': 0.42}
1
a100_80gb
5f6a706b27d83e1feb47830e58d7b1cab715e8ad9fb028f63e6f901d5508dad0
"2024-12-22T17:40:18.027000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.4199
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.362700545964999, 'accuracy_ci_high': 0.56, 'score_ci_low': 0.362700545964999, 'score_ci_high': 0.56}
1
a100_80gb
5f97255ceecbdbd0d48186e00fbea31f775ec7ce65856a27cc50304cc8110524
"2024-12-22T17:40:20.187000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.357388
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.37, 'score': 0.37, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2726095036975427, 'accuracy_ci_high': 0.47, 'score_ci_low': 0.2726095036975427, 'score_ci_high': 0.47}
1
a100_80gb
0150e2909585c84f6263b5ce70cdea0c36742db1a2f3f90b5b811c65c7c15b08
"2024-12-22T17:40:23.640000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.647744
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38, 'score_ci_high': 0.58}
1
a100_80gb
1d801b9f06a72a2baf4d1c8b5fb1cfab024816a9480f515cb93f38ad7d9c9262
"2024-12-22T17:40:25.815000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.350526
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.34, 'score': 0.34, 'score_name': 'accuracy', 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.44, 'score_ci_low': 0.26, 'score_ci_high': 0.44}
1
a100_80gb
b160b308a3aad3fdfcabb40a0110214da84ecd96c449e378449b235b6e46331f
"2024-12-22T17:39:38.877000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.376006
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.3, 'score': 0.3, 'score_name': 'accuracy', 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.39, 'score_ci_low': 0.22, 'score_ci_high': 0.39}
1
a100_80gb
bbb928e90cd8990b1db0476b7002a94c378070beb95faad267a6d2f8d277b3bf
"2024-12-22T17:39:41.040000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.358718
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
bbff546bb7105d321c08c43b7aa3b029289b0fbc3b96235bc430fa6d2fc0aa32
"2024-12-22T17:39:43.199000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.348327
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.3, 'score': 0.3, 'score_name': 'accuracy', 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_ci_low': 0.22, 'score_ci_high': 0.4}
1
a100_80gb
033cb2d431509bc32b33309ae0835a6e7b2053f58fc5379c273af7668cf099f7
"2024-12-22T17:39:45.423000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.416516
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.35, 'score_ci_low': 0.17, 'score_ci_high': 0.35}
1
a100_80gb
cbf8c8f0e02b48aad2aa7be64a1c463fdf585d46764d0acbe374b61a55ec91e6
"2024-12-22T17:39:47.573000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.343813
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.26, 'score': 0.26, 'score_name': 'accuracy', 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.35, 'score_ci_low': 0.19, 'score_ci_high': 0.35}
1
a100_80gb
d33fbd68ec6fd6a5b4dbcf7516bf3a9fff122f094b0ebcac58365dcfba34b1ad
"2024-12-22T17:39:51.244000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.858266
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.27, 'score': 0.27, 'score_name': 'accuracy', 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.19, 'score_ci_high': 0.37}
1
a100_80gb
a296e1237619bf0e4c94a8d26f1161e15d3e490bf089769d263b0d6218579b03
"2024-12-22T17:39:53.569000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.51952
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_ci_low': 0.17, 'score_ci_high': 0.34}
1
a100_80gb
b953d958e44594eb2383eef68be94d48f75d56ec71cb758ddee13fda7db453e1
"2024-12-22T17:39:55.767000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.365765
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.26, 'score': 0.26, 'score_name': 'accuracy', 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.36, 'score_ci_low': 0.18, 'score_ci_high': 0.36}
1
a100_80gb
aafb6d7df33256bde04c4ec03964483140aa9c48b353fdd8c68d29ff8e40283a
"2024-12-22T17:39:59.083000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.503826
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.28, 'score': 0.28, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.2, 'score_ci_high': 0.37}
1
a100_80gb
58141039f5a73dd0f7524a58bf148a31c2c520e25ec267621f0cd96c5b324e87
"2024-12-22T17:40:01.272000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.36444
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.24, 'score': 0.24, 'score_name': 'accuracy', 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.33, 'score_ci_low': 0.16, 'score_ci_high': 0.33}
1
a100_80gb
30d6193df4ac837a61eb8bd1bf60e8d297222b9f8b8d7b80ddd316c8b50eac5f
"2024-12-22T17:39:12.983000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.052712
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.33, 'score': 0.33, 'score_name': 'accuracy', 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.4274770447489027, 'score_ci_low': 0.25, 'score_ci_high': 0.4274770447489027}
1
a100_80gb
0b3b6c6e711462c6f318c47ae0b74c4fda43ddaf91c8422c8e462a77a93728b3
"2024-12-22T17:39:16.045000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.255005
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.32, 'score': 0.32, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.42, 'score_ci_low': 0.23, 'score_ci_high': 0.42}
1
a100_80gb
f6ae0afc937a79f59a6a4936eb1eb5ce98b1b43784549ade44ba577570e2cd13
"2024-12-22T17:39:18.224000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.369562
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.28, 'score': 0.28, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.2, 'score_ci_high': 0.37}
1
a100_80gb
c18a099e1db0f301adf3c27c98281547bd650a73bc5fed894f216ba908a33d6c
"2024-12-22T17:39:20.395000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.361335
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
98060887bd6b2e3732b3e8ba545d237bdb526e95bba409fced643f068cbc16a1
"2024-12-22T17:39:22.622000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.362104
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.3, 'score': 0.3, 'score_name': 'accuracy', 'accuracy_ci_low': 0.21634387667512173, 'accuracy_ci_high': 0.4, 'score_ci_low': 0.21634387667512173, 'score_ci_high': 0.4}
1
a100_80gb
3b2b9f8216a8af936ede93db80f77c37f094044020b86643b413a949cea7b591
"2024-12-22T17:39:24.784000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.358152
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2289693808040247, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.2289693808040247, 'score_ci_high': 0.41}
1
a100_80gb
2864a715fe64b1acb44c3321f983cd961251c40fcb0d5f02994b8db0fb9dcdbe
"2024-12-22T17:39:26.942000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.3476
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.3, 'score': 0.3, 'score_name': 'accuracy', 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.39, 'score_ci_low': 0.22, 'score_ci_high': 0.39}
1
a100_80gb
c2ea7b79b3d16ab261aedc9d35c20c33c1ca31723f835b9d71276f7dc68ad18a
"2024-12-22T17:39:30.544000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.790831
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.27, 'score': 0.27, 'score_name': 'accuracy', 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.19, 'score_ci_high': 0.37}
1
a100_80gb
59c8cb05748e95cee89c47e49825a50bdb0aca71ce3f034f9f647583bf80e374
"2024-12-22T17:39:33.203000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.85416
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.34, 'score_ci_low': 0.18, 'score_ci_high': 0.34}
1
a100_80gb
6032a868d5c009615d2a8a14d076481bf9f21d6893ebf65f4936f94bb80cc563
"2024-12-22T17:39:36.697000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.375281
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.3590718299817456, 'score_ci_low': 0.18, 'score_ci_high': 0.3590718299817456}
1
a100_80gb
d7c3540eff83c06dbce722cd47bcccf15c7d2881a61b183e1b07f7ff8c1e45a3
"2024-12-22T17:38:36.131000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.145514
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.29, 'score': 0.29, 'score_name': 'accuracy', 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39, 'score_ci_low': 0.21, 'score_ci_high': 0.39}
1
a100_80gb
211f252841fe77458b8f4575e58af4bc09e0592a6a838090a1c7f1d96f2f18fd
"2024-12-22T17:38:42.326000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.79427
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.24, 'score': 0.24, 'score_name': 'accuracy', 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_ci_low': 0.17, 'score_ci_high': 0.34}
1
a100_80gb
a481e3938870fa80a36b15188054d27363c3d5226cf98bc41da0e1e9c42b5b03
"2024-12-22T17:38:48.851000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.163802
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.28, 'score': 0.28, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_ci_low': 0.2, 'score_ci_high': 0.37}
1
a100_80gb
2b6585464a71c7244ec2d10c21096906ba01642b0580f63d5b747fc08ba299db
"2024-12-22T17:38:53.383000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.146961
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.2, 'score': 0.2, 'score_name': 'accuracy', 'accuracy_ci_low': 0.14, 'accuracy_ci_high': 0.3, 'score_ci_low': 0.14, 'score_ci_high': 0.3}
1
a100_80gb
3ffdf248d754a87b44ff095c1d222e012a6144f168d43bb3c82f01e7c1570487
"2024-12-22T17:38:56.709000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.525531
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.27, 'score': 0.27, 'score_name': 'accuracy', 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36104402636831534, 'score_ci_low': 0.19, 'score_ci_high': 0.36104402636831534}
1
a100_80gb
140dbc62810d5a7d58fee1ad1ed0882d5e193e13fde4fbf0182e81c0485f9222
"2024-12-22T17:38:58.874000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.364798
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.3, 'score': 0.3, 'score_name': 'accuracy', 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_ci_low': 0.22, 'score_ci_high': 0.4}
1
a100_80gb
08e420d44e6c685a9b366438c0cc94e1999070a55888df0c3cf0ef9243e4836a
"2024-12-22T17:39:01.040000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.366746
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.38, 'score': 0.38, 'score_name': 'accuracy', 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_ci_low': 0.29, 'score_ci_high': 0.48}
1
a100_80gb
6a7df8c5330e6ae92953dfebc68ebcbba203b77853a1a9688fe42c7bbc1a7351
"2024-12-22T17:39:04.650000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.8033
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.4, 'score': 0.4, 'score_name': 'accuracy', 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_ci_low': 0.32, 'score_ci_high': 0.51}
1
a100_80gb
7c1ad72b0a147f4afaef8a47ccb8882de0338a34599556dfcc5aa77820d762db
"2024-12-22T17:39:06.806000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.350214
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
8a9b31aa1cb24b07bba0f677f807b9f07dd655052770a3533149dc931dc652cf
"2024-12-22T17:39:10.118000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.490945
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.29, 'score': 0.29, 'score_name': 'accuracy', 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.39462454273170466, 'score_ci_low': 0.21, 'score_ci_high': 0.39462454273170466}
1
a100_80gb