Dataset columns (name, storage type, and dataset-viewer statistics):

| Column | Type | Viewer stats |
| --- | --- | --- |
| run_id | large_string | string lengths 64 to 64 |
| timestamp | unknown | |
| model_name_or_path | large_string | 5 distinct values |
| unitxt_recipe | large_string | string lengths 326 to 371 |
| quantization_type | large_string | 1 distinct value |
| quantization_bit_count | large_string | 1 distinct value |
| inference_runtime_s | float64 | 1.12 to 36.5 |
| generation_args | large_string | 1 distinct value |
| model_args | large_string | 5 distinct values |
| inference_engine | large_string | 1 distinct value |
| packages_versions | large_string | 1 distinct value |
| scores | large_string | string lengths 174 to 240 |
| num_gpu | int64 | 1 to 1 |
| device | large_string | 1 distinct value |
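The rows that follow list these 14 fields in exactly the column order above, one value per line. The generation_args, model_args, and packages_versions fields are JSON strings, while scores is a Python-repr dict (single-quoted keys). Below is a minimal parsing sketch under those assumptions; the COLUMNS list and parse_record helper are illustrative names, not part of any official loader for this dataset:

```python
import ast
import json

# Field order as given in the schema table above.
COLUMNS = [
    "run_id", "timestamp", "model_name_or_path", "unitxt_recipe",
    "quantization_type", "quantization_bit_count", "inference_runtime_s",
    "generation_args", "model_args", "inference_engine",
    "packages_versions", "scores", "num_gpu", "device",
]

def parse_record(values):
    """Map one flat 14-value record onto the schema and decode the nested fields."""
    record = dict(zip(COLUMNS, values))
    # generation_args / model_args / packages_versions are JSON strings.
    for key in ("generation_args", "model_args", "packages_versions"):
        record[key] = json.loads(record[key])
    # scores is a Python-literal dict (single quotes), so json.loads would fail on it.
    record["scores"] = ast.literal_eval(record["scores"])
    record["inference_runtime_s"] = float(record["inference_runtime_s"])
    record["num_gpu"] = int(record["num_gpu"])
    return record
```

After parsing, record["scores"]["accuracy"] and the confidence-interval bounds are ordinary floats.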
cea4de433274e15bfe3bf79659626943605a017586628f2fcf3fc649c8dc12cd
"2024-12-22T12:31:50.341000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.804876
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.67, 'score': 0.67, 'score_name': 'accuracy', 'accuracy_ci_low': 0.5742444686103648, 'accuracy_ci_high': 0.75, 'score_ci_low': 0.5742444686103648, 'score_ci_high': 0.75}
1
a100_80gb
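Each unitxt_recipe is a comma-separated list of key=value settings (card, demos_pool_size, num_demos, format, template, system_prompt, and so on). Since none of the values in these rows contain commas, a plain split recovers the settings; the parse_recipe helper below is a sketch under that assumption:

```python
def parse_recipe(recipe: str) -> dict:
    """Split a unitxt recipe string like 'card=...,num_demos=0,...' into a dict."""
    return dict(part.split("=", 1) for part in recipe.split(","))

recipe = parse_recipe(
    "card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,"
    "format=formats.chat_api,"
    "template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic."
    "enumerator_roman_choicesSeparator_space_shuffleChoices_False,"
    "system_prompt=system_prompts.empty,demos_taken_from=train,"
    "demos_removed_from_data=True,max_test_instances=100"
)
print(recipe["card"])                         # cards.mmlu.sociology
print(recipe["template"].rsplit(".", 1)[-1])  # enumerator_roman_choicesSeparator_space_shuffleChoices_False
```

The template suffix encodes the enumerator style, the choice separator, and whether the options were shuffled, which is the axis the paired rows below vary.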
e9bb271b51ebd4260807c547214b53fb4fa576938250bdd8f5cfca3088d719c7
"2024-12-22T12:31:53.681000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.617559
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.55, 'score': 0.55, 'score_name': 'accuracy', 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.45, 'score_ci_high': 0.65}
1
a100_80gb
ccb91f267aa2732671c258a1015c03aab27e8976a81ae625388f96f746e63b87
"2024-12-22T12:31:26.432000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.217717
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.72, 'score_ci_low': 0.54, 'score_ci_high': 0.72}
1
a100_80gb
ed36bd8cd24c811c89cc5d9098869114fff1e420186e73a2e6836d9851aea059
"2024-12-22T12:31:30.120000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.955159
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.61, 'score': 0.61, 'score_name': 'accuracy', 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.7, 'score_ci_low': 0.52, 'score_ci_high': 0.7}
1
a100_80gb
f84286f274c10ccf442001555b04a156c708f71205e5332a4687f6edf3b01e66
"2024-12-22T12:31:33.278000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.411268
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.62, 'score': 0.62, 'score_name': 'accuracy', 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.71, 'score_ci_low': 0.52, 'score_ci_high': 0.71}
1
a100_80gb
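generation_args and model_args in every row mirror vLLM sampling parameters and engine arguments (inference_engine is VLLM, version 0.6.4.post1 per packages_versions). The sketch below shows how the logged arguments could map onto the vLLM API; it is illustrative rather than the script that actually produced these rows, and the null-valued fields are simply dropped so vLLM's defaults apply:

```python
import json
from vllm import LLM, SamplingParams

model_args = json.loads(
    '{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", '
    '"max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, '
    '"max_model_len": 4096, "tensor_parallel_size": 1}'
)
generation_args = json.loads(
    '{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, '
    '"top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}'
)

# Engine arguments map onto the LLM constructor, sampling arguments onto SamplingParams.
llm = LLM(**model_args)  # loads the model weights onto the GPU
params = SamplingParams(**{k: v for k, v in generation_args.items() if v is not None})
outputs = llm.generate(["Question: ...\nI. ...  II. ...  III. ...  IV. ...\nAnswer:"], params)
```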
34187db3f4c721aacd21197cfab448daa05d119eac0c44e0d51c2ac06d8250e7
"2024-12-22T12:31:36.389000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.374988
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4963584202420227, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.4963584202420227, 'score_ci_high': 0.69}
1
a100_80gb
093dea5a2284fb7496ab30e8dba901a6f895a6dbeb0bf69675818925f6f8447d
"2024-12-22T12:31:39.573000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.418187
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.58, 'score': 0.58, 'score_name': 'accuracy', 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_ci_low': 0.49, 'score_ci_high': 0.68}
1
a100_80gb
f06fd6f7ac9c9c9f78f5da60158287ce4c0589ffd5a8df0f0ca639bf14fe72f4
"2024-12-22T12:31:42.948000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.599346
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_ci_low': 0.53, 'score_ci_high': 0.72}
1
a100_80gb
ad1d7271fd433aaec51c96316402feaf9d351034e1fe1839292732af6a9176ee
"2024-12-22T12:31:48.055000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.317357
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.58, 'score': 0.58, 'score_name': 'accuracy', 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_ci_low': 0.48, 'score_ci_high': 0.68}
1
a100_80gb
b90b4d49c8ced52dda529588c1276e1e4e3a66cbaa804b5f1fe949ad88f9e2d6
"2024-12-22T12:31:52.232000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.418321
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.45, 'score_ci_high': 0.65}
1
a100_80gb
2f66f36d32eaede87255c378a380b315bf7c10873c4f2aa29aaf77d3fc363cf2
"2024-12-22T12:31:55.415000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.430886
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.55, 'score': 0.55, 'score_name': 'accuracy', 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.45, 'score_ci_high': 0.65}
1
a100_80gb
07112320c6a39f2c9e6ff3fa2d5b2a7e42bd8f4167df8b64bfc30cf27a45deab
"2024-12-22T12:31:58.629000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.462627
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.66, 'score_ci_low': 0.46, 'score_ci_high': 0.66}
1
a100_80gb
c2342e0e98a118707542c18baff6c5dcbdf3ab0691b7ff75784e09e7127daa25
"2024-12-22T12:31:19.292000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.929626
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.39, 'score': 0.39, 'score_name': 'accuracy', 'accuracy_ci_low': 0.3, 'accuracy_ci_high': 0.49, 'score_ci_low': 0.3, 'score_ci_high': 0.49}
1
a100_80gb
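Each scores entry reports accuracy over num_of_instances = 100 together with 95%-style confidence bounds; the rows do not record how those bounds were computed (several non-round values elsewhere suggest a resampling method). As a rough sanity check one can compare against a simple Wilson score interval; this is a generic sketch, not the dataset's own CI procedure:

```python
from math import sqrt

def wilson_ci(p_hat: float, n: int, z: float = 1.96) -> tuple[float, float]:
    """95% Wilson score interval for a binomial proportion."""
    denom = 1 + z**2 / n
    center = (p_hat + z**2 / (2 * n)) / denom
    half = z * sqrt(p_hat * (1 - p_hat) / n + z**2 / (4 * n**2)) / denom
    return center - half, center + half

# The high_school_chemistry row above reports accuracy 0.39 with bounds (0.3, 0.49).
print(wilson_ci(0.39, 100))  # roughly (0.30, 0.49)
```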
1eed682e954699317c861dbcb8f0ee3942538c7be60a26c3811cce14489126a7
"2024-12-22T12:31:22.504000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.480209
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.34, 'score': 0.34, 'score_name': 'accuracy', 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.45, 'score_ci_low': 0.25, 'score_ci_high': 0.45}
1
a100_80gb
644fc5ed1d14b88263b22d252167d005b96deb1192db73f0a38d4b5d573e3723
"2024-12-22T12:31:26.068000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.825795
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.35, 'score': 0.35, 'score_name': 'accuracy', 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.44, 'score_ci_low': 0.26, 'score_ci_high': 0.44}
1
a100_80gb
e40f81fdd73954eb185533709a03b28881f9c09b520c5b27f42db4580f9dde95
"2024-12-22T12:31:29.303000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.491069
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.38, 'score': 0.38, 'score_name': 'accuracy', 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.49, 'score_ci_low': 0.29, 'score_ci_high': 0.49}
1
a100_80gb
94a2abd59385b07b8dc40cb59807f04a1fafa3e972aead351e8fa5b48e01a9d0
"2024-12-22T12:31:32.550000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.505155
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.36, 'score': 0.36, 'score_name': 'accuracy', 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_ci_low': 0.27, 'score_ci_high': 0.46}
1
a100_80gb
d2c9cae8b312c40c871fea89973b4f92f0d7a93cbabb9a9b795d3b5375e71086
"2024-12-22T12:31:36.177000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.465365
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.35, 'score': 0.35, 'score_name': 'accuracy', 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.45, 'score_ci_low': 0.26, 'score_ci_high': 0.45}
1
a100_80gb
4448d1a6f9db5ef0ed250fc2ef73aee808c74951d3c801fbecda2f07240fc7f0
"2024-12-22T12:31:39.383000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.462698
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.37, 'score': 0.37, 'score_name': 'accuracy', 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.46, 'score_ci_low': 0.27, 'score_ci_high': 0.46}
1
a100_80gb
af97aa11623bb94357727097a6b4285793e54bf256f159a523e530d3bb3d0888
"2024-12-22T12:31:42.628000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.495943
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.35, 'score': 0.35, 'score_name': 'accuracy', 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.45, 'score_ci_low': 0.26, 'score_ci_high': 0.45}
1
a100_80gb
141ef7f7152b1d871ca8d95c4018f426b3d8bcdcb7cc4858132fe12967721ddc
"2024-12-22T12:31:45.954000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.568435
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.38, 'score': 0.38, 'score_name': 'accuracy', 'accuracy_ci_low': 0.29, 'accuracy_ci_high': 0.48, 'score_ci_low': 0.29, 'score_ci_high': 0.48}
1
a100_80gb
48ac45152c3ec5bf63b8efc34e853c8a826bdcafa43beb6c63f7cda5e29f95df
"2024-12-22T12:31:49.630000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.92448
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.36, 'score': 0.36, 'score_name': 'accuracy', 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.46, 'score_ci_low': 0.28, 'score_ci_high': 0.46}
1
a100_80gb
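The packages_versions field pins the software stack and is identical across all rows (the schema lists a single distinct value). If the goal is to rerun under the same stack, the JSON maps directly onto version pins; a small sketch:

```python
import json

versions = json.loads(
    '{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", '
    '"datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}'
)
print("\n".join(f"{pkg}=={ver}" for pkg, ver in versions.items()))
# torch==2.5.1
# transformers==4.46.3
# ... and so on for evaluate, datasets, vllm, unitxt
```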
f2a9028a9dbaec899f60c80b333c81d54ca3021ee80766649ad07d0cf4d479d0
"2024-12-22T12:31:10.261000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.944724
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.7, 'score_ci_low': 0.5, 'score_ci_high': 0.7}
1
a100_80gb
7a950bcfa81f3927e0abf1adc6966d592db4198a40dd56ce59f4b10ee42f2d31
"2024-12-22T12:31:14.949000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.490904
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.59, 'score': 0.59, 'score_name': 'accuracy', 'accuracy_ci_low': 0.48483270052118077, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.48483270052118077, 'score_ci_high': 0.69}
1
a100_80gb
e81821da75fe8406ed760939613c39f15248c5dd2734f09b31bf2035761cde4a
"2024-12-22T12:31:19.611000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.095267
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.53, 'score': 0.53, 'score_name': 'accuracy', 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_ci_low': 0.43, 'score_ci_high': 0.63}
1
a100_80gb
4a4054a37bfb4816c7199edefe6e7934e14b95561e9d2b8b3ab0222ddb58dd9d
"2024-12-22T12:31:23.960000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.129816
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.61, 'score': 0.61, 'score_name': 'accuracy', 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.71, 'score_ci_low': 0.51, 'score_ci_high': 0.71}
1
a100_80gb
878cd7d588fec4dea9c5bc4979677b4e81517b932c911eb039bd1be9c7cbafbd
"2024-12-22T12:31:28.674000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.099658
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.55, 'score': 0.55, 'score_name': 'accuracy', 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.6580471996679842, 'score_ci_low': 0.45, 'score_ci_high': 0.6580471996679842}
1
a100_80gb
f9348d66e13f3e057406059165b027151ea23feff72eff5c88f7cd1fe2774b7a
"2024-12-22T12:31:32.908000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.054497
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_ci_low': 0.53, 'score_ci_high': 0.72}
1
a100_80gb
0d250d8fd099dbe3f69c88de1105cee6daa3a49ffaa78b2adb6f54fa25775c73
"2024-12-22T12:31:37.466000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.980575
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.62, 'score': 0.62, 'score_name': 'accuracy', 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.71, 'score_ci_low': 0.52, 'score_ci_high': 0.71}
1
a100_80gb
97f225f988ffdeae2f90f6a6edd281b1a738ade2d4e9b1757aa044b69475b74e
"2024-12-22T12:31:41.727000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.062456
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.59, 'score': 0.59, 'score_name': 'accuracy', 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_ci_low': 0.49, 'score_ci_high': 0.68}
1
a100_80gb
c268951ff596eca1b81445b34cca2866e79986631a5f75f0c18922ef27c41936
"2024-12-22T12:31:46.433000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.05828
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.53, 'score': 0.53, 'score_name': 'accuracy', 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_ci_low': 0.43, 'score_ci_high': 0.62}
1
a100_80gb
f9edd35bb2bbc31591fb75525049cca0708cd02e092ae6a947715817a06fc673
"2024-12-22T12:31:50.798000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.141244
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_ci_low': 0.51, 'score_ci_high': 0.7}
1
a100_80gb
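Each (model, card) block varies the template (enumerator style, choice separator, shuffling) and the number of demonstrations, with num_demos 0 in some blocks and 5 in others. The natural downstream use of these rows is therefore aggregation of accuracy by those prompt-format choices. A minimal pandas sketch, assuming the rows have already been parsed into a list of dicts named records using the parse_record and parse_recipe helpers sketched earlier (both are illustrative names):

```python
import pandas as pd

# `records` is assumed to be the list of parsed record dicts from parse_record above.
rows = []
for rec in records:
    recipe = parse_recipe(rec["unitxt_recipe"])
    template = recipe["template"].rsplit(".", 1)[-1]
    rows.append({
        "model": rec["model_name_or_path"],
        "card": recipe["card"],
        "num_demos": int(recipe["num_demos"]),
        "separator": template.split("choicesSeparator_")[1].split("_")[0],
        "shuffled": template.endswith("shuffleChoices_True"),
        "accuracy": rec["scores"]["accuracy"],
    })

df = pd.DataFrame(rows)
print(df.groupby(["model", "card", "shuffled"])["accuracy"].agg(["mean", "count"]))
```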
756525be0e951208bd8ac6b3e970e5cf4b8fe637f95e8bd2292713eb14db056f
"2024-12-22T12:30:55.233000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.916886
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38, 'score_ci_high': 0.58}
1
a100_80gb
21b29340637ea20b39bbb76c32070d31619da15f8b97ccae35ec52e9b60d18ba
"2024-12-22T12:31:00.809000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.041484
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.53, 'score': 0.53, 'score_name': 'accuracy', 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_ci_low': 0.44, 'score_ci_high': 0.63}
1
a100_80gb
151253d160fcecc117e0ea79b3985148fdee5d6f0941b37bc9ab8d5262ed6a99
"2024-12-22T12:31:06.552000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.230822
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.47, 'score_ci_high': 0.65}
1
a100_80gb
272b0263999c3396a53a3241e7e6a1068040833ec6b51f845eefa200ee95ba90
"2024-12-22T12:31:12.395000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.354983
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.5, 'score': 0.5, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.6, 'score_ci_low': 0.4, 'score_ci_high': 0.6}
1
a100_80gb
cb883b014e5ea6b34e22a75b368cdb3d6b10c814b43e92637fe41a5db7ec1417
"2024-12-22T12:31:18.088000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.834166
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.54, 'score': 0.54, 'score_name': 'accuracy', 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_ci_low': 0.44, 'score_ci_high': 0.63}
1
a100_80gb
d3c795e746458114d64e5eb1ea6418177dad238df18e708c3c7c69f44c23e7a3
"2024-12-22T12:31:23.470000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.899103
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.49, 'score': 0.49, 'score_name': 'accuracy', 'accuracy_ci_low': 0.3963017799532909, 'accuracy_ci_high': 0.59, 'score_ci_low': 0.3963017799532909, 'score_ci_high': 0.59}
1
a100_80gb
1187494eb84c5b68640bf0171fb18b72a4562a91c8b2fbbb083fa1a23bd28a7d
"2024-12-22T12:31:29.418000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.427021
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.37862145752474374, 'accuracy_ci_high': 0.57, 'score_ci_low': 0.37862145752474374, 'score_ci_high': 0.57}
1
a100_80gb
1d4e892ab23faab40edb9d1cd19588695780063a50ab35b5833c6e8887e729bb
"2024-12-22T12:31:35.402000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.449361
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38, 'score_ci_high': 0.58}
1
a100_80gb
99614a36f7ac9bc825f2099dc85c9a2be913714e66ef05b1490e3d7e211214e2
"2024-12-22T12:31:40.856000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.902851
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.52, 'score': 0.52, 'score_name': 'accuracy', 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_ci_low': 0.43, 'score_ci_high': 0.62}
1
a100_80gb
2c8bc6929c7bf5b6c5505944894d1859e2ca2273bc6be1bb5efc4e7f9042c200
"2024-12-22T12:31:46.711000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.351329
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.43, 'score': 0.43, 'score_name': 'accuracy', 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.52, 'score_ci_low': 0.34, 'score_ci_high': 0.52}
1
a100_80gb
6d62bb1b7024f0c3da683ad6afffcb30f2048e6bcbc59d01c581ecb4eff95999
"2024-12-22T12:31:16.423000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.220121
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.53, 'score': 0.53, 'score_name': 'accuracy', 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_ci_low': 0.44, 'score_ci_high': 0.63}
1
a100_80gb
ec639787c8cbc5ce4d11ed2b30362463b3a7164eabfbca23cacd8d2955377f37
"2024-12-22T12:31:18.723000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.617426
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.51, 'score': 0.51, 'score_name': 'accuracy', 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.61, 'score_ci_low': 0.42, 'score_ci_high': 0.61}
1
a100_80gb
55be51cd12b850ebd19bfcad0da66a86e1bb7a3fc190a05d09a8f1b0e79e6a67
"2024-12-22T12:31:21.387000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.969248
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.57, 'score': 0.57, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.66, 'score_ci_low': 0.47, 'score_ci_high': 0.66}
1
a100_80gb
41dc5f0eb09ec7c0c1f0784175e0b334f5b31d887710becdf05f404f908827be
"2024-12-22T12:31:23.694000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.611955
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.46, 'score_ci_high': 0.65}
1
a100_80gb
609db1c23d34f59cdf7ce14ce192d3d40b0847b91ea064a7e8e2f606c4ccc399
"2024-12-22T12:31:26.011000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.615803
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.46, 'score_ci_high': 0.65}
1
a100_80gb
8931cd078f68d7d9116a00543c75f70cb35bfe7dad815b9d9a83c4417783e173
"2024-12-22T12:31:28.361000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.647918
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.58, 'score': 0.58, 'score_name': 'accuracy', 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.6780300849136575, 'score_ci_low': 0.48, 'score_ci_high': 0.6780300849136575}
1
a100_80gb
748b5f08d19a08a3858c4a730834c2ae244cd6d76363135e7410d323cbc76a65
"2024-12-22T12:31:31.070000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.010161
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.55, 'score': 0.55, 'score_name': 'accuracy', 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.64, 'score_ci_low': 0.44, 'score_ci_high': 0.64}
1
a100_80gb
0cdecdd7c2896fd4711145f6c3b7f7753a255959d2165998dcec60577303d47b
"2024-12-22T12:31:33.390000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.623885
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.57, 'score': 0.57, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.66, 'score_ci_low': 0.47, 'score_ci_high': 0.66}
1
a100_80gb
c55a308c6602cf99bfa9b74e954ed053d1449debd66b35ba33fdd528b5c55a53
"2024-12-22T12:31:35.689000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.606493
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.45, 'score': 0.45, 'score_name': 'accuracy', 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.54, 'score_ci_low': 0.35, 'score_ci_high': 0.54}
1
a100_80gb
8803f42b7a0f31a3586f87129d8a1265ccdec4ad7f62492d02b7135cf05ee4b9
"2024-12-22T12:31:38Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
1.61402
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.56, 'score_ci_low': 0.36, 'score_ci_high': 0.56}
1
a100_80gb
c4633a5ca1dca3736d94f704dd72965f1b4d12eb44dea02f078b419b85139ab4
"2024-12-22T12:31:10.785000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.913183
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.7246305519386397, 'score_ci_low': 0.54, 'score_ci_high': 0.7246305519386397}
1
a100_80gb
99d92b393231b106173d7df73ec7b873a985e94dd5d257d39fcc171a1d4ba58b
"2024-12-22T12:31:13.756000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.266454
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.67, 'score': 0.67, 'score_name': 'accuracy', 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.76, 'score_ci_low': 0.58, 'score_ci_high': 0.76}
1
a100_80gb
9f41251c8c90001e5cc060ca6920d03a125705f3ccf11feaa9beb4fd902a359f
"2024-12-22T12:31:17.106000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.625114
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.51, 'score_ci_high': 0.69}
1
a100_80gb
86cc91d621d5db3da76c814af7f12a74650b2420ba7e71240c8148b1c4d63256
"2024-12-22T12:31:20.148000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.306682
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.72, 'score_ci_low': 0.54, 'score_ci_high': 0.72}
1
a100_80gb
f1299faee1f81ad87ebcf002940150be36b011385af66b98668962142150ec1a
"2024-12-22T12:31:23.191000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.314387
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_ci_low': 0.53, 'score_ci_high': 0.72}
1
a100_80gb
9bdb1174f3f8757a088f3c6df8581f447bd8b4bbf2bf1d14ce08ca336d3b7fea
"2024-12-22T12:31:26.249000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.319325
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.47, 'score_ci_high': 0.65}
1
a100_80gb
c08376d01ef266788d715b28e9c3c5e8039336c750416dee4bf48499c4c27f0c
"2024-12-22T12:31:29.744000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.75906
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.53, 'score': 0.53, 'score_name': 'accuracy', 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.63, 'score_ci_low': 0.44, 'score_ci_high': 0.63}
1
a100_80gb
7e84f95385b6252f375faebf60b1d5af852d8b3652c3e6bb68756e50284aa19a
"2024-12-22T12:31:32.789000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.300395
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.5, 'score': 0.5, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.59, 'score_ci_low': 0.4, 'score_ci_high': 0.59}
1
a100_80gb
3831c663f7359d999b09f5bc21b72a3af1417ac4d4b354a674e3fb214e9a6207
"2024-12-22T12:31:35.893000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.35117
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.49, 'score': 0.49, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.59, 'score_ci_low': 0.4, 'score_ci_high': 0.59}
1
a100_80gb
05d3548e86dd3832f8c316f1770b5bc7f9c32f8d392b88fcc217ab2d6018b50d
"2024-12-22T12:31:38.942000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.logical_fallacies,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.314752
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.66, 'score': 0.66, 'score_name': 'accuracy', 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_ci_low': 0.56, 'score_ci_high': 0.75}
1
a100_80gb
f053645f869d0482447e08eb54aa9f106a6bd49086b1f779caa079095c999d07
"2024-12-22T12:30:59.073000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.609297
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.22, 'score': 0.22, 'score_name': 'accuracy', 'accuracy_ci_low': 0.14, 'accuracy_ci_high': 0.31, 'score_ci_low': 0.14, 'score_ci_high': 0.31}
1
a100_80gb
b42902061773bcb0cfa19aee36872f377f73049e4c046470537655c6802f3d56
"2024-12-22T12:31:03.263000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.846639
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.35, 'score': 0.35, 'score_name': 'accuracy', 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.45, 'score_ci_low': 0.26, 'score_ci_high': 0.45}
1
a100_80gb
b3d72ba20bf5dedd51125404fc171fee31654d308bc16ac97bcda12cf4e7fc93
"2024-12-22T12:31:07.941000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.340318
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.34, 'score_ci_low': 0.18, 'score_ci_high': 0.34}
1
a100_80gb
2dba86bf71e77e253e9baf33150e20f79eeabd1af82aecc384fc372946b93ad2
"2024-12-22T12:31:12.032000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.751894
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.34, 'score': 0.34, 'score_name': 'accuracy', 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.44, 'score_ci_low': 0.25, 'score_ci_high': 0.44}
1
a100_80gb
8957903409005ab45ce311151702c16273b23aee98f0784da3020b7940d85aa0
"2024-12-22T12:31:16.312000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.92245
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38870570063903276, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38870570063903276, 'score_ci_high': 0.58}
1
a100_80gb
9a806f232c04e6563b64e3e62ff019a1906701447e91fac18f9b10a070cac7b2
"2024-12-22T12:31:21.042000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.389086
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.47, 'score': 0.47, 'score_name': 'accuracy', 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.57, 'score_ci_low': 0.37, 'score_ci_high': 0.57}
1
a100_80gb
d223da1e174538ff2100831087737a193557bb900e7a006ce6321dff76091fa3
"2024-12-22T12:31:25.359000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.961568
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.35, 'score': 0.35, 'score_name': 'accuracy', 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.45, 'score_ci_low': 0.27, 'score_ci_high': 0.45}
1
a100_80gb
48edc82287e2b24abb34e79193785152e3f4d5b8f26d66dceb8e237a08fa4734
"2024-12-22T12:31:30.253000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.52284
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.43, 'score': 0.43, 'score_name': 'accuracy', 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.52, 'score_ci_low': 0.33, 'score_ci_high': 0.52}
1
a100_80gb
3eebb32e65379f2aa49d8337b6e999caee8517f9e39e271bb55463bb3d7d8115
"2024-12-22T12:31:34.539000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.946344
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_ci_low': 0.37, 'score_ci_high': 0.56}
1
a100_80gb
372a837a0d0cbad6fb4f70dff7306de8e482d95f10b647d5fdc69627df3a6969
"2024-12-22T12:31:38.798000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.910858
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.34943091706405394, 'accuracy_ci_high': 0.55, 'score_ci_low': 0.34943091706405394, 'score_ci_high': 0.55}
1
a100_80gb
56c526502d15039ade367a7371f52ea83a64190294e5722671cc532b54c68c8c
"2024-12-22T12:30:51.881000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.985288
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.57, 'score': 0.57, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.67, 'score_ci_low': 0.47, 'score_ci_high': 0.67}
1
a100_80gb
2eaef4e3d473b19c63b0d91913951cdd0a6abb9232ede3e468385dae34f96734
"2024-12-22T12:30:55.169000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.545743
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.55, 'score': 0.55, 'score_name': 'accuracy', 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.45, 'score_ci_high': 0.65}
1
a100_80gb
b7153352d6a99b5df7e14573b319c15468fd5abe84d3a94c631c59de8e577a78
"2024-12-22T12:30:58.782000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.861135
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.53, 'score': 0.53, 'score_name': 'accuracy', 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_ci_low': 0.43, 'score_ci_high': 0.62}
1
a100_80gb
e85edcded5be90e40a917eb0d7e29952a35eada6b0b2dc7f16a85a81e05b841c
"2024-12-22T12:31:02.008000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.472872
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.5, 'score_ci_high': 0.69}
1
a100_80gb
5506649ec9145cfd2e8643b52e5e91553046f48227506c7b1b78651572030b09
"2024-12-22T12:31:05.244000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.477826
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.59, 'score': 0.59, 'score_name': 'accuracy', 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.49, 'score_ci_high': 0.69}
1
a100_80gb
b9716d03bed5c164d88e488833d91fac054f0dc59d32ae0c136a5717a693f4ee
"2024-12-22T12:31:08.503000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.49381
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.5, 'score': 0.5, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.6, 'score_ci_low': 0.4, 'score_ci_high': 0.6}
1
a100_80gb
db8c94d7041ed494ac1459f19e98ac1028ddd5fe0486e1b0b23d8185371c4fdb
"2024-12-22T12:31:12.006000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.760803
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.5, 'score_ci_high': 0.69}
1
a100_80gb
0ddcd3ee236d636ee7a98857d600f6680def4c45f1ffb74e34332e1d87e06448
"2024-12-22T12:31:15.144000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.397813
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.52, 'score': 0.52, 'score_name': 'accuracy', 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.62, 'score_ci_low': 0.42, 'score_ci_high': 0.62}
1
a100_80gb
2a4c40c668efffe20df2937d7fba363dc764b9611aa664347f5ade5b9da5a80a
"2024-12-22T12:31:18.283000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.389709
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.63, 'score': 0.63, 'score_name': 'accuracy', 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.74, 'score_ci_low': 0.54, 'score_ci_high': 0.74}
1
a100_80gb
4ca0a5ef8c352e5cb45d8bdccc1bcaf73e16c19c5424797b5e02677d5f0fac67
"2024-12-22T12:31:21.416000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.astronomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.388963
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.54, 'score': 0.54, 'score_name': 'accuracy', 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.64, 'score_ci_low': 0.43, 'score_ci_high': 0.64}
1
a100_80gb
68cb2dd91f3872a787a23d18d00f1ee45d482631843f983d62ad961e4b5c5468
"2024-12-22T12:30:47.385000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.235853
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.64, 'score': 0.64, 'score_name': 'accuracy', 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.72, 'score_ci_low': 0.55, 'score_ci_high': 0.72}
1
a100_80gb
e23cbc79cc1d24093adea1b1e920079aa670bf440ec29f3971f3c170a5fbc1c7
"2024-12-22T12:30:50.781000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.661785
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.65, 'score': 0.65, 'score_name': 'accuracy', 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.74, 'score_ci_low': 0.55, 'score_ci_high': 0.74}
1
a100_80gb
1daa0d9f7b7c55c1e2a9909d49b64131631ceadb4a2dab14e133fb77fa1c82a1
"2024-12-22T12:30:53.876000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.350724
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.59, 'score': 0.59, 'score_name': 'accuracy', 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.6705291755529175, 'score_ci_low': 0.48, 'score_ci_high': 0.6705291755529175}
1
a100_80gb
9bbcb2f6c769dca97724a4551723ad898792daba4ce9cc1fc67eaa08b4049613
"2024-12-22T12:30:56.930000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.315515
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.57, 'score': 0.57, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.66, 'score_ci_low': 0.47, 'score_ci_high': 0.66}
1
a100_80gb
33e6145e132a8195e343edb7f7a487a98b22fe31bb3b26aba5413a1ce1c17f4b
"2024-12-22T12:31:00.046000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.355752
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.59, 'score': 0.59, 'score_name': 'accuracy', 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_ci_low': 0.49, 'score_ci_high': 0.68}
1
a100_80gb
e7a9e6cbea31c681f8548ce02ebeec3d17a783004e96f0719b362a7833904882
"2024-12-22T12:31:03.496000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.701613
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.57, 'score': 0.57, 'score_name': 'accuracy', 'accuracy_ci_low': 0.47, 'accuracy_ci_high': 0.66, 'score_ci_low': 0.47, 'score_ci_high': 0.66}
1
a100_80gb
2bd5ed5691719730ba8d52a87a91029965baef9fa6fd126a03756181176ed347
"2024-12-22T12:31:06.586000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.336996
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.61, 'score': 0.61, 'score_name': 'accuracy', 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.7, 'score_ci_low': 0.51, 'score_ci_high': 0.7}
1
a100_80gb
70000263e33cfec6319397dff2c2b3a31a09029d6097e6717277fdfa4e077f0f
"2024-12-22T12:31:09.683000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.343983
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.52, 'score': 0.52, 'score_name': 'accuracy', 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.62, 'score_ci_low': 0.42, 'score_ci_high': 0.62}
1
a100_80gb
15b15f8ef4670e7388d2c574a34da9be1ae00e6167acb1540b4d475def8c1326
"2024-12-22T12:31:12.837000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.387291
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.59, 'score': 0.59, 'score_name': 'accuracy', 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.68, 'score_ci_low': 0.49, 'score_ci_high': 0.68}
1
a100_80gb
2fbdd968ff41467b6602f7fa73d6540210a0f73e8b6f02d514ee7b37985061eb
"2024-12-22T12:31:16.329000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_numbers_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.722497
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.56, 'score': 0.56, 'score_name': 'accuracy', 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_ci_low': 0.46, 'score_ci_high': 0.65}
1
a100_80gb
84af3c19324ad36f641f1d767595f6c5144d2ab31b971fde153030efe15cfab8
"2024-12-22T17:59:16.280000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.322273
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.16938389001772242, 'accuracy_ci_high': 0.34006359259419927, 'score_ci_low': 0.16938389001772242, 'score_ci_high': 0.34006359259419927}
1
a100_80gb
2b3db8a2cc5a7c723deb06aec20c933e426dc7a10ad3ec60f665f9be792ac00b
"2024-12-22T17:59:22.346000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.554165
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.23, 'score': 0.23, 'score_name': 'accuracy', 'accuracy_ci_low': 0.1537221796898291, 'accuracy_ci_high': 0.32070099643879074, 'score_ci_low': 0.1537221796898291, 'score_ci_high': 0.32070099643879074}
1
a100_80gb
73fd8992bd6f0bb39a71b42edbe592f8a33ed0a9bc9bdc53ded54248e986acb0
"2024-12-22T17:59:27.801000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.96106
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.31, 'score': 0.31, 'score_name': 'accuracy', 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.41, 'score_ci_low': 0.23, 'score_ci_high': 0.41}
1
a100_80gb
691362ede3f33a0cb1726403edc3807dd76f9a5a5bad8e9f5b1d8b5e1b5f4ab4
"2024-12-22T17:59:34.099000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.817065
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.23, 'score': 0.23, 'score_name': 'accuracy', 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.321953775829212, 'score_ci_low': 0.16, 'score_ci_high': 0.321953775829212}
1
a100_80gb
5d75301dd424716dbfada4751c084b5a3ca8978b5b4b00a612db387058de8311
"2024-12-22T17:59:40.694000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.085324
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.29, 'score': 0.29, 'score_name': 'accuracy', 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.38, 'score_ci_low': 0.2, 'score_ci_high': 0.38}
1
a100_80gb
8fdde2dab5886126ce5d1087525cad1bcdb159b66a5ccbe3d2d9c4313ae90e32
"2024-12-22T17:59:45.579000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.33243
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.26, 'score': 0.26, 'score_name': 'accuracy', 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.35, 'score_ci_low': 0.17, 'score_ci_high': 0.35}
1
a100_80gb
371763971f45aa9e937cd4c923989c816195ec8abb8a16aef6b04c8cb756cc28
"2024-12-22T17:59:50.989000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.909424
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.25, 'score': 0.25, 'score_name': 'accuracy', 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_ci_low': 0.17, 'score_ci_high': 0.34}
1
a100_80gb
5a9fdc87a94684f5e1434e0670893a8f2a7fadd72b9de5d90a7374b273e65afe
"2024-12-22T17:59:58.761000Z"
meta-llama/Llama-3.2-1B-Instruct
card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.2869
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-1B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.22, 'score': 0.22, 'score_name': 'accuracy', 'accuracy_ci_low': 0.14, 'accuracy_ci_high': 0.31, 'score_ci_low': 0.14, 'score_ci_high': 0.31}
1
a100_80gb
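
The records above repeat a fixed 14-line layout: run hash, timestamp, model name, unitxt recipe string, quantization type, precision, runtime in seconds, generation args (JSON), model args (JSON), inference engine, package versions (JSON), a scores dict, GPU count, and device. Below is a minimal parsing sketch, not part of the original pipeline: it assumes the slice of lines starts on a record boundary, that no recipe value contains a comma, and the field labels, the `parse_records` helper, and the `records.txt` filename are illustrative names introduced here rather than names from the source schema.

```python
# Minimal sketch for regrouping this flat dump into structured rows.
# Assumptions: 14 lines per record in the order observed above; the
# field labels below are descriptive, not taken from the source schema.
import ast
import json

FIELDS = [
    "run_id", "timestamp", "model", "recipe", "quantization_type",
    "precision", "runtime_s", "generation_args", "model_args",
    "engine", "packages", "scores", "num_gpus", "device",
]

def parse_records(lines):
    """Group a flat dump into dicts, one per 14-line record.

    Assumes `lines` begins on a record boundary (i.e. with a run hash).
    """
    records = []
    for i in range(0, len(lines) - len(FIELDS) + 1, len(FIELDS)):
        raw = dict(zip(FIELDS, (l.strip() for l in lines[i:i + len(FIELDS)])))
        raw["timestamp"] = raw["timestamp"].strip('"')
        raw["runtime_s"] = float(raw["runtime_s"])
        raw["num_gpus"] = int(raw["num_gpus"])
        # generation_args / model_args / packages are JSON objects,
        # while the scores line is written as a Python-literal dict.
        for key in ("generation_args", "model_args", "packages"):
            raw[key] = json.loads(raw[key])
        raw["scores"] = ast.literal_eval(raw["scores"])
        # The unitxt recipe is a comma-separated list of key=value pairs
        # (assuming no value itself contains a comma).
        raw["recipe"] = dict(kv.split("=", 1) for kv in raw["recipe"].split(","))
        records.append(raw)
    return records

if __name__ == "__main__":
    # Hypothetical usage: records.txt would hold the raw lines of this dump.
    with open("records.txt") as f:
        rows = parse_records(f.read().splitlines())
    for row in rows:
        print(row["model"], row["recipe"]["card"], row["scores"]["accuracy"])
```

With the records in this structured form, per-model or per-template comparisons (for example, averaging `accuracy` over all prompt variants of one MMLU card) become a simple group-by over the parsed dicts.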