eduagarcia committed

Commit ba893b3 · 1 Parent(s): a20a999

Revert "Deleted 39 reapeated models"

This reverts commit a20a999bb1e93ee61ed152e2cbda2b4a967cb5ce.

Files changed (38)
  1. AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original.json +17 -0
  2. AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_float16_Original.json +17 -0
  3. BAAI/Gemma2-9B-IT-Simpo-Infinity-Preference_eval_request_False_bfloat16_Original.json +17 -0
  4. BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B_eval_request_False_bfloat16_Original.json +17 -0
  5. Magpie-Align/MagpieLM-8B-Chat-v0.1_eval_request_False_bfloat16_Original.json +17 -0
  6. Magpie-Align/MagpieLM-8B-SFT-v0.1_eval_request_False_bfloat16_Original.json +17 -0
  7. Qwen/Qwen2-57B-A14B-Instruct_eval_request_False_bfloat16_Original.json +17 -0
  8. Qwen/Qwen2-57B-A14B-Instruct_eval_request_False_float16_Original.json +17 -0
  9. Qwen/Qwen2.5-14B-Instruct_eval_request_502e5d8_False_bfloat16_Original.json +17 -0
  10. Qwen/Qwen2.5-14B_eval_request_False_bfloat16_Original.json +17 -0
  11. Qwen/Qwen2.5-7B-Instruct_eval_request_False_bfloat16_Original.json +17 -0
  12. Qwen/Qwen2.5-7B_eval_request_False_bfloat16_Original.json +17 -0
  13. Qwen/Qwen2.5-Coder-7B_eval_request_False_bfloat16_Original.json +17 -0
  14. TheDrummer/Big-Tiger-Gemma-27B-v1_eval_request_False_bfloat16_Original.json +17 -0
  15. anthracite-org/magnum-v1-72b_eval_request_False_bfloat16_Original.json +17 -0
  16. anthracite-org/magnum-v1-72b_eval_request_False_float16_Original.json +17 -0
  17. anthracite-org/magnum-v2.5-12b-kto_eval_request_False_float16_Original.json +17 -0
  18. byroneverson/gemma-2-27b-it-abliterated_eval_request_False_bfloat16_Original.json +17 -0
  19. chargoddard/internlm2-base-7b-llama_eval_request_False_bfloat16_Original.json +17 -0
  20. cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_False_bfloat16_Original.json +17 -0
  21. cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_d099b57_False_bfloat16_Original.json +17 -0
  22. cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_626c825_False_bfloat16_Original.json +17 -0
  23. cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_628c376_False_bfloat16_Original.json +17 -0
  24. cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_9ad9d14_False_bfloat16_Original.json +17 -0
  25. cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_False_bfloat16_Original.json +17 -0
  26. mattshumer/Reflection-Llama-3.1-70B_eval_request_False_bfloat16_Original.json +17 -0
  27. mattshumer/Reflection-Llama-3.1-70B_eval_request_False_float16_Original.json +17 -0
  28. migtissera/Tess-v2.5-Gemma-2-27B-alpha_eval_request_False_bfloat16_Original.json +17 -0
  29. paloalma/ECE-TW3-JRGL-V1_eval_request_2f08c7a_False_bfloat16_Original.json +17 -0
  30. paloalma/ECE-TW3-JRGL-V1_eval_request_2f08c7a_False_float16_Original.json +17 -0
  31. paloalma/ECE-TW3-JRGL-V1_eval_request_False_float16_Original.json +17 -0
  32. paloalma/ECE-TW3-JRGL-V1_eval_request_f1916d0_False_float16_Original.json +17 -0
  33. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2_eval_request_9ac0fbe_False_bfloat16_Original.json +17 -0
  34. princeton-nlp/gemma-2-9b-it-DPO_eval_request_False_bfloat16_Original.json +17 -0
  35. recoilme/recoilme-gemma-2-9B-v0.3_eval_request_False_bfloat16_Original.json +17 -0
  36. recoilme/recoilme-gemma-2-9B-v0.4_eval_request_False_bfloat16_Original.json +17 -0
  37. v000000/Qwen2.5-Lumen-14B_eval_request_False_bfloat16_Original.json +17 -0
  38. vicgalle/ConfigurableSOLAR-10.7B_eval_request_False_float16_Original.json +17 -0
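
Every restored file is a single JSON eval request with the same fields (model, revision, precision, params, status, job metadata), as the diffs below show. As an illustrative sketch only (not part of this commit), one of these files could be inspected locally with the Python standard library after cloning the repository; the path below is simply the first file in the list above:

import json
from pathlib import Path

# Path to one of the restored request files (relative to the repository root).
request_path = Path("AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original.json")

with request_path.open(encoding="utf-8") as f:
    request = json.load(f)

# Fields shared by every eval request file in this commit.
print(request["model"], request["precision"], request["status"])
print("submitted:", request["submitted_time"], "job_id:", request["job_id"])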
AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "AALF/gemma-2-27b-it-SimPO-37K",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 27.227,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-08-29T19:24:31Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1061,
+ "job_start_time": "2024-09-09T02-16-20.601975"
+ }
AALF/gemma-2-27b-it-SimPO-37K_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "AALF/gemma-2-27b-it-SimPO-37K",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 27.227,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-05T20:33:39Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1058,
+ "job_start_time": "2024-09-09T02-07-11.545200"
+ }
BAAI/Gemma2-9B-IT-Simpo-Infinity-Preference_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "BAAI/Gemma2-9B-IT-Simpo-Infinity-Preference",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 9.242,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-02T16:32:57Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1050,
+ "job_start_time": "2024-09-09T01-43-17.349330"
+ }
BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 0.003,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-07-18T22:23:09Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 922,
+ "job_start_time": "2024-07-19T01-31-48.200936"
+ }
Magpie-Align/MagpieLM-8B-Chat-v0.1_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Magpie-Align/MagpieLM-8B-Chat-v0.1",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 8.03,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-19T04:37:59Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1112,
+ "job_start_time": "2024-09-23T09-07-25.125234"
+ }
Magpie-Align/MagpieLM-8B-SFT-v0.1_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Magpie-Align/MagpieLM-8B-SFT-v0.1",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 8.03,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-19T04:36:41Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 1111,
+ "job_start_time": "2024-09-23T09-03-42.325574"
+ }
Qwen/Qwen2-57B-A14B-Instruct_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2-57B-A14B-Instruct",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 57.409,
+ "architectures": "Qwen2MoeForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "PENDING",
+ "submitted_time": "2024-06-08T03:07:23Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
Qwen/Qwen2-57B-A14B-Instruct_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2-57B-A14B-Instruct",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 57.409,
+ "architectures": "Qwen2MoeForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Chinese",
+ "status": "RERUN",
+ "submitted_time": "2024-06-08T12:06:46Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 876,
+ "job_start_time": "2024-07-06T03-19-18.813912"
+ }
Qwen/Qwen2.5-14B-Instruct_eval_request_502e5d8_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2.5-14B-Instruct",
+ "base_model": "",
+ "revision": "502e5d8bfd665ed113fd9b3626445ca7b0596303",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 14.77,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-10-15T23:06:21Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1215,
+ "job_start_time": "2024-10-16T01-39-04.529325"
+ }
Qwen/Qwen2.5-14B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2.5-14B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 14.77,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-18T22:09:13Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": 1109,
+ "job_start_time": "2024-09-23T08-56-25.014044"
+ }
Qwen/Qwen2.5-7B-Instruct_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2.5-7B-Instruct",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.616,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-18T22:08:38Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1108,
+ "job_start_time": "2024-09-23T08-52-49.584127"
+ }
Qwen/Qwen2.5-7B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2.5-7B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.616,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-18T22:09:22Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": 1110,
+ "job_start_time": "2024-09-23T09-00-01.154915"
+ }
Qwen/Qwen2.5-Coder-7B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "Qwen/Qwen2.5-Coder-7B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.616,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-10-13T05:10:23Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": 1204,
+ "job_start_time": "2024-10-13T08-56-26.974073"
+ }
TheDrummer/Big-Tiger-Gemma-27B-v1_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "TheDrummer/Big-Tiger-Gemma-27B-v1",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 27.227,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-05T20:25:36Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 1056,
+ "job_start_time": "2024-09-09T02-01-14.321515"
+ }
anthracite-org/magnum-v1-72b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "anthracite-org/magnum-v1-72b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 72.706,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "PENDING",
+ "submitted_time": "2024-09-21T18:03:09Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
anthracite-org/magnum-v1-72b_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "anthracite-org/magnum-v1-72b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 72.706,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "PENDING",
+ "submitted_time": "2024-09-05T21:44:11Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
anthracite-org/magnum-v2.5-12b-kto_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "anthracite-org/magnum-v2.5-12b-kto",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 12.248,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-08-22T18:39:50Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1060,
+ "job_start_time": "2024-09-09T02-13-17.352675"
+ }
byroneverson/gemma-2-27b-it-abliterated_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "byroneverson/gemma-2-27b-it-abliterated",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 27.227,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-05T20:20:45Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 1054,
+ "job_start_time": "2024-09-09T01-55-12.781709"
+ }
chargoddard/internlm2-base-7b-llama_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "chargoddard/internlm2-base-7b-llama",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.738,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Chinese",
+ "status": "RERUN",
+ "submitted_time": "2024-04-21T18:04:47Z",
+ "model_type": "🟢 : pretrained",
+ "source": "leaderboard",
+ "job_id": 533,
+ "job_start_time": "2024-04-22T11-06-43.377245"
+ }
cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "MixtralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-06-17T07:12:51Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 902,
+ "job_start_time": "2024-07-09T02-53-19.032021"
+ }
cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_d099b57_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
+ "base_model": "",
+ "revision": "d099b57c21184bb51317ff4c150f284d9e59d4fd",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "MixtralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-06-13T18:30:14Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 901,
+ "job_start_time": "2024-07-09T02-27-51.604980"
+ }
cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_626c825_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cognitivecomputations/dolphin-2.7-mixtral-8x7b",
+ "base_model": "",
+ "revision": "626c8252e4fd574a9aee4c5f0590529a59412345",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "MixtralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-05-29T06:49:40Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 899,
+ "job_start_time": "2024-07-09T02-21-14.129252"
+ }
cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_628c376_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cognitivecomputations/dolphin-2.7-mixtral-8x7b",
+ "base_model": "",
+ "revision": "628c376aa32b0828ab90e0c3013b98f059533437",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "MixtralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-05-31T08:44:12Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 900,
+ "job_start_time": "2024-07-09T02-24-32.164637"
+ }
cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_9ad9d14_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cognitivecomputations/dolphin-2.7-mixtral-8x7b",
+ "base_model": "",
+ "revision": "9ad9d14e7ffae62b6ae4458035b6d9f165c212ee",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "MixtralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-05-25T06:10:49Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 898,
+ "job_start_time": "2024-07-09T01-56-45.222276"
+ }
cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "cognitivecomputations/dolphin-2.7-mixtral-8x7b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 7.0,
+ "architectures": "MixtralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-05-13T16:00:21Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 897,
+ "job_start_time": "2024-07-09T01-31-58.297976"
+ }
mattshumer/Reflection-Llama-3.1-70B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "mattshumer/Reflection-Llama-3.1-70B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 70.554,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "PENDING",
+ "submitted_time": "2024-09-11T06:57:38Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
mattshumer/Reflection-Llama-3.1-70B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "mattshumer/Reflection-Llama-3.1-70B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 70.554,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "PENDING",
+ "submitted_time": "2024-09-06T17:31:23Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
migtissera/Tess-v2.5-Gemma-2-27B-alpha_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "migtissera/Tess-v2.5-Gemma-2-27B-alpha",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 27.0,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-05T20:23:26Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 1055,
+ "job_start_time": "2024-09-09T01-58-12.573627"
+ }
paloalma/ECE-TW3-JRGL-V1_eval_request_2f08c7a_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "paloalma/ECE-TW3-JRGL-V1",
+ "base_model": "",
+ "revision": "2f08c7ab9db03b1b9f455c7beee6a41e99aa910e",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 68.977,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-05-28T03:51:50Z",
+ "model_type": "🤝 : base merges and moerges",
+ "source": "leaderboard",
+ "job_id": 888,
+ "job_start_time": "2024-07-06T16-36-30.068182"
+ }
paloalma/ECE-TW3-JRGL-V1_eval_request_2f08c7a_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "paloalma/ECE-TW3-JRGL-V1",
+ "base_model": "",
+ "revision": "2f08c7ab9db03b1b9f455c7beee6a41e99aa910e",
+ "private": false,
+ "precision": "float16",
+ "params": 68.977,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-05-28T03:52:27Z",
+ "model_type": "🤝 : base merges and moerges",
+ "source": "leaderboard",
+ "job_id": 889,
+ "job_start_time": "2024-07-06T18-23-51.839608"
+ }
paloalma/ECE-TW3-JRGL-V1_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "paloalma/ECE-TW3-JRGL-V1",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 68.977,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-04-26T08:12:30Z",
+ "model_type": "🤝 : base merges and moerges",
+ "source": "leaderboard",
+ "job_id": 618,
+ "job_start_time": "2024-05-16T11-58-38.457588"
+ }
paloalma/ECE-TW3-JRGL-V1_eval_request_f1916d0_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "paloalma/ECE-TW3-JRGL-V1",
+ "base_model": "",
+ "revision": "f1916d0913e1ae5b9a83d9fa24bddd4350795920",
+ "private": false,
+ "precision": "float16",
+ "params": 68.977,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "PENDING",
+ "submitted_time": "2024-05-28T15:45:07Z",
+ "model_type": "🤝 : base merges and moerges",
+ "source": "leaderboard",
+ "job_id": -1,
+ "job_start_time": null
+ }
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2_eval_request_9ac0fbe_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2",
+ "base_model": "",
+ "revision": "9ac0fbee445e7755e50520e9881d67588b4b854c",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 8.03,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-21T04:19:04Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1339,
+ "job_start_time": "2024-12-07T03-56-57.634393"
+ }
princeton-nlp/gemma-2-9b-it-DPO_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "princeton-nlp/gemma-2-9b-it-DPO",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 9.242,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-19T15:24:11Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1114,
+ "job_start_time": "2024-09-23T09-14-45.604278"
+ }
recoilme/recoilme-gemma-2-9B-v0.3_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "recoilme/recoilme-gemma-2-9B-v0.3",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 10.159,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-19T15:29:39Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 1116,
+ "job_start_time": "2024-09-23T09-22-04.155178"
+ }
recoilme/recoilme-gemma-2-9B-v0.4_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "recoilme/recoilme-gemma-2-9B-v0.4",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 10.159,
+ "architectures": "Gemma2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-19T15:29:45Z",
+ "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
+ "source": "leaderboard",
+ "job_id": 1117,
+ "job_start_time": "2024-09-23T09-25-45.091565"
+ }
v000000/Qwen2.5-Lumen-14B_eval_request_False_bfloat16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "v000000/Qwen2.5-Lumen-14B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "bfloat16",
+ "params": 14.77,
+ "architectures": "Qwen2ForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-09-20T18:11:48Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 1122,
+ "job_start_time": "2024-09-23T09-43-32.674090"
+ }
vicgalle/ConfigurableSOLAR-10.7B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "model": "vicgalle/ConfigurableSOLAR-10.7B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 10.732,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "main_language": "English",
+ "status": "RERUN",
+ "submitted_time": "2024-06-12T19:45:05Z",
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 825,
+ "job_start_time": "2024-06-16T07-13-41.656977"
+ }