Files changed (1)
  1. README.md +139 -34
README.md CHANGED
@@ -1,12 +1,43 @@
  ---
  language:
  - en
- pipeline_tag: text-generation
+ license: llama3.1
+ library_name: transformers
+ tags:
+ - mergekit
+ - merge
+ - shining-valiant
+ - shining-valiant-2
+ - cobalt
+ - plum
+ - valiant
+ - valiant-labs
+ - llama
+ - llama-3.1
+ - llama-3.1-instruct
+ - llama-3.1-instruct-8b
+ - llama-3
+ - llama-3-instruct
+ - llama-3-instruct-8b
+ - 8b
+ - math
+ - math-instruct
+ - science
+ - physics
+ - biology
+ - chemistry
+ - compsci
+ - computer-science
+ - engineering
+ - technical
+ - conversational
+ - chat
+ - instruct
  base_model:
  - meta-llama/Llama-3.1-8B-Instruct
  - ValiantLabs/Llama3.1-8B-ShiningValiant2
  - ValiantLabs/Llama3.1-8B-Cobalt
- library_name: transformers
+ pipeline_tag: text-generation
  model_type: llama
  model-index:
  - name: sequelbox/Llama3.1-8B-PlumMath
@@ -35,38 +66,98 @@ model-index:
      - type: acc
        value: 40.27
        name: acc
- tags:
- - mergekit
- - merge
- - shining-valiant
- - shining-valiant-2
- - cobalt
- - plum
- - valiant
- - valiant-labs
- - llama
- - llama-3.1
- - llama-3.1-instruct
- - llama-3.1-instruct-8b
- - llama-3
- - llama-3-instruct
- - llama-3-instruct-8b
- - 8b
- - math
- - math-instruct
- - science
- - physics
- - biology
- - chemistry
- - compsci
- - computer-science
- - engineering
- - technical
- - conversational
- - chat
- - instruct
- license: llama3.1
-
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 22.42
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=sequelbox/Llama3.1-8B-PlumMath
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 16.45
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=sequelbox/Llama3.1-8B-PlumMath
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 3.93
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=sequelbox/Llama3.1-8B-PlumMath
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 9.06
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=sequelbox/Llama3.1-8B-PlumMath
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 8.98
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=sequelbox/Llama3.1-8B-PlumMath
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 21.95
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=sequelbox/Llama3.1-8B-PlumMath
+       name: Open LLM Leaderboard
  ---
  # PlumMath

@@ -103,3 +194,17 @@ models:
        weight: 0.2
  base_model: meta-llama/Llama-3.1-8B-Instruct
  ```
+
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_sequelbox__Llama3.1-8B-PlumMath)
+
+ | Metric |Value|
+ |-------------------|----:|
+ |Avg. |13.80|
+ |IFEval (0-Shot) |22.42|
+ |BBH (3-Shot) |16.45|
+ |MATH Lvl 5 (4-Shot)| 3.93|
+ |GPQA (0-shot) | 9.06|
+ |MuSR (0-shot) | 8.98|
+ |MMLU-PRO (5-shot) |21.95|
+
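For reference, the Avg. value in the new results table is the arithmetic mean of the six benchmark scores added in this change. A minimal, hypothetical check (not part of the card itself) that the rounding matches:

```python
# Hypothetical sanity check: "Avg." in the added table should be the
# arithmetic mean of the six Open LLM Leaderboard scores from this change.
scores = {
    "IFEval (0-Shot)": 22.42,
    "BBH (3-Shot)": 16.45,
    "MATH Lvl 5 (4-Shot)": 3.93,
    "GPQA (0-shot)": 9.06,
    "MuSR (0-shot)": 8.98,
    "MMLU-PRO (5-shot)": 21.95,
}
average = sum(scores.values()) / len(scores)
print(f"Avg. = {average:.2f}")  # -> Avg. = 13.80, matching the table
```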