morriszms commited on
Commit
f409210
1 Parent(s): 59eaa9d

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Yi-34B-200K-AEZAKMI-v2-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ Yi-34B-200K-AEZAKMI-v2-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ Yi-34B-200K-AEZAKMI-v2-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ Yi-34B-200K-AEZAKMI-v2-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ Yi-34B-200K-AEZAKMI-v2-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ Yi-34B-200K-AEZAKMI-v2-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ Yi-34B-200K-AEZAKMI-v2-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ Yi-34B-200K-AEZAKMI-v2-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ Yi-34B-200K-AEZAKMI-v2-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ Yi-34B-200K-AEZAKMI-v2-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ Yi-34B-200K-AEZAKMI-v2-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ Yi-34B-200K-AEZAKMI-v2-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ tags:
4
+ - llm
5
+ - fine-tune
6
+ - yi
7
+ - TensorBlock
8
+ - GGUF
9
+ datasets:
10
+ - adamo1139/AEZAKMI_v2
11
+ license_name: yi-license
12
+ license_link: LICENSE
13
+ base_model: adamo1139/Yi-34B-200K-AEZAKMI-v2
14
+ model-index:
15
+ - name: Yi-34B-200K-AEZAKMI-v2
16
+ results:
17
+ - task:
18
+ type: text-generation
19
+ name: Text Generation
20
+ dataset:
21
+ name: AI2 Reasoning Challenge (25-Shot)
22
+ type: ai2_arc
23
+ config: ARC-Challenge
24
+ split: test
25
+ args:
26
+ num_few_shot: 25
27
+ metrics:
28
+ - type: acc_norm
29
+ value: 67.92
30
+ name: normalized accuracy
31
+ source:
32
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
33
+ name: Open LLM Leaderboard
34
+ - task:
35
+ type: text-generation
36
+ name: Text Generation
37
+ dataset:
38
+ name: HellaSwag (10-Shot)
39
+ type: hellaswag
40
+ split: validation
41
+ args:
42
+ num_few_shot: 10
43
+ metrics:
44
+ - type: acc_norm
45
+ value: 85.61
46
+ name: normalized accuracy
47
+ source:
48
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
49
+ name: Open LLM Leaderboard
50
+ - task:
51
+ type: text-generation
52
+ name: Text Generation
53
+ dataset:
54
+ name: MMLU (5-Shot)
55
+ type: cais/mmlu
56
+ config: all
57
+ split: test
58
+ args:
59
+ num_few_shot: 5
60
+ metrics:
61
+ - type: acc
62
+ value: 75.22
63
+ name: accuracy
64
+ source:
65
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
66
+ name: Open LLM Leaderboard
67
+ - task:
68
+ type: text-generation
69
+ name: Text Generation
70
+ dataset:
71
+ name: TruthfulQA (0-shot)
72
+ type: truthful_qa
73
+ config: multiple_choice
74
+ split: validation
75
+ args:
76
+ num_few_shot: 0
77
+ metrics:
78
+ - type: mc2
79
+ value: 56.74
80
+ source:
81
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
82
+ name: Open LLM Leaderboard
83
+ - task:
84
+ type: text-generation
85
+ name: Text Generation
86
+ dataset:
87
+ name: Winogrande (5-shot)
88
+ type: winogrande
89
+ config: winogrande_xl
90
+ split: validation
91
+ args:
92
+ num_few_shot: 5
93
+ metrics:
94
+ - type: acc
95
+ value: 81.61
96
+ name: accuracy
97
+ source:
98
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
99
+ name: Open LLM Leaderboard
100
+ - task:
101
+ type: text-generation
102
+ name: Text Generation
103
+ dataset:
104
+ name: GSM8k (5-shot)
105
+ type: gsm8k
106
+ config: main
107
+ split: test
108
+ args:
109
+ num_few_shot: 5
110
+ metrics:
111
+ - type: acc
112
+ value: 58.91
113
+ name: accuracy
114
+ source:
115
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
116
+ name: Open LLM Leaderboard
117
+ - task:
118
+ type: text-generation
119
+ name: Text Generation
120
+ dataset:
121
+ name: IFEval (0-Shot)
122
+ type: HuggingFaceH4/ifeval
123
+ args:
124
+ num_few_shot: 0
125
+ metrics:
126
+ - type: inst_level_strict_acc and prompt_level_strict_acc
127
+ value: 45.55
128
+ name: strict accuracy
129
+ source:
130
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
131
+ name: Open LLM Leaderboard
132
+ - task:
133
+ type: text-generation
134
+ name: Text Generation
135
+ dataset:
136
+ name: BBH (3-Shot)
137
+ type: BBH
138
+ args:
139
+ num_few_shot: 3
140
+ metrics:
141
+ - type: acc_norm
142
+ value: 35.28
143
+ name: normalized accuracy
144
+ source:
145
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
146
+ name: Open LLM Leaderboard
147
+ - task:
148
+ type: text-generation
149
+ name: Text Generation
150
+ dataset:
151
+ name: MATH Lvl 5 (4-Shot)
152
+ type: hendrycks/competition_math
153
+ args:
154
+ num_few_shot: 4
155
+ metrics:
156
+ - type: exact_match
157
+ value: 4.83
158
+ name: exact match
159
+ source:
160
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
161
+ name: Open LLM Leaderboard
162
+ - task:
163
+ type: text-generation
164
+ name: Text Generation
165
+ dataset:
166
+ name: GPQA (0-shot)
167
+ type: Idavidrein/gpqa
168
+ args:
169
+ num_few_shot: 0
170
+ metrics:
171
+ - type: acc_norm
172
+ value: 10.96
173
+ name: acc_norm
174
+ source:
175
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
176
+ name: Open LLM Leaderboard
177
+ - task:
178
+ type: text-generation
179
+ name: Text Generation
180
+ dataset:
181
+ name: MuSR (0-shot)
182
+ type: TAUR-Lab/MuSR
183
+ args:
184
+ num_few_shot: 0
185
+ metrics:
186
+ - type: acc_norm
187
+ value: 6.48
188
+ name: acc_norm
189
+ source:
190
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
191
+ name: Open LLM Leaderboard
192
+ - task:
193
+ type: text-generation
194
+ name: Text Generation
195
+ dataset:
196
+ name: MMLU-PRO (5-shot)
197
+ type: TIGER-Lab/MMLU-Pro
198
+ config: main
199
+ split: test
200
+ args:
201
+ num_few_shot: 5
202
+ metrics:
203
+ - type: acc
204
+ value: 39.03
205
+ name: accuracy
206
+ source:
207
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=adamo1139/Yi-34B-200K-AEZAKMI-v2
208
+ name: Open LLM Leaderboard
209
+ ---
210
+
211
+ <div style="width: auto; margin-left: auto; margin-right: auto">
212
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
213
+ </div>
214
+ <div style="display: flex; justify-content: space-between; width: 100%;">
215
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
216
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
217
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
218
+ </p>
219
+ </div>
220
+ </div>
221
+
222
+ ## adamo1139/Yi-34B-200K-AEZAKMI-v2 - GGUF
223
+
224
+ This repo contains GGUF format model files for [adamo1139/Yi-34B-200K-AEZAKMI-v2](https://huggingface.co/adamo1139/Yi-34B-200K-AEZAKMI-v2).
225
+
226
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
227
+
228
+ <div style="text-align: left; margin: 20px 0;">
229
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
230
+ Run them on the TensorBlock client using your local machine ↗
231
+ </a>
232
+ </div>
233
+
234
+ ## Prompt template
235
+
236
+ ```
237
+ <|im_start|>system
238
+ {system_prompt}<|im_end|>
239
+ <|im_start|>user
240
+ {prompt}<|im_end|>
241
+ <|im_start|>assistant
242
+ ```
243
+
244
+ ## Model file specification
245
+
246
+ | Filename | Quant type | File Size | Description |
247
+ | -------- | ---------- | --------- | ----------- |
248
+ | [Yi-34B-200K-AEZAKMI-v2-Q2_K.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q2_K.gguf) | Q2_K | 12.825 GB | smallest, significant quality loss - not recommended for most purposes |
249
+ | [Yi-34B-200K-AEZAKMI-v2-Q3_K_S.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q3_K_S.gguf) | Q3_K_S | 14.960 GB | very small, high quality loss |
250
+ | [Yi-34B-200K-AEZAKMI-v2-Q3_K_M.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q3_K_M.gguf) | Q3_K_M | 16.655 GB | very small, high quality loss |
251
+ | [Yi-34B-200K-AEZAKMI-v2-Q3_K_L.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q3_K_L.gguf) | Q3_K_L | 18.139 GB | small, substantial quality loss |
252
+ | [Yi-34B-200K-AEZAKMI-v2-Q4_0.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q4_0.gguf) | Q4_0 | 19.467 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
253
+ | [Yi-34B-200K-AEZAKMI-v2-Q4_K_S.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q4_K_S.gguf) | Q4_K_S | 19.599 GB | small, greater quality loss |
254
+ | [Yi-34B-200K-AEZAKMI-v2-Q4_K_M.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q4_K_M.gguf) | Q4_K_M | 20.659 GB | medium, balanced quality - recommended |
255
+ | [Yi-34B-200K-AEZAKMI-v2-Q5_0.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q5_0.gguf) | Q5_0 | 23.708 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
256
+ | [Yi-34B-200K-AEZAKMI-v2-Q5_K_S.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q5_K_S.gguf) | Q5_K_S | 23.708 GB | large, low quality loss - recommended |
257
+ | [Yi-34B-200K-AEZAKMI-v2-Q5_K_M.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q5_K_M.gguf) | Q5_K_M | 24.322 GB | large, very low quality loss - recommended |
258
+ | [Yi-34B-200K-AEZAKMI-v2-Q6_K.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q6_K.gguf) | Q6_K | 28.214 GB | very large, extremely low quality loss |
259
+ | [Yi-34B-200K-AEZAKMI-v2-Q8_0.gguf](https://huggingface.co/tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF/blob/main/Yi-34B-200K-AEZAKMI-v2-Q8_0.gguf) | Q8_0 | 36.542 GB | very large, extremely low quality loss - not recommended |
260
+
261
+
262
+ ## Downloading instruction
263
+
264
+ ### Command line
265
+
266
+ Firstly, install Huggingface Client
267
+
268
+ ```shell
269
+ pip install -U "huggingface_hub[cli]"
270
+ ```
271
+
272
+ Then, download the individual model file to a local directory
273
+
274
+ ```shell
275
+ huggingface-cli download tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF --include "Yi-34B-200K-AEZAKMI-v2-Q2_K.gguf" --local-dir MY_LOCAL_DIR
276
+ ```
277
+
278
+ If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:
279
+
280
+ ```shell
281
+ huggingface-cli download tensorblock/Yi-34B-200K-AEZAKMI-v2-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
282
+ ```
Yi-34B-200K-AEZAKMI-v2-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:809068996e1541e3ce6a4c6063845687297e2758a0c0f9454ce927aab636272e
3
+ size 12825234560
Yi-34B-200K-AEZAKMI-v2-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81eae9bf0a9a186f270de8292e80c686efbf6e62b3695090ca53d5c21a3ff5e5
3
+ size 18139446400
Yi-34B-200K-AEZAKMI-v2-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ee41b410aa377c689edc37cf2a1b71dc467fc887c99f42a1355e791dc19eb69
3
+ size 16654924928
Yi-34B-200K-AEZAKMI-v2-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87b7ee9c01cc8fce901cf7efe3d85ddd921f2deaddaa1b992eae842887cd44a1
3
+ size 14960295040
Yi-34B-200K-AEZAKMI-v2-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:269772f2473dd0dca0e16dae3c8e9714cb4939f6df02b1231e4d1ed17974b554
3
+ size 19466529920
Yi-34B-200K-AEZAKMI-v2-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3866751b65d28ec6ea2b5db0d8aba2b4f91324f058ab2b12bf8b581f807b9f6
3
+ size 20658711680
Yi-34B-200K-AEZAKMI-v2-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6fcc39fdcf917aed5854caa5a6ed1e68fca1f697c543b4491cb4350ecea3235
3
+ size 19598650496
Yi-34B-200K-AEZAKMI-v2-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4d4c43eef94bca68b6e9530f36145982aae5bb950b77db4521a12fcf850c690
3
+ size 23707692160
Yi-34B-200K-AEZAKMI-v2-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a73d46531effa6ec9d6bdf01ac05651c24eaa5097205646931da2142aa3cd6b
3
+ size 24321846400
Yi-34B-200K-AEZAKMI-v2-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db73140de60be1f4e48f5e27408551d5d4d8b545b0780cd877f3810ca17d44bb
3
+ size 23707692160
Yi-34B-200K-AEZAKMI-v2-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:916429d4580c3e9cdc0e7d8b11a93f0c5c0bb4588e4edf8c67266dbb4eaa802b
3
+ size 28213927040
Yi-34B-200K-AEZAKMI-v2-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:479bafa8d7d52fec14397d8e800da134e19de04d3a4b059db3064597be3484d9
3
+ size 36542282880