morriszms committed
Commit fcd8e14
1 Parent(s): c1702f8

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.2-qwen2-7b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,190 @@
---
language:
- en
license: apache-2.0
library_name: transformers
tags:
- chat
- qwen
- qwen2
- finetune
- chatml
- OpenHermes-2.5
- HelpSteer2
- Orca
- SlimOrca
- TensorBlock
- GGUF
base_model: MaziyarPanahi/calme-2.2-qwen2-7b
datasets:
- nvidia/HelpSteer2
- teknium/OpenHermes-2.5
- microsoft/orca-math-word-problems-200k
- Open-Orca/SlimOrca
pipeline_tag: text-generation
inference: false
model_creator: MaziyarPanahi
quantized_by: MaziyarPanahi
model-index:
- name: calme-2.2-qwen2-7b
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: IFEval (0-Shot)
      type: HuggingFaceH4/ifeval
      args:
        num_few_shot: 0
    metrics:
    - type: inst_level_strict_acc and prompt_level_strict_acc
      value: 35.97
      name: strict accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.2-qwen2-7b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: BBH (3-Shot)
      type: BBH
      args:
        num_few_shot: 3
    metrics:
    - type: acc_norm
      value: 33.11
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.2-qwen2-7b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MATH Lvl 5 (4-Shot)
      type: hendrycks/competition_math
      args:
        num_few_shot: 4
    metrics:
    - type: exact_match
      value: 19.34
      name: exact match
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.2-qwen2-7b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GPQA (0-shot)
      type: Idavidrein/gpqa
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 5.48
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.2-qwen2-7b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MuSR (0-shot)
      type: TAUR-Lab/MuSR
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 13.28
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.2-qwen2-7b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU-PRO (5-shot)
      type: TIGER-Lab/MMLU-Pro
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 32.21
      name: accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.2-qwen2-7b
      name: Open LLM Leaderboard
---

<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
<div style="display: flex; flex-direction: column; align-items: flex-start;">
<p style="margin-top: 0.5em; margin-bottom: 0em;">
Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
</p>
</div>
</div>

## MaziyarPanahi/calme-2.2-qwen2-7b - GGUF

This repo contains GGUF format model files for [MaziyarPanahi/calme-2.2-qwen2-7b](https://huggingface.co/MaziyarPanahi/calme-2.2-qwen2-7b).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

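If you want a local llama.cpp at exactly that compatibility point, one option is to check out the linked commit and build from source. A minimal sketch, assuming `git`, `cmake`, and a C/C++ toolchain are installed (build options beyond the defaults are omitted):

```shell
# Sketch: build llama.cpp at the compatibility commit linked above (b4011).
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
git checkout a6744e43e80f4be6398fc7733a01642c846dce1d  # commit b4011
cmake -B build
cmake --build build --config Release
# Binaries (e.g. llama-cli) end up under build/bin/
```
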
## Prompt template

```
<|im_start|>system
{system_prompt}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```

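This is the standard ChatML format. As an illustrative sketch (assuming a llama.cpp build like the one above and a quantized file already downloaded into the working directory; the file name, system prompt, and token limit are placeholders, and flag names may vary slightly between llama.cpp builds):

```shell
# Sketch: chat with a downloaded quantized file via llama-cli.
# -cnv starts interactive chat mode, -p sets the system prompt,
# --chat-template chatml applies the template shown above.
./build/bin/llama-cli \
  -m calme-2.2-qwen2-7b-Q4_K_M.gguf \
  --chat-template chatml \
  -cnv -p "You are a helpful assistant." \
  -n 256
```
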
## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [calme-2.2-qwen2-7b-Q2_K.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q2_K.gguf) | Q2_K | 2.809 GB | smallest, significant quality loss - not recommended for most purposes |
| [calme-2.2-qwen2-7b-Q3_K_S.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q3_K_S.gguf) | Q3_K_S | 3.253 GB | very small, high quality loss |
| [calme-2.2-qwen2-7b-Q3_K_M.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q3_K_M.gguf) | Q3_K_M | 3.547 GB | very small, high quality loss |
| [calme-2.2-qwen2-7b-Q3_K_L.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q3_K_L.gguf) | Q3_K_L | 3.808 GB | small, substantial quality loss |
| [calme-2.2-qwen2-7b-Q4_0.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q4_0.gguf) | Q4_0 | 4.127 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [calme-2.2-qwen2-7b-Q4_K_S.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q4_K_S.gguf) | Q4_K_S | 4.152 GB | small, greater quality loss |
| [calme-2.2-qwen2-7b-Q4_K_M.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q4_K_M.gguf) | Q4_K_M | 4.361 GB | medium, balanced quality - recommended |
| [calme-2.2-qwen2-7b-Q5_0.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q5_0.gguf) | Q5_0 | 4.950 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [calme-2.2-qwen2-7b-Q5_K_S.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q5_K_S.gguf) | Q5_K_S | 4.950 GB | large, low quality loss - recommended |
| [calme-2.2-qwen2-7b-Q5_K_M.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q5_K_M.gguf) | Q5_K_M | 5.071 GB | large, very low quality loss - recommended |
| [calme-2.2-qwen2-7b-Q6_K.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q6_K.gguf) | Q6_K | 5.825 GB | very large, extremely low quality loss |
| [calme-2.2-qwen2-7b-Q8_0.gguf](https://huggingface.co/tensorblock/calme-2.2-qwen2-7b-GGUF/tree/main/calme-2.2-qwen2-7b-Q8_0.gguf) | Q8_0 | 7.542 GB | very large, extremely low quality loss - not recommended |

## Downloading instructions

### Command line

First, install the Hugging Face Hub CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/calme-2.2-qwen2-7b-GGUF --include "calme-2.2-qwen2-7b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/calme-2.2-qwen2-7b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
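As an optional sanity check, a downloaded file can be compared against the sha256 oid recorded in its Git LFS pointer (listed below for each file). A sketch assuming a Unix-like environment with `sha256sum`, using the Q2_K file only as an example:

```shell
# Sketch: verify a download against the sha256 from the corresponding LFS pointer.
sha256sum MY_LOCAL_DIR/calme-2.2-qwen2-7b-Q2_K.gguf
# Expected (from the Q2_K pointer below):
# f1fbad3b0312c2feb419c6dbea868262b85a79d25fa568afaff669e2a9eb26c1
```
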
calme-2.2-qwen2-7b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f1fbad3b0312c2feb419c6dbea868262b85a79d25fa568afaff669e2a9eb26c1
size 3015938368

calme-2.2-qwen2-7b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae46ec5d79743aa600bb26624a7aec43a4aa707b248583c47856788f926f39ef
size 4088457536

calme-2.2-qwen2-7b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f78dbab138cecab8c7c9949a7a1d00e96d5c710028738524015b97e749204592
size 3808389440

calme-2.2-qwen2-7b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a5f00e9e44584221b009c946e2085bbd22221ce37165ae1a6135d18a40078ec
size 3492366656

calme-2.2-qwen2-7b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c1e9de94644e8171e825419b8735c045c9d314ba8e5c4cdaa74ac089915199e3
size 4431388992

calme-2.2-qwen2-7b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:972841ff8cd95e83ff3a29ca1d3f9ca6c4b1ecb6be3429ed47ed9a8a0fe98733
size 4683071808

calme-2.2-qwen2-7b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb1e1cfde4b571b0460f6412f9d690986318a594b4ea8c96862cf14d690361a4
size 4457767232

calme-2.2-qwen2-7b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d420cf60a060deca2acfa1cacaf519e0765bcf510390ac4425661729fcb5764
size 5315174720

calme-2.2-qwen2-7b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67c9309119a06dd6d1ad2ed7bba01b613a55a3485f524fa5ab6a786714354e27
size 5444829504

calme-2.2-qwen2-7b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:397dc5ef84a89727250785796626fda562a4a4cd66cee727f336299ddf836f48
size 5315174720

calme-2.2-qwen2-7b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca78de33b0f54f49c4e11cb17250bddce4c64c0451d6885a6693a33245f457ad
size 6254197056

calme-2.2-qwen2-7b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:486abd38af809a026c036d3f6fce213b0e8c0e47c2e4a77f9f4f5137098ccaaa
size 8098523456