morriszms committed
Commit ba008fc
1 Parent(s): 203f656

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-qwen2-7b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,190 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ library_name: transformers
+ tags:
+ - chat
+ - qwen
+ - qwen2
+ - finetune
+ - chatml
+ - OpenHermes-2.5
+ - HelpSteer2
+ - Orca
+ - SlimOrca
+ - TensorBlock
+ - GGUF
+ base_model: MaziyarPanahi/calme-2.4-qwen2-7b
+ datasets:
+ - nvidia/HelpSteer2
+ - teknium/OpenHermes-2.5
+ - microsoft/orca-math-word-problems-200k
+ - Open-Orca/SlimOrca
+ pipeline_tag: text-generation
+ inference: false
+ model_creator: MaziyarPanahi
+ quantized_by: MaziyarPanahi
+ model-index:
+ - name: calme-2.4-qwen2-7b
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 33.0
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 31.82
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 18.35
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 4.47
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 14.43
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-qwen2-7b
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 33.08
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-qwen2-7b
+       name: Open LLM Leaderboard
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## MaziyarPanahi/calme-2.4-qwen2-7b - GGUF
+
+ This repo contains GGUF format model files for [MaziyarPanahi/calme-2.4-qwen2-7b](https://huggingface.co/MaziyarPanahi/calme-2.4-qwen2-7b).
+
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+
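+ If you do not already have a compatible llama.cpp build, a minimal sketch of building one from source at that commit could look like the following; the CMake steps mirror the upstream build instructions, and you may need additional platform-specific flags (for example, for CUDA or Metal acceleration):
+
+ ```shell
+ # Clone llama.cpp and pin it to the commit referenced above (b4011)
+ git clone https://github.com/ggerganov/llama.cpp
+ cd llama.cpp
+ git checkout a6744e43e80f4be6398fc7733a01642c846dce1d
+
+ # Configure and build; the CLI and server binaries land in build/bin/
+ cmake -B build
+ cmake --build build --config Release
+ ```
+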
+ ## Prompt template
+
+ ```
+ <|im_start|>system
+ {system_prompt}<|im_end|>
+ <|im_start|>user
+ {prompt}<|im_end|>
+ <|im_start|>assistant
+ ```
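+
+ As a quick sanity check, a prompt in this format can be passed directly to llama.cpp's `llama-cli`. This is only a sketch: the binary path, model filename, example messages, and the `-n`/`-ngl` values are placeholders, and `-e` makes llama-cli interpret the `\n` escapes in the prompt string.
+
+ ```shell
+ ./build/bin/llama-cli -m ./calme-2.4-qwen2-7b-Q4_K_M.gguf \
+   -e -p "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nWhat is the GGUF format?<|im_end|>\n<|im_start|>assistant\n" \
+   -n 256 -ngl 99   # -n limits generated tokens; -ngl offloads layers to the GPU if one is available
+ ```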
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [calme-2.4-qwen2-7b-Q2_K.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q2_K.gguf) | Q2_K | 2.809 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [calme-2.4-qwen2-7b-Q3_K_S.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q3_K_S.gguf) | Q3_K_S | 3.253 GB | very small, high quality loss |
+ | [calme-2.4-qwen2-7b-Q3_K_M.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q3_K_M.gguf) | Q3_K_M | 3.547 GB | very small, high quality loss |
+ | [calme-2.4-qwen2-7b-Q3_K_L.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q3_K_L.gguf) | Q3_K_L | 3.808 GB | small, substantial quality loss |
+ | [calme-2.4-qwen2-7b-Q4_0.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q4_0.gguf) | Q4_0 | 4.127 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [calme-2.4-qwen2-7b-Q4_K_S.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q4_K_S.gguf) | Q4_K_S | 4.152 GB | small, greater quality loss |
+ | [calme-2.4-qwen2-7b-Q4_K_M.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q4_K_M.gguf) | Q4_K_M | 4.361 GB | medium, balanced quality - recommended |
+ | [calme-2.4-qwen2-7b-Q5_0.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q5_0.gguf) | Q5_0 | 4.950 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [calme-2.4-qwen2-7b-Q5_K_S.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q5_K_S.gguf) | Q5_K_S | 4.950 GB | large, low quality loss - recommended |
+ | [calme-2.4-qwen2-7b-Q5_K_M.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q5_K_M.gguf) | Q5_K_M | 5.071 GB | large, very low quality loss - recommended |
+ | [calme-2.4-qwen2-7b-Q6_K.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q6_K.gguf) | Q6_K | 5.825 GB | very large, extremely low quality loss |
+ | [calme-2.4-qwen2-7b-Q8_0.gguf](https://huggingface.co/tensorblock/calme-2.4-qwen2-7b-GGUF/tree/main/calme-2.4-qwen2-7b-Q8_0.gguf) | Q8_0 | 7.542 GB | very large, extremely low quality loss - not recommended |
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face Hub command-line client:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/calme-2.4-qwen2-7b-GGUF --include "calme-2.4-qwen2-7b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/calme-2.4-qwen2-7b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
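+
+ Once a file is on disk, one way to use it is through llama.cpp's `llama-server`, which exposes an OpenAI-compatible HTTP API. The sketch below assumes the build layout from the earlier CMake example; the model filename, host, port, context size (`-c`), and `-ngl` value are placeholders to adjust for your setup:
+
+ ```shell
+ # Serve the downloaded GGUF file over a local HTTP endpoint
+ ./build/bin/llama-server \
+   -m MY_LOCAL_DIR/calme-2.4-qwen2-7b-Q4_K_M.gguf \
+   --host 127.0.0.1 --port 8080 \
+   -c 4096 -ngl 99
+ ```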
calme-2.4-qwen2-7b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a53814746f9c11ae2e67aed152462f0aa81aadab24d91e668aad3be6caaa7b4
+ size 3015938368
calme-2.4-qwen2-7b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0305a6e06c987fb23d2a4603a127e490ca51eaf33304a358e9d66d00ced87793
+ size 4088457536
calme-2.4-qwen2-7b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79a672be517e3da94d3c5302fec3b156ad301758aaa30de98d46d9d6750db385
+ size 3808389440
calme-2.4-qwen2-7b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa267b9dc1f25b0a3947589e7b5c93ee25f502995533cd2ac81879328308bf99
+ size 3492366656
calme-2.4-qwen2-7b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:addaf5958a800255485f3372409afc962ea7ecacfd8a80f37d5924c92a8f0135
+ size 4431388992
calme-2.4-qwen2-7b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:779b583ec155c4ae485677760ca7ba08c0986059ed118ade4d0300d576735b1d
+ size 4683071808
calme-2.4-qwen2-7b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aea17e2553f889eba0b9d9704513eef0f1691199699c058f93bcc7ac70f81d80
+ size 4457767232
calme-2.4-qwen2-7b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7acd0192fc4d1382116123a18461f9781e199314bbbc2dc67d6c53d81aa2fe1c
+ size 5315174720
calme-2.4-qwen2-7b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64cf572b3a7e243180670362b96131694c8b247d69adf181ec4c1cc0b82e5956
+ size 5444829504
calme-2.4-qwen2-7b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ebf6f7d074c55ee3cd9708b1309ed26eb65ba5beb4eec08936332b558e1e73c
+ size 5315174720
calme-2.4-qwen2-7b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f038462d75b08f8ab6178b49487b8379c7abb6f3b7b654aebf7244a9f0e8b4f0
+ size 6254197056
calme-2.4-qwen2-7b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4206e4f8cd084422e5b5bfad735f3e786d122fcaea4bdc8191c07cb2fb40fd59
+ size 8098523456