morriszms commited on
Commit
2b666c2
1 Parent(s): bb1437c

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Einstein-v7-Qwen2-7B-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ Einstein-v7-Qwen2-7B-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ Einstein-v7-Qwen2-7B-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ Einstein-v7-Qwen2-7B-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ Einstein-v7-Qwen2-7B-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ Einstein-v7-Qwen2-7B-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ Einstein-v7-Qwen2-7B-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ Einstein-v7-Qwen2-7B-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ Einstein-v7-Qwen2-7B-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ Einstein-v7-Qwen2-7B-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ Einstein-v7-Qwen2-7B-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ Einstein-v7-Qwen2-7B-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Einstein-v7-Qwen2-7B-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eaf6af0b9abcf954363c2dc1f2935f3f6a8084725b376d85de66b5babd4ccd7a
3
+ size 3015939392
Einstein-v7-Qwen2-7B-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04cb5c8b37c25965b1995e548df00af06fe542c7ed14ac1e8a330695bfea55a8
3
+ size 4088458560
Einstein-v7-Qwen2-7B-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:045145e276229227808215a2913d87415647d47bfff0d3e0819ff35aca2484ae
3
+ size 3808390464
Einstein-v7-Qwen2-7B-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ae3ff8efa069a428f937d3bd3e03b7c8f683cce0225111fca32149b2013717e
3
+ size 3492367680
Einstein-v7-Qwen2-7B-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f7b134a76dffa54f0a029740fa26ac5f6295f2fd6c0efba8ba0e488eacc2bcf
3
+ size 4431390016
Einstein-v7-Qwen2-7B-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e44df3683b218ef8d68deb0b7ab0efb4b68bcfafe2789f045394ec0bfa49a224
3
+ size 4683072832
Einstein-v7-Qwen2-7B-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fff09505e4b263e699f410c9b991a221c581a39d25b23caeb9d4e8396703a653
3
+ size 4457768256
Einstein-v7-Qwen2-7B-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ce3e879250fa441316ef61501f2916ae06223d0a198f0f69d0e369b76110001
3
+ size 5315175744
Einstein-v7-Qwen2-7B-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfeac1fcfcdd89690e1a77174db407ea5daba3cdffcca2bacb7d40c664968d6b
3
+ size 5444830528
Einstein-v7-Qwen2-7B-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32550af545e2099f064bed23d0ec4ac5852b9318bfd7cc8eb18dc1f5e8ec8c58
3
+ size 5315175744
Einstein-v7-Qwen2-7B-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8e787921161b7e5d92f1dd183e0f92d4f0be9b8bb790d69ba04e87e41f24592
3
+ size 6254198080
Einstein-v7-Qwen2-7B-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb7c1b21f8ba15cdc87960421f33dad63e3c73d3e6d6197279c12cd76f9f70dd
3
+ size 8098524480
README.md ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: other
5
+ tags:
6
+ - axolotl
7
+ - instruct
8
+ - finetune
9
+ - chatml
10
+ - gpt4
11
+ - synthetic data
12
+ - science
13
+ - physics
14
+ - chemistry
15
+ - biology
16
+ - math
17
+ - qwen
18
+ - qwen2
19
+ - TensorBlock
20
+ - GGUF
21
+ base_model: Weyaxi/Einstein-v7-Qwen2-7B
22
+ datasets:
23
+ - allenai/ai2_arc
24
+ - camel-ai/physics
25
+ - camel-ai/chemistry
26
+ - camel-ai/biology
27
+ - camel-ai/math
28
+ - metaeval/reclor
29
+ - openbookqa
30
+ - mandyyyyii/scibench
31
+ - derek-thomas/ScienceQA
32
+ - TIGER-Lab/ScienceEval
33
+ - jondurbin/airoboros-3.2
34
+ - LDJnr/Capybara
35
+ - Cot-Alpaca-GPT4-From-OpenHermes-2.5
36
+ - STEM-AI-mtl/Electrical-engineering
37
+ - knowrohit07/saraswati-stem
38
+ - sablo/oasst2_curated
39
+ - lmsys/lmsys-chat-1m
40
+ - TIGER-Lab/MathInstruct
41
+ - bigbio/med_qa
42
+ - meta-math/MetaMathQA-40K
43
+ - openbookqa
44
+ - piqa
45
+ - metaeval/reclor
46
+ - derek-thomas/ScienceQA
47
+ - scibench
48
+ - sciq
49
+ - Open-Orca/SlimOrca
50
+ - migtissera/Synthia-v1.3
51
+ - TIGER-Lab/ScienceEval
52
+ - allenai/WildChat
53
+ - microsoft/orca-math-word-problems-200k
54
+ - openchat/openchat_sharegpt4_dataset
55
+ - teknium/GPTeacher-General-Instruct
56
+ - m-a-p/CodeFeedback-Filtered-Instruction
57
+ - totally-not-an-llm/EverythingLM-data-V3
58
+ - HuggingFaceH4/no_robots
59
+ - OpenAssistant/oasst_top1_2023-08-25
60
+ - WizardLM/WizardLM_evol_instruct_70k
61
+ - abacusai/SystemChat-1.1
62
+ - H-D-T/Buzz-V1.2
63
+ model-index:
64
+ - name: Einstein-v7-Qwen2-7B
65
+ results:
66
+ - task:
67
+ type: text-generation
68
+ name: Text Generation
69
+ dataset:
70
+ name: IFEval (0-Shot)
71
+ type: HuggingFaceH4/ifeval
72
+ args:
73
+ num_few_shot: 0
74
+ metrics:
75
+ - type: inst_level_strict_acc and prompt_level_strict_acc
76
+ value: 41.0
77
+ name: strict accuracy
78
+ source:
79
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v7-Qwen2-7B
80
+ name: Open LLM Leaderboard
81
+ - task:
82
+ type: text-generation
83
+ name: Text Generation
84
+ dataset:
85
+ name: BBH (3-Shot)
86
+ type: BBH
87
+ args:
88
+ num_few_shot: 3
89
+ metrics:
90
+ - type: acc_norm
91
+ value: 32.84
92
+ name: normalized accuracy
93
+ source:
94
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v7-Qwen2-7B
95
+ name: Open LLM Leaderboard
96
+ - task:
97
+ type: text-generation
98
+ name: Text Generation
99
+ dataset:
100
+ name: MATH Lvl 5 (4-Shot)
101
+ type: hendrycks/competition_math
102
+ args:
103
+ num_few_shot: 4
104
+ metrics:
105
+ - type: exact_match
106
+ value: 15.18
107
+ name: exact match
108
+ source:
109
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v7-Qwen2-7B
110
+ name: Open LLM Leaderboard
111
+ - task:
112
+ type: text-generation
113
+ name: Text Generation
114
+ dataset:
115
+ name: GPQA (0-shot)
116
+ type: Idavidrein/gpqa
117
+ args:
118
+ num_few_shot: 0
119
+ metrics:
120
+ - type: acc_norm
121
+ value: 6.6
122
+ name: acc_norm
123
+ source:
124
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v7-Qwen2-7B
125
+ name: Open LLM Leaderboard
126
+ - task:
127
+ type: text-generation
128
+ name: Text Generation
129
+ dataset:
130
+ name: MuSR (0-shot)
131
+ type: TAUR-Lab/MuSR
132
+ args:
133
+ num_few_shot: 0
134
+ metrics:
135
+ - type: acc_norm
136
+ value: 14.06
137
+ name: acc_norm
138
+ source:
139
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v7-Qwen2-7B
140
+ name: Open LLM Leaderboard
141
+ - task:
142
+ type: text-generation
143
+ name: Text Generation
144
+ dataset:
145
+ name: MMLU-PRO (5-shot)
146
+ type: TIGER-Lab/MMLU-Pro
147
+ config: main
148
+ split: test
149
+ args:
150
+ num_few_shot: 5
151
+ metrics:
152
+ - type: acc
153
+ value: 34.4
154
+ name: accuracy
155
+ source:
156
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v7-Qwen2-7B
157
+ name: Open LLM Leaderboard
158
+ ---
159
+
160
+ <div style="width: auto; margin-left: auto; margin-right: auto">
161
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
162
+ </div>
163
+ <div style="display: flex; justify-content: space-between; width: 100%;">
164
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
165
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
166
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
167
+ </p>
168
+ </div>
169
+ </div>
170
+
171
+ ## Weyaxi/Einstein-v7-Qwen2-7B - GGUF
172
+
173
+ This repo contains GGUF format model files for [Weyaxi/Einstein-v7-Qwen2-7B](https://huggingface.co/Weyaxi/Einstein-v7-Qwen2-7B).
174
+
175
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
176
+
177
+ ## Prompt template
178
+
179
+ ```
180
+ <|im_start|>system
181
+ {system_prompt}<|im_end|>
182
+ <|im_start|>user
183
+ {prompt}<|im_end|>
184
+ <|im_start|>assistant
185
+ ```
186
+
187
+ ## Model file specification
188
+
189
+ | Filename | Quant type | File Size | Description |
190
+ | -------- | ---------- | --------- | ----------- |
191
+ | [Einstein-v7-Qwen2-7B-Q2_K.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q2_K.gguf) | Q2_K | 2.809 GB | smallest, significant quality loss - not recommended for most purposes |
192
+ | [Einstein-v7-Qwen2-7B-Q3_K_S.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q3_K_S.gguf) | Q3_K_S | 3.253 GB | very small, high quality loss |
193
+ | [Einstein-v7-Qwen2-7B-Q3_K_M.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q3_K_M.gguf) | Q3_K_M | 3.547 GB | very small, high quality loss |
194
+ | [Einstein-v7-Qwen2-7B-Q3_K_L.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q3_K_L.gguf) | Q3_K_L | 3.808 GB | small, substantial quality loss |
195
+ | [Einstein-v7-Qwen2-7B-Q4_0.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q4_0.gguf) | Q4_0 | 4.127 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
196
+ | [Einstein-v7-Qwen2-7B-Q4_K_S.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q4_K_S.gguf) | Q4_K_S | 4.152 GB | small, greater quality loss |
197
+ | [Einstein-v7-Qwen2-7B-Q4_K_M.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q4_K_M.gguf) | Q4_K_M | 4.361 GB | medium, balanced quality - recommended |
198
+ | [Einstein-v7-Qwen2-7B-Q5_0.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q5_0.gguf) | Q5_0 | 4.950 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
199
+ | [Einstein-v7-Qwen2-7B-Q5_K_S.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q5_K_S.gguf) | Q5_K_S | 4.950 GB | large, low quality loss - recommended |
200
+ | [Einstein-v7-Qwen2-7B-Q5_K_M.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q5_K_M.gguf) | Q5_K_M | 5.071 GB | large, very low quality loss - recommended |
201
+ | [Einstein-v7-Qwen2-7B-Q6_K.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q6_K.gguf) | Q6_K | 5.825 GB | very large, extremely low quality loss |
202
+ | [Einstein-v7-Qwen2-7B-Q8_0.gguf](https://huggingface.co/tensorblock/Einstein-v7-Qwen2-7B-GGUF/tree/main/Einstein-v7-Qwen2-7B-Q8_0.gguf) | Q8_0 | 7.542 GB | very large, extremely low quality loss - not recommended |
203
+
204
+
205
+ ## Downloading instruction
206
+
207
+ ### Command line
208
+
209
+ First, install the Hugging Face command-line client:
210
+
211
+ ```shell
212
+ pip install -U "huggingface_hub[cli]"
213
+ ```
214
+
215
+ Then, download the individual model file to a local directory:
216
+
217
+ ```shell
218
+ huggingface-cli download tensorblock/Einstein-v7-Qwen2-7B-GGUF --include "Einstein-v7-Qwen2-7B-Q2_K.gguf" --local-dir MY_LOCAL_DIR
219
+ ```
220
+
221
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
222
+
223
+ ```shell
224
+ huggingface-cli download tensorblock/Einstein-v7-Qwen2-7B-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
225
+ ```