Upload README.md with huggingface_hub
README.md
ADDED
@@ -0,0 +1,37 @@
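The commit title says the card was pushed with huggingface_hub. As a minimal sketch of such an upload (not the uploader's actual script; the target repo id below is assumed for illustration), the same kind of commit could be made with `HfApi.upload_file`:

```python
from huggingface_hub import HfApi

# Sketch only: push a local model card to a Hub repo.
# The repo_id is an assumption for illustration, not taken from the card.
api = HfApi()
api.upload_file(
    path_or_fileobj="README.md",   # local model card file
    path_in_repo="README.md",      # destination path in the repo
    repo_id="pbatra/DeepSeek-R1-Distill-Qwen-1.5B-GGUF",  # assumed repo id
    repo_type="model",
    commit_message="Upload README.md with huggingface_hub",
)
```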
---
base_model: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
language:
- en
tags:
- transformers
- safetensors
- qwen2
- text-generation
- conversational
- autotrain_compatible
- text-generation-inference
- endpoints_compatible
- region:us
license: "unknown"
inference: false
quantized_by: pbatra
---

# DeepSeek-R1-Distill-Qwen-1.5B

This repository contains quantized versions of the model from the original repository: [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B).

| Name | Quantization Method | Size (GB) |
|------|---------------------|-----------|
| deepseek-r1-distill-qwen-1.5b.Q2_K.gguf | q2_k | 0.70 |
| deepseek-r1-distill-qwen-1.5b.Q3_K_S.gguf | q3_k_s | 0.80 |
| deepseek-r1-distill-qwen-1.5b.Q3_K_M.gguf | q3_k_m | 0.86 |
| deepseek-r1-distill-qwen-1.5b.Q3_K_L.gguf | q3_k_l | 0.91 |
| deepseek-r1-distill-qwen-1.5b.Q4_0.gguf | q4_0 | 0.99 |
| deepseek-r1-distill-qwen-1.5b.Q4_K_S.gguf | q4_k_s | 1.00 |
| deepseek-r1-distill-qwen-1.5b.Q4_K_M.gguf | q4_k_m | 1.04 |
| deepseek-r1-distill-qwen-1.5b.Q5_0.gguf | q5_0 | 1.17 |
| deepseek-r1-distill-qwen-1.5b.Q5_K_S.gguf | q5_k_s | 1.17 |
| deepseek-r1-distill-qwen-1.5b.Q5_K_M.gguf | q5_k_m | 1.20 |
| deepseek-r1-distill-qwen-1.5b.Q6_K.gguf | q6_k | 1.36 |
| deepseek-r1-distill-qwen-1.5b.Q8_0.gguf | q8_0 | 1.76 |
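As a usage note (not part of the original card), a file from the table above can be fetched with `hf_hub_download` and loaded with a GGUF runtime such as llama-cpp-python. The repo id below is an assumption for illustration; the filename is taken verbatim from the table:

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # llama-cpp-python, assumed GGUF runtime

# Download one of the quantized files listed above.
gguf_path = hf_hub_download(
    repo_id="pbatra/DeepSeek-R1-Distill-Qwen-1.5B-GGUF",   # assumed repo id
    filename="deepseek-r1-distill-qwen-1.5b.Q4_K_M.gguf",  # 1.04 GB variant from the table
)

# Load the model and run a short completion.
llm = Llama(model_path=gguf_path, n_ctx=4096)
out = llm("Explain GGUF quantization in one sentence.", max_tokens=64)
print(out["choices"][0]["text"])
```

Lower-bit variants (Q2_K through Q4_0) trade accuracy for smaller downloads and memory use; the Q8_0 file is closest to the unquantized weights.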