Model weights changed. Quantized on the Russian (ru) part of the Aya dataset.
Browse files- config.json +1 -1
- gptq_model-4bit-32g.safetensors +2 -2
- quantize_config.json +2 -2
config.json
CHANGED
@@ -23,10 +23,10 @@
|
|
23 |
"pretraining_tp": 1,
|
24 |
"quantization_config": {
|
25 |
"bits": 4,
|
|
|
26 |
"damp_percent": 0.1,
|
27 |
"desc_act": true,
|
28 |
"group_size": 32,
|
29 |
-
"is_marlin_format": false,
|
30 |
"model_file_base_name": "gptq_model-4bit-32g",
|
31 |
"model_name_or_path": "command-r-gptq",
|
32 |
"quant_method": "gptq",
|
|
|
23 |
"pretraining_tp": 1,
|
24 |
"quantization_config": {
|
25 |
"bits": 4,
|
26 |
+
"checkpoint_format": "gptq",
|
27 |
"damp_percent": 0.1,
|
28 |
"desc_act": true,
|
29 |
"group_size": 32,
|
|
|
30 |
"model_file_base_name": "gptq_model-4bit-32g",
|
31 |
"model_name_or_path": "command-r-gptq",
|
32 |
"quant_method": "gptq",
|
gptq_model-4bit-32g.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8721161e5c38ae69e2af45a12a4cd4405fbd71599d3da760c092518499ac2d4f
|
3 |
+
size 27411564840
|
quantize_config.json
CHANGED
@@ -8,6 +8,6 @@
|
|
8 |
"true_sequential": true,
|
9 |
"model_name_or_path": "command-r-gptq",
|
10 |
"model_file_base_name": "gptq_model-4bit-32g",
|
11 |
-
"
|
12 |
-
"
|
13 |
}
|
|
|
8 |
"true_sequential": true,
|
9 |
"model_name_or_path": "command-r-gptq",
|
10 |
"model_file_base_name": "gptq_model-4bit-32g",
|
11 |
+
"quant_method": "gptq",
|
12 |
+
"checkpoint_format": "gptq"
|
13 |
}
|