t3ga committed on
Commit f497cf0 · 1 Parent(s): 0cbb5cd

Model weights changed. Quantized on the Russian part of the Aya dataset.
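For context, the settings recorded in quantize_config.json below (4-bit, group size 32, desc_act enabled, damp_percent 0.1) match a standard GPTQ run. A minimal sketch of how such a re-quantization could look with AutoGPTQ, assuming the base model is CohereForAI/c4ai-command-r-v01 and the calibration data is the Russian portion of CohereForAI/aya_dataset (both repo ids and the column names "language", "inputs", "targets" are assumptions, not taken from this commit):

from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from datasets import load_dataset
from transformers import AutoTokenizer

# Quantization settings matching quantize_config.json in this repo.
quantize_config = BaseQuantizeConfig(
    bits=4,
    group_size=32,
    desc_act=True,
    damp_percent=0.1,
    true_sequential=True,
)

base_id = "CohereForAI/c4ai-command-r-v01"  # assumed base model id
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoGPTQForCausalLM.from_pretrained(base_id, quantize_config)

# Calibration set: Russian examples from the Aya dataset
# (repo id and column names are assumptions).
aya = load_dataset("CohereForAI/aya_dataset", split="train")
ru = aya.filter(lambda ex: ex["language"] == "Russian").select(range(128))
examples = [tokenizer(ex["inputs"] + "\n" + ex["targets"]) for ex in ru]

model.quantize(examples)
model.save_quantized("command-r-gptq", use_safetensors=True)

The new checkpoint_format key that appears in both configs below looks like the output of a newer auto-gptq release, which records the on-disk layout explicitly instead of the older is_marlin_format flag; the packed GPTQ weights themselves keep the standard format.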

config.json CHANGED
@@ -23,10 +23,10 @@
   "pretraining_tp": 1,
   "quantization_config": {
     "bits": 4,
+    "checkpoint_format": "gptq",
     "damp_percent": 0.1,
     "desc_act": true,
     "group_size": 32,
-    "is_marlin_format": false,
     "model_file_base_name": "gptq_model-4bit-32g",
     "model_name_or_path": "command-r-gptq",
     "quant_method": "gptq",
gptq_model-4bit-32g.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcd07528e1fce56acc4f5d3d0867c8ca975a995c81550d5fd84087ec0c61dcff
-size 27411564808
+oid sha256:8721161e5c38ae69e2af45a12a4cd4405fbd71599d3da760c092518499ac2d4f
+size 27411564840
quantize_config.json CHANGED
@@ -8,6 +8,6 @@
   "true_sequential": true,
   "model_name_or_path": "command-r-gptq",
   "model_file_base_name": "gptq_model-4bit-32g",
-  "is_marlin_format": false,
-  "quant_method": "gptq"
+  "quant_method": "gptq",
+  "checkpoint_format": "gptq"
 }
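For completeness, a loading sketch: transformers reads the quantization_config block from config.json above and dispatches to the GPTQ backend, so the checkpoint can be used like any other causal LM once optimum and auto-gptq are installed. The repo id below is a hypothetical placeholder for this repository:

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "t3ga/command-r-gptq"  # hypothetical repo id; substitute this repository's actual id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# The quantization_config in config.json tells transformers to load the
# GPTQ-packed weights (requires optimum + auto-gptq at runtime).
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")

prompt = "Привет! Расскажи о себе."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))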