Mossy20 committed on
Commit
63a3136
·
verified ·
1 Parent(s): 61dd154

Upload LlamaForCausalLM

Browse files
Files changed (3) hide show
  1. config.json +1 -1
  2. model.safetensors +1 -1
  3. recipe.yaml +1 -1
config.json CHANGED
@@ -51,7 +51,7 @@
51
  "quantization_status": "compressed",
52
  "sparsity_config": {
53
  "format": "dense",
54
- "global_sparsity": 0.4504805113143489,
55
  "ignore": [
56
  "lm_head"
57
  ],
 
51
  "quantization_status": "compressed",
52
  "sparsity_config": {
53
  "format": "dense",
54
+ "global_sparsity": 0.45048483360247604,
55
  "ignore": [
56
  "lm_head"
57
  ],
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4b820a04dfcb60f9ba541d402291eda85e45d211e4de44dacbbfbb0bd60d94eb
3
  size 4721963376
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3774433910fd03838b3bea93174ae7156782dfc32afb7df2a4138ac7d86b121b
3
  size 4721963376
recipe.yaml CHANGED
@@ -1,6 +1,6 @@
1
  sparsity_stage:
2
  sparsity_modifiers:
3
- SparseGPTModifier: {sparsity: 0.5, mask_structure: '2:4', sequential_update: false}
4
  run_type: &id001 !!python/object/apply:llmcompressor.recipe.stage.StageRunType [oneshot]
5
  finetuning_stage:
6
  finetuning_modifiers:
 
1
  sparsity_stage:
2
  sparsity_modifiers:
3
+ SparseGPTModifier: {sparsity: 0.9, mask_structure: '2:4', sequential_update: false}
4
  run_type: &id001 !!python/object/apply:llmcompressor.recipe.stage.StageRunType [oneshot]
5
  finetuning_stage:
6
  finetuning_modifiers: