# Three-stage compression recipe: 2:4 sparsification (oneshot), sparsity-preserving
# fine-tuning (train), then 4-bit weight quantization (oneshot).
sparsity_stage:
  sparsity_modifiers:
    # Prune with SparseGPT using a 2:4 mask structure.
    SparseGPTModifier: {sparsity: 0.9, mask_structure: '2:4', sequential_update: false}
  run_type: &id001 !!python/object/apply:llmcompressor.recipe.stage.StageRunType [oneshot]
finetuning_stage:
  finetuning_modifiers:
    # Keep the pruning masks on the attention and MLP projections fixed while fine-tuning.
    ConstantPruningModifier:
      targets: ['re:.*q_proj.weight', 're:.*k_proj.weight', 're:.*v_proj.weight', 're:.*o_proj.weight',
        're:.*gate_proj.weight', 're:.*up_proj.weight', 're:.*down_proj.weight']
      start: 0
  run_type: !!python/object/apply:llmcompressor.recipe.stage.StageRunType [train]
quantization_stage:
  quantization_modifiers:
    # Quantize Linear weights (excluding lm_head) to 4-bit int, symmetric, per-channel, with GPTQ.
    GPTQModifier:
      ignore: [lm_head]
      config_groups:
        group_0:
          weights: {num_bits: 4, type: int, symmetric: true, strategy: channel}
          targets: [Linear]
  run_type: *id001
|
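The run_type entries above are PyYAML python-object tags produced when the recipe is serialized, so yaml.safe_load will refuse to parse the file. The sketch below shows one way the stage order and run types could be inspected by hand; it assumes PyYAML (>= 5.1) and llmcompressor are installed and that the recipe is saved as recipe.yaml (a hypothetical path). In normal use the file is instead passed back to llm-compressor via a recipe argument (e.g. to oneshot) rather than parsed manually.

import yaml  # PyYAML >= 5.1 provides unsafe_load

# unsafe_load resolves the !!python/object/apply tags by importing
# llmcompressor.recipe.stage.StageRunType and calling it with 'oneshot'/'train',
# so llmcompressor must be importable in this environment.
with open("recipe.yaml") as f:  # hypothetical path to the recipe above
    recipe = yaml.unsafe_load(f)

# Print each stage with its run type and the modifier groups it contains.
for stage_name, stage in recipe.items():
    modifiers = [key for key in stage if key.endswith("_modifiers")]
    print(stage_name, stage["run_type"], modifiers)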