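# Quantization recipe: SmoothQuant (smoothing_strength 0.7) followed by one-shot GPTQ.
# All Linear layers get int8 channel-wise symmetric weights (MSE observer) and
# dynamic per-token int8 activations; lm_head is left unquantized.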
quant_stage:
  quant_modifiers:
    SmoothQuantModifier:
      smoothing_strength: 0.7
      mappings:
      - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
        - re:.*input_layernorm
      - - ['re:.*gate_proj', 're:.*up_proj']
        - re:.*post_attention_layernorm
      - - ['re:.*down_proj']
        - re:.*up_proj
    GPTQModifier:
      sequential_update: true
      dampening_frac: 0.01
      ignore: [lm_head]
      config_groups:
        group_0:
          targets: [Linear]
          weights: {num_bits: 8, type: int, symmetric: true, strategy: channel, observer: mse}
          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: token, dynamic: true,
            observer: memoryless}
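
# Applying the recipe: a minimal sketch, assuming the llm-compressor oneshot entry
# point (import path and the model id, calibration dataset, and sample count below
# are placeholders, not part of this recipe):
#
#   from llmcompressor import oneshot
#
#   oneshot(
#       model="<model-id>",
#       dataset="<calibration-dataset>",
#       recipe="recipe.yaml",
#       max_seq_length=2048,
#       num_calibration_samples=512,
#   )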