---
# FP8 quantization recipe — appears to target an llm-compressor /
# compressed-tensors `QuantizationModifier`; verify against that tool's schema.
# (Fix: the original file was collapsed onto one line, which is invalid YAML —
# a plain scalar cannot contain `: `, so the nested mappings failed to parse.)
quant_stage:
  quant_modifiers:
    QuantizationModifier:
      # Modules excluded from quantization.
      ignore: [lm_head]
      config_groups:
        # Linear layers: static per-channel FP8 weights plus dynamic
        # per-token FP8 input activations.
        fp8_linear:
          weights:
            num_bits: 8
            type: float
            strategy: channel
            dynamic: false
            symmetric: true
          input_activations:
            num_bits: 8
            type: float
            strategy: token
            dynamic: true
            symmetric: true
          targets: [Linear]
        # Attention projections: static per-channel FP8 output activations.
        fp8_attention:
          output_activations:
            num_bits: 8
            type: float
            strategy: channel
            dynamic: false
            symmetric: true
          # NOTE(review): the unescaped '.' before 'weight' matches any
          # character in these regexes; if a literal dot is intended, escape
          # it ('\.'). Also confirm whether targets should name the module
          # (e.g. 're:.*q_proj') rather than its weight tensor.
          targets: ['re:.*q_proj.weight', 're:.*k_proj.weight', 're:.*v_proj.weight']