```bash
git clone https://github.com/neuralmagic/AutoFP8.git
pip install -e AutoFP8
```

```python
from transformers import AutoTokenizer
from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
from datasets import load_dataset

pretrained_model_dir = "Unbabel/TowerInstruct-7B-v0.1"
quantized_model_dir = "TowerInstruct-7B-v0.1-FP8"

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
tokenizer.pad_token = tokenizer.eos_token

# Build a small calibration set: 512 chat examples rendered with the model's chat template.
ds = load_dataset("mgoin/ultrachat_2k", split="train_sft").select(range(512))
examples = [tokenizer.apply_chat_template(batch["messages"], tokenize=False) for batch in ds]
examples = tokenizer(examples, padding=True, truncation=True, return_tensors="pt").to("cuda")

# Static activation scheme: activation scales are computed once from the calibration data.
quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")

model = AutoFP8ForCausalLM.from_pretrained(
    pretrained_model_dir, quantize_config=quantize_config
)
model.quantize(examples)
model.save_quantized(quantized_model_dir)
```
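
The saved directory contains the FP8 weights and scales in a checkpoint format that vLLM can load. Below is a minimal sketch of running the quantized model, assuming a recent vLLM build with FP8 support is installed; the prompt is purely illustrative and does not apply the model's chat template.

```python
from vllm import LLM, SamplingParams

# Assumes the quantized checkpoint was saved to ./TowerInstruct-7B-v0.1-FP8 above;
# vLLM reads the quantization settings from the checkpoint's config.
llm = LLM(model="TowerInstruct-7B-v0.1-FP8")
params = SamplingParams(temperature=0.0, max_tokens=128)

outputs = llm.generate(
    ["Translate the following text from English into Portuguese.\nEnglish: Hello, world!\nPortuguese:"],
    params,
)
print(outputs[0].outputs[0].text)
```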