meetrais committed on
Commit
f8d2eeb
·
1 Parent(s): 41152e3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -0
README.md CHANGED
@@ -38,12 +38,17 @@ The following `bitsandbytes` quantization config was used during training:
38
  ## Code to call this model
39
 
40
  import torch
 
41
  from peft import PeftModel, PeftConfig
 
42
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
43
  from transformers import BitsAndBytesConfig
44
 
45
  peft_model_id = "meetrais/finetuned_mistral_7b"
 
46
  config = PeftConfig.from_pretrained(peft_model_id)
 
47
  bnb_config = BitsAndBytesConfig(
48
  load_in_4bit=True,
49
  bnb_4bit_use_double_quant=True,
@@ -51,6 +56,7 @@ bnb_4bit_quant_type="nf4",
51
  bnb_4bit_compute_dtype=torch.bfloat16
52
  )
53
  model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=bnb_config, device_map='auto')
 
54
  tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
55
 
56
  if tokenizer.pad_token is None:
 
38
  ## Code to call this model
39
 
40
  import torch
41
+
42
  from peft import PeftModel, PeftConfig
43
+
44
  from transformers import AutoModelForCausalLM, AutoTokenizer
45
+
46
  from transformers import BitsAndBytesConfig
47
 
48
  peft_model_id = "meetrais/finetuned_mistral_7b"
49
+
50
  config = PeftConfig.from_pretrained(peft_model_id)
51
+
52
  bnb_config = BitsAndBytesConfig(
53
  load_in_4bit=True,
54
  bnb_4bit_use_double_quant=True,
 
56
  bnb_4bit_compute_dtype=torch.bfloat16
57
  )
58
  model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=bnb_config, device_map='auto')
59
+
60
  tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
61
 
62
  if tokenizer.pad_token is None: