onekq committed
Commit 16173e3 (verified)
Parent: 6ff2918

Update README.md

Files changed (1):
  1. README.md +1 -1
README.md CHANGED
@@ -26,7 +26,7 @@ nf4_config = BitsAndBytesConfig(
 model = AutoModelForCausalLM.from_pretrained("ibm-granite/granite-8b-code-base-4k", quantization_config=nf4_config)
 
 # Load the tokenizer associated with the model
-tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-3b-code-base-2k")
+tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-8b-code-base-4k")
 
 # Push the model and tokenizer to the Hugging Face hub
 model.push_to_hub("onekq-ai/granite-8b-code-base-4k-bnb-4bit", use_auth_token=True)
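For context, a minimal self-contained sketch of the quantize-and-push flow this README snippet describes, using the corrected tokenizer id. The nf4_config values and the final tokenizer.push_to_hub call are assumptions for illustration; the README's actual nf4_config definition sits above the hunk shown here.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# NF4 4-bit quantization config (illustrative values; the README's real
# settings are defined above the diff hunk and are not shown here)
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the base model with 4-bit quantization applied on load
model = AutoModelForCausalLM.from_pretrained(
    "ibm-granite/granite-8b-code-base-4k", quantization_config=nf4_config
)

# Load the tokenizer associated with the model (the line this commit fixes)
tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-8b-code-base-4k")

# Push the quantized model and tokenizer to the Hugging Face hub
model.push_to_hub("onekq-ai/granite-8b-code-base-4k-bnb-4bit", use_auth_token=True)
# Pushing the tokenizer is assumed from the README's comment; that line is
# outside the diff hunk shown above
tokenizer.push_to_hub("onekq-ai/granite-8b-code-base-4k-bnb-4bit", use_auth_token=True)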