VishnuPJ committed
Commit f4f9aef
1 Parent(s): 41bcdf8

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -56,7 +56,7 @@ max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
  model, tokenizer = FastLanguageModel.from_pretrained(
- model_name="VishnuPJ/MalayaLLM_Gemma_2_9B_Instruct_V1.0",
+ model_name="VishnuPJ/MalayaLLM_Gemma_2_2B_Instruct_V1.0",
  max_seq_length=max_seq_length,
  dtype=dtype,
  load_in_4bit=load_in_4bit,
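For context, here is a minimal sketch of the full loading snippet this README hunk belongs to, assuming the Unsloth library is installed. The closing of the `from_pretrained(...)` call and the `FastLanguageModel.for_inference(...)` step are not shown in the hunk and are assumptions here.

```python
# Minimal sketch of loading the updated model with Unsloth (assumes `pip install unsloth`).
from unsloth import FastLanguageModel

max_seq_length = 2048  # Choose any! RoPE scaling is handled internally.
dtype = None           # None for auto detection; Float16 for Tesla T4/V100, Bfloat16 for Ampere+.
load_in_4bit = True    # Use 4-bit quantization to reduce memory usage; can be False.

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="VishnuPJ/MalayaLLM_Gemma_2_2B_Instruct_V1.0",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)

# Switch to inference mode for faster generation (assumed usage, not part of this hunk).
FastLanguageModel.for_inference(model)
```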