thinh111 ntphat committed on
Commit aa57960 • 1 Parent(s): 188b1d6

Update model.py (#3)

- Update model.py (2336c12fe980b8af144cae4ae4d1a7c32a7c3c22)


Co-authored-by: Nguyen Tan Phat <[email protected]>

Files changed (1):
  1. model.py (+2, -1)
model.py CHANGED
@@ -3,10 +3,11 @@ str_cmd1 = 'pip install "unsloth[colab-new] @ git+https://github.com/unslothai/u
  str_cmd2 = 'pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes'
  os.system(str_cmd1)
  os.system(str_cmd2)
+ os.environ["CUDA_VISIBLE_DEVICES"] = "1" # or "0,1" for multiple GPUs


  from unsloth import FastLanguageModel
- # import torch
+ import torch
  max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
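
One detail worth flagging for reviewers: CUDA_VISIBLE_DEVICES only takes effect if it is set before the CUDA runtime initializes, so the new line has to stay above the unsloth/torch imports, as it does in this diff. A minimal sketch of the effect (not part of the commit), assuming a machine with at least two GPUs:

import os

# CUDA_VISIBLE_DEVICES is read when the CUDA runtime initializes, so it must
# be set before torch (or any library that imports torch) is loaded, which
# is why the commit places it above the unsloth import.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # expose only physical GPU 1

import torch

print(torch.cuda.device_count())      # -> 1; only the selected GPU is visible
print(torch.cuda.get_device_name(0))  # cuda:0 now maps to physical GPU 1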
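
For context, max_seq_length, dtype, and load_in_4bit are the standard arguments to Unsloth's loader, FastLanguageModel.from_pretrained. A hedged sketch of how the rest of model.py presumably consumes them; the model id below is illustrative, not taken from this commit:

from unsloth import FastLanguageModel

max_seq_length = 2048  # RoPE scaling is handled internally by Unsloth
dtype = None           # auto-detect: float16 on T4/V100, bfloat16 on Ampere+
load_in_4bit = True    # 4-bit quantization to reduce memory usage

# The model id below is a placeholder (assumption), not from this commit.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-bnb-4bit",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)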