ntphat committed on
Commit
2336c12
β€’
1 Parent(s): 22e41ed

Update model.py


enable cuda for torch

Files changed (1)
  1. model.py +2 -1
model.py CHANGED
@@ -3,10 +3,11 @@ str_cmd1 = 'pip install "unsloth[colab-new] @ git+https://github.com/unslothai/u
 str_cmd2 = 'pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes'
 os.system(str_cmd1)
 os.system(str_cmd2)
+os.environ["CUDA_VISIBLE_DEVICES"] = "1" # or "0,1" for multiple GPUs


 from unsloth import FastLanguageModel
-# import torch
+import torch
 max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
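Note that the change sets CUDA_VISIBLE_DEVICES before torch is imported, which is what makes it take effect: the CUDA runtime reads the variable when it first initializes, so setting it after torch has already touched the GPU does nothing. A minimal sanity check (not part of the commit; the device index "1" assumes a machine with at least two GPUs) might look like:

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # must be set before torch initializes CUDA

import torch

# With CUDA_VISIBLE_DEVICES="1", torch sees exactly one device, indexed 0,
# which maps to physical GPU 1 on the host.
print(torch.cuda.is_available())      # True if the masked-in GPU is usable
print(torch.cuda.device_count())      # 1
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # name of physical GPU 1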