mrm8488 committed on
Commit
d87f4e9
1 Parent(s): 9ff47b2

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -23
README.md CHANGED
@@ -54,7 +54,7 @@ The model was converted using [this notebook](https://nbviewer.org/urls/huggingf
54
  ### How to use
55
 
56
  ```sh
57
- wget https://huggingface.co/mrm8488/bertin-gpt-j-6B-ES-8bit/resolve/main/utils.py -O utils.py
58
  pip install transformers
59
  pip install bitsandbytes-cuda111==0.26.0
60
  ```
@@ -63,28 +63,7 @@ pip install bitsandbytes-cuda111==0.26.0
63
  import transformers
64
  import torch
65
 
66
- from utils import convert_to_int8
67
-
68
-
69
- class GPTJBlock(transformers.models.gptj.modeling_gptj.GPTJBlock):
70
- def __init__(self, config):
71
- super().__init__(config)
72
-
73
- convert_to_int8(self.attn)
74
- convert_to_int8(self.mlp)
75
-
76
-
77
- class GPTJModel(transformers.models.gptj.modeling_gptj.GPTJModel):
78
- def __init__(self, config):
79
- super().__init__(config)
80
- convert_to_int8(self)
81
-
82
-
83
- class GPTJForCausalLM(transformers.models.gptj.modeling_gptj.GPTJForCausalLM):
84
- def __init__(self, config):
85
- super().__init__(config)
86
- convert_to_int8(self)
87
-
88
 
89
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
90
 
 
54
  ### How to use
55
 
56
  ```sh
57
+ wget https://huggingface.co/mrm8488/bertin-gpt-j-6B-ES-8bit/resolve/main/utils.py -O Utils.py
58
  pip install transformers
59
  pip install bitsandbytes-cuda111==0.26.0
60
  ```
 
63
  import transformers
64
  import torch
65
 
66
+ from Utils import GPTJBlock, GPTJForCausalLM
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
69