Commit: "Update app.py" — file changed: app.py
@@ -3,12 +3,20 @@ import time
 import torch

 from peft import PeftModel, PeftConfig
-from transformers import AutoModelForCausalLM

-
-
-
-model =

 tokenizer.pad_token = tokenizer.eos_token
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub id of the fine-tuned LoRA adapter; its PeftConfig records which
# base model the adapter was trained on.
peft_model_id = "Ngadou/falcon-7b-scam-buster"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the frozen base model quantized to 8-bit. device_map='auto' lets
# accelerate place the weights on the available device(s), so no manual
# .to("cuda") is needed — and .to() is not supported on 8-bit-quantized
# models (the original called it anyway, before re-wrapping the model).
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    trust_remote_code=True,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the Lora model: attach the adapter exactly once.
# (The original wrapped the model in PeftModel twice — the second
# PeftModel.from_pretrained received an already-wrapped PeftModel.)
model = PeftModel.from_pretrained(model, peft_model_id)

# Falcon's tokenizer ships without a pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token