rootxhacker committed
Commit: adb3ef4
1 Parent(s): a644f69
Update README.md

README.md CHANGED
@@ -56,7 +56,7 @@ Users (both direct and downstream) should be made aware of the risks, biases and
 
 ## How to Get Started with the Model
 
-
+```
 
 import torch
 from peft import PeftModel, PeftConfig
@@ -87,8 +87,8 @@ def get_completion(query: str, model, tokenizer) -> str:
 generated_ids = model.generate(**model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.eos_token_id)
 decoded = tokenizer.batch_decode(generated_ids)
 return (decoded[0])
-
-
+
+```
 
 # Load the Lora model
 model = PeftModel.from_pretrained(model, peft_model_id)
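For context, the snippet being wrapped in code fences by this commit follows the usual PEFT/LoRA inference flow (load the base model and tokenizer, generate with `model.generate`, then attach the adapter with `PeftModel.from_pretrained`). Below is a minimal, self-contained sketch of that flow; the adapter repo id, dtype/device settings, and the example prompt are placeholders and assumptions, not values taken from this README.

```python
# Minimal sketch of the PEFT/LoRA inference flow shown in the README snippet.
# NOTE: peft_model_id and the example prompt are placeholders, not values from this repo.
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "some-user/some-lora-adapter"  # placeholder adapter repo id

# Resolve the base model the adapter was trained on from the adapter config
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)


def get_completion(query: str, model, tokenizer) -> str:
    # Tokenize the prompt and move it to the model's device
    model_inputs = tokenizer(query, return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=1000,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    decoded = tokenizer.batch_decode(generated_ids)
    return decoded[0]


# Load the Lora model (attach the adapter weights to the base model)
model = PeftModel.from_pretrained(model, peft_model_id)

print(get_completion("Write a short hello-world function in Python.", model, tokenizer))
```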