cognitivess
committed
Update README.md
README.md CHANGED
````diff
@@ -118,38 +118,50 @@ To use this model, first install the custom package:
 Then, you can use the model like this:
 
 ```python
-import cognitivess_model
-
-from transformers import AutoTokenizer, AutoModelForCausalLM
+import cognitivess_model
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-
+# Define the model path
+model_path = "CognitivessAI/cognitivess"
 
 # Load the tokenizer
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_path)
 
-# Load the model
+# Load the model with correct configuration for precision and device placement
 model = AutoModelForCausalLM.from_pretrained(
-
+    model_path,
     torch_dtype=torch.float32,
-    device_map="auto"
-)
+    device_map="auto"  # Automatically maps model to available devices
+).eval()
 
+# Move model to CUDA if available
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+# Prepare input
 messages = [
-    {"role": "user", "content": "
+    {"role": "user", "content": "Who are you?"}
 ]
 
-
+# Tokenize input
+input_ids = tokenizer(
+    [msg["content"] for msg in messages],
+    return_tensors='pt',
+    padding=True,
+    truncation=True
+).input_ids
 
-
-
-    do_sample=True,
-    temperature=0.5,
-    max_new_tokens=1024
-)
+# Move input_ids to the same device as the model
+input_ids = input_ids.to(device)
 
-
-
+# Generate output
+output_ids = model.generate(input_ids, max_new_tokens=50)
+
+# Decode output
+response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+print(response)
 
 ```
 
````
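A note on the updated snippet: with `device_map="auto"`, Accelerate already places the weights, so the follow-up `model.to(device)` is usually redundant (and can misbehave when the map spans several devices). The plain `tokenizer(...)` call also bypasses any chat template the repository may define, and `padding=True` requires the tokenizer to have a pad token. The sketch below is not part of the commit; it shows the same flow without the manual move and, assuming the tokenizer ships a chat template, builds the prompt with `apply_chat_template`:

```python
import cognitivess_model  # registers the custom architecture with transformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "CognitivessAI/cognitivess"
tokenizer = AutoTokenizer.from_pretrained(model_path)

# device_map="auto" already handles placement; no model.to(device) needed afterwards
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float32,
    device_map="auto",
).eval()

messages = [{"role": "user", "content": "Who are you?"}]

# Assumes the repository defines a chat template for the tokenizer
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=50)

# Decode only the newly generated tokens, not the echoed prompt
response = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(response)
```

If the tokenizer lacks a pad token, the diff's `padding=True` path typically needs `tokenizer.pad_token = tokenizer.eos_token` first; with a single input sequence, padding can simply be omitted.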