Update README.md
README.md
CHANGED
@@ -258,7 +258,7 @@ Here's how you can run the model using the `pipeline()` function from 🤗 Trans
 import torch
 from transformers import pipeline
 
-pipe = pipeline("text-generation", model="
+pipe = pipeline("text-generation", model="vicky4s4s/zepher-beta", torch_dtype=torch.bfloat16, device_map="auto")
 
 # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
 messages = [
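For context, this hunk edits the `pipeline()` usage example in the README. Below is a minimal end-to-end sketch of how the updated call is typically used with the tokenizer's chat template; the message contents and generation parameters are illustrative assumptions, not taken from this diff.

```python
import torch
from transformers import pipeline

# Load the model referenced in the updated line; bfloat16 plus device_map="auto"
# places the weights on the available GPU(s) automatically.
pipe = pipeline("text-generation", model="vicky4s4s/zepher-beta", torch_dtype=torch.bfloat16, device_map="auto")

# Format the conversation with the tokenizer's chat template - see
# https://huggingface.co/docs/transformers/main/en/chat_templating
# (The messages below are placeholders, not part of the diff.)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain what a chat template is in one sentence."},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Generation settings are illustrative assumptions.
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```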