model card updated
Browse files
README.md
CHANGED
@@ -48,7 +48,15 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
48 |
# Load the fine-tuned model and tokenizer
|
49 |
model_name = "POLLCHECK/Pollcheck-llama3-news-classifier" # Change this to the path of your fine-tuned model
|
50 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
51 |
-
model =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
|
53 |
# convert the model and tokenizer to chat format
|
54 |
model, tokenizer = setup_chat_format(model, tokenizer)
|
|
|
48 |
# Load the fine-tuned model and tokenizer
|
49 |
model_name = "POLLCHECK/Pollcheck-llama3-news-classifier" # Change this to the path of your fine-tuned model
|
50 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
51 |
+
model = AutoModelForCausalLM.from_pretrained(
|
52 |
+
model_name,
|
53 |
+
return_dict=True,
|
54 |
+
low_cpu_mem_usage=True,
|
55 |
+
#torch_dtype=torch.bfloat16,
|
56 |
+
device_map="auto",
|
57 |
+
trust_remote_code=True,
|
58 |
+
)
|
59 |
+
|
60 |
|
61 |
# convert the model and tokenizer to chat format
|
62 |
model, tokenizer = setup_chat_format(model, tokenizer)
|