Corrected typo: "mistral" changed to "llama"
Browse files
README.md
CHANGED
@@ -61,7 +61,7 @@ import torch
|
|
61 |
from transformers import AutoTokenizer, AutoModel, AutoConfig
|
62 |
from peft import PeftModel
|
63 |
|
64 |
-
# Loading base
|
65 |
tokenizer = AutoTokenizer.from_pretrained(
|
66 |
"jealk/llm2vec-da-mntp"
|
67 |
)
|
|
|
61 |
from transformers import AutoTokenizer, AutoModel, AutoConfig
|
62 |
from peft import PeftModel
|
63 |
|
64 |
+
# Loading base Llama model, along with custom code that enables bidirectional connections in decoder-only LLMs. MNTP LoRA weights are merged into the base model.
|
65 |
tokenizer = AutoTokenizer.from_pretrained(
|
66 |
"jealk/llm2vec-da-mntp"
|
67 |
)
|