jealk committed
Commit 7351e0c · verified · 1 Parent(s): e807790

Corrected mistral to llama, typo

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -61,7 +61,7 @@ import torch
 from transformers import AutoTokenizer, AutoModel, AutoConfig
 from peft import PeftModel
 
-# Loading base Mistral model, along with custom code that enables bidirectional connections in decoder-only LLMs. MNTP LoRA weights are merged into the base model.
+# Loading base Llama model, along with custom code that enables bidirectional connections in decoder-only LLMs. MNTP LoRA weights are merged into the base model.
 tokenizer = AutoTokenizer.from_pretrained(
     "jealk/llm2vec-da-mntp"
 )
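
For context, the comment touched by this commit describes the standard LLM2Vec loading pattern for a decoder-only Llama model with bidirectional attention and merged MNTP LoRA weights. Below is a minimal sketch of how the snippet plausibly continues after the tokenizer line; the trust_remote_code/config/dtype arguments and the merge_and_unload() call are assumptions based on typical LLM2Vec model cards, not part of this diff.

import torch
from transformers import AutoTokenizer, AutoModel, AutoConfig
from peft import PeftModel

tokenizer = AutoTokenizer.from_pretrained("jealk/llm2vec-da-mntp")

# Assumption: trust_remote_code=True pulls in the custom modeling code that enables
# bidirectional attention in the decoder-only Llama model.
config = AutoConfig.from_pretrained("jealk/llm2vec-da-mntp", trust_remote_code=True)
model = AutoModel.from_pretrained(
    "jealk/llm2vec-da-mntp",
    trust_remote_code=True,
    config=config,
    torch_dtype=torch.bfloat16,
)

# Assumption: the MNTP LoRA adapter is hosted in the same repo; loading it with PEFT
# and calling merge_and_unload() folds the LoRA weights into the base model.
model = PeftModel.from_pretrained(model, "jealk/llm2vec-da-mntp")
model = model.merge_and_unload()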