paraphrase-multilingual-mpnet-base-v2.gguf

A GGUF conversion of sentence-transformers/paraphrase-multilingual-mpnet-base-v2 for use with llama.cpp. The snippet below checks that the GGUF file produces the same embeddings as the original sentence-transformers model.

import torch
from llama_cpp import Llama
from sentence_transformers import SentenceTransformer
from scipy.spatial.distance import cosine

# Original model via sentence-transformers, loaded in float16 to match the F16 GGUF file.
model = SentenceTransformer(
    "paraphrase-multilingual-mpnet-base-v2",
    model_kwargs={"torch_dtype": torch.float16},
)

# GGUF conversion loaded with llama.cpp; embedding=True enables embedding output.
llm = Llama.from_pretrained(
    repo_id="mykor/paraphrase-multilingual-mpnet-base-v2.gguf",
    filename="paraphrase-multilingual-mpnet-base-277M-v2-F16.gguf",
    embedding=True,
    verbose=False,
)

# Korean test sentence; the model is multilingual, so both backends should agree on it.
text = "움츠러든 어깨를 따라서 다시 저물어가는 오늘의 끝 밤이 조용히 나를 안으면 무너져가는 날 잊어버릴 수 있어"
embed1 = model.encode(text)            # embedding from sentence-transformers
embed2 = llm.embed(text)               # embedding from the GGUF file via llama.cpp
print(cosine(embed1, embed2))          # 0.0: the cosine distance is zero, the embeddings match
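
Because the GGUF embeddings reproduce the original model, they can be used directly for multilingual similarity. The sketch below is illustrative only (the query, candidate sentences, and the embed helper are not part of this repository); it ranks candidates against a query by cosine similarity using the GGUF model through llama.cpp.

import numpy as np
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="mykor/paraphrase-multilingual-mpnet-base-v2.gguf",
    filename="paraphrase-multilingual-mpnet-base-277M-v2-F16.gguf",
    embedding=True,
    verbose=False,
)

def embed(text):
    # L2-normalize so that a dot product equals cosine similarity.
    vec = np.asarray(llm.embed(text), dtype=np.float32)
    return vec / np.linalg.norm(vec)

query = "The night quietly embraces me."
candidates = [
    "밤이 조용히 나를 안아준다.",        # Korean: "The night quietly holds me."
    "Das Wetter ist heute sehr schön.",  # German: "The weather is very nice today."
    "La noche me abraza en silencio.",   # Spanish: "The night embraces me in silence."
]

q = embed(query)
ranked = sorted(((float(q @ embed(c)), c) for c in candidates), reverse=True)
for score, sentence in ranked:
    print(f"{score:.3f}  {sentence}")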
Format: GGUF
Model size: 277M params
Architecture: bert
Available quantizations: 8-bit, 16-bit