---
license: cc-by-3.0
language:
- en
---
A model for mapping abstract sentence descriptions to sentences that fit the descriptions. Use `load_finetuned_model()` to load the tokenizer together with the query and sentence encoders, and `encode_batch()` to encode a batch of sentences with either encoder.
```python
from transformers import AutoTokenizer, AutoModel
import torch


def load_finetuned_model(params_query_encoder, params_sent_encoder):
    # `params_query_encoder` / `params_sent_encoder` are the finetuned state dicts
    # for the two encoders (e.g. obtained with `torch.load`).
    def fix_module_prefix_in_state_dict(state_dict):
        # Strip the 'module.' prefix left over from DataParallel checkpoints.
        return {k.replace('module.', ''): v for k, v in state_dict.items()}

    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-mpnet-base-v2")
    query_encoder = AutoModel.from_pretrained("sentence-transformers/all-mpnet-base-v2")
    sentence_encoder = AutoModel.from_pretrained("sentence-transformers/all-mpnet-base-v2")
    query_encoder.load_state_dict(fix_module_prefix_in_state_dict(params_query_encoder))
    sentence_encoder.load_state_dict(fix_module_prefix_in_state_dict(params_sent_encoder))
    query_encoder.eval()
    sentence_encoder.eval()
    return tokenizer, query_encoder, sentence_encoder


def encode_batch(model, tokenizer, sentences, device):
    inputs = tokenizer(sentences, padding=True, max_length=512, truncation=True,
                       return_tensors="pt", add_special_tokens=True).to(device)
    features = model(**inputs)[0]
    # Mean-pool the token embeddings (excluding the first special token),
    # masking out padding positions.
    mask = inputs["attention_mask"][:, 1:].unsqueeze(-1)
    features = torch.sum(features[:, 1:, :] * mask, dim=1) / torch.clamp(
        torch.sum(inputs["attention_mask"][:, 1:], dim=1, keepdim=True), min=1e-9)
    return features
```
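
Below is a minimal usage sketch. The checkpoint filenames (`query_encoder.pt`, `sentence_encoder.pt`), the example description, and the candidate sentences are illustrative assumptions rather than files or data shipped with this model; cosine similarity is shown only as one way to compare a description embedding against sentence embeddings.

```python
import torch
import torch.nn.functional as F

# Assumed local checkpoint paths; replace with wherever the finetuned
# state dicts are actually stored.
params_query_encoder = torch.load("query_encoder.pt", map_location="cpu")
params_sent_encoder = torch.load("sentence_encoder.pt", map_location="cpu")
tokenizer, query_encoder, sentence_encoder = load_finetuned_model(
    params_query_encoder, params_sent_encoder)

device = "cuda" if torch.cuda.is_available() else "cpu"
query_encoder.to(device)
sentence_encoder.to(device)

# An abstract description (query) and candidate sentences (illustrative examples).
query = ["A sentence describing a natural phenomenon."]
sentences = [
    "The aurora lit up the night sky over northern Norway.",
    "The committee postponed the vote until next week.",
]

with torch.no_grad():
    q_emb = encode_batch(query_encoder, tokenizer, query, device)
    s_emb = encode_batch(sentence_encoder, tokenizer, sentences, device)

# Rank candidate sentences by cosine similarity to the description.
scores = F.normalize(q_emb, dim=-1) @ F.normalize(s_emb, dim=-1).T
print(scores)
```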