|
from transformers import AutoModel, AutoTokenizer
|
|
import torch
|
|
|
|
class EndpointHandler():
    """Hugging Face Inference Endpoint handler returning contextual embeddings.

    Every request's text is prefixed with a fixed Chassidic-philosophy prompt,
    tokenized, and run through the model; the raw last hidden state is
    returned as nested Python lists.
    """

    def __init__(self, path=""):
        """Load the tokenizer and model.

        Parameters
        ----------
        path : str
            Model repo id or local directory passed to ``from_pretrained``
            (the Inference Endpoint runtime supplies the deployed model path).
        """
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModel.from_pretrained(path)
        # Explicitly disable dropout/batch-norm training behavior so the
        # returned embeddings are deterministic. `from_pretrained` typically
        # returns the model already in eval mode, but being explicit costs
        # nothing and protects against surprises.
        self.model.eval()

    def __call__(self, data):
        """Handle one inference request.

        Parameters
        ----------
        data : dict
            Request payload; must contain the text to embed under the key
            ``'inputs'`` (a ``KeyError`` propagates if it is absent).

        Returns
        -------
        dict
            ``{'embeddings': ...}`` — the model's last hidden state with the
            batch dimension squeezed away, converted to nested lists of
            floats. NOTE(review): ``squeeze()`` would also drop the sequence
            dimension for a 1-token input — confirm callers tolerate that.
        """
        inputs = data['inputs']

        # Fixed framing prompt prepended to every request before encoding.
        prompt = "Contextual understanding of the following text, from the perspective of Chassidic philosophy: "
        combined_input = prompt + inputs

        # Truncate to the model's typical 512-token context window.
        encoded_input = self.tokenizer(
            combined_input,
            return_tensors='pt',
            padding=True,
            truncation=True,
            max_length=512,
        )

        # Inference only — skip autograd graph construction to save memory.
        with torch.no_grad():
            outputs = self.model(**encoded_input)

        embeddings = outputs.last_hidden_state.squeeze().tolist()

        return {'embeddings': embeddings}
|
|
|