import re

import torch
from transformers import AutoTokenizer
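
# These two entry points follow the SageMaker PyTorch inference handler
# convention (an assumption based on their signatures): model_fn is invoked
# once per worker to load artifacts from the unpacked model directory, and
# predict_fn is invoked per request with the deserialized payload plus
# whatever model_fn returned.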
def model_fn(model_dir):
    # Tokenizer files are expected alongside the serialized model in model_dir.
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    # torch.load deserializes a fully pickled nn.Module, so the model class
    # must be importable in this environment.
    model = torch.load(f"{model_dir}/torch_model.pt")
    model.eval()  # disable dropout and similar train-time behavior
    return model, tokenizer


def predict_fn(data, load_list):
    model, tokenizer = load_list
    # Accept either a bare payload or one wrapped under an "inputs" key.
    request_inputs = data.pop("inputs", data)
    template = request_inputs["template"]
    messages = request_inputs["messages"]
    char_name = request_inputs["char_name"]
    user_name = request_inputs["user_name"]

    # Read the prompt template; a context manager closes the file handle
    # instead of leaking one per request.
    with open(f"{template}.txt", "r") as template_file:
        template = template_file.read()

    # Flatten the chat history into "<speaker>: <message>" lines, attributing
    # "AI" turns to the character and all other turns to the user.
    user_input = "\n".join(
        "{name}: {message}".format(
            name=char_name if msg["role"] == "AI" else user_name,
            message=msg["message"].strip(),
        )
        for msg in messages
    )

    prompt = template.format(char_name=char_name, user_name=user_name, user_input=user_input)
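
    # For illustration only (this template is hypothetical): a template file
    # containing "Persona: {char_name} is chatting with {user_name}.\n{user_input}"
    # and a single user message "Hi!" yield a prompt ending in "User: Hi!";
    # "\n{char_name}:" is appended below as the cue for the model's reply.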

    # Tokenize the prompt plus a trailing "{char_name}:" cue so the model
    # continues speaking as the character; move the tensors to the GPU where
    # the model is expected to live.
    inputs = tokenizer(prompt + f"\n{char_name}:", return_tensors="pt").to("cuda")

    with torch.no_grad():
        encoded_output = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            do_sample=True,  # without this, temperature/top_p/top_k are ignored
            max_new_tokens=50,
            temperature=0.5,
            top_p=0.9,
            top_k=0,  # 0 disables top-k filtering, leaving nucleus sampling in charge
            repetition_penalty=1.1,
            pad_token_id=50256,  # GPT-2-family EOS token id, reused for padding
            num_return_sequences=1,
        )

    # Decode, strip the echoed prompt, then keep only the character's first
    # reply (everything before the user would speak again).
    decoded_output = tokenizer.decode(encoded_output[0], skip_special_tokens=True).replace(prompt, "")
    decoded_output = decoded_output.split(f"{char_name}:", 1)[1].split(f"{user_name}:", 1)[0].strip()

    # Drop *roleplay action* asides; keep the unfiltered reply if the filter
    # removes everything.
    parsed_result = re.sub(r"\*.*?\*", "", decoded_output).strip()
    if len(parsed_result) != 0:
        decoded_output = parsed_result
    # Remove stray asterisks and collapse runs of whitespace.
    decoded_output = " ".join(decoded_output.replace("*", "").split())

    # Truncate at the last sentence-ending punctuation mark so the reply does
    # not stop mid-sentence; if none exists, the [-1] lookup raises IndexError
    # and the text is kept as-is.
    try:
        parsed_result = decoded_output[: [m.start() for m in re.finditer(r"[.!?]", decoded_output)][-1] + 1]
        if len(parsed_result) != 0:
            decoded_output = parsed_result
    except IndexError:
        pass

    return {
        "role": "AI",
        "message": decoded_output,
    }
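

# A minimal local smoke test, assuming a CUDA GPU, a local "model" directory
# holding the tokenizer files and torch_model.pt, and a "chat.txt" template on
# disk; the directory, template name, and payload below are made up for
# illustration and are not part of the serving contract.
if __name__ == "__main__":
    payload = {
        "inputs": {
            "template": "chat",  # predict_fn resolves this to chat.txt
            "char_name": "Ada",
            "user_name": "User",
            "messages": [
                {"role": "User", "message": "Hi! How are you today?"},
                {"role": "AI", "message": "I'm doing well, thanks for asking."},
                {"role": "User", "message": "What are you reading lately?"},
            ],
        }
    }
    print(predict_fn(payload, model_fn("model")))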