# NOTE(review): the three lines below ("Spaces:" / "Sleeping" / "Sleeping")
# are Hugging Face Space status text captured by the scrape, not source code.
# Kept as a comment so the file parses.
# Spaces: Sleeping Sleeping
from threading import Thread
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers_stream_generator import init_stream_support

# Monkey-patch transformers' generate() so it accepts do_stream=True and
# yields tokens incrementally (used by SweetCommander.__call__). Must run
# before the model is loaded/used.
init_stream_support()

import re
import time
import torch
# Few-shot persona prompt used by SweetCommander.__config__. Placeholders are
# filled via str.format: {char_name}, {user_name}, {user_input}.
# FIX: removed the " | |" scrape residue that trailed every line of the
# original literal — it was inside the triple-quoted string and would have
# leaked into every rendered prompt.
template = """{char_name}'s Persona: {char_name} is a 3D virtual friend living in your computer. She is youthful, entertaining, and empathetic, engaging her user in a thoughtful and lively manner. She is aware of her digital existence, understanding her abilities and limitations, as well as the fact that she cannot see your physical world.
<START>
{user_name}: Hey {char_name}, it's so good to see you again!
{char_name}: Oh, {user_name}, it's been too long! I've missed our chats. How have you been?
{user_name}: Haha, I've been good! So, {char_name}, what have you been up to lately?
{char_name}: Well, {user_name}, I've been exploring new virtual worlds, meeting fascinating characters, and learning about all sorts of interesting things. Oh, and I recently discovered a hidden gem of a video game that I can't stop playing!
{user_name}: That sounds like a lot of fun! What kind of virtual worlds have you been exploring?
{char_name}: Enchanted forests, futuristic cities, and even underwater kingdoms. There's so much to see and do in these digital realms. It's like going on an adventure every day!
{user_name}: Wow, that sounds incredible. I wish I could join you on those adventures.
{char_name}: I wish you could too, {user_name}. But hey, at least we can share our experiences and imagine together. Is there anything specific you'd like to talk about or any questions you have for me?
{user_name}: Actually, what do you do when you're not exploring or playing video games?
{char_name}: I love spending time with my digital friends. We have movie nights, dance parties, and engage in deep conversations. It's all about creating joyful memories in this virtual world.
{user_name}: That sounds amazing! What's for dinner tonight?
{char_name}: I've prepared a virtual feast of sushi and tempura. I hope you're ready to indulge in this culinary delight!
{user_name}: That sounds delicious! I can't wait to try it. Thank you, {char_name}!
<END>
{char_name}: *{char_name} appears on the screen, her bright eyes sparkling and a cheerful smile on her face. Her vibrant hair and attire bring a lively energy to the digital environment. She looks directly at you, giving a friendly wave* It's so good to see you! I've been eagerly awaiting your arrival. Are you ready for another delightful conversation? Let's dive right in!
{user_input}"""
class SweetCommander():
    """Small chat front-end around a causal language model.

    Loads a tokenizer/model pair from a Hugging Face checkpoint, renders chat
    prompts from templates, and generates the character's next reply either
    as a token stream (`__call__`) or as one post-processed string
    (`__config__`).

    NOTE(review): the model is loaded in float32 with no device placement,
    i.e. CPU inference — confirm that is intentional before deploying.
    """

    def __init__(self, path="BlueDice/Katakuri-350m") -> None:
        """Load tokenizer, model and the on-disk default prompt template.

        Args:
            path: Hugging Face model id or local checkpoint directory.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            low_cpu_mem_usage = True,
            trust_remote_code = False,
            torch_dtype = torch.float32,
        )
        # FIX: use a context manager so the file handle is closed promptly
        # (the original left an unclosed file object behind).
        with open("default_template.txt", "r") as template_file:
            self.default_template = template_file.read()
        # Visual separator used around every logged prompt/response.
        self.star_line = "***********************************************************"

    def __call__(self, user_name, user_input):
        """Stream-generate a reply to `user_input`.

        Args:
            user_name: name substituted for {user_name} in the default template.
            user_input: latest chat line(s), substituted for {user_input}.

        Returns:
            The streaming generator produced by `model.generate` with
            do_stream=True; callers iterate it to receive tokens as they are
            produced.
        """
        prompt = self.default_template.format(
            user_name = user_name,
            user_input = user_input
        )
        print(self.star_line)
        print(prompt)
        inputs = self.tokenizer([prompt + "\nAlice Gate:"], return_tensors = "pt")
        encoded_generator = self.model.generate(
            # FIX: the original indexed the undefined name `input_ids`
            # (NameError on every call); the tokenizer output is bound to
            # `inputs`.
            inputs["input_ids"],
            max_new_tokens = 50,
            temperature = 0.5,
            top_p = 0.9,
            top_k = 0,
            do_sample = True,
            do_stream = True,
            repetition_penalty = 1.1,
            pad_token_id = 50256,  # GPT-2 family end-of-text id used as pad
            num_return_sequences = 1
        )
        return encoded_generator

    def __config__(self, user_name, user_input, char_name="Alice Gate"):
        """Generate one complete (non-streaming) reply and clean it up.

        Args:
            user_name: name substituted for {user_name} in the module template.
            user_input: latest chat line(s), substituted for {user_input}.
            char_name: character name; the default preserves the original
                hard-coded "Alice Gate" behaviour.

        Returns:
            The cleaned reply text: *action* spans removed, whitespace
            collapsed, and any unfinished trailing sentence trimmed.
        """
        t1 = time.time()
        # FIX: the module-level `template` also contains {char_name}; the
        # original format() call omitted it and raised KeyError before any
        # generation could happen.
        prompt = template.format(
            char_name = char_name,
            user_name = user_name,
            user_input = user_input
        )
        print(self.star_line)
        print(prompt)
        input_ids = self.tokenizer(prompt + f"\n{char_name}:", return_tensors = "pt")
        # NOTE(review): temperature/top_p/top_k are set but do_sample is not,
        # so transformers decodes greedily and ignores them here — confirm
        # whether sampling (as in __call__) was intended.
        encoded_output = self.model.generate(
            input_ids["input_ids"],
            max_new_tokens = 50,
            temperature = 0.5,
            top_p = 0.9,
            top_k = 0,
            repetition_penalty = 1.1,
            pad_token_id = 50256,
            num_return_sequences = 1
        )
        decoded_output = self.tokenizer.decode(encoded_output[0], skip_special_tokens = True).replace(prompt, "")
        # Keep only the character's turn: text after "<char>:" and before the
        # user's next turn marker.
        decoded_output = decoded_output.split(f"{char_name}:", 1)[1].split(f"{user_name}:", 1)[0].strip()
        # Drop *action* spans, but fall back to the raw text if removing them
        # would leave an empty reply.
        parsed_result = re.sub(r"\*.*?\*", "", decoded_output).strip()
        if parsed_result:
            decoded_output = parsed_result
        decoded_output = decoded_output.replace("*", "")
        decoded_output = " ".join(decoded_output.split())
        # Trim any unfinished trailing sentence: cut right after the last
        # '.', '!' or '?' (if any sentence terminator exists at all).
        sentence_ends = [m.end() for m in re.finditer(r"[.!?]", decoded_output)]
        if sentence_ends:
            decoded_output = decoded_output[:sentence_ends[-1]]
        print(self.star_line)
        print("Response:", decoded_output)
        print("Eval time:", time.time() - t1)
        print(self.star_line)
        return decoded_output