# "Mail fixer" — Hugging Face Spaces Streamlit app
# (page-scrape residue removed: "Spaces:" / "Sleeping" status badges from the Space listing)
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
class StreamlitStreamer(TextStreamer):
    """TextStreamer that mirrors generated text into the Streamlit page.

    Instead of printing finalized chunks to stdout (the TextStreamer
    default), each chunk is appended to ``st.session_state['new_mail']``
    and the accumulated text is re-rendered into the module-level
    ``new_mail`` placeholder, giving a live token-by-token display.
    """

    def on_finalized_text(self, text: str, stream_end: bool = False):
        # Accumulate in session state so the full text survives
        # Streamlit's script re-runs during streaming.
        st.session_state['new_mail'] += text
        # NOTE(review): relies on the module-level `new_mail` element
        # created later in the script; the class is only usable after
        # that element exists — confirm ordering if refactoring.
        new_mail.write(st.session_state['new_mail'])
# Prefer GPU when available; everything (model + inputs) is moved to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def load_model():
    """Load the fine-tuned mail-fixing causal LM and move it to `device`.

    NOTE(review): Streamlit re-runs the whole script on every user
    interaction, so this reloads the model each time; wrapping with
    ``@st.cache_resource`` would load it once per process — confirm the
    decorator wasn't lost in extraction.
    """
    return AutoModelForCausalLM.from_pretrained(
        "tomaszki/mail_fixer",
    ).to(device)
def load_tokenizer():
    """Load the tokenizer for the base model (facebook/opt-125m).

    The fine-tuned checkpoint ("tomaszki/mail_fixer") presumably derives
    from this base model and shares its vocabulary — TODO confirm.
    """
    return AutoTokenizer.from_pretrained("facebook/opt-125m")
# Instantiate model/tokenizer at module level (re-executed on each Streamlit rerun).
model = load_model()
tokenizer = load_tokenizer()

st.title('Mail fixer')

# Example email shown as greyed-out placeholder text in the input area.
placeholder = '''Hi Emma,
How did you like our art project? I thought it was pretty cool too! But one thing I noticed was that sometimes the letters were hard to see because they were a little bit too small. Let's try making them bigger or changing their shape so everyone can read them better.
Thanks for helping us work on the project! I have lots of other ideas for things we can do together. Can't wait to get started!
Love, James'''

mail = st.text_area('Enter your mail here', placeholder=placeholder, height=300)
# Placeholder element that StreamlitStreamer writes streamed output into.
new_mail = st.text('')
if mail:
    # Reset the accumulated output for this run, then stream the model's
    # rewrite into the `new_mail` element as tokens are generated.
    st.session_state['new_mail'] = ''
    streamer = StreamlitStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    prompt = f'Original email:\n{mail}\nFixed email:\n'
    tokenized = tokenizer(prompt, return_tensors='pt').to(device)
    # Output is also returned here, but the UI is updated via the streamer.
    output = model.generate(**tokenized, max_new_tokens=1024, streamer=streamer)