# nlp_proj/pages/GPT.py — Streamlit page: dream-book text generation with ruDialoGPT.
import streamlit as st
import torch
import numpy as np
import transformers
import random
import textwrap
@st.cache_resource  # cache_resource (not cache_data): a torch model is an unserializable global resource
def load_model():
    """Load ruDialoGPT-small, apply fine-tuned dream-book weights, and return (model, tokenizer).

    Returns:
        tuple: (model_finetuned, tokenizer) — a causal-LM model with weights
        loaded from 'GPT_sonnik_only.pt' (mapped to CPU so the app runs
        without a GPU) and its matching tokenizer.
    """
    # AutoModelForCausalLM replaces the deprecated AutoModelWithLMHead.
    model_finetuned = transformers.AutoModelForCausalLM.from_pretrained(
        'tinkoff-ai/ruDialoGPT-small',
        output_attentions=False,
        output_hidden_states=False,
    )
    # Fine-tuned checkpoint; presumably trained on a dream-book corpus — TODO confirm.
    model_finetuned.load_state_dict(
        torch.load('GPT_sonnik_only.pt', map_location=torch.device('cpu'))
    )
    tokenizer = transformers.AutoTokenizer.from_pretrained('tinkoff-ai/ruDialoGPT-small')
    return model_finetuned, tokenizer
def preprocess_text(text_input, tokenizer):
    """Tokenize the raw user text and return it as a PyTorch id tensor ('pt')."""
    return tokenizer.encode(text_input, return_tensors='pt')
def predict_sentiment(model, prompt, temp, num_generate):
    """Generate `num_generate` sampled continuations of an encoded prompt.

    NOTE(review): despite the name, this performs text generation, not
    sentiment prediction; the name is kept for compatibility with callers.

    Args:
        model: a transformers causal-LM model exposing ``generate``.
        prompt: token-id tensor produced by ``preprocess_text``.
        temp: sampling temperature (coerced to float for ``generate``).
        num_generate: number of sequences to return (must be <= num_beams=5).

    Returns:
        numpy.ndarray of shape (num_generate, <=100) with generated token ids.
    """
    # inference_mode disables autograd bookkeeping for faster generation.
    with torch.inference_mode():
        result = model.generate(
            input_ids=prompt,
            max_length=100,
            num_beams=5,
            do_sample=True,
            temperature=float(temp),
            top_k=50,
            top_p=0.6,
            no_repeat_ngram_size=3,
            num_return_sequences=num_generate,
        ).cpu().numpy()
    # Removed leftover debug print() calls that polluted the server log.
    return result
# ---- Streamlit page body ----------------------------------------------------
st.title('Text generation with dreambook')

model, tokenizer = load_model()

text_input = st.text_input("Enter some text about movie")
max_len = st.slider('Length of sequence', 0, 100, 50)
temp = st.slider('Temperature', 1, 30, 1)
num_generate = st.text_input("Enter number of sequences")

if st.button('Generate'):
    # Validate the count instead of crashing with ValueError on int('').
    if not num_generate.strip().isdigit() or int(num_generate.strip()) < 1:
        st.error('Please enter a positive whole number of sequences.')
    else:
        prompt = preprocess_text(text_input, tokenizer)
        result = predict_sentiment(model, prompt, temp, int(num_generate.strip()))
        # Wrap each decoded sequence to the user-chosen line width.
        for sequence_ids in result:
            st.write(textwrap.fill(tokenizer.decode(sequence_ids), max_len))