Update pages/GPT.py
pages/GPT.py  +8 -3
@@ -5,7 +5,7 @@ import transformers
 import random
 import textwrap
 
-@st.cache
+# @st.cache
 def load_model():
     model_finetuned = transformers.AutoModelWithLMHead.from_pretrained(
         'tinkoff-ai/ruDialoGPT-small',
@@ -21,7 +21,9 @@ def preprocess_text(text_input, tokenizer):
     return prompt
 
 def predict_sentiment(model, prompt, temp, num_generate):
+    print('1')
     with torch.inference_mode():
+        print('2')
         result = model.generate(
             input_ids=prompt,
             max_length=100,
@@ -34,19 +36,22 @@ def predict_sentiment(model, prompt, temp, num_generate):
             num_return_sequences=num_generate,
         ).cpu().numpy()
     print(result)
-
+    return result
 
 st.title('Text generation with dreambook')
 
 model, tokenizer = load_model()
 
 text_input = st.text_input("Enter some text about movie")
-max_len = st.slider('Length of sequence', 0,
+max_len = st.slider('Length of sequence', 0, 100, 50)
 temp = st.slider('Temperature', 1, 30, 1)
 num_generate = st.text_input("Enter number of sequences")
 
 if st.button('Generate'):
+    print('uirhf')
     prompt = preprocess_text(text_input, tokenizer)
+    print('uirhf')
     result = predict_sentiment(model, prompt, temp, int(num_generate))
+    print('uirhf')
     for i in result:
         st.write(textwrap.fill(tokenizer.decode(i), max_len))
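
For reference, below is a minimal, self-contained sketch of the app flow this change produces (cached model loading plus sampling-based generation). It is not part of the commit: the st.cache_resource decorator, the generate_sequences name, the do_sample/temperature arguments and the example prompt are illustrative assumptions, since only max_length and num_return_sequences are visible in the diff.

# Minimal sketch of the resulting app flow; assumptions noted inline.
import textwrap

import streamlit as st
import torch
import transformers

MODEL_NAME = 'tinkoff-ai/ruDialoGPT-small'


@st.cache_resource  # assumed stand-in for the removed @st.cache
def load_model():
    # Load the fine-tuned dialogue model and its tokenizer once per session.
    model = transformers.AutoModelWithLMHead.from_pretrained(MODEL_NAME)
    tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
    return model, tokenizer


def generate_sequences(model, prompt_ids, temp, num_generate):
    # Same role as predict_sentiment() in the diff: sample several
    # continuations of one prompt and return the generated token ids.
    with torch.inference_mode():
        return model.generate(
            input_ids=prompt_ids,
            max_length=100,
            do_sample=True,            # assumption: sampling so temperature has an effect
            temperature=float(temp),
            num_return_sequences=num_generate,
        ).cpu().numpy()


model, tokenizer = load_model()
prompt_ids = tokenizer.encode("some seed text", return_tensors='pt')
for ids in generate_sequences(model, prompt_ids, temp=1, num_generate=2):
    st.write(textwrap.fill(tokenizer.decode(ids), 50))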