Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,102 +1,81 @@
-import argparse
-import re
-import os
 import streamlit as st
-import random
-import numpy as np
-import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-import
-
-random.seed(None)
 first = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\n\ninformal english:"""
-
-@st.cache(
-def
-
-    model = AutoModelForCausalLM.from_pretrained(
     return model, tokenizer
-def extend(input_text, num_return_sequences, bad_words, max_size=20, top_k=50, top_p=0.95):
-    if len(input_text) == 0:
-        input_text = ""
-    encoded_prompt = tokenizer.encode(
-        input_text, add_special_tokens=False, return_tensors="pt")
-    encoded_prompt = encoded_prompt.to(device)
-    if encoded_prompt.size()[-1] == 0:
-        input_ids = None
-    else:
-        input_ids = encoded_prompt
-    bad_words = bad_words.split()
-    print(bad_words)
-    bad_word_ids = []
-    for bad_word in bad_words:
-        bad_word = " " + bad_word
-        ids = tokenizer(bad_word).input_ids
-        bad_word_ids.append(ids)
-
-    output_sequences = model.generate(
-        input_ids=input_ids,
-        max_length=max_size + len(encoded_prompt[0]),
-        top_k=top_k,
-        bad_words_ids = bad_word_ids,
-        top_p=top_p,
-        do_sample=True,
-        num_return_sequences=num_return_sequences)
-    # Remove the batch dimension when returning multiple sequences
-    if len(output_sequences.shape) > 2:
-        output_sequences.squeeze_()
-    generated_sequences = []
-    print(output_sequences)
-    for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
-        generated_sequence = generated_sequence.tolist()
-        text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
-        print(text)
-        total_sequence = (
-            text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
-        )
-        generated_sequences.append(total_sequence)
-        st.write(total_sequence)
-
-    parsed_text = total_sequence.replace("<|startoftext|>", "").replace("\r","").replace("\n\n", "\n")
-    if len(parsed_text) == 0:
-        parsed_text = "שגיאה"
-    return parsed_text
-if __name__ == "__main__":
-    st.title("GPT2 Demo:")
-    pre_model_path = "BigSalmon/MrLincoln5"
-    model, tokenizer = load_model(pre_model_path)
-    stop_token = "<|endoftext|>"
-    new_lines = "\n\n\n"
-    np.random.seed(None)
-    random_seed = np.random.randint(10000,size=1)
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    n_gpu = 0 if torch.cuda.is_available()==False else torch.cuda.device_count()
-    torch.manual_seed(random_seed)
-    if n_gpu > 0:
-        torch.cuda.manual_seed_all(random_seed)
-    model.to(device)
-    text_area = st.text_area("Enter the first few words (or leave blank), tap on \"Generate Text\" below. Tapping again will produce a different result.", first)
-    st.sidebar.subheader("Configurable parameters")
-    max_len = st.sidebar.slider("Max-Length", 0, 256, 5,help="The maximum length of the sequence to be generated.")
-    num_return_sequences = st.sidebar.slider("Outputs", 1, 50, 5,help="The number of outputs to be returned.")
-    top_k = st.sidebar.slider("Top-K", 0, 100, 40, help="The number of highest probability vocabulary tokens to keep for top-k-filtering.")
-    top_p = st.sidebar.slider("Top-P", 0.0, 1.0, 0.92, help="If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.")
-    bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
 first = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\n\ninformal english:"""
+
+@st.cache(allow_output_mutation=True)
+def get_model():
+    print("hello")
+    model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln21")
+    print("hello")
+    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/Points2")
+    print("hello")
     return model, tokenizer
 
+model, tokenizer = get_model()
+
+st.text('''How To Make Prompt:
+informal english: sega’s doing mobile gaming right with this approach to exploiting its back catalog, giving people a way to build up their library of classics on devices they’ll always have close to hand for cheap. it’s a far better experience than nintendo’s classic machine sales on its home gaming systems, which tend to be locked in and not passed between hardware generations.
+Translated into the Style of Abraham Lincoln: pursuing the proper course, sega's foray into mobile gaming entails leveraging its back catalog of classics on portable devices that ( are certain to be in reach whenever / will be at the ready for use at all times ) ( without spending a fortune / at minimal cost ). such an endeavor comes as a welcome alternative to nintendo's propensity to ( sell / offer / market ) home gaming systems that are rigidly bound to a single hardware generation.
+Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly bent on / firmly set on / unyielding in its insistence on / steadfastly transfixed by / uncompromising in its commitment to / rigidly decided on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices.
+informal english: garage band has made people who know nothing about music good at creating music.
+Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
+informal english: chrome extensions can make doing regular tasks much easier to get done.
+Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
+informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
+Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will ( hasten their transition into the modern age / leap-frog them into the twenty-first century / facilitate their integration into contemporary life ).
+informal english: national parks are a big part of the us culture.
+Translated into the Style of Abraham Lincoln: the culture of the united states is ( inextricably ( bound up with / molded by / enriched by / enlivened by ) its ( serene / picturesque / pristine / breathtaking ) national parks ).
+informal english: corn fields are all across illinois, visible once you leave chicago.
+Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
+informal english:''')
+
+temp = st.sidebar.slider("Temperature", 0.7, 1.5)
+number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)
+lengths = st.sidebar.slider("Length", 3, 10)
+bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
+
+def run_generate(text, bad_words):
+    yo = []
+    input_ids = tokenizer.encode(text, return_tensors='pt').to(device)
+    res = len(tokenizer.encode(text))
+    bad_words = bad_words.split()
+    bad_word_ids = []
+    for bad_word in bad_words:
+        bad_word = " " + bad_word
+        ids = tokenizer(bad_word).input_ids
+        bad_word_ids.append(ids)
+    sample_outputs = model.generate(
+        input_ids,
+        do_sample=True,
+        max_length= res + lengths,
+        min_length = res + lengths,
+        top_k=50,
+        temperature=temp,
+        num_return_sequences=number_of_outputs,
+        bad_words_ids=bad_word_ids
+    )
+    for i in range(number_of_outputs):
+        e = tokenizer.decode(sample_outputs[i])
+        e = e.replace(text, "")
+        yo.append(e)
+    return yo
+with st.form(key='my_form'):
+    text = st.text_area(label='Enter sentence', value=first)
+    submit_button = st.form_submit_button(label='Submit')
+    if submit_button:
+        translated_text = run_generate(text, bad_words)
+        st.write(translated_text if translated_text else "No translation found")
+        with torch.no_grad():
+            text2 = str(text)
+            print(text2)
+            text3 = tokenizer.encode(text2)
+            myinput, past_key_values = torch.tensor([text3]), None
+            myinput = myinput
+            logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
+            logits = logits[0,-1]
+            probabilities = torch.nn.functional.softmax(logits)
+            best_logits, best_indices = logits.topk(100)
+            best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+            st.write(best_words)
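Note on the "Runtime error" status: in the updated file, run_generate() moves the encoded prompt with .to(device), but device is never defined anywhere in the new version (the removed code derived it from torch.cuda.is_available()), so submitting the form would raise a NameError. A minimal sketch of the likely missing piece, assuming the old CPU/GPU selection is still the intent; this snippet is not part of the commit:

import torch

# Assumed fix, mirroring the removed code: pick a device once after loading
# the model so run_generate()'s .to(device) call has something to resolve.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # keep the model weights on the same device as the inputs

# Inside the torch.no_grad() block, the manual forward pass would then also
# move its input tensor, e.g. myinput = torch.tensor([text3]).to(device)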