swcrazyfan committed
Commit: d5e6e07
Parent: b2a8d79

Update app.py

Files changed (1):
app.py (+6, -10)
app.py CHANGED
@@ -9,7 +9,7 @@ tokenizer = T5Tokenizer.from_pretrained("swcrazyfan/Dekingify-T5-Large")
 def tokenize_data(text):
     # Tokenize the review body
     # input_ = "paraphrase: "+ str(text) + ' >'
-    input_ = "deking: " + str(text) + ' </s>'
+    input_ = "kingify: " + str(text) + ' </s>'
     max_len = 512
     # tokenize inputs
     tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')
@@ -18,20 +18,16 @@ def tokenize_data(text):
               "attention_mask": tokenized_inputs['attention_mask']}
     return inputs
 
-def generate_answers(text):
+def generate_answers(text, max_length, min_length, num_beams):
     inputs = tokenize_data(text)
     results = model.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], do_sample=True,
-                             num_beams=5,
-                             max_length=512,
-                             min_length=1,
+                             num_beams=num_beams,
+                             max_length=max_length,
+                             min_length=min_length,
                              early_stopping=True,
                              num_return_sequences=1)
     answer = tokenizer.decode(results[0], skip_special_tokens=True)
     return answer
 
-#iface = gr.Interface(fn=generate_answers, inputs=["Write your text here..."], outputs=["Jamesified text"])
-#iface.launch(inline=False, share=True)
-
-iface = gr.Interface(title="DeKingify", description="Write any English text from the 17th-century. Then, click submit to 'Dekingify' it (try to rephrase it in modern, English language).", fn=generate_answers, inputs=[gr.inputs.Textbox(label="Original Text",lines=10)], outputs=["text"])
-#iface = gr.Interface(title="King Jamesify” fn=generate_answers, inputs=[gr.inputs.Textbox(label="Original",lines=30)],outputs=[gr.outputs.Textbox(label="King Jamesified", lines=30)])
+iface = gr.Interface(title="Kingify", description="Write anything below. Then, click submit to 'Kingify' it.", fn=generate_answers, inputs=[gr.inputs.Textbox(label="Original Text",lines=10), gr.inputs.Slider(label="Maximum Length", minimum=1, maximum=512, default=512, step=1), gr.inputs.Slider(label="Minimum Length", minimum=1, maximum=512, default=1, step=1), gr.inputs.Slider(label="Number of Beams", minimum=1, maximum=10, default=5, step=1)], outputs=["text"])
 iface.launch(inline=False)
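For context, a minimal sketch of the full app.py as it stands after this commit. Only the two hunks above appear in the diff; the imports and the model load are reconstructed here as assumptions (the checkpoint name is taken from the tokenizer line visible in the first hunk header).

# Sketch of app.py after this commit; the import and model-loading lines are
# assumptions, not part of the diff.
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("swcrazyfan/Dekingify-T5-Large")  # shown in the hunk header
model = T5ForConditionalGeneration.from_pretrained("swcrazyfan/Dekingify-T5-Large")  # assumed checkpoint

def tokenize_data(text):
    # Prepend the task prefix and append the end-of-sequence token, then
    # tokenize to a fixed 512-token window.
    input_ = "kingify: " + str(text) + ' </s>'
    tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True,
                                 max_length=512, return_attention_mask=True,
                                 return_tensors='pt')
    return {"input_ids": tokenized_inputs['input_ids'],
            "attention_mask": tokenized_inputs['attention_mask']}

def generate_answers(text, max_length, min_length, num_beams):
    # Generation settings now arrive from the UI instead of being hard-coded
    # (previously num_beams=5, max_length=512, min_length=1).
    inputs = tokenize_data(text)
    results = model.generate(input_ids=inputs['input_ids'],
                             attention_mask=inputs['attention_mask'],
                             do_sample=True,
                             num_beams=num_beams,
                             max_length=max_length,
                             min_length=min_length,
                             early_stopping=True,
                             num_return_sequences=1)
    return tokenizer.decode(results[0], skip_special_tokens=True)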
 
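The net effect of the commit: the task prefix flips from "deking" to "kingify", and the three generation settings that were hard-coded become Gradio sliders, passed to generate_answers in the order the inputs list declares them (text, max_length, min_length, num_beams). A hypothetical direct call with the slider defaults, bypassing the UI:

# Hypothetical call with the slider defaults (max_length=512, min_length=1,
# num_beams=5); the input sentence is illustrative, not from the repository.
kingified = generate_answers("Do not worry about tomorrow.",
                             max_length=512, min_length=1, num_beams=5)
print(kingified)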