swcrazyfan committed on
Commit
87b9baf
·
1 Parent(s): f5eb200

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -27
app.py CHANGED
@@ -1,32 +1,26 @@
1
- import torch
2
- from transformers import (T5ForConditionalGeneration,T5Tokenizer)
3
  import gradio as gr
 
 
4
 
5
- best_model_path = "swcrazyfan/KingJamesify-T5-large"
6
- model = T5ForConditionalGeneration.from_pretrained(best_model_path)
7
- tokenizer = T5Tokenizer.from_pretrained("swcrazyfan/KingJamesify-T5-large")
8
-
9
- def tokenize_data(text):
10
- # Tokenize the review body
11
- input_ = "kingify: " + str(text) + ' </s>'
12
- max_len = 512
13
- # tokenize inputs
14
- tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')
15
 
16
- inputs={"input_ids": tokenized_inputs['input_ids'],
17
- "attention_mask": tokenized_inputs['attention_mask']}
18
- return inputs
 
 
19
 
20
- def generate_answers(text, temperature, num_beams, max_length):
21
- inputs = tokenize_data(text)
22
- results= model.generate(input_ids= inputs['input_ids'], attention_mask=inputs['attention_mask'], do_sample=True,
23
- num_beams=num_beams,
24
- max_length=max_length,
25
- min_length=1,
26
- early_stopping=True,
27
- num_return_sequences=1,
28
- temperature=temperature)
29
- answer = tokenizer.decode(results[0], skip_special_tokens=True)
30
- return answer
31
 
32
- iface = gr.Interface(title="Kingify", description="Write anything below. Then, click submit to 'Kingify' it.", fn=generate_answers, inputs=[gr.inputs.Textbox(label="Original Text",lines=10), gr.inputs.Slider(label="Temperature", default=0.7, minimum=0.0, maximum=1.0, step=0.01), gr.inputs.Slider(label="Number of Beams", default=5, minimum=1, maximum=10, step=1), gr.inputs.Textbox(label="Max Length", default=512, lines=1)], outputs=["text"])
 
 
 
1
  import gradio as gr
2
+ import torch
3
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
4
 
5
+ model = T5ForConditionalGeneration.from_pretrained('swcrazyfan/KingJamesify-T5-large')
6
+ tokenizer = T5Tokenizer.from_pretrained('swcrazyfan/KingJamesify-T5-large')
 
 
 
 
 
 
 
 
7
 
8
+ def king_jamesify(input_text):
9
+ input_ids = tokenizer.encode(input_text, return_tensors='pt').to(torch.int64)
10
+ generated_ids = model.generate(input_ids=input_ids, max_length=100, num_beams=4, repetition_penalty=2.5, length_penalty=1.0, temperature=1.0, top_k=50, top_p=1.0, no_repeat_ngram_size=3)
11
+ result = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
12
+ return result
13
 
14
+ iface = gr.Interface(king_jamesify,
15
+ [gr.inputs.Textbox(lines=20, label="Enter text to be King Jamesified"),
16
+ gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.7, label="Temperature"),
17
+ gr.inputs.Slider(minimum=1, maximum=100, default=100, label="Max Length"),
18
+ gr.inputs.Slider(minimum=1, maximum=10, default=4, label="Number of Beams"),
19
+ gr.inputs.Slider(minimum=0.0, maximum=10.0, default=2.5, label="Repetition Penalty"),
20
+ gr.inputs.Slider(minimum=0.0, maximum=10.0, default=1.0, label="Length Penalty"),
21
+ gr.inputs.Slider(minimum=1, maximum=100, default=50, label="Top K"),
22
+ gr.inputs.Slider(minimum=0.0, maximum=1.0, default=1.0, label="Top P"),
23
+ gr.inputs.Slider(minimum=1, maximum=10, default=3, label="No Repeat Ngram Size")],
24
+ gr.outputs.Textbox(label="King Jamesified Text"))
25
 
26
+ iface.launch()