# qa-Gradio / app.py
# minjibi's Space — revision "Update app.py" (1307787), 1.4 kB
import gradio as gr
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from transformers import (
MT5ForConditionalGeneration,
MT5TokenizerFast,
)
# Download (on first run) and load the fine-tuned MT5 QA-generation checkpoint
# from the Hugging Face Hub.  return_dict=True makes forward passes return
# ModelOutput objects instead of plain tuples.
model = MT5ForConditionalGeneration.from_pretrained(
    "minjibi/qa",
    return_dict=True,
)
# Fast (Rust-backed) tokenizer matching the same checkpoint.
tokenizer = MT5TokenizerFast.from_pretrained(
    "minjibi/qa"
)
def predict(text):
    """Generate up to three question/answer pairs for ``text``.

    The input is fed to the minjibi/qa MT5 model under beam search; each
    returned beam is decoded and reformatted into a numbered
    ``N. Question: ...\\n Answer...`` entry.

    Parameters
    ----------
    text : str
        Source passage to generate questions from.

    Returns
    -------
    str
        Newline-joined, numbered question/answer candidates.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
    # Pure beam search.  The original code also passed top_p=50 / top_k=20,
    # but top_p must be a float in (0, 1] and both are sampling-only knobs
    # that transformers ignores (and warns about) when do_sample=False, so
    # they are dropped here.
    generated_ids = model.generate(
        input_ids=input_ids,
        num_beams=5,
        max_length=1000,
        repetition_penalty=3.0,  # default = 2.5
        length_penalty=1.0,
        early_stopping=True,
        num_return_sequences=3,
    )
    preds = [
        tokenizer.decode(
            ids,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=True,
        )
        for ids in generated_ids
    ]
    lines = []
    for i, pred in enumerate(preds, start=1):
        # The model is assumed to mark the answer with a bare capital 'A';
        # expand it to the word "Answer".
        # NOTE(review): str.replace hits *every* capital 'A', so any other
        # capital A in the generated text is mangled too — confirm the
        # model's output format.
        expanded = pred.replace('A', 'Answer')
        # Everything before the first "Answer" marker is the question text.
        # partition keeps the full remainder as the answer (the original
        # split(...)[1] truncated it at a second marker) and cannot raise.
        question, sep, answer = expanded.partition('Answer')
        if sep:
            # Fixed: the original prepended 'Question: ' while building the
            # intermediate list AND again in this f-string, doubling the
            # prefix; it also raised IndexError when no 'A' was present.
            lines.append(f"{i}. Question: {question.strip()}\n Answer{answer.strip()}")
        else:
            # No answer marker in this beam — emit the question alone
            # instead of crashing.
            lines.append(f"{i}. Question: {expanded.strip()}")
    return '\n'.join(lines)
# Expose the predictor as a minimal text-in / text-out Gradio UI and serve it.
iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
)
iface.launch()