pierreguillou committed
Commit 1f7f0cb
1 Parent(s): ee3e0d2

Update app.py

Files changed (1):
  1. app.py +38 -9
app.py CHANGED
@@ -1,4 +1,16 @@
import gradio as gr
+ import transformers
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ # model & tokenizer
+ model_name = "pierreguillou/t5-base-qa-squad-v1.1-portuguese"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+
+ # generation parameters
+ max_target_length = 32
+ num_beams = 1
+ early_stopping = True

title = "QA App | T5 base finetuned on SQuAD 1.1 in Portuguese"
description = "Forneça seu próprio parágrafo e faça perguntas sobre o texto. Quão bem o modelo responde? (este aplicativo usa o modelo https://huggingface.co/pierreguillou/t5-base-qa-squad-v1.1-portuguese)"
@@ -8,12 +20,29 @@ context = "A pandemia de COVID-19, também conhecida como pandemia de coronavír

question = "Quando começou a pandemia de Covid-19 no mundo?"

- gr.Interface.load(
-     "pierreguillou/t5-base-qa-squad-v1.1-portuguese",
-     css=".footer {display:none !important}",
-     inputs=[gr.inputs.Textbox(lines=7, default=context, label="Context paragraph"), gr.inputs.Textbox(lines=2, default=question, label="Question")],
-     outputs=[gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Score")],
-     title=title,
-     description=description,
-     article=article,
- ).launch()
+ def qa(question, context):
+     input_text = "question: " + question + " context: " + context
+     inputs = tokenizer(input_text, return_tensors="pt")
+
+     outputs = model.generate(inputs["input_ids"],
+                              max_length=max_target_length,
+                              num_beams=num_beams,
+                              early_stopping=early_stopping
+                              )
+     pred = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
+     return pred
+
+ # Gradio interface
+ iface = gr.Interface(
+     title=title,
+     description=description,
+     article=article,
+     allow_screenshot=allow_screenshot,
+     allow_flagging=allow_flagging,
+     fn=qa,
+     inputs=[gr.inputs.Textbox(placeholder="Digite uma questao aqui:", lines=5), gr.inputs.Textbox(placeholder="Digite um contexto aqui:", lines=5)],
+     outputs=[gr.outputs.HTML(label="QA com T5 base")],
+     examples=examples
+ )
+
+ iface.launch()
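
Below is a minimal sketch for exercising the new qa function outside the Gradio Space. It assumes the full Hub repo id pierreguillou/t5-base-qa-squad-v1.1-portuguese and mirrors the "question: ... context: ..." prompt format and generation settings from app.py; the context string is an illustrative stand-in, not the app's default paragraph.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned Portuguese T5 QA model (full Hub repo id assumed).
model_name = "pierreguillou/t5-base-qa-squad-v1.1-portuguese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def qa(question, context):
    # Same "question: ... context: ..." input format as in app.py.
    input_text = "question: " + question + " context: " + context
    inputs = tokenizer(input_text, return_tensors="pt")
    # Same generation settings as app.py (max_target_length=32, greedy decoding).
    outputs = model.generate(inputs["input_ids"],
                             max_length=32,
                             num_beams=1,
                             early_stopping=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

if __name__ == "__main__":
    # The question comes from app.py; the context is a short illustrative snippet.
    context = "A pandemia de COVID-19 começou em dezembro de 2019, na cidade de Wuhan, na China."
    question = "Quando começou a pandemia de Covid-19 no mundo?"
    print(qa(question, context))  # expected: an answer span such as "dezembro de 2019"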