nielsr HF staff committed on
Commit
329d18e
1 Parent(s): 1ac5bec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -30,11 +30,11 @@ def generate_captions(image):
30
 
31
  examples = [["cats.jpg"]]
32
 
33
- title = "Interactive demo: ViLT"
34
- description = "Gradio Demo for ViLT (Vision and Language Transformer), fine-tuned on VQAv2, a model that can answer questions from images. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
35
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2102.03334' target='_blank'>ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision</a> | <a href='https://github.com/dandelin/ViLT' target='_blank'>Github Repo</a></p>"
36
 
37
- interface = gr.Interface(fn=answer_question,
38
  inputs=gr.inputs.Image(type="pil"),
39
  outputs=[gr.outputs.Textbox(label="Generated caption by GIT"), gr.outputs.Textbox(label="Generated caption by BLIP")],
40
  examples=examples,
 
30
 
31
  examples = [["cats.jpg"]]
32
 
33
+ title = "Interactive demo: comparing image captioning models"
34
+ description = "Gradio Demo to compare GIT and BLIP, 2 state-of-the-art captioning models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
35
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2102.03334' target='_blank'>ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision</a> | <a href='https://github.com/dandelin/ViLT' target='_blank'>Github Repo</a></p>"
36
 
37
+ interface = gr.Interface(fn=generate_captions,
38
  inputs=gr.inputs.Image(type="pil"),
39
  outputs=[gr.outputs.Textbox(label="Generated caption by GIT"), gr.outputs.Textbox(label="Generated caption by BLIP")],
40
  examples=examples,