PKaushik commited on
Commit
db3a731
·
verified ·
1 Parent(s): 03287b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from transformers import ViltProcessor, ViltForQuestionAnswering
3
  import torch
4
 
5
- torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
6
 
7
  processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
8
  model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
@@ -23,7 +23,7 @@ def answer_question(image, text):
23
  image = gr.inputs.Image(type="pil")
24
  question = gr.inputs.Textbox(label="Question")
25
  answer = gr.outputs.Textbox(label="Predicted answer")
26
- examples = [["cats.jpg", "How many cats are there?"]]
27
 
28
  title = "Interactive demo: ViLT"
29
  description = "Gradio Demo for ViLT (Vision and Language Transformer), fine-tuned on VQAv2, a model that can answer questions from images. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
 
2
  from transformers import ViltProcessor, ViltForQuestionAnswering
3
  import torch
4
 
5
+ torch.hub.download_url_to_file('http://images.cocodataset.org/test-stuff2017/000000027054.jpg', 'zoo.jpg')
6
 
7
  processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
8
  model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
 
23
  image = gr.inputs.Image(type="pil")
24
  question = gr.inputs.Textbox(label="Question")
25
  answer = gr.outputs.Textbox(label="Predicted answer")
26
+ examples = [["zoo.jpg", "How many giraffes are there?"]]
27
 
28
  title = "Interactive demo: ViLT"
29
  description = "Gradio Demo for ViLT (Vision and Language Transformer), fine-tuned on VQAv2, a model that can answer questions from images. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."