Spaces: Runtime error
Lander San Millan committed
Commit: 1114f49
1 Parent(s): 5f70455

feat: examples added

Files changed:
- app.py  +29 -9
- examples/athl.jpeg  +0 -0
- examples/giraffe.jpeg  +0 -0
- examples/koala.png  +0 -0
- examples/lasalve.jpeg  +0 -0
- examples/muniain.jpeg  +0 -0
- examples/townhall.jpg  +0 -0
app.py
CHANGED
```diff
@@ -8,19 +8,38 @@ from flamingo_mini_task import FlamingoModel, FlamingoProcessor
 from datasets import load_dataset,concatenate_datasets
 from PIL import Image
 
+EXAMPLES_DIR = 'examples'
+DEFAULT_PROMPT = "<image>"
+MINI_MODEL = "flamingo-mini-bilbaocaptions-scienceQA[QA]"
+TINY_MODEL = "flamingo-tiny-scienceQA[COT+QA]"
+MEGATINY_MODEL = "flamingo-megatiny-opt-scienceQA[QA]"
 
 flamingo_megatiny_captioning_models = {
-
+    MINI_MODEL: {
         'model': FlamingoModel.from_pretrained('TheMrguiller/Flamingo-tiny_ScienceQA_COT-QA'),
     },
-
+    TINY_MODEL: {
         'model': FlamingoModel.from_pretrained('TheMrguiller/Flamingo-mini-Bilbao_Captions-task_BilbaoQA-ScienceQA'),
     },
-
+    MEGATINY_MODEL:{
         'model': FlamingoModel.from_pretrained('landersanmi/flamingo-megatiny-opt-QA')
     },
 }
 
+# setup some example images
+examples = []
+path = EXAMPLES_DIR + "/{}"
+cot = False
+
+examples.append([path.format("koala.png"), "What animal is this?", "Koala", "Elephant", "Cat", "Mouse", cot, MEGATINY_MODEL])
+examples.append([path.format("townhall.jpg"), "What building is this?", "Guggenheim museum", "San mames stadium", "Alhondiga", "Bilbao townhall", cot, TINY_MODEL])
+examples.append([path.format("muniain.jpeg"), "What team is IKer Muniain associated?", "Real Madrid", "Manchester United", "Athletic Bilbao", "Rayo Vallecano", cot, TINY_MODEL])
+examples.append([path.format("lasalve.jpeg"), "What is the name of this bridge?", "La Salve", "Zubizuri", "La Ribera", "San Anton", cot, TINY_MODEL])
+examples.append([path.format("athl.jpeg"), "Football fans hold flags with what team colors?", "Athletic", "Besiktas", "Udinese", "Real Madrid", cot, TINY_MODEL])
+
+#examples.append([path, cot, DEFAULT_PROMPT, DEFAULT_MODEL])
+#examples.append([path, cot, DEFAULT_PROMPT, DEFAULT_MODEL])
+
 
 def generate_text(image, question, option_a, option_b, option_c, option_d, cot_checkbox, model_name):
     model = flamingo_megatiny_captioning_models[model_name]['model']
@@ -44,17 +63,17 @@ def generate_text(image, question, option_a, option_b, option_c, option_d, cot_c
         prompt = prompt,
     )
 
-    return prediction[0]
+    return prediction[0].split('[ANSWER]')[1]
 
 
 
 
-image_input = gr.Image(
-question_input = gr.inputs.Textbox(default="
+image_input = gr.Image(path.format("giraffe.jpeg"))
+question_input = gr.inputs.Textbox(default="What animal is this?")
 opt_a_input = gr.inputs.Textbox(default="Dog")
-opt_b_input = gr.inputs.Textbox(default="
-opt_c_input = gr.inputs.Textbox(default="
-opt_d_input = gr.inputs.Textbox(default="
+opt_b_input = gr.inputs.Textbox(default="Giraffe")
+opt_c_input = gr.inputs.Textbox(default="Elephant")
+opt_d_input = gr.inputs.Textbox(default="Cocodrile")
 cot_checkbox = gr.inputs.Checkbox(label="Generate COT")
 select_model = gr.inputs.Dropdown(choices=list(flamingo_megatiny_captioning_models.keys()))
 
@@ -72,6 +91,7 @@ gr.Interface(
         cot_checkbox,
         select_model
     ],
+    examples=examples,
     outputs=text_output,
     title='Generate answers from MCQ',
     description='Generate answers from Multiple Choice Questions or generate a Chain Of Though about the question and the options given',
```
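Note that the return value changes from the raw generation to `prediction[0].split('[ANSWER]')[1]`, which assumes every generation contains an `[ANSWER]` marker and raises an `IndexError` when it does not. A minimal sketch of a more defensive parse; the `[ANSWER]` tag comes from the diff, while the helper name and fallback behaviour are hypothetical, not part of this commit:

```python
# Hypothetical helper, not part of the commit: returns the text after the
# '[ANSWER]' marker, or the full generation when the marker is missing.
def extract_answer(generated: str, tag: str = "[ANSWER]") -> str:
    _, sep, answer = generated.partition(tag)
    return answer.strip() if sep else generated.strip()

# e.g. inside generate_text: return extract_answer(prediction[0])
```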
examples/athl.jpeg
ADDED
examples/giraffe.jpeg
ADDED
examples/koala.png
ADDED
examples/lasalve.jpeg
ADDED
examples/muniain.jpeg
ADDED
examples/townhall.jpg
ADDED
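The image files above are what the new `examples` rows in app.py point at; each row follows the positional order of the interface's inputs (image, question, four options, the COT checkbox, the model dropdown). A minimal, self-contained sketch of that wiring, assuming a recent Gradio release and using current component names rather than the deprecated `gr.inputs.*` aliases in the diff; the callback body is a placeholder, not the Space's Flamingo code:

```python
import gradio as gr

def answer_mcq(image, question, opt_a, opt_b, opt_c, opt_d, generate_cot, model_name):
    # Placeholder: the real app routes these inputs to a FlamingoModel.
    return f"[{model_name}] {question} -> {opt_a}"

# Each example row maps positionally onto the `inputs` list below.
examples = [
    ["examples/koala.png", "What animal is this?", "Koala", "Elephant", "Cat", "Mouse", False, "demo-model"],
]

demo = gr.Interface(
    fn=answer_mcq,
    inputs=[
        gr.Image(type="filepath"),
        gr.Textbox(value="What animal is this?"),
        gr.Textbox(value="Dog"),
        gr.Textbox(value="Giraffe"),
        gr.Textbox(value="Elephant"),
        gr.Textbox(value="Crocodile"),
        gr.Checkbox(label="Generate COT"),
        gr.Dropdown(choices=["demo-model"]),
    ],
    outputs=gr.Textbox(),
    examples=examples,
    title="Generate answers from MCQ",
)

if __name__ == "__main__":
    demo.launch()
```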