Atom007 committed
Commit 6de9491 (1 parent: 6313717)

Create app.py

Files changed (1): app.py (+46, -0)
app.py ADDED
import gradio as gr
import torch

from transformers import BlipForConditionalGeneration, BlipProcessor

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# BLIP image-captioning model and its paired processor.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model_image_captioning = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)


def inference(raw_image, question, decoding_strategy):
    # Preprocess the image (plus the optional text prompt) and move the
    # resulting tensors onto the same device as the model.
    inputs = processor(images=raw_image, text=question, return_tensors="pt").to(device)

    # Generation settings for the chosen decoding strategy; these extra
    # keys are forwarded to model.generate() along with the input tensors.
    if decoding_strategy == "Beam search":
        inputs["max_length"] = 20
        inputs["num_beams"] = 5
    elif decoding_strategy == "Nucleus sampling":
        inputs["max_length"] = 20
        inputs["num_beams"] = 1
        inputs["do_sample"] = True
        inputs["top_k"] = 50
        inputs["top_p"] = 0.95
    elif decoding_strategy == "Contrastive search":
        inputs["penalty_alpha"] = 0.6
        inputs["top_k"] = 4
        inputs["max_length"] = 512

    out = model_image_captioning.generate(**inputs)
    return processor.batch_decode(out, skip_special_tokens=True)[0]


inputs = [
    gr.Image(type="pil"),
    gr.Textbox(lines=2, label="Context (optional)"),
    gr.Radio(
        choices=["Beam search", "Nucleus sampling", "Contrastive search"],
        value="Nucleus sampling",
        label="Caption Decoding Strategy",
    ),
]
outputs = gr.Textbox(label="Output")

title = "BLIP"

description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation (Salesforce Research). To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."

article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>GitHub Repo</a></p>"

gr.Interface(fn=inference, inputs=inputs, outputs=outputs, title=title, description=description, article=article).queue().launch()
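
For a quick local sanity check, the inference function can also be called directly in a Python session (with the final launch line commented out, since launch blocks). A minimal sketch, assuming Pillow is installed; the filename demo.jpg and the prompt text are hypothetical placeholders, not part of the app:

from PIL import Image

# Hypothetical local test: "demo.jpg" and the prompt are placeholders.
image = Image.open("demo.jpg").convert("RGB")
print(inference(image, "a photo of", "Beam search"))
print(inference(image, "a photo of", "Nucleus sampling"))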