shivangibithel and RamAnanth1 committed
Commit 4eab53c · 0 Parent(s)

Duplicate from RamAnanth1/InstructBLIP


Co-authored-by: Ram Ananth <[email protected]>

Files changed (5)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +138 -0
  4. banff.jpg +0 -0
  5. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
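These are the stock Hugging Face LFS rules: each pattern routes matching files through Git LFS instead of storing them in the repository directly, and each line is the form that a command like `git lfs track "*.safetensors"` appends to .gitattributes.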
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: InstructBLIP
+ emoji: 📊
+ colorFrom: indigo
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.29.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: RamAnanth1/InstructBLIP
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
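With this front matter, the Space builds against Gradio 3.29.0 and launches app.py as the entry point; `duplicated_from` is only provenance metadata. To try the demo outside of Spaces, the usual steps would be `pip install -r requirements.txt` followed by `python app.py` (a GPU is assumed for reasonable speed, though the code below falls back to CPU).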
app.py ADDED
@@ -0,0 +1,138 @@
+ import gradio as gr
+ from lavis.models import load_model_and_preprocess
+ import torch
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ model_name = "blip2_t5_instruct"
+ model_type = "flant5xl"
+ model, vis_processors, _ = load_model_and_preprocess(
+     name=model_name,
+     model_type=model_type,
+     is_eval=True,
+     device=device
+ )
+
+ def infer(image, prompt, min_len, max_len, beam_size, len_penalty, repetition_penalty, top_p, decoding_method):
+     use_nucleus_sampling = decoding_method == "Nucleus sampling"
+     image = vis_processors["eval"](image).unsqueeze(0).to(device)
+
+     samples = {
+         "image": image,
+         "prompt": prompt,
+     }
+
+     output = model.generate(
+         samples,
+         length_penalty=float(len_penalty),
+         repetition_penalty=float(repetition_penalty),
+         num_beams=beam_size,
+         max_length=max_len,
+         min_length=min_len,
+         top_p=top_p,
+         use_nucleus_sampling=use_nucleus_sampling
+     )
+
+     return output[0]
+
+ theme = gr.themes.Monochrome(
+     primary_hue="indigo",
+     secondary_hue="blue",
+     neutral_hue="slate",
+     radius_size=gr.themes.sizes.radius_sm,
+     font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
+ )
+ css = ".generating {visibility: hidden}"
+
+ examples = [
+     ["banff.jpg", "Can you tell me about this image in detail", 1, 200, 5, 1, 3, 0.9, "Beam search"]
+ ]
+ with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
+     gr.Markdown("## InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning")
+     gr.Markdown(
+         """
+         Unofficial demo for InstructBLIP, a vision-language instruction-tuning framework from Salesforce built on BLIP-2 models that achieves state-of-the-art zero-shot generalization on a wide range of vision-language tasks.
+         The demo is based on the official <a href="https://github.com/salesforce/LAVIS/tree/main/projects/instructblip" style="text-decoration: underline;" target="_blank">GitHub</a> implementation.
+         """
+     )
+     gr.HTML("<p>You can duplicate this Space to skip the queue and run it privately: <a style='display:inline-block' href='https://huggingface.co/spaces/RamAnanth1/InstructBLIP?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             image_input = gr.Image(type="pil")
+             prompt_textbox = gr.Textbox(label="Prompt:", placeholder="prompt", lines=2)
+             output = gr.Textbox(label="Output")
+             submit = gr.Button("Run", variant="primary")
+
+         with gr.Column(scale=1):
+             min_len = gr.Slider(
+                 minimum=1,
+                 maximum=50,
+                 value=1,
+                 step=1,
+                 interactive=True,
+                 label="Min Length",
+             )
+
+             max_len = gr.Slider(
+                 minimum=10,
+                 maximum=500,
+                 value=250,
+                 step=5,
+                 interactive=True,
+                 label="Max Length",
+             )
+
+             sampling = gr.Radio(
+                 choices=["Beam search", "Nucleus sampling"],
+                 value="Beam search",
+                 label="Text Decoding Method",
+                 interactive=True,
+             )
+
+             top_p = gr.Slider(
+                 minimum=0.5,
+                 maximum=1.0,
+                 value=0.9,
+                 step=0.1,
+                 interactive=True,
+                 label="Top p",
+             )
+
+             beam_size = gr.Slider(
+                 minimum=1,
+                 maximum=10,
+                 value=5,
+                 step=1,
+                 interactive=True,
+                 label="Beam Size",
+             )
+
+             len_penalty = gr.Slider(
+                 minimum=-1,
+                 maximum=2,
+                 value=1,
+                 step=0.2,
+                 interactive=True,
+                 label="Length Penalty",
+             )
+
+             repetition_penalty = gr.Slider(
+                 minimum=-1,
+                 maximum=3,
+                 value=1,
+                 step=0.2,
+                 interactive=True,
+                 label="Repetition Penalty",
+             )
+     gr.Examples(
+         examples=examples,
+         inputs=[image_input, prompt_textbox, min_len, max_len, beam_size, len_penalty, repetition_penalty, top_p, sampling],
+         cache_examples=False,
+         fn=infer,
+         outputs=[output],
+     )
+
+     submit.click(infer, inputs=[image_input, prompt_textbox, min_len, max_len, beam_size, len_penalty, repetition_penalty, top_p, sampling], outputs=[output])
+
+ demo.queue(concurrency_count=16).launch(debug=True)
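As a sanity check, here is a minimal sketch of driving the infer function above without the Gradio UI, assuming the model weights have been downloaded and the bundled banff.jpg is on disk (the argument values mirror the examples row):

    from PIL import Image

    # Reuses infer() and the model/processor globals defined in app.py.
    image = Image.open("banff.jpg").convert("RGB")
    answer = infer(
        image,
        "Can you tell me about this image in detail",
        min_len=1,
        max_len=200,
        beam_size=5,
        len_penalty=1,
        repetition_penalty=3,
        top_p=0.9,
        decoding_method="Beam search",
    )
    print(answer)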
banff.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ accelerate
+ git+https://github.com/RamAnanth/lavis.git
+ torch
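Note that lavis is installed from a personal fork (git+https://github.com/RamAnanth/lavis.git) rather than from PyPI, presumably to pick up InstructBLIP support ahead of an upstream release, and that none of the dependencies are version-pinned, so a rebuild may resolve to newer, possibly incompatible releases.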