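# Image-captioning demo: wraps the Salesforce/blip-image-captioning-base
# model (via the transformers image-to-text pipeline) in a Gradio UI.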
from transformers import pipeline
import gradio as gr


# Load an image-to-text pipeline backed by the BLIP base captioning model;
# the weights are downloaded from the Hugging Face Hub on first use.
get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")


def captioner(image):
    # Run the pipeline on the uploaded PIL image and return the caption text.
    result = get_completion(image)
    return result[0]['generated_text']
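
# Minimal sketch of calling captioner() directly, assuming a local image
# file "example.jpg" (hypothetical filename):
#   from PIL import Image
#   print(captioner(Image.open("example.jpg")))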

gr.close_all()  # Shut down any Gradio servers still running from previous launches.
demo = gr.Interface(fn=captioner,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning with BLIP",
                    description="Caption any image using the BLIP model",
                    allow_flagging="never",  # renamed to flagging_mode="never" in Gradio 5+
                    examples=["christmas_dog.jpeg", "cow.jpeg"])  # example images assumed to sit next to this script

demo.launch(inline=False)  # inline=False skips notebook embedding; open the printed local URL instead.