# iCaption / app.py
import os
import io

from PIL import Image
from transformers import pipeline
import gradio as gr

# Load BLIP (base) through the Hugging Face image-to-text pipeline.
get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")


def summarize(input):
    # Generic captioning helper; not wired to the Gradio UI below.
    output = get_completion(input)
    return output[0]['generated_text']


def captioner(image):
    # The pipeline returns a list of dicts; keep only the generated caption string.
    result = get_completion(image)
    return result[0]['generated_text']
gr.close_all()

# Example images expected alongside app.py.
christmas_dog = "dog_animal_greyhound_983023.jpg"
bird = "bird_exotic_bird_green.jpg"
cow = "cow_animal_cow_head.jpg"
demo = gr.Interface(
    fn=captioner,
    inputs=[gr.Image(label="Upload image", type="pil", value=christmas_dog)],
    outputs=[gr.Textbox(label="Caption")],
    title="Image Captioning with BLIP",
    description="Caption any image using the BLIP model",
    allow_flagging="never",
    examples=[christmas_dog, bird, cow],
)

demo.launch(share=True)
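
# ---------------------------------------------------------------------------
# Minimal sketch (an assumption, not part of the app's UI flow): the captioner
# can also be exercised directly, since the image-to-text pipeline accepts a
# PIL image and returns a list of dicts with a 'generated_text' key. The file
# name below is one of the example images and is assumed to be present in the
# working directory.
#
#   sample = Image.open("bird_exotic_bird_green.jpg")
#   print(captioner(sample))  # prints the BLIP caption for the sample image
# ---------------------------------------------------------------------------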