from transformers import pipeline
import gradio as gr

# Load the image-to-text model once at module import so every request
# reuses the same pipeline instead of re-downloading/re-initializing it.
get_completion = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")


def captioner(image):
    """Return a generated caption for *image*.

    Args:
        image: A PIL image (Gradio delivers one because the input
            component is declared with ``type="pil"``).

    Returns:
        The caption string produced by the captioning model.
    """
    result = get_completion(image)
    # The pipeline returns a list of dicts; the caption text for the
    # top candidate lives under the 'generated_text' key.
    return result[0]['generated_text']


# Close any Gradio servers left over from a previous run before launching.
gr.close_all()

demo = gr.Interface(
    fn=captioner,
    inputs=[gr.Image(label="Upload image", type="pil")],
    outputs=[gr.Textbox(label="Caption")],
    title="Image Captioning",
    description="Hasta la vista, captionless images!",
    # NOTE(review): `allow_flagging` was renamed `flagging_mode` in Gradio 5;
    # keep the original keyword here to avoid changing runtime behavior —
    # confirm the installed Gradio version before migrating.
    allow_flagging="never",
)

demo.launch()