tbdatasci committed on
Commit
ada5f8b
1 Parent(s): 72df077
Files changed (1)
  1. app.py +36 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ from transformers import pipeline
+ import gradio as gr
+ import io
+ from PIL import Image
+ import base64
+
+ # Load the BLIP image-captioning model as a local image-to-text pipeline.
+ get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+
+ def summarize(input):
+     # Return the generated caption for the given image input.
+     output = get_completion(input)
+     return output[0]['generated_text']
+
+ def image_to_base64_str(pil_image):
+     # Encode a PIL image as a base64 PNG string (useful when calling a hosted API).
+     byte_arr = io.BytesIO()
+     pil_image.save(byte_arr, format='PNG')
+     byte_arr = byte_arr.getvalue()
+     return str(base64.b64encode(byte_arr).decode('utf-8'))
+
+ def captioner(image):
+     # The local pipeline accepts a PIL image directly, so no base64 encoding is needed here.
+     result = get_completion(image)
+     return result[0]['generated_text']
+
+ gr.close_all()
+ demo = gr.Interface(fn=captioner,
+                     inputs=[gr.Image(label="Upload image", type="pil")],
+                     outputs=[gr.Textbox(label="Caption")],
+                     title="Image Captioning with BLIP",
+                     description="Caption any image using the BLIP model",
+                     allow_flagging="never")
+
+ demo.launch()