umarigan committed on
Commit 2f58859 · verified · 1 Parent(s): 1ce1351

Create app.py

Files changed (1)
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
+import gradio as gr
+from transformers import BlipProcessor, BlipForConditionalGeneration
+from PIL import Image
+
+# Load the model and processor
+processor = BlipProcessor.from_pretrained("umarigan/blip-image-captioning-base-chestxray-finetuned")
+model = BlipForConditionalGeneration.from_pretrained("umarigan/blip-image-captioning-base-chestxray-finetuned")
+
+# Define the prediction function
+def generate_caption(image):
+    text = "a photography of"  # conditional prompt that primes the caption
+    inputs = processor(image, text, return_tensors="pt")
+    out = model.generate(**inputs)
+    caption = processor.decode(out[0], skip_special_tokens=True)
+    return caption
+
+# Example images from your Hugging Face Space (one path per example, since gr.Examples expects one value per input)
+example_images = [
+    ["image.jpg"],
+    ["image1.jpg"],
+    ["image2.jpg"],
+]
+
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# BLIP Image Captioning")
+
+    # Image input component with example images
+    with gr.Row():
+        with gr.Column():
+            image_input = gr.Image(type="pil", label="Upload an Image or Select an Example")
+            examples = gr.Examples(examples=example_images, inputs=image_input)
+
+        with gr.Column():
+            caption_output = gr.Textbox(label="Generated Caption")
+
+    # Generate button
+    generate_button = gr.Button("Generate Caption")
+    generate_button.click(fn=generate_caption, inputs=image_input, outputs=caption_output)
+
+# Launch the app
+demo.launch()
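
Running `python app.py` starts the Gradio server locally (by default at http://127.0.0.1:7860). For reference, a minimal sketch of calling the same checkpoint directly without the UI; the "xray.jpg" path is a hypothetical placeholder, not a file in this commit:

    from PIL import Image
    from transformers import BlipProcessor, BlipForConditionalGeneration

    processor = BlipProcessor.from_pretrained("umarigan/blip-image-captioning-base-chestxray-finetuned")
    model = BlipForConditionalGeneration.from_pretrained("umarigan/blip-image-captioning-base-chestxray-finetuned")

    image = Image.open("xray.jpg").convert("RGB")  # hypothetical local chest X-ray image
    inputs = processor(image, "a photography of", return_tensors="pt")
    out = model.generate(**inputs)
    print(processor.decode(out[0], skip_special_tokens=True))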