soumyaprabhamaiti committed
Commit 49bb575
1 Parent(s): c3a1e37

Create image segmentation app

.github/workflows/check_file_size.yml ADDED
@@ -0,0 +1,16 @@
+ name: Check file size
+ on: # or directly `on: [push]` to run the action on every push on any branch
+   pull_request:
+     branches: [main]
+
+   # to run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   check-file-size:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check large files
+         uses: ActionsDesk/lfs-warning@v2.0
+         with:
+           filesizelimit: 10485760 # this is 10MB so we can sync to HF Spaces
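
The filesizelimit of 10485760 bytes is 10 MiB, the per-file cap for pushing to a Space without Git LFS. As a rough local pre-check before pushing, a minimal sketch in Python (not part of the workflow; the directory walk and the .git skip are illustrative assumptions):

import os

# Same limit the workflow enforces: 10485760 bytes = 10 MiB per file.
SIZE_LIMIT = 10485760

def find_large_files(root="."):
    """Yield (path, size) for files above SIZE_LIMIT, skipping Git metadata."""
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d != ".git"]  # skip .git internals
        for name in filenames:
            path = os.path.join(dirpath, name)
            size = os.path.getsize(path)
            if size > SIZE_LIMIT:
                yield path, size

if __name__ == "__main__":
    for path, size in find_large_files():
        print(f"{path}: {size / (1024 * 1024):.1f} MiB exceeds the 10 MiB limit")

Files that legitimately exceed the limit, such as the model weights under saved_model/, go through Git LFS instead.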
.github/workflows/sync_to_HF_hub.yml ADDED
@@ -0,0 +1,20 @@
+ name: Sync to Hugging Face hub
+ on:
+   push:
+     branches: [main]
+
+   # to run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hub:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+         with:
+           fetch-depth: 0
+           lfs: true
+       - name: Push to hub
+         env:
+           HF: ${{ secrets.HF }}
+         run: git push --force https://soumyaprabhamaiti:$HF@huggingface.co/spaces/soumyaprabhamaiti/image_segmentation_web_app main
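
The push step authenticates over HTTPS with the HF secret (a Hugging Face write token) and force-pushes main to the Space repository. For a one-off manual sync from a local checkout, a hedged alternative sketch using the huggingface_hub client; this is not what the workflow does, and the token placeholder and ignore patterns are assumptions:

from huggingface_hub import HfApi

# Hypothetical manual sync; the workflow itself uses `git push` with the HF secret.
api = HfApi(token="hf_...")  # a write token with access to the Space

api.upload_folder(
    folder_path=".",                                         # local repo root
    repo_id="soumyaprabhamaiti/image_segmentation_web_app",  # from the push URL above
    repo_type="space",
    ignore_patterns=[".git/*", ".github/*"],
)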
app.py ADDED
@@ -0,0 +1,93 @@
+ import gradio as gr
+ import tensorflow as tf
+ import numpy as np
+ import cv2
+ import matplotlib.pyplot as plt
+
+
+ # Path to the pre-trained segmentation model
+ model_path = "saved_model"
+
+ # Load the pre-trained segmentation model
+ segmentation_model = tf.keras.models.load_model(model_path)
+
+ # Target image shape
+ TARGET_SHAPE = (256, 256)
+
+ # Define image segmentation function
+ def segment_image(img: np.ndarray):
+     # Original image shape
+     ORIGINAL_SHAPE = img.shape
+
+     # Check if the image is RGB and convert if not
+     if len(ORIGINAL_SHAPE) == 2:
+         img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
+
+     # Resize the image to TARGET_SHAPE
+     img = cv2.resize(img, TARGET_SHAPE)
+
+     # Add a batch dimension
+     img = np.expand_dims(img, axis=0)
+
+     # Predict the segmentation mask
+     mask = segmentation_model.predict(img)
+
+     # Remove the batch dimension
+     mask = np.squeeze(mask, axis=0)
+
+     # Convert to labels
+     mask = np.argmax(mask, axis=-1)
+
+     # Convert to uint8
+     mask = mask.astype(np.uint8)
+
+     # Resize to original image shape
+     mask = cv2.resize(mask, (ORIGINAL_SHAPE[1], ORIGINAL_SHAPE[0]))
+
+     return mask
+
+ def overlay_mask(img, mask, alpha=0.5):
+     # Define color mapping
+     colors = {
+         0: [255, 0, 0],  # Class 0 - Red
+         1: [0, 255, 0],  # Class 1 - Green
+         2: [0, 0, 255]   # Class 2 - Blue
+         # Add more colors for additional classes if needed
+     }
+
+     # Create a blank colored overlay image
+     overlay = np.zeros_like(img)
+
+     # Map each mask value to the corresponding color
+     for class_id, color in colors.items():
+         overlay[mask == class_id] = color
+
+     # Blend the overlay with the original image
+     output = cv2.addWeighted(img, 1 - alpha, overlay, alpha, 0)
+
+     return output
+
+
+ # The main function
+ def transform(img):
+     mask = segment_image(img)
+     blended_img = overlay_mask(img, mask)
+     return blended_img
+
+
+ # Create the Gradio app
+ app = gr.Interface(
+     fn=transform,
+     inputs=gr.Image(label="Input Image"),
+     outputs=gr.Image(label="Image with Segmentation Overlay"),
+     title="Image Segmentation on Pet Images",
+     description="Segment an image of a pet animal into three classes: background, pet, and boundary.",
+     examples=[
+         "example_images/img1.jpg",
+         "example_images/img2.jpg",
+         "example_images/img3.jpg"
+     ]
+ )
+
+ # Run the app
+ app.launch()
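
For a quick check outside the Gradio UI, the same pipeline can be exercised on one of the bundled examples. A minimal sketch mirroring segment_image and overlay_mask above, assuming it runs from the repository root with saved_model/ and example_images/ in place; the output filename is only illustrative:

import cv2
import numpy as np
import tensorflow as tf

# Same model and target size as app.py.
model = tf.keras.models.load_model("saved_model")
TARGET_SHAPE = (256, 256)

# OpenCV reads BGR; convert to RGB to match what Gradio feeds transform().
bgr = cv2.imread("example_images/img1.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

# Predict a per-pixel class mask, mirroring segment_image().
batch = np.expand_dims(cv2.resize(rgb, TARGET_SHAPE), axis=0)
mask = np.argmax(model.predict(batch)[0], axis=-1).astype(np.uint8)
mask = cv2.resize(mask, (rgb.shape[1], rgb.shape[0]))

# Colour the three classes and blend, mirroring overlay_mask().
colors = {0: [255, 0, 0], 1: [0, 255, 0], 2: [0, 0, 255]}
overlay = np.zeros_like(rgb)
for class_id, color in colors.items():
    overlay[mask == class_id] = color
blended = cv2.addWeighted(rgb, 0.5, overlay, 0.5, 0)

# Save as BGR for imwrite; "overlay_img1.png" is just an example name.
cv2.imwrite("overlay_img1.png", cv2.cvtColor(blended, cv2.COLOR_RGB2BGR))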
example_images/img1.jpg ADDED
example_images/img2.jpg ADDED
example_images/img3.jpg ADDED
example_images/img4.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ tensorflow
+ gradio
+ opencv-python
saved_model/fingerprint.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:706b1eb7b39818e6adb05977067a9eb60de93f0ee08cdba69eab221ccbe0c2a5
+ size 56
saved_model/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:997f7b7999d5f61640e01f3812c19808ab71c88ccf47c7e3d6a90f6921991b7d
+ size 89706
saved_model/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb09b7345801bdfc009ceaed06323e63fbfa0992c73c9fc2506c4541d34b7abc
+ size 859084
saved_model/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c07ac84dc7c851f433881f68030336f10901f0d4594a3e84baaf6620dba4cf
+ size 25947878
saved_model/variables/variables.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:719056f4d8e75d7f561e76fd756fc3e8ba406025dd7c234583fa9c4c152cfd74
+ size 9077
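
The saved_model files above are Git LFS pointers; the actual binaries are fetched by git lfs pull (or by the checkout step with lfs: true in the sync workflow). A small sketch to confirm the files under saved_model/ were materialised rather than left as pointer stubs, which start with the spec line shown above and are only a few hundred bytes:

from pathlib import Path

# An un-pulled LFS file is a small text stub starting with this spec line.
POINTER_PREFIX = b"version https://git-lfs.github.com/spec/v1"

for path in sorted(Path("saved_model").rglob("*")):
    if not path.is_file():
        continue
    with path.open("rb") as f:
        head = f.read(len(POINTER_PREFIX))
    status = "still an LFS pointer" if head == POINTER_PREFIX else "binary present"
    print(f"{path} ({path.stat().st_size} bytes): {status}")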