perlarsson committed
Commit 6c46d60
1 Parent(s): 21e29c3

Upload folder using huggingface_hub
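
For context, a commit message like this is typically produced by the folder-upload helper in huggingface_hub. A minimal sketch of such a call (the local path and Space id below are hypothetical placeholders, not taken from this repo):

from huggingface_hub import HfApi

api = HfApi()  # uses the token cached by `huggingface-cli login`
api.upload_folder(
    folder_path="./space",                # hypothetical local folder holding app.py and ad.png
    repo_id="your-username/your-space",   # hypothetical Space id
    repo_type="space",
    # when commit_message is omitted, the default is "Upload folder using huggingface_hub"
)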

Files changed (2)
  1. ad.png +0 -0
  2. app.py +168 -4
ad.png ADDED
app.py CHANGED
@@ -1,7 +1,171 @@
  import gradio as gr
 
- def greet(name):
-     return "Hello " + name + "!!"
 
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch(share=True)
+ from roboflow import Roboflow
+ import os
+ import tempfile
+ from PIL import Image, ImageDraw, ImageFont
+ import cv2
+ import numpy as np
+
+ # Initialize Roboflow
+ rf = Roboflow(api_key="E5qhgf3ZimDoTx5OfgZ8")
+ project = rf.workspace().project("newhassae")
+
+ def get_model(version):
+     return project.version(version).model
+
+ def preprocess_image(img, version):
+     # Initial crop for all images
+     img = img.crop((682, 345, 682+2703, 345+1403))
+
+     # Model specific processing
+     if version == 1:
+         return img.resize((640, 640))
+     elif version == 2:
+         return img
+     elif version == 3:
+         width, height = img.size
+         left = (width - 640) // 2
+         top = (height - 640) // 2
+         right = left + 640
+         bottom = top + 640
+         return img.crop((left, top, right, bottom))
+     return img
+
+ def process_images(image_files, version):
+     model = get_model(version)
+     results = []
+     if not isinstance(image_files, list):
+         image_files = [image_files]
+
+     for image_file in image_files:
+         try:
+             with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
+                 temp_file.write(image_file)
+                 temp_path = temp_file.name
+
+             img = Image.open(temp_path)
+             processed_img = preprocess_image(img, version)
+
+             processed_temp = tempfile.NamedTemporaryFile(suffix='.jpg', delete=False)
+             processed_img.save(processed_temp.name)
+
+             try:
+                 prediction = model.predict(processed_temp.name).json()
+                 predicted_class = prediction["predictions"][0]["predictions"][0]["class"]
+                 confidence = f"{float(prediction['predictions'][0]['predictions'][0]['confidence']) * 100:.1f}%"
+             except Exception as e:
+                 predicted_class = "Error"
+                 confidence = "N/A"
+
+             if processed_img.mode != 'RGB':
+                 processed_img = processed_img.convert('RGB')
+
+             labeled_img = add_label_to_image(processed_img, predicted_class, confidence)
+
+             top_result = {
+                 "predicted_class": predicted_class,
+                 "confidence": confidence
+             }
+
+             results.append((labeled_img, top_result))
+
+         except Exception as e:
+             gr.Warning(f"Error processing image: {str(e)}")
+             continue
+         finally:
+             if 'temp_path' in locals():
+                 os.unlink(temp_path)
+             if 'processed_temp' in locals():
+                 os.unlink(processed_temp.name)
+
+     return results if results else [(Image.new('RGB', (400, 400), 'grey'), {"predicted_class": "Error", "confidence": "N/A"})]
+
+
+
+ def add_label_to_image(image, prediction, confidence):
+     # Convert PIL image to OpenCV format
+     img_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+
+     # Image dimensions
+     img_height, img_width = img_cv.shape[:2]
+     padding = int(img_width * 0.02)
+
+     # Rectangle dimensions
+     rect_height = int(img_height * 0.15)
+     rect_width = img_width - (padding * 2)
+
+     # Draw red rectangle
+     cv2.rectangle(img_cv,
+                   (padding, padding),
+                   (padding + rect_width, padding + rect_height),
+                   (0, 0, 255),
+                   -1)
+
+     text = f"{prediction}: {confidence}"
+
+     # Text settings
+     font = cv2.FONT_HERSHEY_SIMPLEX
+     font_scale = 3.0
+     thickness = 8
+
+     # Get text size and position
+     (text_width, text_height), _ = cv2.getTextSize(text, font, font_scale, thickness)
+     text_x = padding + (rect_width - text_width) // 2
+     text_y = padding + (rect_height + text_height) // 2
+
+     # Draw white text
+     cv2.putText(img_cv, text, (text_x, text_y), font, font_scale, (255, 255, 255), thickness)
+
+     # Convert back to PIL
+     img_pil = Image.fromarray(cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB))
+     return img_pil
+
+ def display_results(image_files, version):
+     results = process_images(image_files, version)
+     output_images = [res[0] for res in results]
+     predictions = [res[1] for res in results]
+
+     return output_images, predictions
+ # Create Gradio interface
+ with gr.Blocks() as demo:
+     gr.HTML("""
+         <div style="text-align: center; margin-bottom: 1rem">
+             <img src="https://haeab.se/wp-content/uploads/2023/12/ad.png" alt="Logo" style="height: 100px;">
+         </div>
+     """)
+     gr.Markdown("Hans Andersson Entrepenad")
+
+     with gr.Row():
+         with gr.Column():
+             model_version = gr.Slider(
+                 minimum=1,
+                 maximum=4,
+                 step=1,
+                 value=1,
+                 label="Model Version",
+                 interactive=True
+             )
+             image_input = gr.File(
+                 label="Upload Image(s)",
+                 file_count="multiple",
+                 type="binary"
+             )
+
+         with gr.Column():
+             image_output = gr.Gallery(label="Processed Images")
+             text_output = gr.JSON(
+                 label="Top Predictions",
+                 height=400,  # Increases height
+                 container=True,  # Adds a container around the JSON
+                 show_label=True
+             )
+
+     submit_btn = gr.Button("Analyze Images")
+     submit_btn.click(
+         fn=display_results,
+         inputs=[image_input, model_version],
+         outputs=[image_output, text_output]
+     )
+ demo.launch(share=True, debug=True, show_error=True)
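
For reference, the nested indexing in process_images (prediction["predictions"][0]["predictions"][0]["class"]) implies a classification response shaped roughly like the sketch below. This is inferred from the code in the diff, not from Roboflow documentation, and the values are hypothetical:

# Hypothetical payload shape implied by the indexing in process_images;
# the real Roboflow response may carry additional fields.
example_response = {
    "predictions": [                      # one entry per submitted image
        {
            "predictions": [              # candidate classes, best first
                {"class": "example_label", "confidence": 0.97},
            ],
        },
    ],
}

top = example_response["predictions"][0]["predictions"][0]
print(top["class"], f"{top['confidence'] * 100:.1f}%")  # -> example_label 97.0%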