wuhp committed
Commit c9cbfde · verified · 1 Parent(s): 24fabae

Update app.py

Files changed (1)
  1. app.py +13 -227
app.py CHANGED
@@ -7,17 +7,11 @@ import shutil
 from ultralytics import YOLO
 import requests

-# Directory and file configurations
 MODELS_DIR = "models"
 MODELS_INFO_FILE = "models_info.json"
 TEMP_DIR = "temp"
 OUTPUT_DIR = "outputs"

-# New files for storing ratings, detections, and recommended datasets
-RATINGS_FILE = "ratings.json"
-DETECTIONS_FILE = "detections.json"
-RECOMMENDED_DATASETS_FILE = "recommended_datasets.json"
-
 def download_file(url, dest_path):
     """
     Download a file from a URL to the destination path.
@@ -69,6 +63,7 @@ def load_models(models_dir=MODELS_DIR, info_file=MODELS_INFO_FILE):
             continue

         try:
+
             model = YOLO(model_path)
             models[model_name] = {
                 'display_name': display_name,
@@ -81,14 +76,13 @@ def load_models(models_dir=MODELS_DIR, info_file=MODELS_INFO_FILE):

     return models

-def get_model_info(model_info, ratings_info):
+def get_model_info(model_info):
     """
-    Retrieve formatted model information for display, including average rating.
+    Retrieve formatted model information for display.
     Args:
         model_info (dict): The model's information dictionary.
-        ratings_info (dict): The ratings information for the model.
     Returns:
-        str: A formatted string containing model details and average rating.
+        str: A formatted string containing model details.
     """
     info = model_info
     class_ids = info.get('class_ids', {})
@@ -99,11 +93,6 @@ def get_model_info(model_info, ratings_info):
     class_image_counts_formatted = "\n".join([f"{cname}: {count}" for cname, count in class_image_counts.items()])
     datasets_used_formatted = "\n".join([f"- {dataset}" for dataset in datasets_used])

-    # Calculate average rating
-    total_rating = ratings_info.get('total', 0)
-    count_rating = ratings_info.get('count', 0)
-    average_rating = (total_rating / count_rating) if count_rating > 0 else "No ratings yet"
-
     info_text = (
         f"**{info.get('display_name', 'Model Name')}**\n\n"
         f"**Architecture:** {info.get('architecture', 'N/A')}\n\n"
@@ -116,8 +105,7 @@ def get_model_info(model_info, ratings_info):
         f"**Number of Images Trained On:** {info.get('num_images', 'N/A')}\n\n"
         f"**Class IDs:**\n{class_ids_formatted}\n\n"
         f"**Datasets Used:**\n{datasets_used_formatted}\n\n"
-        f"**Class Image Counts:**\n{class_image_counts_formatted}\n\n"
-        f"**Average Rating:** {average_rating} ⭐"
+        f"**Class Image Counts:**\n{class_image_counts_formatted}"
     )
     return info_text

@@ -137,6 +125,7 @@ def predict_image(model_name, image, confidence, models):
     if not model:
         return "Error: Model not found.", None, None
     try:
+
         os.makedirs(TEMP_DIR, exist_ok=True)
         os.makedirs(OUTPUT_DIR, exist_ok=True)

@@ -148,6 +137,7 @@ def predict_image(model_name, image, confidence, models):
         latest_run = sorted(Path("runs/detect").glob("predict*"), key=os.path.getmtime)[-1]
         output_image_path = os.path.join(latest_run, Path(input_image_path).name)
         if not os.path.isfile(output_image_path):
+
             output_image_path = results[0].save()[0]

         final_output_path = os.path.join(OUTPUT_DIR, f"{model_name}_output_image.jpg")
@@ -155,86 +145,17 @@ def predict_image(model_name, image, confidence, models):

         output_image = Image.open(final_output_path)

-        # Calculate number of detections
-        detections = len(results[0].boxes)
-        return f"✅ Prediction completed successfully. **Detections:** {detections}", output_image, final_output_path
+        return "✅ Prediction completed successfully.", output_image, final_output_path
     except Exception as e:
         return f"❌ Error during prediction: {str(e)}", None, None

-def load_or_initialize_json(file_path, default_data):
-    """
-    Load JSON data from a file or initialize it with default data if the file doesn't exist.
-    Args:
-        file_path (str): Path to the JSON file.
-        default_data (dict or list): Default data to initialize if file doesn't exist.
-    Returns:
-        dict or list: The loaded or initialized data.
-    """
-    if os.path.isfile(file_path):
-        with open(file_path, 'r') as f:
-            return json.load(f)
-    else:
-        with open(file_path, 'w') as f:
-            json.dump(default_data, f, indent=4)
-        return default_data
-
-def save_json(file_path, data):
-    """
-    Save data to a JSON file.
-    Args:
-        file_path (str): Path to the JSON file.
-        data (dict or list): Data to save.
-    """
-    with open(file_path, 'w') as f:
-        json.dump(data, f, indent=4)
-
-def is_valid_roboflow_url(url):
-    """
-    Validate if the provided URL is a Roboflow URL.
-    Args:
-        url (str): The URL to validate.
-    Returns:
-        bool: True if valid, False otherwise.
-    """
-    return url.startswith("https://roboflow.com/") or url.startswith("http://roboflow.com/")
-
-def get_top_model(detections_per_model, models):
-    """
-    Determine the top model based on the highest number of detections.
-    Args:
-        detections_per_model (dict): Dictionary with model names as keys and detection counts as values.
-        models (dict): Dictionary of loaded models.
-    Returns:
-        str: The display name of the top model or a message if no detections exist.
-    """
-    if not detections_per_model:
-        return "No detections yet."
-    top_model_name = max(detections_per_model, key=detections_per_model.get)
-    top_model_display = models[top_model_name]['display_name']
-    top_detections = detections_per_model[top_model_name]
-    return f"**Top Model:** {top_model_display} with **{top_detections}** detections."
-
 def main():
-    # Load models
+
     models = load_models()
     if not models:
         print("No models loaded. Please check your models_info.json and model URLs.")
         return

-    # Load or initialize ratings
-    ratings_data = load_or_initialize_json(RATINGS_FILE, {})
-    # Initialize ratings for each model if not present
-    for model_name in models:
-        if model_name not in ratings_data:
-            ratings_data[model_name] = {"total": 0, "count": 0}
-    save_json(RATINGS_FILE, ratings_data)
-
-    # Load or initialize detections counter
-    detections_data = load_or_initialize_json(DETECTIONS_FILE, {"total_detections": 0, "detections_per_model": {}})
-
-    # Load or initialize recommended datasets
-    recommended_datasets = load_or_initialize_json(RECOMMENDED_DATASETS_FILE, [])
-
     with gr.Blocks() as demo:
         gr.Markdown("# 🧪 YOLOv11 Model Tester")
         gr.Markdown(
@@ -243,15 +164,6 @@ def main():
             """
         )

-        # Display total detections counter and top model
-        with gr.Row():
-            detections_counter = gr.Markdown(
-                f"**Total Detections Across All Users:** {detections_data.get('total_detections', 0)}"
-            )
-            top_model_display = gr.Markdown(
-                get_top_model(detections_data.get('detections_per_model', {}), models)
-            )
-
         with gr.Row():
             model_dropdown = gr.Dropdown(
                 choices=[models[m]['display_name'] for m in models],
@@ -269,8 +181,7 @@ def main():
             if not model_name:
                 return "Model information not available."
             model_entry = models[model_name]['info']
-            ratings_info = ratings_data.get(model_name, {"total": 0, "count": 0})
-            return get_model_info(model_entry, ratings_info)
+            return get_model_info(model_entry)

         model_dropdown.change(
             fn=update_model_info,
@@ -293,6 +204,7 @@ def main():
                 image_input = gr.Image(
                     type='pil',
                     label="Upload Image for Prediction"
+
                 )
                 image_predict_btn = gr.Button("🔍 Predict on Image")
                 image_status = gr.Markdown("**Status will appear here.**")
@@ -303,32 +215,7 @@ def main():
                 if not selected_display_name:
                     return "❌ Please select a model.", None, None
                 model_name = display_to_name.get(selected_display_name)
-                status, output_img, output_path = predict_image(model_name, image, confidence, models)
-
-                # Extract number of detections from the status message
-                detections = 0
-                if "Detections:" in status:
-                    try:
-                        detections = int(status.split("Detections:")[1].strip())
-                    except:
-                        pass
-
-                # Update detections counter
-                try:
-                    detections_data['total_detections'] += detections
-                    if model_name in detections_data['detections_per_model']:
-                        detections_data['detections_per_model'][model_name] += detections
-                    else:
-                        detections_data['detections_per_model'][model_name] = detections
-                    save_json(DETECTIONS_FILE, detections_data)
-                except Exception as e:
-                    print(f"Error updating detections counter: {e}")
-
-                # Update detections and top model display
-                detections_counter.value = f"**Total Detections Across All Users:** {detections_data.get('total_detections', 0)}"
-                top_model_display.value = get_top_model(detections_data.get('detections_per_model', {}), models)
-
-                return status, output_img, output_path
+                return predict_image(model_name, image, confidence, models)

             image_predict_btn.click(
                 fn=process_image,
@@ -336,107 +223,6 @@ def main():
                 outputs=[image_status, image_output, image_download_btn]
             )

-        with gr.Tab("⭐ Rate Model"):
-            with gr.Column():
-                selected_model = gr.Dropdown(
-                    choices=[models[m]['display_name'] for m in models],
-                    label="Select Model to Rate",
-                    value=None
-                )
-                rating = gr.Slider(
-                    minimum=1,
-                    maximum=5,
-                    step=1,
-                    label="Rate the Model (1-5 Stars)",
-                    info="Select a star rating between 1 and 5."
-                )
-                submit_rating_btn = gr.Button("Submit Rating")
-                rating_status = gr.Markdown("**Your rating will be submitted here.**")
-
-                def submit_rating(selected_display_name, user_rating):
-                    if not selected_display_name:
-                        return "❌ Please select a model to rate."
-                    if not user_rating:
-                        return "❌ Please provide a rating."
-                    model_name = display_to_name.get(selected_display_name)
-                    if not model_name:
-                        return "❌ Invalid model selected."
-
-                    # Update ratings data
-                    ratings_info = ratings_data.get(model_name, {"total": 0, "count": 0})
-                    ratings_info['total'] += user_rating
-                    ratings_info['count'] += 1
-                    ratings_data[model_name] = ratings_info
-                    save_json(RATINGS_FILE, ratings_data)
-
-                    # Update model info display if the rated model is currently selected
-                    if model_dropdown.value == selected_display_name:
-                        updated_info = get_model_info(models[model_name]['info'], ratings_info)
-                        model_info.value = updated_info
-
-                    average = (ratings_info['total'] / ratings_info['count'])
-                    return f"✅ Thank you for rating! Current Average Rating: {average:.2f} ⭐"
-
-                submit_rating_btn.click(
-                    fn=submit_rating,
-                    inputs=[selected_model, rating],
-                    outputs=rating_status
-                )
-
-        with gr.Tab("💡 Recommend Dataset"):
-            with gr.Column():
-                dataset_name = gr.Textbox(
-                    label="Dataset Name",
-                    placeholder="Enter the name of the dataset"
-                )
-                dataset_url = gr.Textbox(
-                    label="Dataset URL",
-                    placeholder="Enter the Roboflow dataset URL"
-                )
-                recommend_btn = gr.Button("Recommend Dataset")
-                recommend_status = gr.Markdown("**Your recommendation status will appear here.**")
-
-                def recommend_dataset(name, url):
-                    if not name or not url:
-                        return "❌ Please provide both the dataset name and URL."
-
-                    if not is_valid_roboflow_url(url):
-                        return "❌ Invalid URL. Please provide a valid Roboflow dataset URL."
-
-                    # Check for duplicates
-                    for dataset in recommended_datasets:
-                        if dataset['name'].lower() == name.lower() or dataset['url'] == url:
-                            return "❌ This dataset has already been recommended."
-
-                    # Add to recommended datasets
-                    recommended_datasets.append({"name": name, "url": url})
-                    save_json(RECOMMENDED_DATASETS_FILE, recommended_datasets)
-
-                    return f"✅ Thank you for recommending the dataset **{name}**!"
-
-                recommend_btn.click(
-                    fn=recommend_dataset,
-                    inputs=[dataset_name, dataset_url],
-                    outputs=recommend_status
-                )
-
-        with gr.Tab("📄 Recommended Datasets"):
-            with gr.Column():
-                recommended_display = gr.Markdown("### Recommended Roboflow Datasets\n")
-
-                def display_recommended_datasets():
-                    if not recommended_datasets:
-                        return "No datasets have been recommended yet."
-                    dataset_md = "\n".join([f"- [{dataset['name']}]({dataset['url']})" for dataset in recommended_datasets])
-                    return dataset_md
-
-                # Display the recommended datasets
-                recommended_display.value = display_recommended_datasets()
-
-        with gr.Tab("🏆 Top Model"):
-            with gr.Column():
-                top_model_md = gr.Markdown(get_top_model(detections_data.get('detections_per_model', {}), models))
-
         gr.Markdown(
             """
             ---
@@ -447,4 +233,4 @@ def main():
     demo.launch()

 if __name__ == "__main__":
-    main()
+    main()
 