Update app.py
app.py CHANGED
@@ -15,7 +15,7 @@ activity_categories = {
     "Working": ["laptop", "computer", "keyboard", "office chair"],
     "Meal Time": ["fork", "spoon", "plate", "food"],
     "Exercise": ["dumbbell", "bicycle", "yoga mat", "treadmill"],
-    "Outdoors": ["car", "tree", "bicycle", "road"],
+    "Outdoors": ["car", "tree", "bicycle", "road", "subway", "metro"],
     # Add more categories and objects as needed
 }
 
@@ -63,11 +63,14 @@ def generate_journal_with_images(video_path, frame_interval=30):
         # Make predictions using YOLOv10 on the current frame
         results = model.predict(source=frame_rgb, device=device)
 
-        #
-        detected_objects = [
+        # Filter detected objects based on confidence threshold
+        detected_objects = []
+        for box in results[0].boxes:
+            if box.conf >= confidence_threshold:  # Only include objects with confidence >= 0.8
+                detected_objects.append(model.names[int(box.cls)])
 
-        # Only process frames where objects are detected
-        if detected_objects:  # If there are detected objects
+        # Only process frames where objects with confidence >= threshold are detected
+        if detected_objects:  # If there are high-confidence detected objects
 
             # Plot bounding boxes and labels on the image
             annotated_frame = results[0].plot()  # Plot detection results on the frame
@@ -93,7 +96,7 @@ def generate_journal_with_images(video_path, frame_interval=30):
 
     cap.release()
 
-    return journal_entries, image_paths
+    return journal_entries, image_paths
 
 
 def display_journal_with_images(video):
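
For reference, a minimal sketch of how the new confidence filter and the extended activity_categories mapping might be wired together. The helper names (high_confidence_labels, match_activity), the 0.8 value for confidence_threshold, and the standalone structure are illustrative assumptions, not part of this commit; only the per-box conf/cls filtering mirrors the added lines, following the ultralytics Results/Boxes interface.

# Sketch only: assumed helpers around the diff's filtering logic.
confidence_threshold = 0.8  # assumed default, per the in-diff comment

def high_confidence_labels(result, names, threshold=confidence_threshold):
    """Return class names for boxes whose confidence meets the threshold."""
    labels = []
    for box in result.boxes:  # one box per detection in an ultralytics Results object
        if float(box.conf) >= threshold:
            labels.append(names[int(box.cls)])
    return labels

def match_activity(detected_objects, activity_categories):
    """Return the first activity whose keyword list overlaps the detections."""
    for activity, keywords in activity_categories.items():
        if any(obj in keywords for obj in detected_objects):
            return activity
    return None

# Hypothetical usage inside the frame loop:
# results = model.predict(source=frame_rgb, device=device)
# objects = high_confidence_labels(results[0], model.names)
# activity = match_activity(objects, activity_categories)

Keeping the threshold in one place makes it easy to expose as a parameter later, and a lookup like match_activity picks up newly added keywords such as "subway" and "metro" without further changes.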