import streamlit as st
import cv2
import numpy as np
import tempfile
import time
from collections import deque
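# Launch the app locally with Streamlit's CLI (assuming this file is saved as app.py):
#   streamlit run app.py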
# Set page config
st.set_page_config(
    page_title="Advanced Dog Language Understanding",
    page_icon="🐕",
    layout="wide"
)

class DogBehaviorAnalyzer:
    def __init__(self):
        self.behaviors = {
            'tail_wagging': {'threshold': 0.15, 'description': 'Your dog is displaying happiness and excitement!'},
            'movement': {'threshold': 0.02, 'description': 'Your dog is active and moving around.'},
            'stationary': {'threshold': 0.01, 'description': 'Your dog is calm and still.'},
            'high_activity': {'threshold': 0.05, 'description': 'Your dog is very energetic!'},
            'barking': {'threshold': 0.10, 'description': 'Your dog is trying to communicate!'},
            'jumping': {'threshold': 0.12, 'description': 'Your dog is showing excitement through jumping!'},
            'ears_perked': {'threshold': 0.08, 'description': 'Your dog is alert and attentive!'}
        }
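        # Most thresholds above are heuristic fractions of the 300x300 analysis frame:
        # a motion score of 0.05 means roughly 4,500 of 90,000 pixels changed between
        # frames, while tail_wagging and ears_perked use colour-change and edge-density
        # scores over the same frame. These are assumed starting values and may need
        # tuning for your camera and footage.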
        self.suggestions = {
            'tail_wagging': [
                "This is a great time for positive reinforcement training!",
                "Consider engaging in some fun play activities",
                "Your dog is in a social mood - perfect for bonding exercises"
            ],
            'movement': [
                "A good opportunity for some basic training exercises",
                "Consider introducing some puzzle toys",
                "This energy level is perfect for a short training session"
            ],
            'stationary': [
                "Perfect time for gentle petting and calming interactions",
                "Consider offering a chew toy for mental stimulation",
                "Good moment for quiet bonding or rest"
            ],
            'high_activity': [
                "Your dog might benefit from structured exercise",
                "Consider redirecting energy into agility training",
                "A good play session with toys would be beneficial",
                "Make sure fresh water is available"
            ],
            'barking': [
                "Try to identify what's triggering the barking",
                "Practice 'quiet' command training",
                "Redirect attention with engaging toys",
                "Consider working on bark control exercises"
            ],
            'jumping': [
                "Practice the 'four paws on the floor' training",
                "Redirect jumping energy into trick training",
                "Work on impulse control exercises",
                "Try teaching alternative greetings like 'sit' for attention"
            ],
            'ears_perked': [
                "Great time for sound recognition training",
                "Practice attention and focus exercises",
                "Good moment for environmental awareness training",
                "Consider introducing new sounds or stimuli for enrichment"
            ]
        }

        # Motion detection parameters
        self.history = []
        self.max_history = 10
        self.prev_frame = None
        self.prev_flow_frame = None  # previous grayscale frame, kept for optical-flow checks
        self.motion_history = deque(maxlen=5)  # Store recent motion scores

    def detect_motion(self, frame):
        """Detect motion in frame with improved sensitivity"""
        # Resize frame for consistent motion detection
        frame = cv2.resize(frame, (300, 300))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.prev_frame is None:
            self.prev_frame = gray
            return 0.0

        frame_delta = cv2.absdiff(self.prev_frame, gray)
        thresh = cv2.threshold(frame_delta, 20, 255, cv2.THRESH_BINARY)[1]  # Lower threshold for better sensitivity
        thresh = cv2.dilate(thresh, None, iterations=2)

        # Calculate motion score
        motion_score = np.sum(thresh > 0) / thresh.size

        # Keep the previous frame for optical-flow based checks (e.g. jumping)
        # before overwriting it with the current one
        self.prev_flow_frame = self.prev_frame
        self.prev_frame = gray

        # Add to motion history
        self.motion_history.append(motion_score)

        # Return average of recent motion scores for stability
        return np.mean(self.motion_history) if len(self.motion_history) > 0 else motion_score

    def detect_color_changes(self, frame):
        """Detect significant color changes with improved sensitivity"""
        frame = cv2.resize(frame, (300, 300))
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Define range for common dog colors
        color_ranges = [
            (np.array([0, 30, 30]), np.array([30, 255, 255])),   # Brown/Red
            (np.array([0, 0, 0]), np.array([180, 50, 255])),     # White/Gray/Black
        ]

        total_change_score = 0
        for lower, upper in color_ranges:
            mask = cv2.inRange(hsv, lower, upper)
            if len(self.history) > 0:
                prev_mask = self.history[-1]
                diff = cv2.absdiff(mask, prev_mask)
                change_score = np.sum(diff > 0) / diff.size
                total_change_score = max(total_change_score, change_score)

            self.history.append(mask)
            if len(self.history) > self.max_history:
                self.history.pop(0)

        return total_change_score

    def analyze_frame(self, frame):
        """Analyze frame with improved behavior detection logic"""
        motion_score = self.detect_motion(frame)
        color_change_score = self.detect_color_changes(frame)
        audio_score = self.detect_audio(frame) if hasattr(frame, 'audio') else 0

        detected_behaviors = []

        # High activity detection (running, jumping)
        if motion_score > self.behaviors['high_activity']['threshold']:
            detected_behaviors.append(('high_activity', motion_score))
            # Jumping detection (vertical motion)
            if self.detect_vertical_motion(frame) > self.behaviors['jumping']['threshold']:
                detected_behaviors.append(('jumping', motion_score * 1.2))
        # Regular movement detection
        elif motion_score > self.behaviors['movement']['threshold']:
            detected_behaviors.append(('movement', motion_score))
        # Stationary detection - only if very little motion
        elif motion_score < self.behaviors['stationary']['threshold']:
            detected_behaviors.append(('stationary', 1.0 - motion_score))

        # Tail wagging detection - based on localized color changes
        if color_change_score > self.behaviors['tail_wagging']['threshold']:
            detected_behaviors.append(('tail_wagging', color_change_score))

        # Ears perked detection - based on ear region analysis
        ears_score = self.detect_ear_position(frame)
        if ears_score > self.behaviors['ears_perked']['threshold']:
            detected_behaviors.append(('ears_perked', ears_score))

        # Barking detection - based on audio analysis
        if audio_score > self.behaviors['barking']['threshold']:
            detected_behaviors.append(('barking', audio_score))

        # Debug information
        if not detected_behaviors:
            st.sidebar.write(f"Debug - Motion Score: {motion_score:.4f}")
            st.sidebar.write(f"Debug - Color Change Score: {color_change_score:.4f}")

        return detected_behaviors

    def detect_vertical_motion(self, frame):
        """Detect vertical motion for jumping behavior"""
        # Simple implementation - can be enhanced with more sophisticated motion tracking
        if self.prev_flow_frame is None:
            return 0.0

        frame = cv2.resize(frame, (300, 300))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Calculate optical flow against the previous frame
        # (detect_motion() has already advanced self.prev_frame to the current frame)
        flow = cv2.calcOpticalFlowFarneback(self.prev_flow_frame, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
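        # The positional arguments above are pyr_scale=0.5, levels=3, winsize=15,
        # iterations=3, poly_n=5, poly_sigma=1.2, flags=0 - the values used in the
        # standard OpenCV dense-flow example, kept here as reasonable defaults.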

        # Extract vertical motion component
        vertical_motion = np.abs(flow[..., 1]).mean()
        return vertical_motion

    def detect_ear_position(self, frame):
        """Detect ear position for ears_perked behavior"""
        # Placeholder implementation - can be enhanced with an actual ear detection model
        # For now, using simple edge detection in the upper region of the frame
        frame = cv2.resize(frame, (300, 300))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Focus on the upper region where ears typically are
        upper_region = gray[0:100, :]
        edges = cv2.Canny(upper_region, 100, 200)
        return np.sum(edges > 0) / edges.size

    def detect_audio(self, frame):
        """Detect audio for barking behavior"""
        # Placeholder - a real implementation would need audio processing
        # Return 0 as this is just a placeholder
        return 0.0

    def get_suggestions(self, detected_behaviors, behavior_counts):
        """Generate suggestions based on detected behaviors and their frequency"""
        active_suggestions = []

        # Total number of behavior detections recorded so far
        total_detections = sum(behavior_counts.values())
        if total_detections == 0:
            return []

        # Calculate behavior percentages
        behavior_percentages = {
            behavior: count / total_detections * 100
            for behavior, count in behavior_counts.items()
        }
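        # Example: if 'movement' accounts for 40 of 100 recorded detections, its
        # percentage is 40%, which clears the 30% cut-off used below.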

        # Generate relevant suggestions based on current behaviors and their frequencies
        for behavior, _ in detected_behaviors:
            if behavior in self.suggestions:
                # Select suggestions based on how frequently the behavior occurs
                if behavior_percentages[behavior] > 30:  # If behavior occurs more than 30% of the time
                    suggestions = self.suggestions[behavior]
                    active_suggestions.extend(suggestions[:2])  # Add top 2 suggestions

        # Add general suggestions based on overall behavior patterns
        if behavior_percentages.get('high_activity', 0) > 50:
            active_suggestions.append("Consider incorporating more calming activities in your routine")
        elif behavior_percentages.get('stationary', 0) > 70:
            active_suggestions.append("Your dog might benefit from more physical activity")

        # Remove duplicates and return
        return list(set(active_suggestions))


def main():
    st.title("🐕 Dog Behavior Analyzer")
    st.write("Upload a video of your dog for behavior analysis!")

    analyzer = DogBehaviorAnalyzer()

    video_file = st.file_uploader("Upload Video", type=['mp4', 'avi', 'mov'])

    if video_file is not None:
        tfile = tempfile.NamedTemporaryFile(delete=False)
        tfile.write(video_file.read())
        tfile.flush()  # ensure the full video is on disk before OpenCV opens it

        cap = cv2.VideoCapture(tfile.name)

        col1, col2 = st.columns(2)
        with col1:
            st.subheader("Video Analysis")
            video_placeholder = st.empty()
        with col2:
            st.subheader("Real-time Behavior Detection")
            analysis_placeholder = st.empty()

        progress_bar = st.progress(0)

        behavior_counts = {behavior: 0 for behavior in analyzer.behaviors.keys()}
        confidence_history = {behavior: [] for behavior in analyzer.behaviors.keys()}

        frame_count = 0
        # CAP_PROP_FRAME_COUNT can report 0 for some containers, so guard the division
        total_frames = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 1)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            frame_count += 1
            progress = min(frame_count / total_frames, 1.0)
            progress_bar.progress(progress)

            # Analyze frame
            detected_behaviors = analyzer.analyze_frame(frame)

            # Draw behavior labels on frame
            y_pos = 30
            for behavior, conf in detected_behaviors:
                behavior_counts[behavior] += 1
                confidence_history[behavior].append(conf)
                cv2.putText(frame,
                            f"{behavior.replace('_', ' ').title()}: {conf:.2f}",
                            (10, y_pos),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.7,
                            (0, 255, 0),
                            2)
                y_pos += 30

            video_placeholder.image(
                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),
                channels="RGB",
                use_container_width=True
            )

            # Update analysis display
            analysis_text = "Detected Behaviors:\n\n"
            for behavior, count in behavior_counts.items():
                if count > 0:
                    avg_conf = sum(confidence_history[behavior]) / len(confidence_history[behavior])
                    analysis_text += f"• {behavior.replace('_', ' ').title()}:\n"
                    analysis_text += f"  Count: {count} | Confidence: {avg_conf:.2f}\n"
                    analysis_text += f"  {analyzer.behaviors[behavior]['description']}\n\n"

            # Get and display suggestions
            suggestions = analyzer.get_suggestions(detected_behaviors, behavior_counts)
            if suggestions:
                analysis_text += "\nSuggestions:\n\n"
                for suggestion in suggestions:
                    analysis_text += f"💡 {suggestion}\n"

            analysis_placeholder.text_area(
                "Analysis Results",
                analysis_text,
                height=300,
                key=f"analysis_text_{frame_count}"
            )
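            # Small pause so the preview roughly tracks playback speed; it can be
            # reduced or removed if only the final summary matters.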
            time.sleep(0.05)

        cap.release()

        # Final analysis
        st.subheader("Comprehensive Analysis")

        # Create metrics
        col1, col2, col3 = st.columns(3)

        with col1:
            most_common = max(behavior_counts.items(), key=lambda x: x[1])[0] if any(behavior_counts.values()) else "None"
            st.metric("Primary Behavior", most_common.replace('_', ' ').title())
        with col2:
            total_behaviors = sum(behavior_counts.values())
            st.metric("Total Behaviors", total_behaviors)
        with col3:
            valid_confidences = [conf for confs in confidence_history.values() if confs for conf in confs]
            avg_confidence = np.mean(valid_confidences) if valid_confidences else 0
            st.metric("Average Confidence", f"{avg_confidence:.2%}")

        # Behavior distribution chart
        if any(behavior_counts.values()):
            st.subheader("Behavior Distribution")
            behavior_data = {k.replace('_', ' ').title(): v for k, v in behavior_counts.items() if v > 0}
            st.bar_chart(behavior_data)

        # Recommendations
        st.subheader("Behavior Insights")
        recommendations = []

        if behavior_counts['tail_wagging'] > total_behaviors * 0.3:
            recommendations.append("• Your dog shows frequent happiness - great time for positive reinforcement!")
        if behavior_counts['high_activity'] > total_behaviors * 0.4:
            recommendations.append("• High energy levels detected - consider more physical exercise")
        if behavior_counts['stationary'] > total_behaviors * 0.5:
            recommendations.append("• Your dog appears calm - good for training sessions")

        for rec in recommendations:
            st.write(rec)

    else:
        st.write("Upload a video to see behavior analysis!")


if __name__ == "__main__":
    main()