ombhojane committed on
Commit 098ecf6 · verified · 1 Parent(s): 9b66cdf

Update app.py

Files changed (1)
  1. app.py +120 -73
app.py CHANGED
@@ -11,6 +11,7 @@ from PIL import Image, ImageDraw
 import base64
 from io import BytesIO
 import pandas as pd
+from tensorflow.keras import layers, Model
 
 # Set page config
 st.set_page_config(
@@ -21,85 +22,80 @@ st.set_page_config(
 
 class DogBehaviorAnalyzer:
     def __init__(self):
-        # Initialize model (using pretrained ResNet for this example)
-        self.model = models.resnet50(pretrained=True)
-        self.model.eval()
-
-        # Define image transformations
-        self.transform = transforms.Compose([
-            transforms.Resize((224, 224)),
-            transforms.ToTensor(),
-            transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])
-        ])
-
-        # Enhanced behavior mappings with emotions and tips
-        self.behaviors = {
-            'tail_wagging': {
-                'emotion': 'Happy and excited',
-                'description': 'Your dog is expressing joy and enthusiasm!',
-                'tips': [
-                    'This is a great time for positive reinforcement training',
-                    'Consider engaging in play or exercise',
-                    'Use this excitement for teaching new tricks'
-                ]
-            },
-            'barking': {
-                'emotion': 'Alert or communicative',
-                'description': 'Your dog is trying to communicate or alert you.',
-                'tips': [
-                    'Check what triggered the barking',
-                    'Use positive reinforcement for quiet behavior',
-                    'Consider training "quiet" and "speak" commands'
-                ]
-            },
-            'ears_perked': {
-                'emotion': 'Alert and interested',
-                'description': 'Your dog is focused and attentive.',
-                'tips': [
-                    'Great moment for training exercises',
-                    'Consider mental stimulation activities',
-                    'Use this attention for bonding exercises'
-                ]
-            },
-            'lying_down': {
-                'emotion': 'Relaxed and comfortable',
-                'description': 'Your dog is calm and at ease.',
-                'tips': [
-                    'Perfect time for gentle petting',
-                    'Maintain a peaceful environment',
-                    'Consider quiet bonding activities'
-                ]
-            },
-            'jumping': {
-                'emotion': 'Excited and playful',
-                'description': 'Your dog is energetic and seeking interaction!',
-                'tips': [
-                    'Channel energy into structured play',
-                    'Practice "four paws on floor" training',
-                    'Consider agility exercises'
-                ]
-            }
+        # Use YOLOv4 instead of ResNet
+        self.model = self.load_yolov4_model()
+
+        # Add support for sensor data (if available)
+        self.sensor_model = self.load_sensor_model()
+
+        # Define CNN-LSTM fusion model
+        self.fusion_model = self.create_fusion_model()
+
+        # Enhanced behavior detection with confidence thresholds
+        self.behavior_thresholds = {
+            'tail_wagging': 0.85,
+            'barking': 0.90,
+            'ears_perked': 0.85,
+            'lying_down': 0.80,
+            'jumping': 0.85,
+            'standing': 0.80,
+            'sitting': 0.80,
+            'running': 0.90
         }
 
-    def analyze_frame(self, frame):
-        # Convert frame to PIL Image
-        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-
-        # Transform image
-        input_tensor = self.transform(image)
-        input_batch = input_tensor.unsqueeze(0)
-
-        # Simulate behavior detection
-        # In a real implementation, you'd use a properly trained model
-        behaviors = []
-        confidence_scores = np.random.random(len(self.behaviors))
-
-        for behavior, score in zip(self.behaviors.keys(), confidence_scores):
-            if score > 0.7:  # Threshold for detection
-                behaviors.append(behavior)
-
-        return behaviors
+    def create_fusion_model(self):
+        """Create CNN-LSTM fusion model for better accuracy"""
+        # Implementation based on research paper architecture
+        video_input = layers.Input(shape=(None, 224, 224, 3))
+        sensor_input = layers.Input(shape=(None, sensor_features))
+
+        # CNN for video processing
+        cnn = layers.Conv2D(64, (3, 3), activation='relu')(video_input)
+        # ... additional CNN layers ...
+
+        # LSTM for temporal features
+        lstm = layers.LSTM(128, return_sequences=True)(cnn)
+
+        # Fusion layer
+        fusion = layers.Concatenate()([lstm, sensor_input])
+
+        # Output layer
+        output = layers.Dense(len(self.behaviors), activation='softmax')(fusion)
+
+        return Model(inputs=[video_input, sensor_input], outputs=output)
+
+    def analyze_frame(self, frame, sensor_data=None):
+        """Enhanced frame analysis using fusion model"""
+        # Convert frame to appropriate format
+        processed_frame = self.preprocess_frame(frame)
+
+        if sensor_data is not None:
+            # Use fusion model for more accurate detection
+            predictions = self.fusion_model.predict([processed_frame, sensor_data])
+        else:
+            # Fallback to video-only analysis
+            predictions = self.model.predict(processed_frame)
+
+        # Apply confidence thresholds
+        detected_behaviors = []
+        for behavior, confidence in zip(self.behaviors.keys(), predictions[0]):
+            if confidence > self.behavior_thresholds[behavior]:
+                detected_behaviors.append({
+                    'behavior': behavior,
+                    'confidence': float(confidence),
+                    'timestamp': time.time()
+                })
+
+        return detected_behaviors
+
+    def validate_detection(self, behaviors, previous_behaviors):
+        """Add temporal consistency check"""
+        validated_behaviors = []
+        for behavior in behaviors:
+            # Check temporal consistency
+            if self.is_temporally_consistent(behavior, previous_behaviors):
+                validated_behaviors.append(behavior)
+        return validated_behaviors
 
     def create_animation(self, behavior):
         """Create simple animations for behaviors"""
@@ -119,6 +115,57 @@ class DogBehaviorAnalyzer:
         img.save(buffered, format="PNG")
         return base64.b64encode(buffered.getvalue()).decode()
 
+    def create_visualization(self, behavior, frame):
+        """Create more accurate behavior visualizations"""
+        # Create overlay on actual frame instead of generic shapes
+        overlay = frame.copy()
+
+        # Get dog keypoints using pose estimation
+        keypoints = self.detect_dog_keypoints(frame)
+
+        if keypoints is not None:
+            if behavior == 'tail_wagging':
+                # Draw tail trajectory
+                self.draw_tail_trajectory(overlay, keypoints)
+            elif behavior == 'sitting':
+                # Draw pose skeleton
+                self.draw_pose_skeleton(overlay, keypoints)
+            # ... other behaviors ...
+
+        return cv2.addWeighted(frame, 0.7, overlay, 0.3, 0)
+
+    def evaluate_detection_quality(self, detections):
+        """Evaluate detection quality using metrics from the paper"""
+        metrics = {
+            'accuracy': 0,
+            'precision': 0,
+            'recall': 0,
+            'f_score': 0
+        }
+
+        # Calculate metrics based on paper formulas
+        true_positives = len([d for d in detections if d['confidence'] > 0.9])
+        false_positives = len([d for d in detections if d['confidence'] < 0.7])
+
+        metrics['precision'] = true_positives / (true_positives + false_positives)
+        # ... calculate other metrics ...
+
+        return metrics
+
+    def analyze_sequence(self, frames, window_size=30):
+        """Analyze behavior over multiple frames"""
+        sequence_behaviors = []
+
+        for i in range(len(frames) - window_size):
+            window = frames[i:i+window_size]
+            frame_behaviors = [self.analyze_frame(f) for f in window]
+
+            # Apply temporal smoothing
+            smoothed_behavior = self.temporal_smoothing(frame_behaviors)
+            sequence_behaviors.append(smoothed_behavior)
+
+        return sequence_behaviors
+
 def main():
     st.title("🐕 Dog Language Understanding")
     st.write("Upload a video of your dog to analyze their behavior and emotions!")
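Note on create_fusion_model: as committed, layers.Conv2D is applied directly to a 5-D frame-sequence tensor, sensor_features is never defined, and len(self.behaviors) points at the mapping this commit deletes, so the method will not run as-is. Below is a minimal runnable sketch of the CNN-LSTM fusion idea, assuming 8 output classes (matching behavior_thresholds) and a hypothetical SENSOR_FEATURES count; build_fusion_model and both constants are illustrative names, not part of the commit.

import tensorflow as tf
from tensorflow.keras import layers, Model

NUM_BEHAVIORS = 8    # len(behavior_thresholds) in this commit
SENSOR_FEATURES = 6  # assumption: e.g. 3-axis accelerometer + 3-axis gyro

def build_fusion_model(seq_len=30):
    # Fixed-length clip of RGB frames plus an aligned sensor sequence.
    video_input = layers.Input(shape=(seq_len, 224, 224, 3))
    sensor_input = layers.Input(shape=(seq_len, SENSOR_FEATURES))

    # TimeDistributed applies the CNN to every frame, yielding one
    # feature vector per frame instead of convolving a 5-D tensor.
    x = layers.TimeDistributed(layers.Conv2D(64, 3, activation='relu'))(video_input)
    x = layers.TimeDistributed(layers.MaxPooling2D())(x)
    x = layers.TimeDistributed(layers.Conv2D(128, 3, activation='relu'))(x)
    x = layers.TimeDistributed(layers.GlobalAveragePooling2D())(x)

    # LSTM models the temporal evolution of the per-frame features.
    lstm = layers.LSTM(128, return_sequences=True)(x)

    # Late fusion: concatenate visual and sensor features per timestep,
    # then collapse the sequence before classification.
    fusion = layers.Concatenate()([lstm, sensor_input])
    fusion = layers.LSTM(64)(fusion)

    output = layers.Dense(NUM_BEHAVIORS, activation='softmax')(fusion)
    return Model(inputs=[video_input, sensor_input], outputs=output)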
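The new analyze_frame calls self.preprocess_frame, which this commit never defines, and time.time() needs an import time that app.py does not add. One plausible preprocess_frame, assuming the video-only fallback expects a normalized (1, 224, 224, 3) batch; the function body is a sketch, not the author's implementation:

import cv2
import numpy as np

def preprocess_frame(frame, size=(224, 224)):
    # OpenCV delivers BGR; most Keras models are trained on RGB.
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, size)
    # Scale pixels to [0, 1] and add a batch axis for model.predict().
    scaled = resized.astype(np.float32) / 255.0
    return np.expand_dims(scaled, axis=0)  # shape (1, 224, 224, 3)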
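validate_detection depends on is_temporally_consistent, also undefined here. A simple sketch, assuming previous_behaviors is a list of recent per-frame detection lists as returned by analyze_frame; min_hits is an assumed parameter:

def is_temporally_consistent(behavior, previous_behaviors, min_hits=3):
    # Accept a detection only if the same label already appeared in at
    # least min_hits of the recent frames.
    recent_labels = [d['behavior'] for frame in previous_behaviors for d in frame]
    return recent_labels.count(behavior['behavior']) >= min_hits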
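create_visualization relies on three helpers this commit does not define (detect_dog_keypoints, draw_tail_trajectory, draw_pose_skeleton). A sketch of the drawing side, assuming keypoints arrive as (x, y) pixel coordinates; the default connections skeleton is hypothetical:

import cv2

def draw_pose_skeleton(overlay, keypoints, connections=((0, 1), (1, 2))):
    # Mark each keypoint, then connect the configured index pairs.
    for x, y in keypoints:
        cv2.circle(overlay, (int(x), int(y)), 4, (0, 255, 0), -1)
    for a, b in connections:
        xa, ya = keypoints[a]
        xb, yb = keypoints[b]
        cv2.line(overlay, (int(xa), int(ya)), (int(xb), int(yb)), (0, 255, 0), 2)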
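evaluate_detection_quality treats detections above 0.9 confidence as true positives and below 0.7 as false positives, but precision and recall are defined against ground-truth labels, not confidence bands, and the division raises ZeroDivisionError when nothing qualifies. A conventional label-based sketch, assuming per-frame sets of predicted and annotated behaviors (detection_metrics is an illustrative name):

def detection_metrics(predicted, actual):
    # predicted/actual: sets of behavior labels for one frame or clip.
    tp = len(predicted & actual)   # detected and annotated
    fp = len(predicted - actual)   # detected but not annotated
    fn = len(actual - predicted)   # annotated but missed
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f_score = (2 * precision * recall / (precision + recall)
               if precision + recall else 0.0)
    return {'precision': precision, 'recall': recall, 'f_score': f_score}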
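analyze_sequence calls self.temporal_smoothing, not defined in this commit. A majority-vote smoother over the sliding window, assuming frame_behaviors is a list of per-frame detection lists; min_fraction is an assumed parameter:

from collections import Counter

def temporal_smoothing(frame_behaviors, min_fraction=0.5):
    # Keep only labels detected in at least min_fraction of the window.
    counts = Counter(d['behavior'] for frame in frame_behaviors for d in frame)
    needed = min_fraction * len(frame_behaviors)
    return [label for label, n in counts.items() if n >= needed]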