ArchiMathur committed on
Commit b0b72e4 · verified · 1 Parent(s): 46df183

Update app.py

Files changed (1)
  1. app.py +22 -7
app.py CHANGED
@@ -77,43 +77,58 @@ elif input_option == "Use Webcam":
     # Release the camera
     camera.release()
 
+import streamlit as st
+import cv2
+
+# Your fire detection model should be loaded here, e.g., `model = load_your_model()`
+
 elif input_option == "Upload Video":
-    uploaded_video = st.file_uploader("choose a video",type=["mp4","avi","mov","mkv"])
+    uploaded_video = st.file_uploader("Choose a video", type=["mp4", "avi", "mov", "mkv"])
     if uploaded_video is not None:
         # Save the uploaded video temporarily
         temp_video_path = "temp_video.mp4"
         with open(temp_video_path, "wb") as f:
             f.write(uploaded_video.read())
 
+        # Display the uploaded video
+        st.video(temp_video_path)
+
         # Open the video file
         video_capture = cv2.VideoCapture(temp_video_path)
 
-        # Create a placeholder for the video frames
+        # Create a placeholder for video frame processing
         video_frame_placeholder = st.empty()
+        fire_detected = False
 
         # Loop through video frames
         while video_capture.isOpened():
             ret, frame = video_capture.read()
             if not ret:
-                st.write("Finished processing video.")
                 break
 
-            # Make predictions
+            # Make predictions using your fire detection model
             results = model.predict(source=frame, conf=0.5)
 
-            # Draw bounding boxes on the frame
+            # Draw bounding boxes on the frame if fire is detected
             for result in results:
                 boxes = result.boxes.xyxy
                 for box in boxes:
                     x1, y1, x2, y2 = box[:4].astype(int)
                     frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+                    fire_detected = True  # Set fire_detected flag if a bounding box is found
 
-            # Convert frame to RGB
+            # Convert the frame to RGB format
             rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
-            # Display the frame
+            # Display the processed frame
             video_frame_placeholder.image(rgb_frame, channels="RGB", use_column_width=True)
 
+        # Display detection result
+        if fire_detected:
+            st.write("Fire detected in the video.")
+        else:
+            st.write("No fire detected in the video.")
+
         # Release the video capture
         video_capture.release()
 
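
For reference, below is a minimal self-contained sketch of the post-commit "Upload Video" branch. It assumes the detector is an Ultralytics YOLO model (the commit only notes that `model` should be loaded elsewhere); the weights file `fire_model.pt`, the `st.radio` input selector, and the use of `tempfile` are illustrative placeholders rather than code from this repository. Two details differ from the committed hunk: the new `import` statements are placed at the top of the file, since Python does not allow statements between an `if` block and its `elif` (where the hunk inserts them); and the box coordinates are converted with `.tolist()` before casting to `int`, because `result.boxes.xyxy` from Ultralytics is typically a torch tensor, which has no `.astype()` method.

# Hypothetical standalone sketch; names flagged as placeholders above.
import tempfile

import cv2
import streamlit as st
from ultralytics import YOLO  # assumption: the fire detector is an Ultralytics YOLO model

model = YOLO("fire_model.pt")  # placeholder weights file

input_option = st.radio("Input source", ["Use Webcam", "Upload Video"])  # placeholder selector

if input_option == "Upload Video":
    uploaded_video = st.file_uploader("Choose a video", type=["mp4", "avi", "mov", "mkv"])
    if uploaded_video is not None:
        # Save the uploaded video to a temporary file so OpenCV can open it by path
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as f:
            f.write(uploaded_video.read())
            temp_video_path = f.name

        st.video(temp_video_path)  # show the raw upload, as in the commit

        video_capture = cv2.VideoCapture(temp_video_path)
        video_frame_placeholder = st.empty()
        fire_detected = False

        while video_capture.isOpened():
            ret, frame = video_capture.read()
            if not ret:
                break

            # Run detection on the BGR frame; conf=0.5 matches the commit
            results = model.predict(source=frame, conf=0.5, verbose=False)

            for result in results:
                # xyxy corners come back as a tensor; convert to plain ints for cv2
                for box in result.boxes.xyxy.tolist():
                    x1, y1, x2, y2 = (int(v) for v in box)
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    fire_detected = True

            # OpenCV frames are BGR; Streamlit expects RGB
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            video_frame_placeholder.image(rgb_frame, channels="RGB", use_column_width=True)

        video_capture.release()

        st.write("Fire detected in the video." if fire_detected else "No fire detected in the video.")

The temporary file is created with `tempfile.NamedTemporaryFile` instead of the fixed `temp_video.mp4` path so concurrent sessions do not overwrite each other's uploads; the fixed path from the commit works equally well for a single user.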