thimwai committed on
Commit 6c48b79 · verified · 1 Parent(s): 0b929b8

Initial file upload

Files changed (2)
  1. app.py +36 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ import gradio as gr
+ from ultralytics import YOLO
+ import cv2
+ import numpy as np
+
+ # Load the fine-tuned YOLOv8 model
+ model = YOLO("./model/best.pt")
+
+ def detect_emotion(image):
+     """
+     Perform YOLOv8 inference on the uploaded image.
+     :param image: Input image from the Gradio interface
+     :return: Annotated image with bounding boxes and emotion labels
+     """
+     # Convert the PIL image to OpenCV's BGR format
+     image = np.array(image)
+     image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+     # Perform inference
+     results = model(image)
+
+     # Annotate the image with the predicted boxes and labels
+     annotated_image = results[0].plot()
+
+     # Convert the OpenCV BGR image back to RGB for display
+     annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
+     return annotated_image
+
+ # Create the Gradio interface
+ gr.Interface(
+     fn=detect_emotion,
+     inputs=gr.Image(type="pil"),
+     outputs=gr.Image(type="numpy"),
+     title="YOLOv8 Emotion Detection",
+     description="Upload an image, and the model will draw bounding boxes with the detected emotion labels."
+ ).launch(share=True)  # share=True exposes a temporary public link
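
As a quick sanity check outside the Gradio UI, the same inference path used in app.py can be run directly from Python. A minimal sketch, assuming a local test image at ./test.jpg (a placeholder path) and the same ./model/best.pt weights:

import cv2
from ultralytics import YOLO

# Load the same weights used by app.py and run inference on one local file
model = YOLO("./model/best.pt")
image = cv2.imread("./test.jpg")         # placeholder test image, read as BGR
results = model(image)
annotated = results[0].plot()            # BGR image with boxes and emotion labels
cv2.imwrite("annotated.jpg", annotated)  # imwrite expects BGR, so no conversion needed
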
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio
+ ultralytics
+ opencv-python
+ numpy
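
Note that the cv2 import is provided by the opencv-python distribution on PyPI; with these requirements installed (pip install -r requirements.txt), running python app.py should launch the Gradio app locally.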