crop-detection / app.py
from pathlib import Path
import gradio as gr
from ultralytics import YOLO
from PIL import Image
# Load YOLOv8n model
MODEL = YOLO("weights/best.pt")
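# Directory of sample images bundled with the Space, shown as examples in the UI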
IMAGES_PATH = Path("images/")
INF_PARAMETERS = {
    "imgsz": 640,  # image size
    "conf": 0.8,   # confidence threshold
    "max_det": 1,  # maximum number of detections
}
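# Use every image in the folder as a clickable example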
EXAMPLES = [path for path in IMAGES_PATH.iterdir()]
# Function to detect objects and crop the image
def detect_and_crop(image: Image.Image) -> Image.Image:
    # Run object detection on the input image
    results = MODEL.predict(image, **INF_PARAMETERS)
    result = results[0]
    # Crop to the detected bounding box (max_det=1, so at most one box is returned)
    for box in result.boxes.xyxy.cpu().numpy():
        return image.crop(box=tuple(box))
    # Nothing detected above the confidence threshold: return the image unchanged
    return image
# Gradio UI
title = "Crop-Detection"
description = """## πŸ‹β€πŸŸ© Automatically crop product pictures! πŸ‹β€πŸŸ©
When contributors use the mobile app, they are asked to take pictures of a product and then crop them.
To assist users during this step, we created a crop-detection model designed to detect the product edges.
We fine-tuned YOLOv8n on images extracted from the Open Food Facts database.
Check the [model repo page](https://huggingface.co/openfoodfacts/crop-detection) for more information.
"""
# Gradio Interface
demo = gr.Interface(
    fn=detect_and_crop,
    inputs=gr.Image(type="pil", width=300),
    outputs=gr.Image(type="pil", width=300),
    title=title,
    description=description,
    allow_flagging="never",
    examples=EXAMPLES,
)
# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()