import json

import gradio as gr
import yolov5
from huggingface_hub import hf_hub_download
from PIL import Image

app_title = "Clash of Clans Object Detection"
models_ids = ['keremberke/yolov5n-clash-of-clans', 'keremberke/yolov5s-clash-of-clans', 'keremberke/yolov5m-clash-of-clans']
article = f"<p style='text-align: center'> <a href='https://huggingface.co/{models_ids[-1]}'>huggingface.co/{models_ids[-1]}</a> | <a href='https://huggingface.co/keremberke/clash-of-clans-object-detection'>huggingface.co/keremberke/clash-of-clans-object-detection</a> | <a href='https://github.com/keremberke/awesome-yolov5-models'>awesome-yolov5-models</a> </p>"

current_model_id = models_ids[-1]
model = yolov5.load(current_model_id)

examples = [
    ['test_images/IMG_1879_jpg.rf.c0e9cd93962f7cf2df6fbeef63ab6ed9.jpg', 0.25, 'keremberke/yolov5m-clash-of-clans'],
    ['test_images/IMG_1906_jpg.rf.e3d1ef9a4c55d6576e95ba057984d204.jpg', 0.25, 'keremberke/yolov5m-clash-of-clans'],
    ['test_images/IMG_1931_jpg.rf.16f42f6d309c3de5661625af454aeb0c.jpg', 0.25, 'keremberke/yolov5m-clash-of-clans'],
    ['test_images/IMG_2016_jpg.rf.6013a2119e90b56bb2a07d83954dc637.jpg', 0.25, 'keremberke/yolov5m-clash-of-clans'],
    ['test_images/IMG_2023_jpg.rf.01d180bc2ed3d5bab7b0026cd8e4c09a.jpg', 0.25, 'keremberke/yolov5m-clash-of-clans'],
    ['test_images/IMG_2034_jpg.rf.40ab3489487d018d0112f49622f0f9a0.jpg', 0.25, 'keremberke/yolov5m-clash-of-clans'],
]


def predict(image, threshold=0.25, model_id=None):
    # update model if required
    global current_model_id
    global model
    if model_id != current_model_id:
        model = yolov5.load(model_id)
        current_model_id = model_id

    # get model input size from the checkpoint's config.json on the Hub
    config_path = hf_hub_download(repo_id=model_id, filename="config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    input_size = config["input_size"]

    # perform inference and render the detections onto the image
    model.conf = threshold
    results = model(image, size=input_size)
    numpy_image = results.render()[0]
    output_image = Image.fromarray(numpy_image)
    return output_image


gr.Interface(
    title=app_title,
    description="Created by 'keremberke'",
    article=article,
    fn=predict,
    inputs=[
        gr.Image(type="pil"),
        gr.Slider(maximum=1, step=0.01, value=0.25),
        gr.Dropdown(models_ids, value=models_ids[-1]),
    ],
    outputs=gr.Image(type="pil"),
    examples=examples,
    cache_examples=True if examples else False,
).launch(enable_queue=True)
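
# A minimal sketch of exercising predict() directly, bypassing the Gradio UI.
# This is an illustrative addition, not part of the original Space; it assumes
# one of the bundled test images is present locally and is left commented out
# so the app's behavior is unchanged:
#
#   demo_image = Image.open(examples[0][0])
#   annotated = predict(demo_image, threshold=0.25, model_id=models_ids[-1])
#   annotated.save('prediction.jpg')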