#app7.py
import gradio as gr
import torch
from PIL import Image
from ultralyticsplus import YOLO, render_result
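# Download the sample images used in the Gradio examples gallery below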
torch.hub.download_url_to_file(
'https://i.postimg.cc/g2xGJ4Qs/NSTA-Test-IMG-3276.jpg', 'NSTA.jpg')
torch.hub.download_url_to_file(
'https://i.postimg.cc/BZCSwj2T/NSTB-Test-IMG-1472.jpg', 'NSTB.jpg')
torch.hub.download_url_to_file(
'https://i.postimg.cc/yYY1q7Tw/NSTC-Test-IMG-0118.jpg', 'NSTC.jpg')
torch.hub.download_url_to_file(
'https://i.postimg.cc/zD9ZQX6z/KCCA-Test-IMG-3555.jpg', 'KCCA.jpg')
torch.hub.download_url_to_file(
'https://i.postimg.cc/vZLPXP7L/KCCB-Test-IMG-3733.jpg', 'KCCB.jpg')
torch.hub.download_url_to_file(
'https://i.postimg.cc/BZFYqFmF/KCCC-Test-IMG-3892.jpg', 'KCCC.jpg')
def detect_objects(image_path, selected_model):
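    """Run the selected YOLOv8 cocoa-seed model on an image and return the
    rendered detections, per-class seed counts, and the chosen model name."""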
    # Open the image file and resize it
    image = Image.open(image_path)
    resized_image = image.resize((1024, 768))

    # Pick the checkpoint that matches the dropdown selection;
    # fall back to the NST model if nothing is selected.
    if str(selected_model) == "KCC Model":
        model_path = 'MvitHYF/kccv8mvitcocoaseed2024'
    else:
        model_path = 'MvitHYF/v8mvitcocoaseed2024'  # NST Model (default)

    # Load the model
    model = YOLO(model_path)

    # Set model parameters
    model.overrides['conf'] = 0.25          # NMS confidence threshold (0.25 is the Ultralytics default)
    model.overrides['iou'] = 0.70           # NMS IoU threshold
    model.overrides['agnostic_nms'] = True  # class-agnostic NMS
    model.overrides['max_det'] = 1000       # maximum number of detections per image
    # Perform inference
    results = model.predict(resized_image)

    # Count detections per class from the class-id tensor
    # (class 0 = Class A, 1 = Class B, 2 = Class C).
    cls = results[0].boxes.cls
    count_classa = int((cls == 0).sum())
    count_classb = int((cls == 1).sum())
    count_classc = int((cls == 2).sum())
    total = count_classa + count_classb + count_classc

    # Format the output to print the counts
    output_counts = (
        f"Total cocoa seeds: {total}\n"
        f"Class A: {count_classa} seeds\n"
        f"Class B: {count_classb} seeds\n"
        f"Class C: {count_classc} seeds"
    )
    # Render results
    render = render_result(model=model, image=resized_image, result=results[0])
    print("Selected model:", selected_model)
    return render, output_counts, "You have selected the " + str(selected_model)
with gr.Blocks(theme='ParityError/LimeFace') as demo:
    with gr.Row():
        with gr.Column():
            gr.Interface(
                fn=detect_objects,
                inputs=[
                    gr.Image(type="filepath", label="Upload an Image"),
                    gr.Dropdown(choices=["NST Model", "KCC Model"], value="NST Model", label="Select Model"),
                ],
                outputs=[
                    gr.Image(label="Result"),
                    gr.Textbox(label="Detection Counts"),
                    gr.Textbox(label="Selected Model"),
                ],
                title="YOLOv8 Cocoa Seed Classification",
                description="Upload an image to detect objects using YOLO.",
                # Each example supplies both inputs: the image and the matching model.
                examples=[
                    ["NSTA.jpg", "NST Model"],
                    ["NSTB.jpg", "NST Model"],
                    ["NSTC.jpg", "NST Model"],
                    ["KCCA.jpg", "KCC Model"],
                    ["KCCB.jpg", "KCC Model"],
                    ["KCCC.jpg", "KCC Model"],
                ],
                cache_examples=False,
            )
    with gr.Row():
        gr.HTML(value=(
            "<p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; NSTA &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; NSTB &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; NSTC "
            "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; KCCA &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; KCCB "
            "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; KCCC</p>"
            "<dl>"
            "<dt><b>Class A</b></dt>"
            "<dd>Class A is the best of the three classes. It has the best physical appearance, e.g. shape, size and texture.</dd>"
            "<dt><b>Class B</b></dt>"
            "<dd>In Class B, most cocoa seeds look similar to Class A, but they are smaller and the texture is not as smooth as Class A.</dd>"
            "<dt><b>Class C</b></dt>"
            "<dd>Class C is the worst of the three classes. The seeds are the smallest, with a rough texture and an irregular shape.</dd>"
            "</dl>"
        ))
if __name__ == "__main__":
    demo.queue().launch(share=True)
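# share=True requests a public Gradio link when the app is run locally;
# Hugging Face Spaces serves the app at its own URL.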