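"""Gradio demo for YOLOv9 object detection.

Downloads a YOLOv9 checkpoint from the Hugging Face Hub, runs inference on an
uploaded image, and shows the image with the detected bounding boxes drawn on it.
"""

# Assumed dependencies (inferred from the imports below; exact package names may
# vary in your environment): gradio, spaces, huggingface_hub, and a yolov9
# package providing `yolov9.load` (e.g. `pip install yolov9pip`).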
import gradio as gr
import spaces  # Hugging Face Spaces helper package
from huggingface_hub import hf_hub_download


def download_models(model_id):
    # Fetch the requested checkpoint from the Hugging Face Hub into the working directory.
    hf_hub_download("merve/yolov9", filename=f"{model_id}", local_dir="./")
    return f"./{model_id}"


def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
    """
    Download a YOLOv9 checkpoint, configure it, and run inference on an image.

    :param img_path: Path to the image file.
    :param model_id: Filename of the YOLOv9 checkpoint to download from the Hub.
    :param image_size: Input size for inference.
    :param conf_threshold: Confidence threshold for NMS.
    :param iou_threshold: IoU threshold for NMS.
    :return: The input image as a numpy array with detection boxes drawn on it.
    """
    # Import YOLOv9
    import yolov9

    # Load the model
    model_path = download_models(model_id)
    model = yolov9.load(model_path)

    # Set model parameters
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Perform inference
    results = model(img_path, size=image_size)

    # Draw detection bounding boxes on the image
    output = results.render()

    return output[0]


def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                img_path = gr.Image(type="filepath", label="Image")
                model_path = gr.Dropdown(
                    label="Model",
                    choices=[
                        "yolov9-c.pt",
                    ],
                    value="yolov9-c.pt",
                )
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Inference")

            with gr.Column():
                output_numpy = gr.Image(type="numpy", label="Output")

        yolov9_infer.click(
            fn=yolov9_inference,
            inputs=[
                img_path,
                model_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy],
        )
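# A minimal sketch of calling the inference function outside the UI, assuming a
# local image file exists (the "sample.jpg" path below is hypothetical) and the
# "yolov9-c.pt" checkpoint can be downloaded from the Hub:
#
#   annotated = yolov9_inference(
#       img_path="sample.jpg",
#       model_id="yolov9-c.pt",
#       image_size=640,
#       conf_threshold=0.4,
#       iou_threshold=0.5,
#   )
#   # `annotated` is a numpy array of the image with detection boxes drawn on it.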

gradio_app = gr.Blocks()
with gradio_app:
    gr.HTML(
        """
        YOLOv9: Learning What You Want to Learn Using Programmable Gradient Information
        """)
    gr.HTML(
        """
        Follow me for more! Twitter | Github | Linkedin | HuggingFace
        """)
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch(debug=True)