bone_age / app.py
Medvira's picture
Update app.py
c8bb09b verified
raw
history blame
1.74 kB
import gradio as gr
import torch
import onnxruntime as ort
import os
import gdown
# Location of the pretrained ONNX model on Google Drive and its local cache path.
model_url = "https://drive.google.com/file/d/18HYScsRJuRmfzL0E0BW35uaA542Vd5M5/view?usp=sharing"
model_path = os.path.join(os.getcwd(), "bone_age_model.onnx")

# Download the model only if it is not already cached locally.
# fuzzy=True is required for ".../view?usp=sharing" style URLs: it lets
# gdown extract the file id from the share link.  Without it, gdown
# fetches the Drive HTML preview page instead of the actual model file.
if not os.path.exists(model_path):
    gdown.download(model_url, model_path, quiet=False, fuzzy=True)

# Create a single ONNX Runtime session, reused across all inference calls.
session = ort.InferenceSession(model_path)
# Define the inference function
def inference(sample_name):
    """Run the bone-age model on a pre-saved sample and return its results.

    Parameters
    ----------
    sample_name : str
        Base name (without the ``.pth`` extension) of a sample file in the
        current working directory, as chosen from the Gradio dropdown.

    Returns
    -------
    tuple
        ``(true_bone_age, predicted_bone_age, image)`` — one value per
        Gradio output component, in the order declared in the Interface.
        (The original returned a dict, which Gradio cannot map onto a
        list of three output components.)
    """
    # Local import: PIL was never imported at module level, which made the
    # original raise NameError on every call.  Importing here keeps this
    # fix self-contained.
    from PIL import Image

    sample_path = os.path.join(os.getcwd(), f'{sample_name}.pth')
    sample = torch.load(sample_path)

    # Ground-truth bone age stored alongside the sample tensor.
    age = sample['boneage'].item()

    # The model emits a normalized score; de-normalize with what appear to
    # be training-set statistics (std ≈ 41.172, mean ≈ 127.329) — TODO
    # confirm these constants against the training pipeline.
    outputs = session.run(None, {"input": sample['path'].numpy()})
    predicted_age = (outputs[0] * 41.172) + 127.329

    # NOTE(review): sample['path'] is used both as the model input tensor
    # (.numpy() above) and as an image path here — verify the sample dict
    # schema; these two uses look mutually inconsistent.
    image_path = sample['path'][0]
    image = Image.open(image_path)

    # Return a tuple, not a dict: the Interface declares three separate
    # output components, so Gradio expects one value per component.
    return age, predicted_age[0][0], image
# Collect the available sample names from the ./samples directory.
# NOTE: os.listdir does not accept glob patterns — the original
# os.listdir(os.path.join(cwd, 'samples', '*.pth')) raised
# FileNotFoundError.  Filter the returned names by suffix instead.
samples_dir = os.path.join(os.getcwd(), 'samples')
sample_files = sorted(f for f in os.listdir(samples_dir) if f.endswith('.pth'))
sample_names = [f[:-len('.pth')] for f in sample_files]

# Build the Gradio interface with the current component API:
# gr.inputs.* / gr.outputs.* were deprecated in Gradio 2.x and removed
# in 3.x — the top-level components are their replacements.
dropdown = gr.Dropdown(choices=sample_names, label="Select a sample")
iface = gr.Interface(
    fn=inference,
    inputs=dropdown,
    outputs=[
        gr.Textbox(label="Bone Age"),
        gr.Textbox(label="Predicted Bone Age"),
        gr.Image(label="Image"),
    ],
    title="Bone Age Prediction",
    description="Select a sample from the dropdown to see the bone age and predicted bone age.",
)

# Launch the app
iface.launch()