import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import timm
import torchvision.transforms as T
from PIL import Image
import torch


def is_gpu_available():
    """Check whether a CUDA-capable GPU is available to PyTorch."""
    return torch.cuda.is_available()


class PytorchWorker:
    """Run inference with a PyTorch (timm) image classification model."""

    def __init__(self, model_path: str, model_name: str, number_of_categories: int = 1784):

        def _load_model(model_name, model_path):
            print("Setting up Pytorch Model")
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            print(f"Using device: {self.device}")

            model = timm.create_model(model_name, num_classes=number_of_categories, pretrained=False)

            # if not torch.cuda.is_available():
            #     model_ckpt = torch.load(model_path, map_location=torch.device("cpu"))
            # else:
            #     model_ckpt = torch.load(model_path)
            model_ckpt = torch.load(model_path, map_location=self.device)
            model.load_state_dict(model_ckpt)

            return model.to(self.device).eval()

        self.model = _load_model(model_name, model_path)

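        # Resize every image to 256x256 and normalize pixel values to [-1, 1].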
        self.transforms = T.Compose([T.Resize((256, 256)),
                                     T.ToTensor(),
                                     T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

    def predict_image(self, image: Image.Image) -> list:
        """Run inference on a single image.

        :param image: Input PIL image.
        :return: Nested list of logits (one inner list of length number_of_categories).
        """
        logits = self.model(self.transforms(image).unsqueeze(0).to(self.device))

        return logits.tolist()


def make_submission(test_metadata, model_paths, model_name, output_csv_path="./submission.csv", images_root_path="/tmp/data/private_testset"):
    """Write a submission CSV with one predicted class_id per observation, using an ensemble of the given model checkpoints."""

    models = []
    for m in model_paths:
        models.append(PytorchWorker(m, model_name))

    predictions = []

    for _, row in tqdm(test_metadata.iterrows(), total=len(test_metadata)):
        image_path = os.path.join(images_root_path, row.filename)

        test_image = Image.open(image_path).convert("RGB")
        # flipped_image = test_image.transpose(Image.FLIP_LEFT_RIGHT)

        result_logits = []
        for model in models:
            result_logits += model.predict_image(test_image)
            # result_logits += model.predict_image(flipped_image)

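        # Average the logits over all models in the ensemble and take the most likely class.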
        logits = np.average(np.array(result_logits), 0)
        predictions.append(np.argmax(logits))

    test_metadata["class_id"] = predictions

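    # Observations may have several images; keep a single prediction per observation_id.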
    user_pred_df = test_metadata.drop_duplicates("observation_id", keep="first")
    user_pred_df[["observation_id", "class_id"]].to_csv(output_csv_path, index=None)
if __name__ == "__main__":
import zipfile
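    # Unpack the private test set before running inference.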
    with zipfile.ZipFile("/tmp/data/private_testset.zip", 'r') as zip_ref:
        zip_ref.extractall("/tmp/data")

    # MODEL_PATH = "pytorch_model.bin"
    MODEL_PATH = ["__best_accuracy.pth",
                  # "2405_cls_boost_best_accuracy.pth"
                  ]

    # MODEL_NAME = "tf_efficientnet_b1.ap_in1k"
    MODEL_NAME = "swinv2_tiny_window16_256.ms_in1k"

    metadata_file_path = "./SnakeCLEF2024-TestMetadata.csv"
    # metadata_file_path = "/home/zeleznyt/mnt/data-ntis/projects/korpusy_cv/SnakeCLEF2024/SnakeCLEF2023-ValMetadata.csv"
    test_metadata = pd.read_csv(metadata_file_path)

    make_submission(
        test_metadata=test_metadata,
        model_paths=MODEL_PATH,
        model_name=MODEL_NAME,
        # images_root_path='/home/zeleznyt/mnt/data-ntis/projects/korpusy_cv/SnakeCLEF2024/val/SnakeCLEF2023-medium_size'
    )