diff --git a/annotator/hed/__init__.py b/annotator/hed/__init__.py
index 42d8dc6..1587035 100644
--- a/annotator/hed/__init__.py
+++ b/annotator/hed/__init__.py
@@ -1,8 +1,12 @@
+import pathlib
+
import numpy as np
import cv2
import torch
from einops import rearrange
+root_dir = pathlib.Path(__file__).parents[2]
+
class Network(torch.nn.Module):
def __init__(self):
@@ -64,7 +68,7 @@ class Network(torch.nn.Module):
torch.nn.Sigmoid()
)
- self.load_state_dict({strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.load('./annotator/ckpts/network-bsds500.pth').items()})
+ self.load_state_dict({strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.load(f'{root_dir}/annotator/ckpts/network-bsds500.pth').items()})
# end
def forward(self, tenInput):
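A minimal sketch of the path-resolution pattern introduced above, assuming the module sits at annotator/hed/__init__.py inside the repository: parents[2] climbs two directories above the package, so the checkpoint path resolves to the same file regardless of the current working directory. The /repo prefix is only an illustrative placeholder; in the patch it comes from __file__.

import pathlib

# Illustrative location; the patch derives this from __file__ at import time.
module_file = pathlib.Path("/repo/annotator/hed/__init__.py")
root_dir = module_file.parents[2]   # parents[0] -> .../hed, parents[1] -> .../annotator, parents[2] -> /repo
ckpt_path = f"{root_dir}/annotator/ckpts/network-bsds500.pth"
print(ckpt_path)                    # /repo/annotator/ckpts/network-bsds500.pth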
diff --git a/annotator/midas/api.py b/annotator/midas/api.py
index 9fa305e..d8594ea 100644
--- a/annotator/midas/api.py
+++ b/annotator/midas/api.py
@@ -1,5 +1,7 @@
# based on https://github.com/isl-org/MiDaS
+import pathlib
+
import cv2
import torch
import torch.nn as nn
@@ -10,10 +12,11 @@ from .midas.midas_net import MidasNet
from .midas.midas_net_custom import MidasNet_small
from .midas.transforms import Resize, NormalizeImage, PrepareForNet
+root_dir = pathlib.Path(__file__).parents[2]
ISL_PATHS = {
- "dpt_large": "annotator/ckpts/dpt_large-midas-2f21e586.pt",
- "dpt_hybrid": "annotator/ckpts/dpt_hybrid-midas-501f0c75.pt",
+ "dpt_large": f"{root_dir}/annotator/ckpts/dpt_large-midas-2f21e586.pt",
+ "dpt_hybrid": f"{root_dir}/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt",
"midas_v21": "",
"midas_v21_small": "",
}
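A small guard like the sketch below (an assumption about usage, not part of the patch) would verify that the resolved dpt_* checkpoints actually exist before any model is constructed; the midas_v21 entries stay empty in the source, so they are simply skipped.

import os
import pathlib

root_dir = pathlib.Path(__file__).parents[2]
checkpoints = {
    "dpt_large": f"{root_dir}/annotator/ckpts/dpt_large-midas-2f21e586.pt",
    "dpt_hybrid": f"{root_dir}/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt",
    "midas_v21": "",
    "midas_v21_small": "",
}
for name, path in checkpoints.items():
    if path and not os.path.isfile(path):
        raise FileNotFoundError(f"MiDaS checkpoint '{name}' not found at {path}")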
diff --git a/annotator/mlsd/__init__.py b/annotator/mlsd/__init__.py
index 75db717..f310fe6 100644
--- a/annotator/mlsd/__init__.py
+++ b/annotator/mlsd/__init__.py
@@ -1,3 +1,5 @@
+import pathlib
+
import cv2
import numpy as np
import torch
@@ -8,8 +10,9 @@ from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
from .utils import pred_lines
+root_dir = pathlib.Path(__file__).parents[2]
-model_path = './annotator/ckpts/mlsd_large_512_fp32.pth'
+model_path = f'{root_dir}/annotator/ckpts/mlsd_large_512_fp32.pth'
model = MobileV2_MLSD_Large()
model.load_state_dict(torch.load(model_path), strict=True)
model = model.cuda().eval()
diff --git a/annotator/openpose/__init__.py b/annotator/openpose/__init__.py
index 47d50a5..2369eed 100644
--- a/annotator/openpose/__init__.py
+++ b/annotator/openpose/__init__.py
@@ -1,4 +1,5 @@
import os
+import pathlib
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import torch
@@ -7,8 +8,10 @@ from . import util
from .body import Body
from .hand import Hand
-body_estimation = Body('./annotator/ckpts/body_pose_model.pth')
-hand_estimation = Hand('./annotator/ckpts/hand_pose_model.pth')
+root_dir = pathlib.Path(__file__).parents[2]
+
+body_estimation = Body(f'{root_dir}/annotator/ckpts/body_pose_model.pth')
+hand_estimation = Hand(f'{root_dir}/annotator/ckpts/hand_pose_model.pth')
def apply_openpose(oriImg, hand=False):
diff --git a/annotator/uniformer/__init__.py b/annotator/uniformer/__init__.py
index 500e53c..4061dbe 100644
--- a/annotator/uniformer/__init__.py
+++ b/annotator/uniformer/__init__.py
@@ -1,9 +1,12 @@
+import pathlib
+
from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
from annotator.uniformer.mmseg.core.evaluation import get_palette
+root_dir = pathlib.Path(__file__).parents[2]
-checkpoint_file = "annotator/ckpts/upernet_global_small.pth"
-config_file = 'annotator/uniformer/exp/upernet_global_small/config.py'
+checkpoint_file = f"{root_dir}/annotator/ckpts/upernet_global_small.pth"
+config_file = f'{root_dir}/annotator/uniformer/exp/upernet_global_small/config.py'
model = init_segmentor(config_file, checkpoint_file).cuda()
diff --git a/annotator/util.py b/annotator/util.py
index 7cde937..10a6d58 100644
--- a/annotator/util.py
+++ b/annotator/util.py
@@ -25,7 +25,7 @@ def resize_image(input_image, resolution):
H, W, C = input_image.shape
H = float(H)
W = float(W)
- k = float(resolution) / min(H, W)
+ k = float(resolution) / max(H, W)
H *= k
W *= k
H = int(np.round(H / 64.0)) * 64
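A worked example of the change above, with illustrative values: dividing by max(H, W) scales the longer side to `resolution`, so the resized image fits within resolution on both axes, whereas the previous min(H, W) scaled the shorter side up to `resolution`.

import numpy as np

H, W = 480.0, 640.0                   # illustrative input size
resolution = 512
k = float(resolution) / max(H, W)     # 512 / 640 = 0.8 (with min(H, W) it would be 512 / 480 ≈ 1.067)
H *= k                                # 384.0
W *= k                                # 512.0
H = int(np.round(H / 64.0)) * 64      # 384
W = int(np.round(W / 64.0)) * 64      # 512
print(H, W)                           # 384 512 -> the longer side now lands on `resolution`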