#https://www.kaggle.com/code/kmader/segmenting-buildings-in-satellite-images
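# Keras building-segmentation model (architecture adapted from the notebook
# above) with helpers for loading weights, preprocessing input images, and
# downloading weight files from the Hugging Face Hub.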

from keras import models, layers
from skimage.io import imread
import numpy as np
import cv2

t_x_shape = (300, 300, 3)
GAUSSIAN_NOISE = 0.1
BASE_DEPTH = 16
BLOCK_COUNT = 1
SPATIAL_DROPOUT = 0.25
EDGE_CROP = 16


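# 3x3 convolution (optionally dilated) followed by BatchNorm + ELU.
# With preblock=True the raw linear convolution is returned so that
# normalisation and activation can be applied after a concatenation.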
def conv_bn(x, filt, dl_rate=(1, 1), preblock=False):
    y = layers.Convolution2D(filt, (3, 3),
                             activation='linear',
                             padding='same',
                             dilation_rate=dl_rate,
                             use_bias=False)(x)
    if preblock:
        return y
    y = layers.BatchNormalization()(y)
    return layers.Activation('elu')(y)

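# Builds the segmentation network: a small convolutional stem, BLOCK_COUNT
# blocks of parallel dilated convolutions concatenated with earlier skip
# layers, a bottleneck convolution, and a 1x1 sigmoid output head. The
# Cropping2D/ZeroPadding2D pair zeroes out an EDGE_CROP-wide border of the
# predicted mask while keeping the 300x300 output size.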
def keras_model():
    in_layer = layers.Input(t_x_shape, name='RGB_Input')
    pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    c = conv_bn(pp_in_layer, BASE_DEPTH//2)
    c = conv_bn(c, BASE_DEPTH//2)
    c = conv_bn(c, BASE_DEPTH)

    skip_layers = [pp_in_layer]
    depth_steps = int(np.log2(t_x_shape[0]) - 2)
    for j in range(BLOCK_COUNT):
        # Concatenate all earlier skip layers with parallel dilated
        # convolutions of increasing dilation rate (1, 2, 4, ...).
        d = layers.concatenate(skip_layers + [conv_bn(c, BASE_DEPTH * 2 ** j, (2 ** i, 2 ** i), preblock=True)
                                              for i in range(depth_steps)])
        d = layers.SpatialDropout2D(SPATIAL_DROPOUT)(d)
        d = layers.BatchNormalization()(d)
        d = layers.Activation('elu')(d)
        # bottleneck
        d = conv_bn(d, BASE_DEPTH*2**(j+1))
        skip_layers += [c]
        c = d
    d = layers.Convolution2D(1, (1, 1), activation='sigmoid', padding='same')(d)
    d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
    
    seg_model = models.Model(inputs=[in_layer], outputs=[d])
    
    return seg_model

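# Rebuild the architecture and load pre-trained weights from an .h5 file.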
def load_model(weight_path):
    seg_model = keras_model()
    seg_model.load_weights(weight_path)
    return seg_model

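# Read an image from disk, resize it to 300x300 if needed, and return a
# float32 batch of shape (1, 300, 300, 3) for an RGB image, scaled to [0, 1].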
def preprocess_image(img_path):
    img_data = imread(img_path)

    # Resize to the fixed 300x300 model input resolution if needed.
    if img_data.shape[:2] != (300, 300):
        img_data = cv2.resize(img_data, (300, 300))

    # Add a batch dimension and scale pixel values to [0, 1].
    out_img = (np.stack([img_data], 0) / 255.0).astype(np.float32)
    return out_img


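# Download the first .h5 weight file found in a Hugging Face Hub model repo
# and return its local path, or None if the repo id is invalid or the
# repository does not exist.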
def attempt_download_from_hub(repo_id, hf_token=None):
    # https://github.com/fcakyon/yolov5-pip/blob/main/yolov5/utils/downloads.py
    from huggingface_hub import hf_hub_download, list_repo_files
    from huggingface_hub.utils import RepositoryNotFoundError
    from huggingface_hub.utils import HFValidationError
    try:
        repo_files = list_repo_files(repo_id=repo_id, repo_type='model', token=hf_token)
        model_file = [f for f in repo_files if f.endswith('.h5')][0]
        file = hf_hub_download(
            repo_id=repo_id,
            filename=model_file,
            repo_type='model',
            token=hf_token,
        )
        return file
    except (RepositoryNotFoundError, HFValidationError):
        return None
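

if __name__ == '__main__':
    # Minimal end-to-end sketch. 'user/building-segmentation-keras' and
    # 'sample.png' are placeholders; substitute a real Hub repo id that
    # contains an .h5 weight file and a real image path.
    weight_path = attempt_download_from_hub('user/building-segmentation-keras')
    if weight_path is not None:
        model = load_model(weight_path)
        batch = preprocess_image('sample.png')
        mask = model.predict(batch)[0, :, :, 0]  # (300, 300) probability map
        print(mask.shape, mask.min(), mask.max())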