kittendev committed
Commit 270b80e
1 parent: 650351e

Update tools/ai/torch_utils.py

Files changed (1)
  1. tools/ai/torch_utils.py +122 -122
tools/ai/torch_utils.py CHANGED
@@ -1,123 +1,123 @@
- import cv2
- import math
- import torch
- import random
- import numpy as np
-
- import torch.nn.functional as F
-
- from torch.optim.lr_scheduler import LambdaLR
-
- def set_seed(seed):
-     random.seed(seed)
-     np.random.seed(seed)
-
-     torch.manual_seed(seed)
-     if torch.cuda.is_available():
-         torch.cuda.manual_seed_all(seed)
-
- def rotation(x, k):
-     return torch.rot90(x, k, (1, 2))
-
- def interleave(x, size):
-     s = list(x.shape)
-     return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
-
- def de_interleave(x, size):
-     s = list(x.shape)
-     return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
-
- def resize_for_tensors(tensors, size, mode='bilinear', align_corners=False):
-     return F.interpolate(tensors, size, mode=mode, align_corners=align_corners)
-
- def L1_Loss(A_tensors, B_tensors):
-     return torch.abs(A_tensors - B_tensors)
-
- def L2_Loss(A_tensors, B_tensors):
-     return torch.pow(A_tensors - B_tensors, 2)
-
- # ratio = 0.2, top=20%
- def Online_Hard_Example_Mining(values, ratio=0.2):
-     b, c, h, w = values.size()
-     return torch.topk(values.reshape(b, -1), k=int(c * h * w * ratio), dim=-1)[0]
-
- def shannon_entropy_loss(logits, activation=torch.sigmoid, epsilon=1e-5):
-     v = activation(logits)
-     return -torch.sum(v * torch.log(v+epsilon), dim=1).mean()
-
- def make_cam(x, epsilon=1e-5):
-     # relu(x) = max(x, 0)
-     x = F.relu(x)
-
-     b, c, h, w = x.size()
-
-     flat_x = x.view(b, c, (h * w))
-     max_value = flat_x.max(axis=-1)[0].view((b, c, 1, 1))
-
-     return F.relu(x - epsilon) / (max_value + epsilon)
-
- def one_hot_embedding(label, classes):
-     """Embedding labels to one-hot form.
-
-     Args:
-         labels: (int) class labels.
-         num_classes: (int) number of classes.
-
-     Returns:
-         (tensor) encoded labels, sized [N, #classes].
-     """
-
-     vector = np.zeros((classes), dtype = np.float32)
-     if len(label) > 0:
-         vector[label] = 1.
-     return vector
-
- def calculate_parameters(model):
-     return sum(param.numel() for param in model.parameters())/1000000.0
-
- def get_learning_rate_from_optimizer(optimizer):
-     return optimizer.param_groups[0]['lr']
-
- def get_numpy_from_tensor(tensor):
-     return tensor.cpu().detach().numpy()
-
- def load_model(model, model_path, parallel=False):
-     if parallel:
-         model.module.load_state_dict(torch.load(model_path))
-     else:
-         model.load_state_dict(torch.load(model_path))
-
- def save_model(model, model_path, parallel=False):
-     if parallel:
-         torch.save(model.module.state_dict(), model_path)
-     else:
-         torch.save(model.state_dict(), model_path)
-
- def transfer_model(pretrained_model, model):
-     pretrained_dict = pretrained_model.state_dict()
-     model_dict = model.state_dict()
-
-     pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
-
-     model_dict.update(pretrained_dict)
-     model.load_state_dict(model_dict)
-
- def get_learning_rate(optimizer):
-     lr = []
-     for param_group in optimizer.param_groups:
-         lr += [param_group['lr']]
-     return lr
-
- def get_cosine_schedule_with_warmup(optimizer,
-                                     warmup_iteration,
-                                     max_iteration,
-                                     cycles=7./16.
-                                     ):
-     def _lr_lambda(current_iteration):
-         if current_iteration < warmup_iteration:
-             return float(current_iteration) / float(max(1, warmup_iteration))
-
-         no_progress = float(current_iteration - warmup_iteration) / float(max(1, max_iteration - warmup_iteration))
-         return max(0., math.cos(math.pi * cycles * no_progress))
-
+ import cv2
+ import math
+ import torch
+ import random
+ import numpy as np
+
+ import torch.nn.functional as F
+
+ from torch.optim.lr_scheduler import LambdaLR
+
+ def set_seed(seed):
+     random.seed(seed)
+     np.random.seed(seed)
+
+     torch.manual_seed(seed)
+     if torch.cuda.is_available():
+         torch.cuda.manual_seed_all(seed)
+
+ def rotation(x, k):
+     return torch.rot90(x, k, (1, 2))
+
+ def interleave(x, size):
+     s = list(x.shape)
+     return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
+
+ def de_interleave(x, size):
+     s = list(x.shape)
+     return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
+
+ def resize_for_tensors(tensors, size, mode='bilinear', align_corners=False):
+     return F.interpolate(tensors, size, mode=mode, align_corners=align_corners)
+
+ def L1_Loss(A_tensors, B_tensors):
+     return torch.abs(A_tensors - B_tensors)
+
+ def L2_Loss(A_tensors, B_tensors):
+     return torch.pow(A_tensors - B_tensors, 2)
+
+ # ratio = 0.2, top=20%
+ def Online_Hard_Example_Mining(values, ratio=0.2):
+     b, c, h, w = values.size()
+     return torch.topk(values.reshape(b, -1), k=int(c * h * w * ratio), dim=-1)[0]
+
+ def shannon_entropy_loss(logits, activation=torch.sigmoid, epsilon=1e-5):
+     v = activation(logits)
+     return -torch.sum(v * torch.log(v+epsilon), dim=1).mean()
+
+ def make_cam(x, epsilon=1e-5):
+     # relu(x) = max(x, 0)
+     x = F.relu(x)
+
+     b, c, h, w = x.size()
+
+     flat_x = x.view(b, c, (h * w))
+     max_value = flat_x.max(axis=-1)[0].view((b, c, 1, 1))
+
+     return F.relu(x - epsilon) / (max_value + epsilon)
+
+ def one_hot_embedding(label, classes):
+     """Embedding labels to one-hot form.
+
+     Args:
+         labels: (int) class labels.
+         num_classes: (int) number of classes.
+
+     Returns:
+         (tensor) encoded labels, sized [N, #classes].
+     """
+
+     vector = np.zeros((classes), dtype = np.float32)
+     if len(label) > 0:
+         vector[label] = 1.
+     return vector
+
+ def calculate_parameters(model):
+     return sum(param.numel() for param in model.parameters())/1000000.0
+
+ def get_learning_rate_from_optimizer(optimizer):
+     return optimizer.param_groups[0]['lr']
+
+ def get_numpy_from_tensor(tensor):
+     return tensor.cpu().detach().numpy()
+
+ def load_model(model, model_path, parallel=False):
+     if parallel:
+         model.module.load_state_dict(torch.load(model_path))
+     else:
+         model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
+
+ def save_model(model, model_path, parallel=False):
+     if parallel:
+         torch.save(model.module.state_dict(), model_path)
+     else:
+         torch.save(model.state_dict(), model_path)
+
+ def transfer_model(pretrained_model, model):
+     pretrained_dict = pretrained_model.state_dict()
+     model_dict = model.state_dict()
+
+     pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
+
+     model_dict.update(pretrained_dict)
+     model.load_state_dict(model_dict)
+
+ def get_learning_rate(optimizer):
+     lr = []
+     for param_group in optimizer.param_groups:
+         lr += [param_group['lr']]
+     return lr
+
+ def get_cosine_schedule_with_warmup(optimizer,
+                                     warmup_iteration,
+                                     max_iteration,
+                                     cycles=7./16.
+                                     ):
+     def _lr_lambda(current_iteration):
+         if current_iteration < warmup_iteration:
+             return float(current_iteration) / float(max(1, warmup_iteration))
+
+         no_progress = float(current_iteration - warmup_iteration) / float(max(1, max_iteration - warmup_iteration))
+         return max(0., math.cos(math.pi * cycles * no_progress))
+
      return LambdaLR(optimizer, _lr_lambda, -1)
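
The only functional change in this commit is in load_model: the non-parallel branch now passes map_location=torch.device('cpu') to torch.load, so a checkpoint saved from a CUDA run can be restored on a machine without a GPU. A minimal usage sketch under that assumption follows; the toy model and the checkpoint filename are hypothetical and only illustrate the save_model/load_model pair from this file.

import torch.nn as nn

from tools.ai.torch_utils import save_model, load_model

# Hypothetical two-layer model and checkpoint path, for illustration only.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))

save_model(model, 'example_checkpoint.pth')   # writes model.state_dict() to disk
load_model(model, 'example_checkpoint.pth')   # after this commit, weights are mapped to CPU on load
model.eval()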