Hannes Kuchelmeister committed
Commit a58327a · 1 Parent(s): f938da8

add missing changes from original repository
Browse files

- configs/callbacks/default.yaml +4 -4
- configs/datamodule/focus150.yaml +8 -0
- configs/experiment/focus_example.yaml +31 -0
- configs/hparams_search/focus150_optuna.yaml +64 -0
- configs/hparams_search/focus_optuna.yaml +62 -0
- configs/model/focus150.yaml +9 -0
- configs/train.yaml +3 -3
- configs/trainer/default.yaml +1 -1
- configs/trainer/long.yaml +1 -1
- src/datamodules/focus_datamodule.py +6 -2
- src/models/focus_module.py +1 -0
configs/callbacks/default.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 model_checkpoint:
   _target_: pytorch_lightning.callbacks.ModelCheckpoint
-  monitor: "val/
-  mode: "
+  monitor: "val/mae" # name of the logged metric which determines when model is improving
+  mode: "min" # "max" means higher metric value is better, can be also "min"
   save_top_k: 1 # save k best models (determined by above metric)
   save_last: True # additionaly always save model from last epoch
   verbose: False
@@ -11,8 +11,8 @@ model_checkpoint:
 
 early_stopping:
   _target_: pytorch_lightning.callbacks.EarlyStopping
-  monitor: "val/
-  mode: "
+  monitor: "val/mae" # name of the logged metric which determines when model is improving
+  mode: "min" # "max" means higher metric value is better, can be also "min"
   patience: 100 # how many validation epochs of not improving until training stops
   min_delta: 0 # minimum change in the monitored metric needed to qualify as an improvement
 
```
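Both callbacks now monitor `val/mae` with `mode: "min"`, which fits a regression metric where lower is better. For reference, a minimal sketch of how Hydra-style callback configs like these are typically turned into objects — the dict below just mirrors the YAML above and is illustrative, not this repo's actual wiring:

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Illustrative fragment mirroring configs/callbacks/default.yaml
callbacks_cfg = OmegaConf.create(
    {
        "model_checkpoint": {
            "_target_": "pytorch_lightning.callbacks.ModelCheckpoint",
            "monitor": "val/mae",
            "mode": "min",
            "save_top_k": 1,
            "save_last": True,
        },
    }
)

# instantiate() reads _target_ and calls that class with the remaining keys
callbacks = [instantiate(cb_conf) for cb_conf in callbacks_cfg.values()]
```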
configs/datamodule/focus150.yaml
ADDED
```diff
@@ -0,0 +1,8 @@
+_target_: src.datamodules.focus_datamodule.FocusDataModule
+
+data_dir: ${data_dir}/focus150 # data_dir is specified in config.yaml
+csv_file: ${data_dir}/focus150/metadata.csv
+batch_size: 64
+train_val_test_split_percentage: [0.7, 0.15, 0.15]
+num_workers: 0
+pin_memory: False
```
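These keys map one-to-one onto constructor arguments. A hypothetical signature implied by the config (the real `FocusDataModule` in `src/datamodules/focus_datamodule.py` may differ in detail):

```python
from typing import Tuple

from pytorch_lightning import LightningDataModule


class FocusDataModule(LightningDataModule):
    """Hypothetical signature implied by configs/datamodule/focus150.yaml."""

    def __init__(
        self,
        data_dir: str,
        csv_file: str,
        batch_size: int = 64,
        train_val_test_split_percentage: Tuple[float, float, float] = (0.7, 0.15, 0.15),
        num_workers: int = 0,
        pin_memory: bool = False,
    ):
        super().__init__()
        self.save_hyperparameters()  # exposes the arguments as self.hparams
```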
configs/experiment/focus_example.yaml
ADDED
```diff
@@ -0,0 +1,31 @@
+# @package _global_
+
+# to execute this experiment run:
+# python train.py experiment=example
+
+defaults:
+  - override /datamodule: focus.yaml
+  - override /model: focus.yaml
+  - override /callbacks: default.yaml
+  - override /logger: tensorboard
+  - override /trainer: default.yaml
+
+# all parameters below will be merged with parameters from default configurations set above
+# this allows you to overwrite only specified parameters
+
+# name of the run determines folder name in logs
+name: "focus_example"
+seed: 12345
+
+trainer:
+  min_epochs: 1
+  max_epochs: 100
+
+model:
+  lin1_size: 128
+  lin2_size: 256
+  lin3_size: 64
+
+datamodule:
+  batch_size: 64
+
```
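Since the file is saved as `configs/experiment/focus_example.yaml`, the run command is presumably `python train.py experiment=focus_example`; the `experiment=example` comment looks carried over from the template.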
configs/hparams_search/focus150_optuna.yaml
ADDED
```diff
@@ -0,0 +1,64 @@
+# @package _global_
+
+# example hyperparameter optimization of some experiment with Optuna:
+# python train.py -m hparams_search=mnist_optuna experiment=example
+
+defaults:
+  - override /datamodule: focus150.yaml
+  - override /model: focus150.yaml
+  - override /hydra/sweeper: optuna
+
+# choose metric which will be optimized by Optuna
+# make sure this is the correct name of some metric logged in lightning module!
+optimized_metric: "val/mae_best"
+
+name: "focus150_optuna"
+
+# here we define Optuna hyperparameter search
+# it optimizes for value returned from function with @hydra.main decorator
+# docs: https://hydra.cc/docs/next/plugins/optuna_sweeper
+hydra:
+  sweeper:
+    _target_: hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper
+
+    # storage URL to persist optimization results
+    # for example, you can use SQLite if you set 'sqlite:///example.db'
+    storage: null
+
+    # name of the study to persist optimization results
+    study_name: focus150_hyperparameter_search
+
+    # number of parallel workers
+    n_jobs: 1
+
+    # 'minimize' or 'maximize' the objective
+    direction: minimize
+
+    # total number of runs that will be executed
+    n_trials: 25
+
+    # choose Optuna hyperparameter sampler
+    # docs: https://optuna.readthedocs.io/en/stable/reference/samplers.html
+    sampler:
+      _target_: optuna.samplers.TPESampler
+      seed: 12345
+      n_startup_trials: 10 # number of random sampling runs before optimization starts
+
+    # define range of hyperparameters
+    search_space:
+      datamodule.batch_size:
+        type: categorical
+        choices: [32, 64, 128]
+      model.lr:
+        type: float
+        low: 0.0001
+        high: 0.2
+      model.lin1_size:
+        type: categorical
+        choices: [32, 64, 128, 256, 512, 1024, 2048]
+      model.lin2_size:
+        type: categorical
+        choices: [32, 64, 128, 256, 512, 1024, 2048]
+      model.lin3_size:
+        type: categorical
+        choices: [32, 64, 128, 256, 512, 1024, 2048]
```
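For intuition, the `search_space` block corresponds roughly to the following plain-Optuna objective. This is a sketch only: `train_and_eval` is a hypothetical stand-in for a full Hydra run that returns the logged `val/mae_best`, and it is not how the sweeper plugin is actually implemented.

```python
import optuna

SIZES = [32, 64, 128, 256, 512, 1024, 2048]


def objective(trial: optuna.Trial) -> float:
    # Mirrors the YAML search_space above
    batch_size = trial.suggest_categorical("datamodule.batch_size", [32, 64, 128])
    lr = trial.suggest_float("model.lr", 0.0001, 0.2)
    lin1_size = trial.suggest_categorical("model.lin1_size", SIZES)
    lin2_size = trial.suggest_categorical("model.lin2_size", SIZES)
    lin3_size = trial.suggest_categorical("model.lin3_size", SIZES)
    # Hypothetical helper: train with these values, return val/mae_best
    return train_and_eval(batch_size, lr, lin1_size, lin2_size, lin3_size)


study = optuna.create_study(
    direction="minimize",
    sampler=optuna.samplers.TPESampler(seed=12345, n_startup_trials=10),
)
# study.optimize(objective, n_trials=25)
```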
configs/hparams_search/focus_optuna.yaml
ADDED
```diff
@@ -0,0 +1,62 @@
+# @package _global_
+
+# example hyperparameter optimization of some experiment with Optuna:
+# python train.py -m hparams_search=mnist_optuna experiment=example
+
+defaults:
+  - override /hydra/sweeper: optuna
+
+# choose metric which will be optimized by Optuna
+# make sure this is the correct name of some metric logged in lightning module!
+optimized_metric: "val/mae_best"
+
+name: "focus_optuna"
+
+# here we define Optuna hyperparameter search
+# it optimizes for value returned from function with @hydra.main decorator
+# docs: https://hydra.cc/docs/next/plugins/optuna_sweeper
+hydra:
+  sweeper:
+    _target_: hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper
+
+    # storage URL to persist optimization results
+    # for example, you can use SQLite if you set 'sqlite:///example.db'
+    storage: null
+
+    # name of the study to persist optimization results
+    study_name: focus_hyperparameter_search
+
+    # number of parallel workers
+    n_jobs: 1
+
+    # 'minimize' or 'maximize' the objective
+    direction: minimize
+
+    # total number of runs that will be executed
+    n_trials: 25
+
+    # choose Optuna hyperparameter sampler
+    # docs: https://optuna.readthedocs.io/en/stable/reference/samplers.html
+    sampler:
+      _target_: optuna.samplers.TPESampler
+      seed: 12345
+      n_startup_trials: 10 # number of random sampling runs before optimization starts
+
+    # define range of hyperparameters
+    search_space:
+      datamodule.batch_size:
+        type: categorical
+        choices: [32, 64, 128]
+      model.lr:
+        type: float
+        low: 0.0001
+        high: 0.2
+      model.lin1_size:
+        type: categorical
+        choices: [32, 64, 128, 256, 512, 1024, 2048]
+      model.lin2_size:
+        type: categorical
+        choices: [32, 64, 128, 256, 512, 1024, 2048]
+      model.lin3_size:
+        type: categorical
+        choices: [32, 64, 128, 256, 512, 1024, 2048]
```
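This variant differs from `focus150_optuna.yaml` only in keeping the default datamodule and model rather than overriding them. If `storage` were pointed at SQLite as the comment suggests, a finished sweep could be inspected afterwards; a small sketch:

```python
import optuna

# Assumes the sweep ran with storage: 'sqlite:///example.db'
study = optuna.load_study(
    study_name="focus_hyperparameter_search", storage="sqlite:///example.db"
)
print(study.best_value)         # best val/mae_best seen across trials
print(study.best_trial.params)  # the hyperparameters that produced it
```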
configs/model/focus150.yaml
ADDED
```diff
@@ -0,0 +1,9 @@
+_target_: src.models.focus_module.FocusLitModule
+
+input_size: 67500
+lin1_size: 256
+lin2_size: 256
+lin3_size: 256
+output_size: 1
+lr: 0.001
+weight_decay: 0.0005
```
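The `input_size` of 67500 is 150 × 150 × 3, i.e. presumably a flattened 150×150 RGB image, matching the `focus150` naming. The config resolves to a constructor call roughly like this (a sketch; the exact signature lives in `src/models/focus_module.py`):

```python
from src.models.focus_module import FocusLitModule

model = FocusLitModule(
    input_size=67500,  # 150 * 150 * 3: a flattened 150x150 RGB image
    lin1_size=256,
    lin2_size=256,
    lin3_size=256,
    output_size=1,     # a single regressed focus value
    lr=0.001,
    weight_decay=0.0005,
)
```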
configs/train.yaml
CHANGED
```diff
@@ -3,10 +3,10 @@
 # specify here default training configuration
 defaults:
   - _self_
-  - datamodule:
-  - model:
+  - datamodule: focus150.yaml
+  - model: focus150.yaml
   - callbacks: default.yaml
-  - logger:
+  - logger: tensorboard # set logger here or use command line (e.g. `python train.py logger=tensorboard`)
   - trainer: long.yaml
   - log_dir: default.yaml
 
```
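With these defaults, a bare `python train.py` now trains the focus150 model on the focus150 datamodule and logs to TensorBoard; any entry can still be swapped per run, e.g. `python train.py trainer=default`.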
configs/trainer/default.yaml
CHANGED
```diff
@@ -1,6 +1,6 @@
 _target_: pytorch_lightning.Trainer
 
-gpus:
+gpus: 1
 
 min_epochs: 1
 max_epochs: 10
```
configs/trainer/long.yaml
CHANGED
```diff
@@ -1,6 +1,6 @@
 _target_: pytorch_lightning.Trainer
 
-gpus:
+gpus: 1
 
 min_epochs: 1
 max_epochs: 100
```
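Both trainer configs now default to a single GPU. On a CPU-only machine this can presumably be overridden per run with a standard Hydra override such as `python train.py trainer.gpus=0`.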
src/datamodules/focus_datamodule.py
CHANGED
```diff
@@ -22,6 +22,8 @@ class FocusDataSet(Dataset):
         on a sample.
         """
         self.metadata = pd.read_csv(csv_file)
+        self.col_index_path = self.metadata.columns.get_loc("image_path")
+        self.col_index_focus = self.metadata.columns.get_loc("focus_value")
         self.root_dir = root_dir
         self.transform = transform
 
@@ -45,9 +47,11 @@ class FocusDataSet(Dataset):
         if torch.is_tensor(idx):
             idx = idx.tolist()
 
-        img_name = os.path.join(
+        img_name = os.path.join(
+            self.root_dir, self.metadata.iloc[idx, self.col_index_path]
+        )
         image = io.imread(img_name)
-        focus_value = self.metadata.iloc[idx,
+        focus_value = self.metadata.iloc[idx, self.col_index_focus]
         sample = {"image": image, "focus_value": focus_value}
 
         if self.transform:
```
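The new `col_index_path` / `col_index_focus` attributes cache the positional indices that `.iloc` needs, so `__getitem__` no longer resolves column names on every sample. Standalone, the lookup amounts to the following (assuming a `metadata.csv` with `image_path` and `focus_value` columns, as the code expects):

```python
import pandas as pd

metadata = pd.read_csv("metadata.csv")
# get_loc() maps a column name to its positional index, once
col_path = metadata.columns.get_loc("image_path")
col_focus = metadata.columns.get_loc("focus_value")
# .iloc then does cheap positional access per sample
print(metadata.iloc[0, col_path], metadata.iloc[0, col_focus])
```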
src/models/focus_module.py
CHANGED
```diff
@@ -134,6 +134,7 @@ class FocusLitModule(LightningModule):
         return {"loss": loss, "preds": preds, "targets": targets}
 
     def test_epoch_end(self, outputs: List[Any]):
+        print(outputs)
         pass
 
     def on_epoch_end(self):
```
|