Hannes Kuchelmeister committed
Commit
596687f
1 parent: b04e419

run experiments comparing MSE and MAE

Files changed (16)
  1. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/.hydra/config.yaml +65 -0
  2. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/.hydra/hydra.yaml +171 -0
  3. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/.hydra/overrides.yaml +2 -0
  4. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/checkpoints/epoch_011.ckpt +3 -0
  5. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/checkpoints/last.ckpt +3 -0
  6. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/tensorboard/focusMAE/events.out.tfevents.1646833647.8d63a10b1070.1.0 +3 -0
  7. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/tensorboard/focusMAE/events.out.tfevents.1646834776.8d63a10b1070.1.1 +3 -0
  8. logs/experiments/runs/focusMAE/2022-03-09_13-47-25/tensorboard/focusMAE/hparams.yaml +52 -0
  9. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/.hydra/config.yaml +65 -0
  10. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/.hydra/hydra.yaml +171 -0
  11. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/.hydra/overrides.yaml +2 -0
  12. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/checkpoints/epoch_030.ckpt +3 -0
  13. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/checkpoints/last.ckpt +3 -0
  14. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/tensorboard/focusMSE_150/events.out.tfevents.1646834851.c73613cd5265.1.0 +3 -0
  15. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/tensorboard/focusMSE_150/events.out.tfevents.1646835673.c73613cd5265.1.1 +3 -0
  16. logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/tensorboard/focusMSE_150/hparams.yaml +52 -0
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/.hydra/config.yaml ADDED
@@ -0,0 +1,65 @@
+ original_work_dir: ${hydra:runtime.cwd}
+ data_dir: ${original_work_dir}/data
+ print_config: true
+ ignore_warnings: true
+ train: true
+ test: true
+ seed: 12345
+ name: focusMAE
+ datamodule:
+ _target_: src.datamodules.focus_datamodule.FocusDataModule
+ data_dir: ${data_dir}/focus150
+ csv_file: ${data_dir}/focus150/metadata.csv
+ batch_size: 128
+ train_val_test_split_percentage:
+ - 0.7
+ - 0.15
+ - 0.15
+ num_workers: 0
+ pin_memory: false
+ model:
+ _target_: src.models.focus_module.FocusLitModule
+ input_size: 67500
+ lin1_size: 128
+ lin2_size: 256
+ lin3_size: 64
+ output_size: 1
+ lr: 0.0173
+ weight_decay: 0.0005
+ callbacks:
+ model_checkpoint:
+ _target_: pytorch_lightning.callbacks.ModelCheckpoint
+ monitor: val/mae
+ mode: min
+ save_top_k: 1
+ save_last: true
+ verbose: false
+ dirpath: checkpoints/
+ filename: epoch_{epoch:03d}
+ auto_insert_metric_name: false
+ early_stopping:
+ _target_: pytorch_lightning.callbacks.EarlyStopping
+ monitor: val/mae
+ mode: min
+ patience: 100
+ min_delta: 0
+ model_summary:
+ _target_: pytorch_lightning.callbacks.RichModelSummary
+ max_depth: -1
+ rich_progress_bar:
+ _target_: pytorch_lightning.callbacks.RichProgressBar
+ logger:
+ tensorboard:
+ _target_: pytorch_lightning.loggers.tensorboard.TensorBoardLogger
+ save_dir: tensorboard/
+ name: null
+ version: ${name}
+ log_graph: false
+ default_hp_metric: true
+ prefix: ''
+ trainer:
+ _target_: pytorch_lightning.Trainer
+ gpus: 0
+ min_epochs: 1
+ max_epochs: 100
+ resume_from_checkpoint: null
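For orientation, the model block above corresponds to a small four-layer MLP regression head trained with an MAE objective (hence the run name focusMAE). The sketch below is a hypothetical reconstruction based only on the hyperparameters in this config and the parameter count logged in hparams.yaml further down (which suggests a BatchNorm1d after each hidden layer); the actual `FocusLitModule` in `src/models/focus_module.py` may differ.

```python
# Minimal sketch, not the repo's implementation: layer sizes, lr and weight_decay come from
# config.yaml above; BatchNorm1d and the Adam optimizer are assumptions.
import torch
from torch import nn
import pytorch_lightning as pl


class FocusLitModuleSketch(pl.LightningModule):
    def __init__(self, input_size=67500, lin1_size=128, lin2_size=256,
                 lin3_size=64, output_size=1, lr=0.0173, weight_decay=0.0005):
        super().__init__()
        self.save_hyperparameters()
        self.net = nn.Sequential(
            nn.Linear(input_size, lin1_size), nn.BatchNorm1d(lin1_size), nn.ReLU(),
            nn.Linear(lin1_size, lin2_size), nn.BatchNorm1d(lin2_size), nn.ReLU(),
            nn.Linear(lin2_size, lin3_size), nn.BatchNorm1d(lin3_size), nn.ReLU(),
            nn.Linear(lin3_size, output_size),
        )
        self.criterion = nn.L1Loss()  # MAE objective

    def forward(self, x):
        return self.net(x.flatten(1))  # 150x150x3 images flatten to input_size=67500

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = self.criterion(self(x).view(-1), y.view(-1))
        self.log("train/mae", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        # val/mae is the metric monitored by ModelCheckpoint and EarlyStopping above
        self.log("val/mae", self.criterion(self(x).view(-1), y.view(-1)))

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(),
                                lr=self.hparams.lr,
                                weight_decay=self.hparams.weight_decay)
```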
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/.hydra/hydra.yaml ADDED
@@ -0,0 +1,171 @@
+ hydra:
+ run:
+ dir: logs/experiments/runs/${name}/${now:%Y-%m-%d}_${now:%H-%M-%S}
+ sweep:
+ dir: logs/experiments/multiruns/${name}/${now:%Y-%m-%d}_${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra: []
+ task:
+ - trainer.gpus=0
+ - experiment=focus
+ job:
+ name: train
+ override_dirname: experiment=focus,trainer.gpus=0
+ id: ???
+ num: ???
+ config_name: train.yaml
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.1.1
+ cwd: /usr/src/app
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /usr/src/app/configs
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: ''
+ schema: structured
+ provider: schema
+ choices:
+ local: default.yaml
+ hparams_search: null
+ debug: null
+ experiment: focus
+ log_dir: default.yaml
+ trainer: default.yaml
+ logger: tensorboard
+ callbacks: default.yaml
+ model: focus150.yaml
+ datamodule: focus150.yaml
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
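The `hydra.run.dir` pattern at the top of this file is what generated the timestamped directory names committed here; a quick check using only values visible in this log:

```python
# Sketch: resolving hydra.run.dir ("logs/experiments/runs/${name}/${now:...}") by hand.
# The name comes from config.yaml above, the timestamp from the committed directory name.
from datetime import datetime

name = "focusMAE"
start = datetime(2022, 3, 9, 13, 47, 25)
print(f"logs/experiments/runs/{name}/{start:%Y-%m-%d}_{start:%H-%M-%S}")
# -> logs/experiments/runs/focusMAE/2022-03-09_13-47-25
```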
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/.hydra/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - trainer.gpus=0
+ - experiment=focus
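These two overrides are all that was passed on the command line for this run (cf. `overrides.task` in hydra.yaml above). As a hedged sketch, the same configuration can presumably be recomposed offline with Hydra's compose API, assuming the repo's `configs/` directory and the `train.yaml` entry point recorded in hydra.yaml:

```python
# Sketch (assumptions: executed from the repo root, configs/ laid out as recorded under
# config_sources in hydra.yaml; Hydra 1.1.x compose API).
from hydra import compose, initialize

with initialize(config_path="configs"):
    cfg = compose(config_name="train.yaml",
                  overrides=["trainer.gpus=0", "experiment=focus"])
print(cfg.name, cfg.model["_target_"])
# focusMAE src.models.focus_module.FocusLitModule
```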
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/checkpoints/epoch_011.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90aa1d9e4bd68b010f536dfba1b02016341a3c580cc02ff6e8e9093d7f0ed9eb
+ size 104306758
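The checkpoint itself lives in Git LFS, so the three lines above are just the pointer (SHA-256 plus the roughly 104 MB payload size). Once the real file has been fetched, restoring the model should follow the usual Lightning pattern, assuming `FocusLitModule` calls `save_hyperparameters()` as the hparams.yaml below suggests:

```python
# Sketch, not verified against the repo: load the best-MAE checkpoint from this run.
from src.models.focus_module import FocusLitModule

ckpt = "logs/experiments/runs/focusMAE/2022-03-09_13-47-25/checkpoints/epoch_011.ckpt"
model = FocusLitModule.load_from_checkpoint(ckpt)
model.eval()
```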
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4b576539ae7626180b1f6dd6843b660934383d6968b6a1cdfd6a0927f6dc6c9
+ size 104306758
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/tensorboard/focusMAE/events.out.tfevents.1646833647.8d63a10b1070.1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b148e127ed0edf4fb7c53005712b8d2781d9e595c163aeeeeb7fd87910478cd1
+ size 36394
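The Unix timestamp embedded in the event-file name ties the TensorBoard log back to the run directory:

```python
# The first event file starts two seconds after the 13-47-25 run directory was created;
# the second one (below, 1646834776) about 19 minutes later, presumably when the
# test-stage logger was opened.
from datetime import datetime, timezone

for ts in (1646833647, 1646834776):
    print(ts, datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S"))
# 1646833647 2022-03-09 13:47:27
# 1646834776 2022-03-09 14:06:16
```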
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/tensorboard/focusMAE/events.out.tfevents.1646834776.8d63a10b1070.1.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a06d7bd107ca48fd282626806c9f28f03b5c198417ae019a05bb5750b43348f2
+ size 179
logs/experiments/runs/focusMAE/2022-03-09_13-47-25/tensorboard/focusMAE/hparams.yaml ADDED
@@ -0,0 +1,52 @@
+ trainer:
+ _target_: pytorch_lightning.Trainer
+ gpus: 0
+ min_epochs: 1
+ max_epochs: 100
+ resume_from_checkpoint: null
+ model:
+ _target_: src.models.focus_module.FocusLitModule
+ input_size: 67500
+ lin1_size: 128
+ lin2_size: 256
+ lin3_size: 64
+ output_size: 1
+ lr: 0.0173
+ weight_decay: 0.0005
+ datamodule:
+ _target_: src.datamodules.focus_datamodule.FocusDataModule
+ data_dir: /usr/src/app/data/focus150
+ csv_file: /usr/src/app/data/focus150/metadata.csv
+ batch_size: 128
+ train_val_test_split_percentage:
+ - 0.7
+ - 0.15
+ - 0.15
+ num_workers: 0
+ pin_memory: false
+ seed: 12345
+ callbacks:
+ model_checkpoint:
+ _target_: pytorch_lightning.callbacks.ModelCheckpoint
+ monitor: val/mae
+ mode: min
+ save_top_k: 1
+ save_last: true
+ verbose: false
+ dirpath: checkpoints/
+ filename: epoch_{epoch:03d}
+ auto_insert_metric_name: false
+ early_stopping:
+ _target_: pytorch_lightning.callbacks.EarlyStopping
+ monitor: val/mae
+ mode: min
+ patience: 100
+ min_delta: 0
+ model_summary:
+ _target_: pytorch_lightning.callbacks.RichModelSummary
+ max_depth: -1
+ rich_progress_bar:
+ _target_: pytorch_lightning.callbacks.RichProgressBar
+ model/params/total: 8690561
+ model/params/trainable: 8690561
+ model/params/non_trainable: 0
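The logged parameter count is a useful cross-check on the architecture: the four Linear layers alone account for 8,689,665 parameters, and the reported 8,690,561 is matched exactly if each hidden layer is followed by a BatchNorm1d. That is an inference from the numbers, not something stated in this commit:

```python
# Arithmetic check of model/params/total = 8690561 (assumes Linear + BatchNorm1d per hidden layer).
sizes = [67500, 128, 256, 64, 1]
linear = sum(i * o + o for i, o in zip(sizes, sizes[1:]))  # weights + biases: 8_689_665
batchnorm = sum(2 * h for h in sizes[1:-1])                # gamma + beta:     896
print(linear + batchnorm)                                  # 8690561
```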
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/.hydra/config.yaml ADDED
@@ -0,0 +1,65 @@
+ original_work_dir: ${hydra:runtime.cwd}
+ data_dir: ${original_work_dir}/data
+ print_config: true
+ ignore_warnings: true
+ train: true
+ test: true
+ seed: 12345
+ name: focusMSE_150
+ datamodule:
+ _target_: src.datamodules.focus_datamodule.FocusDataModule
+ data_dir: ${data_dir}/focus150
+ csv_file: ${data_dir}/focus150/metadata.csv
+ batch_size: 128
+ train_val_test_split_percentage:
+ - 0.7
+ - 0.15
+ - 0.15
+ num_workers: 0
+ pin_memory: false
+ model:
+ _target_: src.models.focus_module.FocusMSELitModule
+ input_size: 67500
+ lin1_size: 128
+ lin2_size: 256
+ lin3_size: 64
+ output_size: 1
+ lr: 0.0173
+ weight_decay: 0.0005
+ callbacks:
+ model_checkpoint:
+ _target_: pytorch_lightning.callbacks.ModelCheckpoint
+ monitor: val/mae
+ mode: min
+ save_top_k: 1
+ save_last: true
+ verbose: false
+ dirpath: checkpoints/
+ filename: epoch_{epoch:03d}
+ auto_insert_metric_name: false
+ early_stopping:
+ _target_: pytorch_lightning.callbacks.EarlyStopping
+ monitor: val/mae
+ mode: min
+ patience: 100
+ min_delta: 0
+ model_summary:
+ _target_: pytorch_lightning.callbacks.RichModelSummary
+ max_depth: -1
+ rich_progress_bar:
+ _target_: pytorch_lightning.callbacks.RichProgressBar
+ logger:
+ tensorboard:
+ _target_: pytorch_lightning.loggers.tensorboard.TensorBoardLogger
+ save_dir: tensorboard/
+ name: null
+ version: ${name}
+ log_graph: false
+ default_hp_metric: true
+ prefix: ''
+ trainer:
+ _target_: pytorch_lightning.Trainer
+ gpus: 0
+ min_epochs: 1
+ max_epochs: 100
+ resume_from_checkpoint: null
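Apart from the run name, this config differs from the focusMAE one above only in the model target: `src.models.focus_module.FocusMSELitModule` instead of `FocusLitModule`. Presumably the two modules differ only in their training objective, which is exactly what the commit sets out to compare, while both monitor `val/mae` so checkpoint selection stays comparable. A hedged sketch of that presumed difference:

```python
# Assumption based on the configs in this commit, not on the source of focus_module.py:
from torch import nn

mae_criterion = nn.L1Loss()   # FocusLitModule    -> run "focusMAE"
mse_criterion = nn.MSELoss()  # FocusMSELitModule -> run "focusMSE_150"
```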
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/.hydra/hydra.yaml ADDED
@@ -0,0 +1,171 @@
+ hydra:
+ run:
+ dir: logs/experiments/runs/${name}/${now:%Y-%m-%d}_${now:%H-%M-%S}
+ sweep:
+ dir: logs/experiments/multiruns/${name}/${now:%Y-%m-%d}_${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra: []
+ task:
+ - trainer.gpus=0
+ - experiment=focusMSE_150
+ job:
+ name: train
+ override_dirname: experiment=focusMSE_150,trainer.gpus=0
+ id: ???
+ num: ???
+ config_name: train.yaml
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.1.1
+ cwd: /usr/src/app
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /usr/src/app/configs
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: ''
+ schema: structured
+ provider: schema
+ choices:
+ local: default.yaml
+ hparams_search: null
+ debug: null
+ experiment: focusMSE_150
+ log_dir: default.yaml
+ trainer: default.yaml
+ logger: tensorboard
+ callbacks: default.yaml
+ model: focus150.yaml
+ datamodule: focus150.yaml
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/.hydra/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - trainer.gpus=0
+ - experiment=focusMSE_150
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/checkpoints/epoch_030.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a759f38a4f5ff96a8f18fa86711cf5bbe9513042e0669ce959ccb8ec43172af0
+ size 104306758
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ef0da547be0308b54e1b17887e6eb958150053b2aa75c70a52027a0874286ce
+ size 104306758
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/tensorboard/focusMSE_150/events.out.tfevents.1646834851.c73613cd5265.1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa9deb8a8bed01134a898e28111d0c9c84f5d6abaae10594d3f7362a9a76f65a
+ size 36397
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/tensorboard/focusMSE_150/events.out.tfevents.1646835673.c73613cd5265.1.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a5a747751c7daa7da28b0db6219edbc956ca16a6b62a39ae60ac4179f8f773c
+ size 179
logs/experiments/runs/focusMSE_150/2022-03-09_14-07-28/tensorboard/focusMSE_150/hparams.yaml ADDED
@@ -0,0 +1,52 @@
+ trainer:
+ _target_: pytorch_lightning.Trainer
+ gpus: 0
+ min_epochs: 1
+ max_epochs: 100
+ resume_from_checkpoint: null
+ model:
+ _target_: src.models.focus_module.FocusMSELitModule
+ input_size: 67500
+ lin1_size: 128
+ lin2_size: 256
+ lin3_size: 64
+ output_size: 1
+ lr: 0.0173
+ weight_decay: 0.0005
+ datamodule:
+ _target_: src.datamodules.focus_datamodule.FocusDataModule
+ data_dir: /usr/src/app/data/focus150
+ csv_file: /usr/src/app/data/focus150/metadata.csv
+ batch_size: 128
+ train_val_test_split_percentage:
+ - 0.7
+ - 0.15
+ - 0.15
+ num_workers: 0
+ pin_memory: false
+ seed: 12345
+ callbacks:
+ model_checkpoint:
+ _target_: pytorch_lightning.callbacks.ModelCheckpoint
+ monitor: val/mae
+ mode: min
+ save_top_k: 1
+ save_last: true
+ verbose: false
+ dirpath: checkpoints/
+ filename: epoch_{epoch:03d}
+ auto_insert_metric_name: false
+ early_stopping:
+ _target_: pytorch_lightning.callbacks.EarlyStopping
+ monitor: val/mae
+ mode: min
+ patience: 100
+ min_delta: 0
+ model_summary:
+ _target_: pytorch_lightning.callbacks.RichModelSummary
+ max_depth: -1
+ rich_progress_bar:
+ _target_: pytorch_lightning.callbacks.RichProgressBar
+ model/params/total: 8690561
+ model/params/trainable: 8690561
+ model/params/non_trainable: 0