# Apollo training configuration (look2hear) — Hydra-style YAML.
# NOTE(review): removed web-viewer scraping residue (Spaces banner, file size,
# commit hash, line-number gutter) that was not part of the original file.
---
# Experiment bookkeeping: output root and run name, used below via
# ${exp.dir}/${exp.name} interpolations.
exp:
  dir: ./Exps
  name: Apollo
  # seed: 614020

# Data module: MUSDB/MoisesDB mixtures degraded with lossy-codec simulation.
datas:
  _target_: look2hear.datas.MusdbMoisesdbDataModule
  train_dir: ./hdf5_datas
  eval_dir: ./eval
  codec_type: mp3
  # Each codec parameter is drawn at random per sample for augmentation.
  codec_options:
    bitrate: random
    compression: random
    complexity: random
    vbr: random
  sr: 44100        # sample rate (Hz)
  segments: 3      # segment length used for training crops
  num_stems: 8
  snr_range: [-10, 10]
  num_samples: 40000
  batch_size: 1
  num_workers: 8

# Generator/model hyperparameters.
# NOTE(review): no _target_ here — presumably the model class is supplied by
# the training script or a fuller config; confirm against the caller.
model:
  sr: 44100
  win: 20  # ms
  feature_dim: 256
  layer: 6

# Multi-resolution frequency-domain discriminator for adversarial training.
discriminator:
  _target_: look2hear.discriminators.frequencydis.MultiFrequencyDiscriminator
  nch: 2
  window: [32, 64, 128, 256, 512, 1024, 2048]

# Separate optimizers for generator (g) and discriminator (d).
optimizer_g:
  _target_: torch.optim.AdamW
  lr: 0.001
  weight_decay: 0.01

optimizer_d:
  _target_: torch.optim.AdamW
  lr: 0.0001
  weight_decay: 0.01
  betas: [0.5, 0.99]

# Per-optimizer LR decay: multiply by gamma every step_size epochs.
scheduler_g:
  _target_: torch.optim.lr_scheduler.StepLR
  step_size: 2
  gamma: 0.98

scheduler_d:
  _target_: torch.optim.lr_scheduler.StepLR
  step_size: 2
  gamma: 0.98

# GAN losses. NOTE(review): `1e-8` without a decimal point is a *string* under
# YAML 1.1 parsers (PyYAML); OmegaConf/Hydra normally coerces it — confirm, or
# write 1.0e-8 if a plain float is required.
loss_g:
  _target_: look2hear.losses.gan_losses.MultiFrequencyGenLoss
  eps: 1e-8

loss_d:
  _target_: look2hear.losses.gan_losses.MultiFrequencyDisLoss
  eps: 1e-8

# Validation metric: scale-invariant SDR (negated, as a loss).
metrics:
  _target_: look2hear.losses.MultiSrcNegSDR
  sdr_type: sisdr

# LightningModule wrapper tying model/losses/optimizers together.
system:
  _target_: look2hear.system.audio_litmodule.AudioLightningModule

# Callbacks: stop after 20 epochs without val_loss improvement; keep the best
# 5 checkpoints plus the last.
early_stopping:
  _target_: pytorch_lightning.callbacks.EarlyStopping
  monitor: val_loss
  patience: 20
  mode: min
  verbose: true

checkpoint:
  _target_: pytorch_lightning.callbacks.ModelCheckpoint
  dirpath: ${exp.dir}/${exp.name}/checkpoints
  monitor: val_loss
  mode: min
  verbose: true
  save_top_k: 5
  save_last: true
  filename: '{epoch}-{val_loss:.4f}'

# Weights & Biases experiment logging.
logger:
  _target_: pytorch_lightning.loggers.WandbLogger
  name: ${exp.name}
  save_dir: ${exp.dir}/${exp.name}/logs
  offline: false
  project: Audio-Restoration

# Trainer: 8-GPU DDP-style run, 500 epochs max.
trainer:
  _target_: pytorch_lightning.Trainer
  devices: [0, 1, 2, 3, 4, 5, 6, 7]
  max_epochs: 500
  sync_batchnorm: true
  default_root_dir: ${exp.dir}/${exp.name}/
  accelerator: cuda
  limit_train_batches: 1.0
  fast_dev_run: false