audio:
  chunk_size: 485100 # samplerate * segment (44100 * 11)
  min_mean_abs: 0.001
  hop_length: 1024

training:
  batch_size: 8
  gradient_accumulation_steps: 1
  grad_clip: 0
  segment: 11
  shift: 1
  samplerate: 44100
  channels: 2
  normalize: true
  instruments: ['drums', 'bass', 'other', 'vocals']
  target_instrument: null
  num_epochs: 1000
  num_steps: 1000
  optimizer: adam
  lr: 9.0e-05
  patience: 2
  reduce_factor: 0.95
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  other_fix: false # needed when checking on a multisong dataset whether 'other' is actually instrumental
  use_amp: true # enable or disable mixed precision (float16); usually this should be true

augmentations:
  enable: true # enable or disable all augmentations (quick switch to turn them all off if needed)
  loudness: true # randomly change the loudness of each stem within the range [loudness_min, loudness_max]
  loudness_min: 0.5
  loudness_max: 1.5

inference:
  num_overlap: 4
  batch_size: 8

model: htdemucs

htdemucs: # see demucs/htdemucs.py for a detailed description
  # Channels
  channels: 48
  channels_time:
  growth: 2
  # STFT
  num_subbands: 1
  nfft: 4096
  wiener_iters: 0
  end_iters: 0
  wiener_residual: false
  cac: true
  # Main structure
  depth: 4
  rewrite: true
  # Frequency branch
  multi_freqs: []
  multi_freqs_depth: 3
  freq_emb: 0.2
  emb_scale: 10
  emb_smooth: true
  # Convolutions
  kernel_size: 8
  stride: 4
  time_stride: 2
  context: 1
  context_enc: 0
  # Normalization
  norm_starts: 4
  norm_groups: 4
  # DConv residual branch
  dconv_mode: 3
  dconv_depth: 2
  dconv_comp: 8
  dconv_init: 1e-3
  # Before the Transformer
  bottom_channels: 512
  # CrossTransformer
  # ------ Common to all
  # Regular parameters
  t_layers: 5
  t_hidden_scale: 4.0
  t_heads: 8
  t_dropout: 0.0
  t_layer_scale: True
  t_gelu: True
  # ------------- Positional embedding
  t_emb: sin
  t_max_positions: 10000 # for the scaled embedding
  t_max_period: 10000.0
  t_weight_pos_embed: 1.0
  t_cape_mean_normalize: True
  t_cape_augment: True
  t_cape_glob_loc_scale: [5000.0, 1.0, 1.4]
  t_sin_random_shift: 0
  # ------------- Norm before the transformer encoder
  t_norm_in: True
  t_norm_in_group: False
  # ------------- Norm inside the encoder
  t_group_norm: False
  t_norm_first: True
  t_norm_out: True
  # ------------- Optim
  t_weight_decay: 0.0
  t_lr:
  # ------------- Sparsity
  t_sparse_self_attn: False
  t_sparse_cross_attn: False
  t_mask_type: diag
  t_mask_random_seed: 42
  t_sparse_attn_window: 400
  t_global_window: 100
  t_sparsity: 0.95
  t_auto_sparsity: False
  # Cross encoder first (False)
  t_cross_first: False
  # Weight init
  rescale: 0.1
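
For context, a minimal sketch of how a config in this shape can be loaded and sanity-checked. This is an illustration only, assuming PyYAML and the Python standard library; the file name config.yaml and the variable names are hypothetical and not part of the repository's API.

import random
import yaml  # PyYAML; assumed available

# Load the config shown above (the path is hypothetical).
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

audio, train, aug = cfg["audio"], cfg["training"], cfg["augmentations"]

# The comment on chunk_size says it equals samplerate * segment:
# 44100 * 11 = 485100 samples per chunk.
assert audio["chunk_size"] == train["samplerate"] * train["segment"]

# Loudness augmentation as described in the augmentations section:
# one random gain per stem, drawn uniformly from [loudness_min, loudness_max].
if aug["enable"] and aug["loudness"]:
    gains = {
        stem: random.uniform(aug["loudness_min"], aug["loudness_max"])
        for stem in train["instruments"]
    }
    print(gains)  # e.g. {'drums': 1.23, 'bass': 0.71, ...}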