Spaces:
Build error
Build error
File size: 4,669 Bytes
c983126 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 |
import math
import torch
import scipy.signal
import deepafx_st.processors.autodiff.signal
from deepafx_st.processors.processor import Processor
@torch.jit.script
def compressor(
    x: torch.Tensor,
    sample_rate: float,
    threshold: torch.Tensor,
    ratio: torch.Tensor,
    attack_time: torch.Tensor,
    release_time: torch.Tensor,
    knee_dB: torch.Tensor,
    makeup_gain_dB: torch.Tensor,
    eps: float = 1e-8,
):
    """Differentiable feed-forward dynamic range compressor.

    Computes a dB-domain gain curve with a soft knee, smooths it with an
    FIR approximation of a 1-pole lowpass (attack ballistics only), and
    applies the resulting linear gain plus makeup gain to ``x``.

    Note the `release_time` parameter is accepted but never used — only the
    attack time shapes the smoothing filter. The `eps` parameter is likewise
    unused (the literal ``1e-8`` below is applied instead).

    Args:
        x: input audio, treated as a 1-D tensor of samples.
        sample_rate: sampling frequency in Hz.
        threshold: compression threshold in dB.
        ratio: compression ratio (e.g. 4 for 4:1).
        attack_time: attack time constant in seconds.
        release_time: unused (kept for interface compatibility).
        knee_dB: knee width in dB.
        makeup_gain_dB: output makeup gain in dB.
        eps: unused (kept for interface compatibility).

    Returns:
        Tensor of the same shape as ``x`` with compression applied.
    """
    # print(f"autodiff comp fs = {sample_rate}")
    s = x.size()  # should be one 1d
    threshold = threshold.squeeze()
    ratio = ratio.squeeze()
    attack_time = attack_time.squeeze()
    makeup_gain_dB = makeup_gain_dB.squeeze()
    # uni-polar dB signal
    # Turn the input signal into a uni-polar signal on the dB scale
    x_G = 20 * torch.log10(torch.abs(x) + 1e-8)  # x_uni casts type
    # Ensure there are no values of negative infinity
    x_G = torch.clamp(x_G, min=-96)
    # Static characteristics with knee
    y_G = torch.zeros(s).type_as(x)
    # Flatten all parameters to 1-D so the masked index assignments below
    # broadcast cleanly under TorchScript.
    ratio = ratio.view(-1)
    threshold = threshold.view(-1)
    attack_time = attack_time.view(-1)
    release_time = release_time.view(-1)
    knee_dB = knee_dB.view(-1)
    makeup_gain_dB = makeup_gain_dB.view(-1)
    # Below knee: unity gain (output level equals input level)
    idx = torch.where((2 * (x_G - threshold)) < -knee_dB)[0]
    y_G[idx] = x_G[idx]
    # At knee: quadratic interpolation between the two linear segments.
    # NOTE(review): this uses (1/ratio) * ((x - T + W)/2)^2 / (2W), which
    # differs from the common (1/ratio - 1) * (x - T + W/2)^2 / (2W) form —
    # presumably intentional to match the trained model; confirm before changing.
    idx = torch.where((2 * torch.abs(x_G - threshold)) <= knee_dB)[0]
    y_G[idx] = x_G[idx] + (
        (1 / ratio) * (((x_G[idx] - threshold + knee_dB) / 2) ** 2)
    ) / (2 * knee_dB)
    # Above knee threshold: compress by `ratio` beyond the threshold
    idx = torch.where((2 * (x_G - threshold)) > knee_dB)[0]
    y_G[idx] = threshold + ((x_G[idx] - threshold) / ratio)
    # Gain reduction in dB (always >= 0 where compression occurs)
    x_L = x_G - y_G
    # design 1-pole butterworth lowpass (cutoff derived from attack time)
    fc = 1.0 / (attack_time * sample_rate)
    b, a = deepafx_st.processors.autodiff.signal.butter(fc)
    # apply FIR approx of IIR filter to smooth the gain signal
    y_L = deepafx_st.processors.autodiff.signal.approx_iir_filter(b, a, x_L)
    lin_y_L = torch.pow(10.0, -y_L / 20.0)  # convert back to linear
    y = lin_y_L * x  # apply gain
    # apply makeup gain
    y *= torch.pow(10.0, makeup_gain_dB / 20.0)
    return y
class Compressor(Processor):
    """Differentiable dynamic range compressor processor.

    Exposes six normalized control parameters (ports) and applies the
    TorchScript ``compressor`` function per batch item in ``forward``.
    The "Release (dummy)" port exists only for interface compatibility;
    its value is never used by the underlying compressor.
    """

    def __init__(
        self,
        sample_rate,
        max_threshold=0.0,
        min_threshold=-80,
        max_ratio=20.0,
        min_ratio=1.0,
        max_attack=0.1,
        min_attack=0.0001,
        max_release=1.0,
        min_release=0.005,
        max_knee=12.0,
        min_knee=0.0,
        max_mkgain=48.0,
        min_mkgain=-48.0,
        eps=1e-8,
    ):
        """Store configuration and declare the control-parameter ports."""
        super().__init__()
        self.sample_rate = sample_rate
        self.eps = eps
        # (name, min, max, default, units) for each control port, in the
        # order the `compressor` function expects its arguments.
        specs = [
            ("Threshold", min_threshold, max_threshold, -12.0, "dB"),
            ("Ratio", min_ratio, max_ratio, 2.0, ""),
            ("Attack", min_attack, max_attack, 0.001, "s"),
            # this is a dummy parameter
            ("Release (dummy)", min_release, max_release, 0.045, "s"),
            ("Knee", min_knee, max_knee, 6.0, "dB"),
            ("Makeup Gain", min_mkgain, max_mkgain, 0.0, "dB"),
        ]
        self.ports = [
            {"name": name, "min": lo, "max": hi, "default": default, "units": units}
            for name, lo, hi, default, units in specs
        ]
        self.num_control_params = len(self.ports)

    def forward(self, x, p, sample_rate=24000, **kwargs):
        """Apply compression item-by-item over the batch.

        Assume that parameters in p are normalized between 0 and 1.
        x (tensor): Shape batch x 1 x samples
        p (tensor): shape batch x params
        """
        batch_size, _, _ = x.size()
        outputs = []
        # Process each batch element independently with its own parameters.
        for signal, theta in zip(torch.split(x, 1, 0), torch.split(p, 1, 0)):
            denorm = self.denormalize_params(theta.view(-1))
            outputs.append(compressor(signal.view(-1), sample_rate, *denorm))
        return torch.stack(outputs, dim=0).view(batch_size, 1, -1)
|