Update app.py
--- a/app.py
+++ b/app.py
@@ -41,16 +41,14 @@ def load_model(model):
     return session, onnx_model, input_names, output_names
 
 
 def inference(re_im, session, onnx_model, input_names, output_names):
-    inputs = {input_names[i]: np.zeros([d.dim_value for d in _input.type.tensor_type.shape.dim],
-                                       dtype=np.float32)
+    inputs = {input_names[i]: torch.from_numpy(np.zeros([d.dim_value for d in _input.type.tensor_type.shape.dim],
+                                               dtype=np.float32)).type(torch.float16)
               for i, _input in enumerate(onnx_model.graph.input)
               }
 
     output_audio = []
     for t in range(re_im.shape[0]):
         inputs[input_names[0]] = re_im[t]
-        inputs = torch.from_numpy(inputs)
-        inputs = inputs.type(torch.float16)
         out, prev_mag, predictor_state, mlp_state = session.run(output_names, inputs)
         inputs[input_names[1]] = prev_mag
         inputs[input_names[2]] = predictor_state
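For context, a hedged reading of this change: the removed lines rebound the inputs dict to the result of torch.from_numpy(inputs), which expects a NumPy array rather than a dict; the new version instead converts each zero-initialized array to a float16 tensor once, inside the dict comprehension, so inputs stays a name-to-tensor dict throughout the frame loop. Below is a minimal standalone sketch of that construction pattern; the model.onnx path is illustrative and not taken from this repository.

import numpy as np
import onnx
import torch

# Illustrative only: zero-fill every declared graph input of an ONNX model,
# converting each array to a float16 torch tensor up front, mirroring the
# dict comprehension introduced in this commit.
onnx_model = onnx.load("model.onnx")  # hypothetical model path
input_names = [_input.name for _input in onnx_model.graph.input]
inputs = {
    input_names[i]: torch.from_numpy(
        np.zeros([d.dim_value for d in _input.type.tensor_type.shape.dim], dtype=np.float32)
    ).type(torch.float16)
    for i, _input in enumerate(onnx_model.graph.input)
}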