Politrees committed
Commit 385df8a · verified · Parent(s): 6ca65a5

Update app.py

Files changed (1)
  1. app.py +9 -6
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import logging
 import gradio as gr
 
 from audio_separator.separator import Separator
@@ -103,8 +104,7 @@ def rename_stems(input_file, output_dir, stems, output_format):
     for i, stem in enumerate(stems):
         new_name = f"{base_name}_(Stem{i+1}).{output_format}"
         new_path = os.path.join(output_dir, new_name)
-        print(f"Renaming {stem} to {new_path}")
-        os.rename(stem, new_path)
+        os.rename(os.path.join(output_dir, stem), new_path)
         renamed_stems.append(new_path)
     return renamed_stems
 
@@ -112,6 +112,7 @@ def roformer_separator(audio, model_key, seg_size, overlap, model_dir, out_dir,
     """Separate audio using Roformer model."""
     model = ROFORMER_MODELS[model_key]
     separator = Separator(
+        log_level=logging.WARNING,
         model_file_dir=model_dir,
         output_dir=out_dir,
         output_format=out_format,
@@ -130,8 +131,6 @@ def roformer_separator(audio, model_key, seg_size, overlap, model_dir, out_dir,
     progress(0.7, desc="Audio separated")
     separation = separator.separate(audio)
 
-    print(f"Separation result: {separation}")
-
     progress(0.9, desc="Stems renamed")
     stems = rename_stems(audio, out_dir, separation, out_format)
 
@@ -140,6 +139,7 @@ def roformer_separator(audio, model_key, seg_size, overlap, model_dir, out_dir,
 def mdx23c_separator(audio, model, seg_size, overlap, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
     """Separate audio using MDX23C model."""
     separator = Separator(
+        log_level=logging.WARNING,
         model_file_dir=model_dir,
         output_dir=out_dir,
         output_format=out_format,
@@ -166,6 +166,7 @@ def mdx23c_separator(audio, model, seg_size, overlap, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
 def mdx_separator(audio, model, hop_length, seg_size, overlap, denoise, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
     """Separate audio using MDX-NET model."""
     separator = Separator(
+        log_level=logging.WARNING,
         model_file_dir=model_dir,
         output_dir=out_dir,
         output_format=out_format,
@@ -194,6 +195,7 @@ def mdx_separator(audio, model, hop_length, seg_size, overlap, denoise, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
 def vr_separator(audio, model, window_size, aggression, tta, post_process, post_process_threshold, high_end_process, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
     """Separate audio using VR ARCH model."""
     separator = Separator(
+        log_level=logging.WARNING,
         model_file_dir=model_dir,
         output_dir=out_dir,
         output_format=out_format,
@@ -224,6 +226,7 @@ def vr_separator(audio, model, window_size, aggression, tta, post_process, post_process_threshold, high_end_process, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
 def demucs_separator(audio, model, seg_size, shifts, overlap, segments_enabled, model_dir, out_dir, out_format, norm_thresh, amp_thresh, progress=gr.Progress()):
     """Separate audio using Demucs model."""
     separator = Separator(
+        log_level=logging.WARNING,
         model_file_dir=model_dir,
         output_dir=out_dir,
         output_format=out_format,
@@ -276,7 +279,7 @@ with gr.Blocks(
             roformer_model = gr.Dropdown(label="Select the Model", choices=list(ROFORMER_MODELS.keys()))
             with gr.Row():
                 roformer_seg_size = gr.Slider(minimum=32, maximum=4000, step=32, value=256, label="Segment Size", info="Larger consumes more resources, but may give better results.")
-                roformer_overlap = gr.Slider(minimum=2, maximum=4, step=1, value=4, label="Overlap", info="Amount of overlap between prediction windows.")
+                roformer_overlap = gr.Slider(minimum=2, maximum=10, step=1, value=8, label="Overlap", info="Amount of overlap between prediction windows.")
             with gr.Row():
                 roformer_audio = gr.Audio(label="Input Audio", type="filepath")
             with gr.Row():
@@ -440,4 +443,4 @@ with gr.Blocks(
         outputs=[demucs_stem1, demucs_stem2, demucs_stem3, demucs_stem4],
     )
 
-app.launch()
+app.launch(share=True)
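The renaming step in rename_stems assumes that Separator.separate() returns stem filenames relative to the separator's output_dir rather than absolute paths, which is why each name is joined onto output_dir before os.rename. A minimal standalone sketch under that assumption; the helper name and default arguments below are illustrative and not part of app.py:

    import os
    import logging

    from audio_separator.separator import Separator

    def separate_and_rename(audio_path, out_dir="output", out_format="wav"):
        """Separate one file and rename its stems to "<base>_(StemN).<format>"."""
        separator = Separator(
            log_level=logging.WARNING,  # keep library logging quiet, as in app.py
            output_dir=out_dir,
            output_format=out_format,
        )
        separator.load_model()  # default model; app.py loads an explicit model file
        stems = separator.separate(audio_path)

        base_name = os.path.splitext(os.path.basename(audio_path))[0]
        renamed = []
        for i, stem in enumerate(stems):
            new_path = os.path.join(out_dir, f"{base_name}_(Stem{i+1}).{out_format}")
            # stem is assumed to be relative to out_dir, so join before renaming
            os.rename(os.path.join(out_dir, stem), new_path)
            renamed.append(new_path)
        return renamed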