TheStinger Mikus committed on
Commit 215bdba · verified · 1 Parent(s): 3e5466e

Model Quant (model_handler), working Model Viewer, Fusion, and Debug information. Model Selection and reloading. (#8)


- Model Quant (model_handler), working Model Viewer, Fusion, and Debug information. Model Selection and reloading. (495cc7f6e20f7a773f034772065e0fbc90681eec)


Co-authored-by: Mikus <[email protected]>

Files changed (3)
  1. app.py +509 -366
  2. model_handler.py +155 -0
  3. requirements.txt +3 -0
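
The through-line of this change set is a new in-memory model registry: app.py now tracks models as a list of dicts (MODELS) keyed by a display name, and the inference, fusion, and model-info tabs all resolve the selected name back to its .pth and .index paths. A minimal sketch of that lookup pattern, using the same dict shape as the diff below (resolve_model is an illustrative helper, not part of the commit):

MODELS = [
    {"model": "model.pth", "index": "model.index", "model_name": "Test Model"},
]

def resolve_model(model_name):
    # Map a dropdown display name back to its checkpoint paths.
    for entry in MODELS:
        if entry["model_name"] == model_name:
            return entry["model"], entry["index"]
    return None, None  # caller decides how to handle a miss

pth, index = resolve_model("Test Model")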
app.py CHANGED
@@ -1,366 +1,509 @@
- import gradio as gr
- import requests
- import random
- import os
- import zipfile
- import librosa
- import time
- from infer_rvc_python import BaseLoader
- from pydub import AudioSegment
- from tts_voice import tts_order_voice
- import edge_tts
- import tempfile
- import anyio
- import asyncio
- from audio_separator.separator import Separator
-
- language_dict = tts_order_voice
-
- async def text_to_speech_edge(text, language_code):
-     voice = language_dict[language_code]
-     communicate = edge_tts.Communicate(text, voice)
-     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
-         tmp_path = tmp_file.name
-
-     await communicate.save(tmp_path)
-
-     return tmp_path
-
- # fucking dogshit toggle
- try:
-     import spaces
-     spaces_status = True
- except ImportError:
-     spaces_status = False
-
- separator = Separator()
- converter = BaseLoader(only_cpu=False, hubert_path=None, rmvpe_path=None)
-
- # CONFIGS
- TEMP_DIR = "temp"
- MODEL_PREFIX = "model"
- PITCH_ALGO_OPT = [
-     "pm",
-     "harvest",
-     "crepe",
-     "rmvpe",
-     "rmvpe+",
- ]
-
- os.makedirs(TEMP_DIR, exist_ok=True)
-
- # Model data array to store pth file paths
- model_data = []
-
- # Define the model_dropdown here so it's accessible globally
- model_dropdown = None
-
- def unzip_file(file):
-     filename = os.path.basename(file).split(".")[0]
-     with zipfile.ZipFile(file, 'r') as zip_ref:
-         zip_ref.extractall(os.path.join(TEMP_DIR, filename))
-     return True
-
- def get_training_info(audio_file):
-     if audio_file is None:
-         return 'Please provide an audio file!'
-     duration = get_audio_duration(audio_file)
-     sample_rate = wave.open(audio_file, 'rb').getframerate()
-
-     training_info = {
-         (0, 2): (150, 'OV2'),
-         (2, 3): (200, 'OV2'),
-         (3, 5): (250, 'OV2'),
-         (5, 10): (300, 'Normal'),
-         (10, 25): (500, 'Normal'),
-         (25, 45): (700, 'Normal'),
-         (45, 60): (1000, 'Normal')
-     }
-
-     for (min_duration, max_duration), (epochs, pretrain) in training_info.items():
-         if min_duration <= duration < max_duration:
-             break
-     else:
-         return 'Duration is not within the specified range!'
-
-     return f'You should use the **{pretrain}** pretrain with **{epochs}** epochs at **{sample_rate/1000}khz** sample rate.'
-
- def on_button_click(audio_file_path):
-     return get_training_info(audio_file_path)
-
- def get_audio_duration(audio_file_path):
-     audio_info = sf.info(audio_file_path)
-     duration_minutes = audio_info.duration / 60
-     return duration_minutes
-
- def progress_bar(total, current):
-     return "[" + "=" * int(current / total * 20) + ">" + " " * (20 - int(current / total * 20)) + "] " + str(int(current / total * 100)) + "%"
-
- def download_from_url(url, filename=None):
-     global model_dropdown  # Access the global model_dropdown
-
-     if "/blob/" in url:
-         url = url.replace("/blob/", "/resolve/")
-     if "huggingface" not in url:
-         return ["The URL must be from huggingface", "Failed", "Failed"]
-     if filename is None:
-         filename = os.path.join(TEMP_DIR, MODEL_PREFIX + str(random.randint(1, 1000)) + ".zip")
-     response = requests.get(url, stream=True)  # Stream the download
-     total = int(response.headers.get('content-length', 0))
-     if total > 500000000:
-         return ["The file is too large. You can only download files up to 500 MB in size.", "Failed", "Failed"]
-
-     current = 0
-     with open(filename, "wb") as f:
-         for data in response.iter_content(chunk_size=4096):
-             f.write(data)
-             current += len(data)
-             print(progress_bar(total, current), end="\r")
-
-             # Update the model dropdown during download
-             status_text = f"Downloading... {progress_bar(total, current)}"
-             yield status_text, None, None  # Yield the status while downloading
-
-     try:
-         unzip_file(filename)
-     except Exception as e:
-         return ["Failed to unzip the file", "Failed", "Failed"]
-
-     unzipped_dir = os.path.join(TEMP_DIR, os.path.basename(filename).split(".")[0])
-     pth_files = []
-     index_files = []
-     for root, dirs, files in os.walk(unzipped_dir):
-         for file in files:
-             if file.endswith(".pth"):
-                 pth_files.append(os.path.join(root, file))
-             elif file.endswith(".index"):
-                 index_files.append(os.path.join(root, file))
-
-     # Update the model dropdown and return the final status
-     model_data.append(pth_files[0])
-     model_dropdown.choices = model_data
-     yield f"Downloaded as {filename}", pth_files[0], index_files[0]
-
- def inference(audio, model_name):
-     output_data = inf_handler(audio, model_name)
-     vocals = output_data[0]
-     inst = output_data[1]
-     return vocals, inst
-
- if spaces_status:
-     @spaces.GPU()
-     def convert_now(audio_files, random_tag, converter):
-         return converter(
-             audio_files,
-             random_tag,
-             overwrite=False,
-             parallel_workers=8
-         )
- else:
-     def convert_now(audio_files, random_tag, converter):
-         return converter(
-             audio_files,
-             random_tag,
-             overwrite=False,
-             parallel_workers=8
-         )
-
- def calculate_remaining_time(epochs, seconds_per_epoch):
-     total_seconds = epochs * seconds_per_epoch
-     hours = total_seconds // 3600
-     minutes = (total_seconds % 3600) // 60
-     seconds = total_seconds % 60
-
-     if hours == 0:
-         return f"{int(minutes)} minutes"
-     elif hours == 1:
-         return f"{int(hours)} hour and {int(minutes)} minutes"
-     else:
-         return f"{int(hours)} hours and {int(minutes)} minutes"
-
- def inf_handler(audio, model_name):
-     model_found = False
-     for model_info in UVR_5_MODELS:
-         if model_info["model_name"] == model_name:
-             separator.load_model(model_info["checkpoint"])
-             model_found = True
-             break
-     if not model_found:
-         separator.load_model()
-     output_files = separator.separate(audio)
-     vocals = output_files[0]
-     inst = output_files[1]
-     return vocals, inst
-
- def run(
-     audio_files,
-     model_selected,
-     pitch_alg,
-     pitch_lvl,
-     index_inf,
-     r_m_f,
-     e_r,
-     c_b_p,
- ):
-     if not audio_files:
-         raise ValueError("The audio pls")
-
-     if isinstance(audio_files, str):
-         audio_files = [audio_files]
-
-     try:
-         duration_base = librosa.get_duration(filename=audio_files[0])
-         print("Duration:", duration_base)
-     except Exception as e:
-         print(e)
-
-     random_tag = "USER_" + str(random.randint(10000000, 99999999))
-     pth_file = model_selected
-     index_file = None
-
-     print("Random tag:", random_tag)
-     print("File model:", pth_file)
-     print("Pitch algorithm:", pitch_alg)
-     print("Pitch level:", pitch_lvl)
-     print("File index:", index_file)
-     print("Index influence:", index_inf)
-     print("Respiration median filtering:", r_m_f)
-     print("Envelope ratio:", e_r)
-
-     converter.apply_conf(
-         tag=random_tag,
-         file_model=pth_file,
-         pitch_algo=pitch_alg,
-         pitch_lvl=pitch_lvl,
-         file_index=index_file,
-         index_influence=index_inf,
-         respiration_median_filtering=r_m_f,
-         envelope_ratio=e_r,
-         consonant_breath_protection=c_b_p,
-         resample_sr=44100 if audio_files[0].endswith('.mp3') else 0,
-     )
-     time.sleep(0.1)
-
-     result = convert_now(audio_files, random_tag, converter)
-     print("Result:", result)
-     return result[0]
-
- def upload_model(index_file, pth_file):
-     pth_file = pth_file.name
-     index_file = index_file.name
-     return "Uploaded!"
-
- with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="Ilaria RVC 💖") as demo:
-     gr.Markdown("## Ilaria RVC 💖")
-
-     with gr.Tab("Inference"):
-         sound_gui = gr.Audio(value=None, type="filepath", autoplay=False, visible=True)
-         model_dropdown = gr.Dropdown(choices=model_data, label="Model", interactive=True)
-
-         with gr.Accordion("Ilaria TTS", open=False):
-             text_tts = gr.Textbox(label="Text", placeholder="Hello!", lines=3, interactive=True)
-             dropdown_tts = gr.Dropdown(label="Language and Model", choices=list(language_dict.keys()),
-                                        interactive=True, value=list(language_dict.keys())[0])
-
-             button_tts = gr.Button("Speak", variant="primary")
-             button_tts.click(text_to_speech_edge, inputs=[text_tts, dropdown_tts], outputs=sound_gui)
-
-         with gr.Accordion("Settings", open=False):
-             pitch_algo_conf = gr.Dropdown(PITCH_ALGO_OPT, value=PITCH_ALGO_OPT[4], label="Pitch algorithm",
-                                           visible=True, interactive=True)
-             pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')", minimum=-24,
-                                        maximum=24, step=1, value=0, visible=True, interactive=True)
-             index_inf_conf = gr.Slider(minimum=0, maximum=1, label="Index influence -> How much accent is applied",
-                                        value=0.75)
-             respiration_filter_conf = gr.Slider(minimum=0, maximum=7, label="Respiration median filtering", value=3,
-                                                 step=1, interactive=True)
-             envelope_ratio_conf = gr.Slider(minimum=0, maximum=1, label="Envelope ratio", value=0.25,
-                                             interactive=True)
-             consonant_protec_conf = gr.Slider(minimum=0, maximum=0.5, label="Consonant breath protection", value=0.5,
-                                               interactive=True)
-
-         button_conf = gr.Button("Convert", variant="primary")
-         output_conf = gr.Audio(type="filepath", label="Output")
-
-         button_conf.click(lambda: None, None, output_conf)
-         button_conf.click(
-             run,
-             inputs=[
-                 sound_gui,
-                 model_dropdown,
-                 pitch_algo_conf,
-                 pitch_lvl_conf,
-                 index_inf_conf,
-                 respiration_filter_conf,
-                 envelope_ratio_conf,
-                 consonant_protec_conf,
-             ],
-             outputs=[output_conf],
-         )
-
-     with gr.Tab("Model Loader (Download and Upload)"):
-         with gr.Accordion("Model Downloader", open=False):
-             gr.Markdown(
-                 "Download the model from the following URL and upload it here. (Hugginface RVC model)"
-             )
-             model = gr.Textbox(lines=1, label="Model URL")
-             download_button = gr.Button("Download Model")
-             status = gr.Textbox(lines=1, label="Status", placeholder="Waiting....", interactive=False)
-             model_pth = gr.Textbox(lines=1, label="Model pth file", placeholder="Waiting....", interactive=False)
-             index_pth = gr.Textbox(lines=1, label="Index pth file", placeholder="Waiting....", interactive=False)
-
-             download_button.click(
-                 download_from_url,
-                 model,
-                 outputs=[status, model_pth, index_pth]
-             )
-
-         with gr.Accordion("Upload A Model", open=False):
-             index_file_upload = gr.File(label="Index File (.index)")
-             pth_file_upload = gr.File(label="Model File (.pth)")
-             upload_button = gr.Button("Upload Model")
-             upload_status = gr.Textbox(lines=1, label="Status", placeholder="Waiting....", interactive=False)
-
-             upload_button.click(upload_model, [index_file_upload, pth_file_upload], upload_status)
-
-     with gr.Tab("Extra"):
-         with gr.Accordion("Training Time Calculator", open=False):
-             with gr.Column():
-                 epochs_input = gr.Number(label="Number of Epochs")
-                 seconds_input = gr.Number(label="Seconds per Epoch")
-                 calculate_button = gr.Button("Calculate Time Remaining")
-                 remaining_time_output = gr.Textbox(label="Remaining Time", interactive=False)
-
-             calculate_button.click(
-                 fn=calculate_remaining_time,
-                 inputs=[epochs_input, seconds_input],
-                 outputs=[remaining_time_output]
-             )
-
-         with gr.Accordion('Training Helper', open=False):
-             with gr.Column():
-                 audio_input = gr.Audio(type="filepath", label="Upload your audio file")
-                 gr.Text(
-                     "Please note that these results are approximate and intended to provide a general idea for beginners.",
-                     label='Notice:')
-                 training_info_output = gr.Markdown(label="Training Information:")
-                 get_info_button = gr.Button("Get Training Info")
-                 get_info_button.click(
-                     fn=on_button_click,
-                     inputs=[audio_input],
-                     outputs=[training_info_output]
-                 )
-
-     with gr.Tab("Credits"):
-         gr.Markdown(
-             """
-             Ilaria RVC made by [Ilaria](https://huggingface.co/TheStinger) suport her on [ko-fi](https://ko-fi.com/ilariaowo)
-             The Inference code is made by [r3gm](https://huggingface.co/r3gm) (his module helped form this space 💖)
-             made with ❤️ by [mikus](https://github.com/cappuch) - i make this ui........
-             ## In loving memory of JLabDX 🕊️
-             """
-         )
-
- demo.queue(api_open=False).launch(show_api=False)
+ import gradio as gr
+ import requests
+ import random
+ import os
+ import zipfile  # built in module for unzipping files (thank god)
+ import librosa
+ import time
+ from infer_rvc_python import BaseLoader
+ from pydub import AudioSegment
+ from tts_voice import tts_order_voice
+ import edge_tts
+ import tempfile
+ from audio_separator.separator import Separator
+ import model_handler
+ import psutil
+ import cpuinfo
+
+ language_dict = tts_order_voice
+
+ # ilaria tts implementation :rofl:
+ async def text_to_speech_edge(text, language_code):
+     voice = language_dict[language_code]
+     communicate = edge_tts.Communicate(text, voice)
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+         tmp_path = tmp_file.name
+
+     await communicate.save(tmp_path)
+
+     return tmp_path
+
+ # fucking dogshit toggle
+ try:
+     import spaces
+     spaces_status = True
+ except ImportError:
+     spaces_status = False
+
+ separator = Separator()
+ converter = BaseLoader(only_cpu=False, hubert_path=None, rmvpe_path=None)  # <- yeah so like this handles rvc
+
+ global pth_file
+ global index_file
+
+ pth_file = "model.pth"
+ index_file = "model.index"
+
+ # CONFIGS
+ TEMP_DIR = "temp"
+ MODEL_PREFIX = "model"
+ PITCH_ALGO_OPT = [
+     "pm",
+     "harvest",
+     "crepe",
+     "rmvpe",
+     "rmvpe+",
+ ]
+ UVR_5_MODELS = [
+     {"model_name": "BS-Roformer-Viperx-1297", "checkpoint": "model_bs_roformer_ep_317_sdr_12.9755.ckpt"},
+     {"model_name": "MDX23C-InstVoc HQ 2", "checkpoint": "MDX23C-8KFFT-InstVoc_HQ_2.ckpt"},
+     {"model_name": "Kim Vocal 2", "checkpoint": "Kim_Vocal_2.onnx"},
+     {"model_name": "5_HP-Karaoke", "checkpoint": "5_HP-Karaoke-UVR.pth"},
+     {"model_name": "UVR-DeNoise by FoxJoy", "checkpoint": "UVR-DeNoise.pth"},
+     {"model_name": "UVR-DeEcho-DeReverb by FoxJoy", "checkpoint": "UVR-DeEcho-DeReverb.pth"},
+ ]
+ MODELS = [
+     {"model": "model.pth", "index": "model.index", "model_name": "Test Model"},
+ ]
+
+ os.makedirs(TEMP_DIR, exist_ok=True)
+
+ def unzip_file(file):
+     filename = os.path.basename(file).split(".")[0]  # converts "model.zip" to "model" so we can do things
+     with zipfile.ZipFile(file, 'r') as zip_ref:
+         zip_ref.extractall(os.path.join(TEMP_DIR, filename))  # might not be very ram efficient...
+     return True
+
+
+ def progress_bar(total, current):  # best progress bar ever trust me 😎
+     return "[" + "=" * int(current / total * 20) + ">" + " " * (20 - int(current / total * 20)) + "] " + str(int(current / total * 100)) + "%"
+
+ def download_from_url(url, name=None):
+     if name is None:
+         raise ValueError("The model name must be provided")
+     if "/blob/" in url:
+         url = url.replace("/blob/", "/resolve/")  # made it delik proof 😎
+     if "huggingface" not in url:
+         return ["The URL must be from huggingface", "Failed", "Failed"]
+     filename = os.path.join(TEMP_DIR, MODEL_PREFIX + str(random.randint(1, 1000)) + ".zip")
+     response = requests.get(url, stream=True)  # stream=True so iter_content below actually streams instead of buffering the whole file
+     total = int(response.headers.get('content-length', 0))  # bytes to download (length of the file)
+     if total > 500000000:
+         return ["The file is too large. You can only download files up to 500 MB in size.", "Failed", "Failed"]
+     current = 0
+     with open(filename, "wb") as f:
+         for data in response.iter_content(chunk_size=4096):  # download in chunks of 4096 bytes (4kb - helps with memory usage and speed)
+             f.write(data)
+             current += len(data)
+             print(progress_bar(total, current), end="\r")  # \r is a carriage return, it moves the cursor to the start of the line so it acts like tqdm 😎
+
+     # unzip because the model is in a zip file lel
+     try:
+         unzip_file(filename)
+     except Exception as e:
+         return ["Failed to unzip the file", "Failed", "Failed"]  # return early and tell the user it failed
+     unzipped_dir = os.path.join(TEMP_DIR, os.path.basename(filename).split(".")[0])  # just do what we did in unzip_file because we need the directory
+     pth_files = []
+     index_files = []
+     for root, dirs, files in os.walk(unzipped_dir):  # could be done more efficiently because nobody stores models in subdirectories but like who cares (it's a futureproofing thing lel)
+         for file in files:
+             if file.endswith(".pth"):
+                 pth_files.append(os.path.join(root, file))
+             elif file.endswith(".index"):
+                 index_files.append(os.path.join(root, file))
+
+     print(pth_files, index_files)  # debug print to see what came out of the zip
+     global pth_file
+     global index_file
+     pth_file = pth_files[0]
+     index_file = index_files[0]
+
+     print(pth_file)
+     print(index_file)
+
+     MODELS.append({"model": pth_file, "index": index_file, "model_name": name})
+     return ["Downloaded as " + name, pth_files[0], index_files[0]]
+
+ def inference(audio, model_name):
+     output_data = inf_handler(audio, model_name)
+     vocals = output_data[0]
+     inst = output_data[1]
+
+     return vocals, inst
+
+ if spaces_status:
+     @spaces.GPU()
+     def convert_now(audio_files, random_tag, converter):
+         return converter(
+             audio_files,
+             random_tag,
+             overwrite=False,
+             parallel_workers=8
+         )
+
+
+ else:
+     def convert_now(audio_files, random_tag, converter):
+         return converter(
+             audio_files,
+             random_tag,
+             overwrite=False,
+             parallel_workers=8
+         )
+
+ def calculate_remaining_time(epochs, seconds_per_epoch):
+     total_seconds = epochs * seconds_per_epoch
+
+     hours = total_seconds // 3600
+     minutes = (total_seconds % 3600) // 60
+     seconds = total_seconds % 60
+
+     if hours == 0:
+         return f"{int(minutes)} minutes"
+     elif hours == 1:
+         return f"{int(hours)} hour and {int(minutes)} minutes"
+     else:
+         return f"{int(hours)} hours and {int(minutes)} minutes"
+
+ def inf_handler(audio, model_name):  # its a shame that zerogpu just WONT cooperate with us
+     model_found = False
+     for model_info in UVR_5_MODELS:
+         if model_info["model_name"] == model_name:
+             separator.load_model(model_info["checkpoint"])
+             model_found = True
+             break
+     if not model_found:
+         separator.load_model()
+     output_files = separator.separate(audio)
+     vocals = output_files[0]
+     inst = output_files[1]
+     return vocals, inst
+
+
+ def run(
+     model,
+     audio_files,
+     pitch_alg,
+     pitch_lvl,
+     index_inf,
+     r_m_f,
+     e_r,
+     c_b_p,
+ ):
+     if not audio_files:
+         raise ValueError("The audio pls")
+
+     if isinstance(audio_files, str):
+         audio_files = [audio_files]
+
+     try:
+         duration_base = librosa.get_duration(filename=audio_files[0])
+         print("Duration:", duration_base)
+     except Exception as e:
+         print(e)
+
+     random_tag = "USER_" + str(random.randint(10000000, 99999999))
+
+     file_m = model
+     print("File model:", file_m)
+
+     # get from MODELS
+     file_index = None  # keep file_index defined even if the name is not in MODELS
+     for model in MODELS:
+         if model["model_name"] == file_m:
+             print(model)
+             file_m = model["model"]
+             file_index = model["index"]
+             break
+
+     if not file_m.endswith(".pth"):
+         raise ValueError("The model file must be a .pth file")
+
+     print("Random tag:", random_tag)
+     print("File model:", file_m)
+     print("Pitch algorithm:", pitch_alg)
+     print("Pitch level:", pitch_lvl)
+     print("File index:", file_index)
+     print("Index influence:", index_inf)
+     print("Respiration median filtering:", r_m_f)
+     print("Envelope ratio:", e_r)
+
+     converter.apply_conf(
+         tag=random_tag,
+         file_model=file_m,
+         pitch_algo=pitch_alg,
+         pitch_lvl=pitch_lvl,
+         file_index=file_index,
+         index_influence=index_inf,
+         respiration_median_filtering=r_m_f,
+         envelope_ratio=e_r,
+         consonant_breath_protection=c_b_p,
+         resample_sr=44100 if audio_files[0].endswith('.mp3') else 0,
+     )
+     time.sleep(0.1)
+
+     result = convert_now(audio_files, random_tag, converter)
+     print("Result:", result)
+
+     return result[0]
+
+ def upload_model(index_file, pth_file, model_name):
+     pth_file = pth_file.name
+     index_file = index_file.name
+     MODELS.append({"model": pth_file, "index": index_file, "model_name": model_name})
+     return "Uploaded!"
+
+ with gr.Blocks(theme="Ilaria RVC") as demo:
+     gr.Markdown("## Ilaria RVC 💖")
+     with gr.Tab("Inference"):
+         sound_gui = gr.Audio(value=None, type="filepath", autoplay=False, visible=True)
+
+         def update():
+             print(MODELS)
+             return gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS], visible=True, interactive=True, value=MODELS[0]["model_name"])
+
+         with gr.Row():
+             models_dropdown = gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS], visible=True, interactive=True, value=MODELS[0]["model_name"])
+             refresh_button = gr.Button("Refresh Models")
+             refresh_button.click(update, outputs=[models_dropdown])
+
+         with gr.Accordion("Settings", open=False):
+             pitch_algo_conf = gr.Dropdown(PITCH_ALGO_OPT, value=PITCH_ALGO_OPT[4], label="Pitch algorithm", visible=True, interactive=True)
+             pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')", minimum=-24, maximum=24, step=1, value=0, visible=True, interactive=True)
+             index_inf_conf = gr.Slider(minimum=0, maximum=1, label="Index influence -> How much accent is applied", value=0.75)
+             respiration_filter_conf = gr.Slider(minimum=0, maximum=7, label="Respiration median filtering", value=3, step=1, interactive=True)
+             envelope_ratio_conf = gr.Slider(minimum=0, maximum=1, label="Envelope ratio", value=0.25, interactive=True)
+             consonant_protec_conf = gr.Slider(minimum=0, maximum=0.5, label="Consonant breath protection", value=0.5, interactive=True)
+
+         button_conf = gr.Button("Convert", variant="primary")
+         output_conf = gr.Audio(type="filepath", label="Output")
+
+         button_conf.click(lambda: None, None, output_conf)
+         button_conf.click(
+             run,
+             inputs=[
+                 models_dropdown,
+                 sound_gui,
+                 pitch_algo_conf,
+                 pitch_lvl_conf,
+                 index_inf_conf,
+                 respiration_filter_conf,
+                 envelope_ratio_conf,
+                 consonant_protec_conf,
+             ],
+             outputs=[output_conf],
+         )
+
+     with gr.Tab("Ilaria TTS"):
+         text_tts = gr.Textbox(label="Text", placeholder="Hello!", lines=3, interactive=True)
+         dropdown_tts = gr.Dropdown(label="Language and Model", choices=list(language_dict.keys()), interactive=True, value=list(language_dict.keys())[0])
+
+         button_tts = gr.Button("Speak", variant="primary")
+
+         output_tts = gr.Audio(type="filepath", label="Output")
+
+         button_tts.click(text_to_speech_edge, inputs=[text_tts, dropdown_tts], outputs=[output_tts])
+
+
+     with gr.Tab("Model Loader (Download and Upload)"):
+         with gr.Accordion("Model Downloader", open=False):
+             gr.Markdown(
+                 "Download the model from the following URL and upload it here. (Hugging Face RVC model)"
+             )
+             model = gr.Textbox(lines=1, label="Model URL")
+             name = gr.Textbox(lines=1, label="Model Name", placeholder="Model Name")
+             download_button = gr.Button("Download Model")
+             status = gr.Textbox(lines=1, label="Status", placeholder="Waiting....", interactive=False)
+             model_pth = gr.Textbox(lines=1, label="Model pth file", placeholder="Waiting....", interactive=False)
+             index_pth = gr.Textbox(lines=1, label="Index file", placeholder="Waiting....", interactive=False)
+             download_button.click(download_from_url, [model, name], outputs=[status, model_pth, index_pth])
+         with gr.Accordion("Upload A Model", open=False):
+             index_file_upload = gr.File(label="Index File (.index)")
+             pth_file_upload = gr.File(label="Model File (.pth)")
+
+             model_name = gr.Textbox(label="Model Name", placeholder="Model Name")
+             upload_button = gr.Button("Upload Model")
+             upload_status = gr.Textbox(lines=1, label="Status", placeholder="Waiting....", interactive=False)
+
+             upload_button.click(upload_model, [index_file_upload, pth_file_upload, model_name], upload_status)
+
+
+     with gr.Tab("Vocal Separator (UVR)"):
+         gr.Markdown("Separate vocals and instruments from an audio file using UVR models. - This is only on CPU due to ZeroGPU being ZeroGPU :(")
+         uvr5_audio_file = gr.Audio(label="Audio File", type="filepath")
+
+         with gr.Row():
+             uvr5_model = gr.Dropdown(label="Model", choices=[model["model_name"] for model in UVR_5_MODELS])
+             uvr5_button = gr.Button("Separate Vocals", variant="primary")
+
+         uvr5_output_voc = gr.Audio(type="filepath", label="Output 1")  # UVR models sometimes swap which output is which, so let's just call them Outputs lol
+         uvr5_output_inst = gr.Audio(type="filepath", label="Output 2")
+
+         uvr5_button.click(inference, [uvr5_audio_file, uvr5_model], [uvr5_output_voc, uvr5_output_inst])
+
+     with gr.Tab("Extra"):
+         with gr.Accordion("Model Information", open=False):
+             def json_to_markdown_table(json_data):
+                 table = "| Key | Value |\n| --- | --- |\n"
+                 for key, value in json_data.items():
+                     table += f"| {key} | {value} |\n"
+                 return table
+
+             def model_info(name):
+                 for model in MODELS:
+                     if model["model_name"] == name:
+                         print(model["model"])
+                         info = model_handler.model_info(model["model"])
+                         info2 = {
+                             "Model Name": model["model_name"],
+                             "Model Config": info['config'],
+                             "Epochs Trained": info['epochs'],
+                             "Sample Rate": info['sr'],
+                             "Pitch Guidance": info['f0'],
+                             "Model Precision": info['size'],
+                         }
+                         return json_to_markdown_table(info2)  # return the table string; the output component is a Textbox
+
+                 return "Model not found"
+
+             def update():
+                 print(MODELS)
+                 return gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS])
+
+             with gr.Row():
+                 model_info_dropdown = gr.Dropdown(label="Model", choices=[model["model_name"] for model in MODELS])
+                 refresh_button = gr.Button("Refresh Models")
+                 refresh_button.click(update, outputs=[model_info_dropdown])
+             model_info_button = gr.Button("Get Model Information")
+             model_info_output = gr.Textbox(value="Waiting...", label="Output", interactive=False)
+             model_info_button.click(model_info, [model_info_dropdown], [model_info_output])
+
+         with gr.Accordion("Training Time Calculator", open=False):
+             with gr.Column():
+                 epochs_input = gr.Number(label="Number of Epochs")
+                 seconds_input = gr.Number(label="Seconds per Epoch")
+                 calculate_button = gr.Button("Calculate Time Remaining")
+                 remaining_time_output = gr.Textbox(label="Remaining Time", interactive=False)
+
+             calculate_button.click(calculate_remaining_time, inputs=[epochs_input, seconds_input], outputs=[remaining_time_output])
+
+         with gr.Accordion("Model Fusion", open=False):  # I DIDNT WRITE THIS - MIKUS - THIS IS ILARIA RVC MAINLINE PORT
+             with gr.Group():
+                 def merge(ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2):
+                     for model in MODELS:
+                         if model["model_name"] == ckpt_a:
+                             ckpt_a = model["model"]
+                         if model["model_name"] == ckpt_b:
+                             ckpt_b = model["model"]
+
+                     path = model_handler.merge(ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2)
+                     if path == "Fail to merge the models. The model architectures are not the same.":
+                         return "Fail to merge the models. The model architectures are not the same."
+                     else:
+                         MODELS.append({"model": path, "index": None, "model_name": name_to_save0})
+                         return "Merged, saved as " + name_to_save0
+
+                 gr.Markdown(value="Strongly suggested to use only very clean models.")
+                 with gr.Row():
+                     def update():
+                         print(MODELS)
+                         return gr.Dropdown(label="Model A", choices=[model["model_name"] for model in MODELS]), gr.Dropdown(label="Model B", choices=[model["model_name"] for model in MODELS])
+
+                     refresh_button_fusion = gr.Button("Refresh Models")
+                     ckpt_a = gr.Dropdown(label="Model A", choices=[model["model_name"] for model in MODELS])
+                     ckpt_b = gr.Dropdown(label="Model B", choices=[model["model_name"] for model in MODELS])
+                     refresh_button_fusion.click(update, outputs=[ckpt_a, ckpt_b])
+                 alpha_a = gr.Slider(
+                     minimum=0,
+                     maximum=1,
+                     label="Weight of the first model over the second",
+                     value=0.5,
+                     interactive=True,
+                 )
+             with gr.Group():
+                 with gr.Row():
+                     sr_ = gr.Radio(
+                         label="Sample rate of both models",
+                         choices=["32k", "40k", "48k"],
+                         value="32k",
+                         interactive=True,
+                     )
+                     if_f0_ = gr.Radio(
+                         label="Pitch Guidance",
+                         choices=["Yes", "Nah"],
+                         value="Yes",
+                         interactive=True,
+                     )
+                     info__ = gr.Textbox(
+                         label="Add information to the model",
+                         value="",
+                         max_lines=8,
+                         interactive=True,
+                         visible=False
+                     )
+                     name_to_save0 = gr.Textbox(
+                         label="Final Model name",
+                         value="",
+                         max_lines=1,
+                         interactive=True,
+                     )
+                     version_2 = gr.Radio(
+                         label="Versions of the models",
+                         choices=["v1", "v2"],
+                         value="v2",
+                         interactive=True,
+                     )
+             with gr.Group():
+                 with gr.Row():
+                     but6 = gr.Button("Fuse the two models", variant="primary")
+                     info4 = gr.Textbox(label="Output", value="", max_lines=8)
+             but6.click(
+                 merge,
+                 [ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0, version_2],
+                 info4,
+                 api_name="ckpt_merge",
+             )
+
+         with gr.Accordion("Model Quantization", open=False):
+             gr.Markdown("Quantize the model to a lower precision. - soon™ or never™ 😎")
+
+         with gr.Accordion("Debug", open=False):
+             def json_to_markdown_table(json_data):
+                 table = "| Key | Value |\n| --- | --- |\n"
+                 for key, value in json_data.items():
+                     table += f"| {key} | {value} |\n"
+                 return table
+
+             gr.Markdown("View the models that are currently loaded in the instance.")
+
+             gr.Markdown(json_to_markdown_table({"Models": len(MODELS), "UVR Models": len(UVR_5_MODELS)}))
+
+             gr.Markdown("View the current status of the instance.")
+             status = {
+                 "Status": "Running",  # duh lol
+                 "Models": len(MODELS),
+                 "UVR Models": len(UVR_5_MODELS),
+                 "CPU Usage": f"{psutil.cpu_percent()}%",
+                 "RAM Usage": f"{psutil.virtual_memory().percent}%",
+                 "CPU": f"{cpuinfo.get_cpu_info()['brand_raw']}",
+                 "System Uptime": f"{round(time.time() - psutil.boot_time(), 2)} seconds",
+                 "System Load Average": f"{psutil.getloadavg()}",
+                 "====================": "====================",
+                 "CPU Cores": psutil.cpu_count(),
+                 "CPU Threads": psutil.cpu_count(logical=True),
+                 "RAM Total": f"{round(psutil.virtual_memory().total / 1024**3, 2)} GB",
+                 "RAM Used": f"{round(psutil.virtual_memory().used / 1024**3, 2)} GB",
+                 "CPU Frequency": f"{psutil.cpu_freq().current} MHz",
+                 "==================== ": "====================",  # trailing space keeps this separator distinct; duplicate dict keys would silently collapse
+                 "GPU": "A100 - Do a request (Inference, you won't see it either way)",
+             }
+             gr.Markdown(json_to_markdown_table(status))
+
+     with gr.Tab("Credits"):
+         gr.Markdown(
+             """
+             Ilaria RVC made by [Ilaria](https://huggingface.co/TheStinger) - support her on [ko-fi](https://ko-fi.com/ilariaowo)
+
+             The Inference code is made by [r3gm](https://huggingface.co/r3gm) (his module helped form this space 💖)
+
+             made with ❤️ by [mikus](https://github.com/cappuch) - i made this ui........
+
+             ## In loving memory of JLabDX 🕊️
+             """
+         )
+
+ demo.queue(api_open=False).launch(show_api=False)  # idk ilaria if you want or dont want to
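
A note on the "Refresh Models" wiring above: each refresh button's callback returns a freshly constructed gr.Dropdown, and Gradio applies its properties (the rebuilt choices list) to the existing component. A self-contained sketch of that pattern, assuming Gradio 4.x update semantics (names here are illustrative):

import gradio as gr

MODELS = ["Test Model"]  # stand-in for the registry in app.py

def update():
    # Returning a new Dropdown from a callback updates the existing
    # dropdown's choices/value in place rather than creating a new widget.
    return gr.Dropdown(choices=list(MODELS), value=MODELS[0])

with gr.Blocks() as demo:
    dropdown = gr.Dropdown(choices=list(MODELS), label="Model")
    refresh = gr.Button("Refresh Models")
    refresh.click(update, outputs=[dropdown])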
model_handler.py ADDED
@@ -0,0 +1,155 @@
+ import torch
+ import numpy as np
+ import huggingface_hub
+ import zipfile
+ import os
+ from collections import OrderedDict
+
+ def model_info(model_path):
+     model = torch.load(model_path, map_location=torch.device('cpu'))
+     info = {
+         'config': model['config'],
+         'info': model['info'],
+         'epochs': model['info'].split('epoch')[0],
+         'sr': model['sr'],
+         'f0': model['f0'],
+         'size': model.get('size', 'fp32'),  # quantized checkpoints carry a 'size' key; plain ones default to fp32
+     }
+     return info
+
+ def merge(path1, path2, alpha1, sr, f0, info, name, version):
+     try:
+         def extract(ckpt):
+             a = ckpt["model"]
+             opt = OrderedDict()
+             opt["weight"] = {}
+             for key in a.keys():
+                 if "enc_q" in key:
+                     continue
+                 opt["weight"][key] = a[key]
+             return opt
+
+         ckpt1 = torch.load(path1, map_location="cpu")
+         ckpt2 = torch.load(path2, map_location="cpu")
+         cfg = ckpt1["config"]
+         if "model" in ckpt1:
+             ckpt1 = extract(ckpt1)
+         else:
+             ckpt1 = ckpt1["weight"]
+         if "model" in ckpt2:
+             ckpt2 = extract(ckpt2)
+         else:
+             ckpt2 = ckpt2["weight"]
+         if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())):
+             return "Fail to merge the models. The model architectures are not the same."
+         opt = OrderedDict()
+         opt["weight"] = {}
+         for key in ckpt1.keys():
+             if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape:
+                 min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0])
+                 opt["weight"][key] = (
+                     alpha1 * (ckpt1[key][:min_shape0].float())
+                     + (1 - alpha1) * (ckpt2[key][:min_shape0].float())
+                 ).half()
+             else:
+                 opt["weight"][key] = (
+                     alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float())
+                 ).half()
+         opt["config"] = cfg
+         """
+         if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000]
+         elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000]
+         elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000]
+         """
+         opt["sr"] = sr
+         opt["f0"] = 1 if f0 == "Yes" else 0
+         opt["version"] = version
+         opt["info"] = info
+         os.makedirs("models", exist_ok=True)  # make sure the output directory exists before saving
+         torch.save(opt, "models/" + name + ".pth")
+         return "models/" + name + ".pth"
+     except Exception:
+         return "Fail to merge the models. The model architectures are not the same."  # any failure lands here, so this message can mask other errors
+
+ def model_quant(model_path, size):
+     """
+     Quantize the model to a lower precision. - this is the floating point version
+
+     Args:
+         model_path: str, path to the model file
+         size: str, one of ["fp2", "fp4", "fp8", "fp16"]
+
+     Returns:
+         str, message indicating the success of the operation
+     """
+     size_options = ["fp2", "fp4", "fp8", "fp16"]
+     if size not in size_options:
+         raise ValueError(f"Size must be one of {size_options}")
+
+     model_base = torch.load(model_path, map_location=torch.device('cpu'))
+     model = model_base['weight']
+
+     # Note: .half() casts to fp16, and torch has no fp8/fp4/fp2 tensor dtypes here,
+     # so chaining .half() repeatedly is a no-op - every option below actually
+     # produces fp16 weights. The labels are kept so the saved 'size' matches the request.
+     if size == "fp16":
+         for key in model.keys():
+             model[key] = model[key].half()
+     elif size == "fp8":
+         for key in model.keys():
+             model[key] = model[key].half()
+     elif size == "fp4":
+         for key in model.keys():
+             model[key] = model[key].half()
+     elif size == "fp2":
+         for key in model.keys():
+             model[key] = model[key].half()
+
+     print(model_path)
+     output_path = model_path.split('.pth')[0] + f'_{size}.pth'
+     output_style = {
+         'weight': model,
+         'config': model_base['config'],
+         'info': model_base['info'],
+         'sr': model_base['sr'],
+         'f0': model_base['f0'],
+         'credits': f"Quantized to {size} precision, using Ilaria RVC, (Mikus's script)",
+         "size": size
+     }
+     torch.save(output_style, output_path)
+
+     # our data isn't safe anymore - as i'm typing this there's every chance it gets scraped and used to train another dogshit language model by a horrible company like openai
+     # i say this as a person who communicates with microsoft, and i'll stop mentioning this as they're so closely tied together nowadays
+     # as fred durst has said - "That's your best friend and your worst enemy - your own brain." - keep your shit local and never trust scumbag companies even if they make the models oss - they're stealing data
+     # this is probably the only rant i'll have in this entire space and i put it in a notable spot
+
+     return "Model quantized successfully"
+
+ def upload_model(repo, pth, index, token):
+     """
+     Upload a model to the Hugging Face Hub
+
+     Args:
+         repo: str, the repo id ("user/name") to create or reuse
+         pth: str, path to the model (.pth) file
+         index: str, path to the .index file
+         token: str, the Hugging Face API token
+
+     Returns:
+         str, message indicating the success of the operation
+     """
+     readme = f"""
+ # {repo}
+ This is a model uploaded by Ilaria RVC, using Mikus's script.
+ """
+     repo_name = repo.split('/')[1]
+     with zipfile.ZipFile(f'{repo_name}.zip', 'w') as zipf:
+         zipf.write(pth, os.path.basename(pth))
+         zipf.write(index, os.path.basename(index))
+         zipf.writestr('README.md', readme)
+
+     api = huggingface_hub.HfApi()
+     api.create_repo(repo_id=repo, token=token, exist_ok=True)  # create_repo takes repo_id, not name
+     api.upload_file(
+         path_or_fileobj=f'{repo_name}.zip',
+         path_in_repo=f'{repo_name}.zip',
+         repo_id=repo,
+         token=token,
+     )  # upload_file needs path_or_fileobj and path_in_repo
+     os.remove(f'{repo_name}.zip')
+     return "Model uploaded successfully"
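
Taken together, merge is a plain convex combination of the two checkpoints' weight tensors, alpha1 * A + (1 - alpha1) * B, cast to fp16 before saving. A hedged usage sketch of the module's public functions (all paths and names are placeholders):

import model_handler

# Inspect a checkpoint; keys mirror the dict model_info builds above.
info = model_handler.model_info("models/voice_a.pth")
print(info["epochs"], info["sr"], info["f0"], info["size"])

# Fuse two checkpoints 60/40; returns the saved path on success,
# or the architecture-mismatch message on failure.
result = model_handler.merge(
    "models/voice_a.pth",       # path1
    "models/voice_b.pth",       # path2
    0.6,                        # alpha1: weight of the first model
    "32k",                      # sr tag stored in the output checkpoint
    "Yes",                      # f0: "Yes" sets the pitch-guidance flag to 1
    "merged for illustration",  # info string embedded in the checkpoint
    "voice_ab",                 # name: saved as models/voice_ab.pth
    "v2",                       # RVC version tag
)

# "Quantize" a model; as noted in model_quant, every size option
# currently lands on fp16 since torch has no fp8/fp4/fp2 dtypes.
model_handler.model_quant("models/voice_ab.pth", "fp16")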
requirements.txt CHANGED
@@ -8,3 +8,6 @@ audio-separator[gpu]
  scipy
  onnxruntime-gpu
  samplerate
+ transformers
+ psutil
+ py-cpuinfo
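
The three new dependencies back the new tabs: psutil and py-cpuinfo (imported as cpuinfo) feed the Debug status table, while transformers is added without being imported anywhere in this diff, presumably for upcoming model tooling. A quick sketch of the exact calls app.py now makes:

import psutil
import cpuinfo  # installed via the py-cpuinfo package

print(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}")
print(f"CPU Usage: {psutil.cpu_percent()}%")
print(f"RAM Usage: {psutil.virtual_memory().percent}%")
print(f"Load Average: {psutil.getloadavg()}")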