"""Voice-cloning Gradio app built on Coqui XTTS v2.

Downloads a caller-supplied zip of reference audio, extracts the first
``.wav`` file found, and synthesizes the given text in that voice.
"""

import os
import zipfile

import gradio as gr
import requests
import torch
from TTS.api import TTS

# Use the GPU when available; XTTS inference on CPU works but is slow.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Coqui requires explicit acceptance of its terms of service.
os.environ["COQUI_TOS_AGREED"] = "1"

tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)


def clone(text, url, language):
    """Synthesize *text* in the voice contained in the zip archive at *url*.

    Parameters
    ----------
    text : str
        The sentence to speak.
    url : str
        HTTP(S) URL of a zip archive containing at least one ``.wav``
        reference recording (about 10 seconds is enough for XTTS).
    language : str
        An XTTS language code (see ``languages`` below).

    Returns
    -------
    str
        Path to the generated ``output.wav``.

    Raises
    ------
    requests.HTTPError
        If the download does not return a 2xx status.
    ValueError
        If the archive contains no ``.wav`` file.
    """
    response = requests.get(url, timeout=60)
    # Fail loudly on a 404/500 instead of trying to unzip an error page.
    response.raise_for_status()
    with open("temp.zip", "wb") as f:
        f.write(response.content)
    try:
        with zipfile.ZipFile("temp.zip", "r") as zip_ref:
            zip_ref.extractall()
        # Exclude output.wav so a previous run's result is never mistaken
        # for the speaker reference.
        wavs = [
            name
            for name in os.listdir(".")
            if name.endswith(".wav") and name != "output.wav"
        ]
        if not wavs:
            raise ValueError("The zip archive contains no .wav file.")
        audio_file = wavs[0]
        tts.tts_to_file(
            text=text,
            speaker_wav=audio_file,
            language=language,
            file_path="./output.wav",
        )
        os.remove(audio_file)
    finally:
        # Always remove the downloaded archive, even if extraction or
        # synthesis raised, so retries start clean.
        if os.path.exists("temp.zip"):
            os.remove("temp.zip")
    return "./output.wav"


# Language codes supported by XTTS v2.
languages = [
    "en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru",
    "nl", "cs", "ar", "zh-cn", "ja", "hu", "ko", "hi",
]


# Create a separate tab for model sharing
def model_sharing_tab():
    """Build and launch a standalone interface for sharing model links.

    NOTE(review): this launches a *second* Gradio app; it is defined but
    never called anywhere in this file.
    """
    model_input = gr.components.Textbox(label="Share your model/link")
    model_output = gr.components.Markdown(label="Shared Models")
    model_sharing_iface = gr.Interface(
        fn=lambda x: x,  # dummy function to display the input
        inputs=model_input,
        outputs=model_output,
        title="Share Your Model",
        description="Share your model or link with the community!",
    )
    model_sharing_iface.launch()


# Main voice-cloning interface.
# Fixes vs. the original: gr.Interface has no `style` or `children`
# parameters (they raised TypeError), `gr.Box.vertical` does not exist,
# and `iface` was referenced inside its own constructor call (NameError).
iface = gr.Interface(
    fn=clone,
    inputs=[
        gr.components.Text("", label="the text to be said"),
        gr.components.Text("", label="URL of the zip file with the dataset on hf.co (10 seconds is fine)"),
        gr.Dropdown(choices=languages, label="Language"),
    ],
    outputs=gr.Audio(type='filepath'),
    title='Voice Clone',
    description="""
    by [Angetyde](https://youtube.com/@Angetyde?si=7nusP31nTumIkPTF) and [Tony Assi](https://www.tonyassi.com/ ) use this colab with caution <3. 
    """,
    theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate"),
)

iface.launch(share=True)