# Hugging Face Space for TANGO text-to-audio generation (running on ZeroGPU).
import gradio as gr
import json
import torch
import wavio
from tqdm import tqdm
from huggingface_hub import snapshot_download
from models import AudioDiffusion, DDPMScheduler
from audioldm.audio.stft import TacotronSTFT
from audioldm.variational_autoencoder import AutoencoderKL
import spaces
class Tango:
    def __init__(self, name="declare-lab/tango", device="cuda:0"):
        path = snapshot_download(repo_id=name)

        vae_config = json.load(open("{}/vae_config.json".format(path)))
        stft_config = json.load(open("{}/stft_config.json".format(path)))
        main_config = json.load(open("{}/main_config.json".format(path)))

        self.vae = AutoencoderKL(**vae_config).to(device)
        self.stft = TacotronSTFT(**stft_config).to(device)
        self.model = AudioDiffusion(**main_config).to(device)

        vae_weights = torch.load("{}/pytorch_model_vae.bin".format(path), map_location=device)
        stft_weights = torch.load("{}/pytorch_model_stft.bin".format(path), map_location=device)
        main_weights = torch.load("{}/pytorch_model_main.bin".format(path), map_location=device)

        self.vae.load_state_dict(vae_weights)
        self.stft.load_state_dict(stft_weights)
        self.model.load_state_dict(main_weights)
        print("Successfully loaded checkpoint from:", name)

        self.vae.eval()
        self.stft.eval()
        self.model.eval()

        self.scheduler = DDPMScheduler.from_pretrained(main_config["scheduler_name"], subfolder="scheduler")
    def chunks(self, lst, n):
        """Yield successive n-sized chunks from a list."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
        """Generate audio for a single prompt string."""
        with torch.no_grad():
            latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
            mel = self.vae.decode_first_stage(latents)
            wave = self.vae.decode_to_waveform(mel)
        return wave[0]
    def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
        """Generate audio for a list of prompt strings."""
        outputs = []
        for k in tqdm(range(0, len(prompts), batch_size)):
            batch = prompts[k:k + batch_size]
            with torch.no_grad():
                latents = self.model.inference(batch, self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
                mel = self.vae.decode_first_stage(latents)
                wave = self.vae.decode_to_waveform(mel)
                outputs += [item for item in wave]
        if samples == 1:
            return outputs
        else:
            return list(self.chunks(outputs, samples))
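# A minimal usage sketch for the Tango class outside of Gradio (assumes a CUDA
# device is available; the 16 kHz sample rate matches what gradio_generate
# uses below):
#
#   tango = Tango(device="cuda:0")
#   wave = tango.generate("An audience cheering and clapping", steps=100, guidance=3)
#   wavio.write("cheering.wav", wave, rate=16000, sampwidth=2)
#
#   # Batched generation over several prompts:
#   waves = tango.generate_for_batch(["A dog barking", "A cat meowing"], steps=100)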
# Initialize TANGO
tango = Tango(device="cpu")
tango.vae.to("cuda")
tango.stft.to("cuda")
tango.model.to("cuda")
# On ZeroGPU Spaces a GPU is attached per call; the decorator below (from the
# `spaces` package imported above) requests one for each generation request.
@spaces.GPU
def gradio_generate(prompt, steps, guidance):
    output_wave = tango.generate(prompt, steps, guidance)
    # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
    output_filename = "temp.wav"
    wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
    return output_filename
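# Smoke-test sketch: the handler can also be called directly, bypassing the
# UI; it writes temp.wav to the working directory and returns the path.
#
#   print(gradio_generate("Rolling thunder with lightning strikes", 100, 3))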
# description_text = """
# <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
# Generate audio using TANGO by providing a text prompt.
# <br/><br/>Limitations: TANGO is trained on the small AudioCaps dataset so it may not generate good audio \
# samples related to concepts that it has not seen in training (e.g. singing). For the same reason, TANGO \
# is not always able to finely control its generations over textual control prompts. For example, \
# the generations from TANGO for prompts Chopping tomatoes on a wooden table and Chopping potatoes \
# on a metal table are very similar. \
# <br/><br/>We are currently training another version of TANGO on larger datasets to enhance its generalization, \
# compositional and controllable generation ability.
# <br/><br/>We recommend using a guidance scale of 3. The default number of steps is set to 100. More steps generally lead to better quality of generated audios but will take longer.
# <br/><br/>
# <h1> ChatGPT-enhanced audio generation</h1>
# <br/>
# As TANGO consists of an instruction-tuned LLM, it is able to process complex sound descriptions allowing us to provide more detailed instructions to improve the generation quality.
# For example, ``A boat is moving on the sea'' vs ``The sound of the water lapping against the hull of the boat or splashing as you move through the waves''. The latter is obtained by prompting ChatGPT to explain the sound generated when a boat moves on the sea.
# Using this ChatGPT-generated description of the sound, TANGO provides superior results.
# <p/>
# """
description_text = ""
# Gradio input and output components
input_text = gr.Textbox(lines=2, label="Prompt")
output_audio = gr.Audio(label="Generated Audio", type="filepath")
denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
# Gradio interface
gr_interface = gr.Interface(
    fn=gradio_generate,
    inputs=[input_text, denoising_steps, guidance_scale],
    outputs=[output_audio],
    title="TANGO: Text to Audio using Instruction-Guided Diffusion",
    description=description_text,
    allow_flagging="never",
    examples=[
        ["A lady is singing a song with a kid"],
        ["The sound of the water lapping against the hull of the boat or splashing as you move through the waves"],
        ["An audience cheering and clapping"],
        ["Rolling thunder with lightning strikes"],
        ["Gentle water stream, birds chirping and sudden gun shot"],
        ["A car engine revving"],
        ["A dog barking"],
        ["A cat meowing"],
        ["Wooden table tapping sound while water pouring"],
        ["Emergency sirens wailing"],
        ["two gunshots followed by birds flying away while chirping"],
        ["Whistling with birds chirping"],
        ["A person snoring"],
        ["Motor vehicles are driving with loud engines and a person whistles"],
        ["People cheering in a stadium while thunder and lightning strikes"],
        ["A helicopter is in flight"],
        ["A dog barking and a man talking and a racing car passes by"],
    ],
    cache_examples="lazy",  # Cache example outputs lazily, on first request.
)
# Launch the Gradio app with a request queue (up to 10 concurrent workers)
gr_interface.queue(default_concurrency_limit=10).launch()