import argparse
import logging
import os
import sys
import time
import typing as tp
import warnings
import base64
from pathlib import Path
from tempfile import NamedTemporaryFile

from einops import rearrange
import torch
import gradio as gr
import requests

from audiocraft.data.audio_utils import convert_audio
from audiocraft.data.audio import audio_write
from audiocraft.models.encodec import InterleaveStereoCompressionModel
from audiocraft.models import MusicGen, MultiBandDiffusion

from theme_wave import theme, css

# --- Configuration (Main App) ---
MLLM_API_URL = "http://localhost:8000"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# --- Global Variables (Main App) ---
MODEL = None
MBD = None
INTERRUPTING = False
USE_DIFFUSION = False  # Toggled per request in predict_full based on the chosen decoder.


# --- Utility Functions (Main App) ---
def interrupt():
    global INTERRUPTING
    INTERRUPTING = True


class FileCleaner:
    """Deletes generated files once they are older than `file_lifetime` seconds."""

    def __init__(self, file_lifetime: float = 3600):
        self.file_lifetime = file_lifetime
        self.files = []

    def add(self, path: tp.Union[str, Path]):
        self._cleanup()
        self.files.append((time.time(), Path(path)))

    def _cleanup(self):
        now = time.time()
        for time_added, path in list(self.files):
            if now - time_added > self.file_lifetime:
                if path.exists():
                    try:
                        path.unlink()
                    except Exception as e:
                        print(f"Error deleting file {path}: {e}")
                self.files.pop(0)
            else:
                break


file_cleaner = FileCleaner()


def make_waveform(*args, **kwargs):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return gr.make_waveform(*args, **kwargs)


# --- Model Loading (Main App) ---
def load_musicgen_model(version="facebook/musicgen-stereo-melody-large"):
    global MODEL
    print(f"Loading MusicGen model: {version}")
    if MODEL is None or MODEL.name != version:
        if MODEL is not None:
            del MODEL
            torch.cuda.empty_cache()
        MODEL = MusicGen.get_pretrained(version, device=DEVICE)


def load_diffusion_model():
    global MBD
    if MBD is None:
        print("Loading diffusion model")
        MBD = MultiBandDiffusion.get_mbd_musicgen(device=DEVICE)


# --- API Client Functions ---
def get_mllm_description(media_path: str, user_prompt: str) -> str:
    """Gets the music description from the MLLM API."""
    try:
        if media_path.lower().endswith((".mp4", ".avi", ".mov", ".mkv")):  # Video
            with open(media_path, "rb") as f:
                video_data = f.read()
            encoded_video = base64.b64encode(video_data).decode("utf-8")
            response = requests.post(
                f"{MLLM_API_URL}/describe_video/",
                json={"video": encoded_video, "user_prompt": user_prompt},
            )
        elif media_path.lower().endswith((".png", ".jpg", ".jpeg", ".gif", ".bmp")):  # Image
            with open(media_path, "rb") as f:
                image_data = f.read()
            encoded_image = base64.b64encode(image_data).decode("utf-8")
            response = requests.post(
                f"{MLLM_API_URL}/describe_image/",
                json={"image": encoded_image, "user_prompt": user_prompt},
            )
        else:  # Text-only
            response = requests.post(
                f"{MLLM_API_URL}/describe_text/", json={"user_prompt": user_prompt}
            )
        response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx).
        return response.json()["description"]
    except requests.exceptions.RequestException as e:
        raise gr.Error(f"Error communicating with MLLM API: {e}")
    except Exception as e:
        raise gr.Error(f"An unexpected error occurred: {e}")
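

# A minimal sketch (not part of the original flow): a best-effort reachability
# probe for the MLLM service before kicking off a generation. The name
# `mllm_api_available` and the idea of probing the bare base URL are
# assumptions -- only the three /describe_* routes above are known to exist.
def mllm_api_available(timeout: float = 2.0) -> bool:
    """Returns True if anything responds at MLLM_API_URL (status code ignored)."""
    try:
        # Any HTTP response (even a 404) proves the server is up; only
        # connection-level failures count as "unavailable".
        requests.get(MLLM_API_URL, timeout=timeout)
        return True
    except requests.exceptions.RequestException:
        return False
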

# --- Music Generation ---
def predict_full(
    model_version,
    media_type,
    image_input,
    video_input,
    text_prompt,
    melody,
    duration,
    topk,
    topp,
    temperature,
    cfg_coef,
    decoder,
    progress=gr.Progress(),
):
    global INTERRUPTING, USE_DIFFUSION
    INTERRUPTING = False
    USE_DIFFUSION = decoder == "MultiBand_Diffusion"

    if media_type == "Image":
        media = image_input if image_input else None
    elif media_type == "Video":
        media = video_input if video_input else None
    else:
        media = None

    # 1. Get Music Description (using the API client).
    progress(progress=None, desc="Generating music description...")
    if media:
        try:
            music_description = get_mllm_description(media, text_prompt)
        except Exception as e:
            raise gr.Error(str(e))  # Re-raise for Gradio to handle.
    else:
        music_description = text_prompt

    # 2. Load MusicGen Model (locally).
    progress(progress=None, desc="Loading MusicGen model...")
    load_musicgen_model(model_version)

    # 3. Set Generation Parameters (locally).
    MODEL.set_generation_params(
        duration=duration,
        top_k=topk,
        top_p=topp,
        temperature=temperature,
        cfg_coef=cfg_coef,
    )

    # 4. Melody Preprocessing (locally).
    progress(progress=None, desc="Processing melody...")
    melody_tensor = None  # Preprocessed melody conditioning, if one was provided.
    if melody:
        try:
            sr, melody_tensor = (
                melody[0],
                torch.from_numpy(melody[1]).to(MODEL.device).float().t(),
            )
            if melody_tensor.dim() == 1:
                melody_tensor = melody_tensor[None]
            melody_tensor = melody_tensor[..., : int(sr * duration)]
            melody_tensor = convert_audio(
                melody_tensor, sr, MODEL.sample_rate, MODEL.audio_channels
            )
        except Exception as e:
            raise gr.Error(f"Error processing melody: {e}")

    # 5. Music Generation (locally).
    progress(progress=None, desc="Generating music...")
    if USE_DIFFUSION:
        load_diffusion_model()
    try:
        if melody_tensor is not None:
            output = MODEL.generate_with_chroma(
                descriptions=[music_description],
                melody_wavs=[melody_tensor],
                melody_sample_rate=MODEL.sample_rate,
                progress=True,
                return_tokens=USE_DIFFUSION,
            )
        else:
            output = MODEL.generate(
                descriptions=[music_description],
                progress=True,
                return_tokens=USE_DIFFUSION,
            )
    except RuntimeError as e:
        raise gr.Error("Error while generating: " + str(e))

    if USE_DIFFUSION:
        progress(progress=None, desc="Running MultiBandDiffusion...")
        tokens = output[1]
        if isinstance(MODEL.compression_model, InterleaveStereoCompressionModel):
            left, right = MODEL.compression_model.get_left_right_codes(tokens)
            tokens = torch.cat([left, right])
        outputs_diffusion = MBD.tokens_to_wav(tokens)
        if isinstance(MODEL.compression_model, InterleaveStereoCompressionModel):
            assert outputs_diffusion.shape[1] == 1  # Output is mono.
            outputs_diffusion = rearrange(
                outputs_diffusion, "(s b) c t -> b (s c) t", s=2
            )
        output_audio = torch.cat([output[0], outputs_diffusion], dim=0)
    else:
        output_audio = output[0]
    output_audio = output_audio.detach().cpu().float()

    # 6. Save and Return (locally).
    progress(progress=None, desc="Saving and returning...")
    output_audio_paths = []
    for audio in output_audio:
        with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
            audio_write(
                file.name,
                audio,
                MODEL.sample_rate,
                strategy="loudness",
                loudness_headroom_db=16,
                loudness_compressor=True,
                add_suffix=False,
            )
            output_audio_paths.append(file.name)
            file_cleaner.add(file.name)

    if USE_DIFFUSION:
        # Return both audios: the autoregressive decode first, then the MBD decode.
        result = (
            output_audio_paths[0],  # Original
            output_audio_paths[1],  # MBD
        )
    else:
        result = (
            output_audio_paths[0],
            None,
        )  # Only the original audio.

    del melody_tensor, output, output_audio
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    return result
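

# Hypothetical usage sketch, kept commented out so nothing runs at import time:
# a text-only, 10-second generation with the default decoder. The keyword
# names mirror predict_full's signature; "Text" stands in for "no media"
# (anything other than "Image" or "Video" skips the MLLM call), and the
# sampling values are illustrative, not tuned defaults.
#
#   wav_path, mbd_path = predict_full(
#       model_version="facebook/musicgen-stereo-melody-large",
#       media_type="Text", image_input=None, video_input=None,
#       text_prompt="lofi beat with warm vinyl crackle", melody=None,
#       duration=10, topk=250, topp=0.0, temperature=1.0, cfg_coef=3.0,
#       decoder="Default",
#   )
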
progress(progress=None, desc="Saving and returning...") output_audio_paths = [] for i, audio in enumerate(output_audio): with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: audio_write( file.name, audio, MODEL.sample_rate, strategy="loudness", loudness_headroom_db=16, loudness_compressor=True, add_suffix=False, ) output_audio_paths.append(file.name) file_cleaner.add(file.name) if USE_DIFFUSION: # Return both audios, but make sure to return the correct one first result = ( output_audio_paths[0], # Original output_audio_paths[1], # MBD ) else: result = ( output_audio_paths[0], None, ) # Only original audio and description del melody_tensor, output, output_audio if torch.cuda.is_available(): torch.cuda.empty_cache() return result Wave = theme() def create_ui(launch_kwargs=None): """Creates and launches the Gradio UI.""" if launch_kwargs is None: launch_kwargs = {} def interrupt_handler(): interrupt() with gr.Blocks(theme=Wave, css=css) as interface: gr.Markdown( """