# NOTE(review): removed non-Python scrape artifacts (Hugging Face Spaces page
# header, commit hashes, and a line-number gutter) that made this file unparsable.
import os

import gradio as gr
import torch  # used by generate_music(); was missing from the original imports

# Import the model
from models import MODELS
# Instantiate the Jukebox 5B-lyrics VQ-VAE on CPU at module load time.
# NOTE(review): `jukebox` is never imported anywhere in this file — confirm the
# intended import (presumably the OpenAI Jukebox package) before running.
model = jukebox.make_vqvae(MODELS['5B_LYRICS'], device="cpu")
# Generate music
def generate_music(temperature=1.0, top_k=10, beam_width=5):
    """Sample audio from the module-level Jukebox VQ-VAE `model`.

    Args:
        temperature: Sampling temperature forwarded to `model.sample`.
        top_k: Top-k filtering value forwarded to `model.sample`.
        beam_width: Beam width forwarded to `model.sample`.

    Returns:
        Whatever `model.sample` returns — presumably an audio tensor; the
        exact type depends on the Jukebox model (TODO confirm).
    """
    # Fix(review): the original body had lost its indentation (SyntaxError);
    # the statements themselves are unchanged.
    z = torch.randn(1, 1024)  # random latent seed for sampling
    audio = model.sample(z, temperature=temperature, top_k=top_k, beam_width=beam_width)
    return audio
# Input audio
def input_audio():
    """Prompt the user for a path on stdin and load that file with librosa.

    Returns:
        The result of `librosa.load` — note this is a `(samples, sample_rate)`
        tuple, not a bare array; callers must unpack accordingly.
    """
    # Fix(review): restored the body indentation lost in the original
    # (SyntaxError). NOTE(review): `librosa` is never imported in this file —
    # confirm the intended import.
    audio_file = input("Enter the path to the audio file: ")
    audio_data = librosa.load(audio_file)
    return audio_data
# Generate music from the input audio
def generate_music_from_audio(audio_data):
    """Round-trip `audio_data` through the VQ-VAE: encode to latents, decode back.

    Args:
        audio_data: Audio input accepted by `model.encode` — presumably the
            value produced by `input_audio()` (TODO confirm expected shape).

    Returns:
        The decoded audio from `model.decode`.
    """
    # Fix(review): the original body had lost its indentation (SyntaxError);
    # the statements themselves are unchanged.
    z = model.encode(audio_data)
    audio = model.decode(z)
    return audio
# Save the music
def save_music(audio, filename):
    """Write `audio` to `filename` as a 44.1 kHz WAV file.

    Args:
        audio: Array-like audio samples (converted with `numpy.asarray`).
        filename: Destination path for the WAV file.
    """
    # Fix(review): the original called `librosa.output(filename, audio, sr=44100)`.
    # `librosa.output` was a module (never callable) and was removed entirely in
    # librosa 0.8, so that line could never have worked. scipy's WAV writer
    # provides the same result; the 44100 Hz rate is preserved.
    from scipy.io import wavfile

    import numpy as np

    wavfile.write(filename, 44100, np.asarray(audio))
# Play the music
def play_music(audio):
    """Build an audio playback widget for `audio`.

    NOTE(review): `Audio` is never imported in this file — presumably
    `IPython.display.Audio`; confirm the intended import. Also note the widget
    is constructed but not displayed or returned, so outside a notebook cell
    this call has no visible effect.
    """
    # Fix(review): restored the body indentation lost in the original
    # (SyntaxError); the statement itself is unchanged.
    Audio(audio)
# Create the Gradio interface
# Fix(review): the original used the `gr.inputs` / `gr.outputs` namespaces
# (removed in Gradio 3.x) and passed `min=`/`max=` to Slider — the parameters
# are `minimum`/`maximum` in every Gradio release, so this raised TypeError.
# `allow_screenshot` and `clear_output` are not `Interface` kwargs in current
# Gradio and were dropped. Slider defaults mirror generate_music's defaults.
app = gr.Interface(
    generate_music,
    inputs=[
        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="Temperature"),
        gr.Slider(minimum=1, maximum=10, step=1, value=10, label="Top K"),
        gr.Slider(minimum=1, maximum=10, step=1, value=5, label="Beam Width"),
    ],
    outputs=gr.Audio(),
    title="OpenAI Jukebox",
    description="Generate music using OpenAI Jukebox",
)
# Run the app
# Fix(review): removed a stray trailing "|" (scrape gutter artifact) that made
# this line a SyntaxError.
app.launch()