lauraibnz committed
Commit ea2e854 (1 parent: badd630)

Update app.py

Files changed (1): app.py (+13, -16)
app.py CHANGED
@@ -23,19 +23,19 @@ pipe = pipe.to(device)
 generator = torch.Generator(device)
 
 
-def predict(midi_file=None, prompt="", negative_prompt="", audio_length_in_s=10, random_seed=0, controlnet_conditioning_scale=1, num_inference_steps=20, guidance_scale=2.5, guess_mode=False):
+def predict(midi_file=None, prompt="", neg_prompt="", duration=10, seed=0, cond=1, inf=20, guidance_scale=2.5, guess=False):
     if isinstance(midi_file, _TemporaryFileWrapper):
         midi_file = midi_file.name
     midi = PrettyMIDI(midi_file)
     audio = pipe(
         prompt,
-        negative_prompt=negative_prompt,
+        negative_prompt=neg_prompt,
         midi=midi,
-        audio_length_in_s=audio_length_in_s,
-        num_inference_steps=num_inference_steps,
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-        guess_mode=guess_mode,
-        generator=generator.manual_seed(int(random_seed)),
+        audio_length_in_s=duration,
+        num_inference_steps=inf,
+        controlnet_conditioning_scale=float(cond),
+        guess_mode=guess,
+        generator=generator.manual_seed(int(seed)),
         guidance_scale=float(guidance_scale),
     )
     return (SAMPLE_RATE, audio.audios.T)
@@ -48,10 +48,10 @@ def synthesize(midi_file=None):
     midi_synth = midi_synth.reshape(midi_synth.shape[0], 1)
     return (SAMPLE_RATE, midi_synth)
 
-# def upload(midi_file=None):
-#     if isinstance(midi_file, _TemporaryFileWrapper):
-#         midi_file = midi_file.name
-#     return midi_file
+def run_example(midi_file=None, prompt="", neg_prompt="", duration=10, seed=0, cond=1, inf=20, guidance_scale=2.5, guess=False):
+    midi_synth = synthesize(midi_file)
+    gen_audio = predict(midi_file, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess)
+    return midi_synth, gen_audio
 
@@ -62,13 +62,10 @@ with gr.Blocks(title="🎹 MIDI-AudioLDM", theme=gr.themes.Base(text_size=gr.the
     """
     MIDI-AudioLDM is a MIDI-conditioned text-to-audio model based on the project [AudioLDM](https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation). The model has been conditioned using the ControlNet architecture and has been developed within Hugging Face’s [🧨 Diffusers](https://huggingface.co/docs/diffusers/) framework. Once trained, MIDI-AudioLDM accepts a MIDI file and a text prompt as input and returns an audio file, which is an interpretation of the MIDI based on the given text description. This enables detailed control over different musical aspects such as notes, mood and timbre.
     """)
-    # upload_button = gr.UploadButton("Upload MIDI file", file_types=[".mid"], value="S00.mid")
     with gr.Row():
         with gr.Column(variant='panel'):
-            # midi = gr.File(visible=False, value="S00.mid")
-            midi = gr.File(label="midi file", file_types=[".mid"], value="S00.mid")
+            midi = gr.File(label="midi file", file_types=[".mid"])
             midi_synth = gr.Audio(label="synthesized midi")
-            # upload_button.upload(upload, upload_button, midi)
             midi.upload(synthesize, midi, midi_synth)
             prompt = gr.Textbox(label="prompt", info="Enter a descriptive text prompt to guide the audio generation.")
         with gr.Column(variant='panel'):
@@ -83,6 +80,6 @@ with gr.Blocks(title="🎹 MIDI-AudioLDM", theme=gr.themes.Base(text_size=gr.the
             guess = gr.Checkbox(label="guess mode", info="Optionally select guess mode. If so, the model will try to recognize the content of the MIDI without the need of a text prompt.")
     btn = gr.Button("Generate")
     btn.click(predict, inputs=[midi, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess], outputs=[audio])
-    gr.Examples(examples=[["S00.mid", "piano", "", 10, 25, 1.0, 20, 2.5, False], ["S00.mid", "violin", "", 10, 25, 1.0, 20, 2.5, False], ["S00.mid", "woman singing, studio recording", "noise", 10, 25, 1.0, 20, 2.5, False], ["S00.mid", "jazz band, clean", "noise", 10, 25, 0.8, 20, 2.5, False], ["S00.mid", "choir", "noise, percussion", 10, 25, 0.7, 20, 2.5, False]], inputs=[midi, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess], fn=predict, outputs=[audio], cache_examples=True)
+    gr.Examples(examples=[["S00.mid", "piano", "", 10, 25, 1.0, 20, 2.5, False], ["S00.mid", "violin", "", 10, 25, 1.0, 20, 2.5, False], ["S00.mid", "woman singing, studio recording", "noise", 10, 25, 1.0, 20, 2.5, False], ["S00.mid", "jazz band, clean", "noise", 10, 25, 0.8, 20, 2.5, False], ["S00.mid", "choir", "noise, percussion", 10, 25, 0.7, 20, 2.5, False]], inputs=[midi, prompt, neg_prompt, duration, seed, cond, inf, guidance_scale, guess], fn=run_example, outputs=[midi_synth, audio], cache_examples=True)
 
 demo.launch()
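Reviewer note on the gr.Examples change: with cache_examples=True, Gradio executes fn for every example row when the app starts and replays the cached results on click, so fn must return one value for each component listed in outputs. The previous wiring (fn=predict, outputs=[audio]) left the synthesized-MIDI player empty whenever a cached example was selected; the new run_example wrapper fills both players. Below is a minimal, self-contained sketch of the same pattern; the toy preview/generate functions are illustrative stand-ins for synthesize/predict, not code from this Space.

import gradio as gr

def preview(text):
    # Stand-in for synthesize(): a cheap secondary output.
    return f"preview of {text}"

def generate(text):
    # Stand-in for predict(): the expensive model call.
    return f"audio for {text}"

def run_example(text):
    # With cache_examples=True, fn runs once per example row at startup,
    # so it must return one value per component in `outputs`.
    return preview(text), generate(text)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="prompt")
    out_preview = gr.Textbox(label="preview")
    out_main = gr.Textbox(label="result")
    gr.Examples(
        examples=[["piano"], ["violin"]],
        inputs=[inp],
        fn=run_example,
        outputs=[out_preview, out_main],
        cache_examples=True,
    )

demo.launch()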
 
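Side note on the seeding idiom the change preserves (generator=generator.manual_seed(int(seed))): torch.Generator.manual_seed() returns the generator itself, so the single module-level generator can be re-seeded inline on each call, making generation reproducible for a given seed. A small sketch of just that behavior, independent of the Space's code (names here are illustrative):

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator(device)

def sample_latents(seed, shape=(1, 8)):
    # manual_seed() returns the generator, so it can be re-seeded inline,
    # mirroring generator.manual_seed(int(seed)) in predict().
    return torch.randn(shape, device=device, generator=generator.manual_seed(int(seed)))

assert torch.equal(sample_latents(25), sample_latents(25))  # same seed -> same latents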