lauraibnz committed on
Commit
1df0264
1 Parent(s): 39bf1c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -14,14 +14,11 @@ controlnet = ControlNetModel.from_pretrained("lauraibnz/midi-audioldm", torch_dt
14
  pipe = AudioLDMControlNetPipeline.from_pretrained("cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype)
15
  pipe = pipe.to(device)
16
 
17
- def predict(prompt, midi_file=None, audio_length_in_s=5, controlnet_conditioning_scale=1.0, num_inference_steps=20):
18
- if midi_file:
19
- midi_file = midi_file.name
20
- else:
21
- midi_file = "test.mid"
22
  midi = PrettyMIDI(midi_file)
23
  audio = pipe(prompt, midi=midi, audio_length_in_s=audio_length_in_s, num_inference_steps=num_inference_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale))
24
  return (16000, audio.audios.T)
25
 
26
- demo = gr.Interface(fn=predict, inputs=["text", gr.UploadButton("Upload a MIDI file", file_types=[".mid"]), gr.Slider(0, 30, value=5, step=5, label="duration (seconds)"), gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="conditioning scale")], outputs="audio")
27
  demo.launch()
 
14
  pipe = AudioLDMControlNetPipeline.from_pretrained("cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype)
15
  pipe = pipe.to(device)
16
 
17
def predict(midi_file, prompt, audio_length_in_s, controlnet_conditioning_scale, num_inference_steps=20):
    """Generate audio conditioned on a MIDI file and a text prompt.

    Args:
        midi_file: uploaded file object (gradio temp file) exposing a ``.name``
            path, or ``None`` when no file is provided.
        prompt: text prompt guiding the audio generation.
        audio_length_in_s: duration of the generated clip in seconds.
        controlnet_conditioning_scale: strength of the MIDI conditioning;
            coerced to float before being passed to the pipeline.
        num_inference_steps: number of diffusion denoising steps.

    Returns:
        ``(16000, samples)`` — a (sample_rate, ndarray) tuple in the shape
        gradio's audio output expects; the pipeline renders at 16 kHz.
    """
    # Fall back to the bundled demo MIDI when no file is supplied, so a
    # cleared/empty file input doesn't crash on ``None.name`` (this guard
    # existed in the previous revision and was lost in this change).
    midi_file = midi_file.name if midi_file is not None else "test.mid"
    midi = PrettyMIDI(midi_file)
    audio = pipe(prompt, midi=midi, audio_length_in_s=audio_length_in_s, num_inference_steps=num_inference_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale))
    return (16000, audio.audios.T)
22
 
23
# Gradio UI: a MIDI file upload (pre-filled with the demo file), a text
# prompt, and two sliders (clip duration, conditioning strength), rendered
# as a single audio clip.
_inputs = [
    gr.File("test.mid", file_types=[".mid"]),
    "text",
    gr.Slider(0, 30, value=5, step=5, label="duration (seconds)"),
    gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="conditioning scale"),
]
demo = gr.Interface(fn=predict, inputs=_inputs, outputs="audio")
demo.launch()