lauraibnz commited on
Commit
281952b
1 Parent(s): 6e4c184

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -14,7 +14,7 @@ controlnet = ControlNetModel.from_pretrained("lauraibnz/midi-audioldm", torch_dt
14
  pipe = AudioLDMControlNetPipeline.from_pretrained("cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype)
15
  pipe = pipe.to(device)
16
 
17
- def predict(prompt, midi_file=None, audio_length_in_s=10, num_inference_steps=20, controlnet_conditioning_scale=1.0):
18
  if midi_file:
19
  midi_file = midi_file.name
20
  else:
@@ -23,5 +23,5 @@ def predict(prompt, midi_file=None, audio_length_in_s=10, num_inference_steps=20
23
  audio = pipe(prompt, midi=midi, audio_length_in_s=audio_length_in_s, num_inference_steps=num_inference_steps, controlnet_conditioning_scale=controlnet_conditioning_scale)
24
  return (16000, audio.audios.T)
25
 
26
- demo = gr.Interface(fn=predict, inputs=["text", gr.UploadButton("Upload a MIDI File", file_types=[".mid"])], outputs="audio")
27
  demo.launch()
 
14
  pipe = AudioLDMControlNetPipeline.from_pretrained("cvssp/audioldm-m-full", controlnet=controlnet, torch_dtype=torch_dtype)
15
  pipe = pipe.to(device)
16
 
17
+ def predict(prompt, midi_file=None, audio_length_in_s=5, controlnet_conditioning_scale=1.0, num_inference_steps=20):
18
  if midi_file:
19
  midi_file = midi_file.name
20
  else:
 
23
  audio = pipe(prompt, midi=midi, audio_length_in_s=audio_length_in_s, num_inference_steps=num_inference_steps, controlnet_conditioning_scale=controlnet_conditioning_scale)
24
  return (16000, audio.audios.T)
25
 
26
+ demo = gr.Interface(fn=predict, inputs=["text", gr.UploadButton("Upload a MIDI File", file_types=[".mid"]), gr.Slider(0, 30, value=5, step=5, label="Duration (seconds)"), gr.Slider(0.0, 1.0, value=1.0, step=0.1, label="Conditioning scale")], outputs="audio")
27
  demo.launch()