Update app.py
app.py CHANGED
@@ -18,6 +18,9 @@ login(HUGGINGFACE_TOKEN)
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if device == "cuda" else torch.float32
 
+# Load the StableAudio model from Hugging Face Hub
+pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0", torch_dtype=torch_dtype)
+pipe = pipe.to(device)
 
 # Initialize Flask app
 app = Flask(__name__)
@@ -34,10 +37,6 @@ def generate_audio():
         return jsonify({"error": "Missing prompt parameter"}), 400
 
     try:
-
-        # Load the StableAudio model from Hugging Face Hub
-        pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0", torch_dtype=torch_dtype)
-        pipe = pipe.to(device)
 
         # Generate the audio using StableAudioPipeline
         generator = torch.Generator(device)
@@ -47,7 +46,7 @@ def generate_audio():
             prompt=prompt,
             negative_prompt='Low Quality',
             num_inference_steps=10,  # Number of diffusion steps
-            guidance_scale=
+            guidance_scale=20.0,
             audio_end_in_s=1,
             num_waveforms_per_prompt=1,
             generator=generator
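For context, below is a minimal sketch of how app.py might look after this commit, with the pipeline loaded once at module level (so the model is not reloaded on every request) and the previously incomplete guidance_scale completed as 20.0. Only the device/dtype setup, the pipeline loading, and the pipe(...) call come from the diff; the imports, the /generate route name, the JSON request parsing, the WAV serialization via soundfile, and the error handling are assumptions added to make the sketch self-contained.

# Sketch of app.py after this commit. Parts not shown in the diff
# (imports, route, request parsing, WAV response) are assumptions.
import io
import os

import soundfile as sf  # assumed: used to serialize the generated audio
import torch
from diffusers import StableAudioPipeline
from flask import Flask, jsonify, request, send_file
from huggingface_hub import login

HUGGINGFACE_TOKEN = os.environ["HUGGINGFACE_TOKEN"]  # assumed env var name
login(HUGGINGFACE_TOKEN)

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# Load the StableAudio model from Hugging Face Hub once, at import time,
# instead of inside the request handler.
pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0", torch_dtype=torch_dtype)
pipe = pipe.to(device)

# Initialize Flask app
app = Flask(__name__)


@app.route("/generate", methods=["POST"])  # route name is an assumption
def generate_audio():
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    if not prompt:
        return jsonify({"error": "Missing prompt parameter"}), 400

    try:
        # Generate the audio using StableAudioPipeline
        generator = torch.Generator(device)
        output = pipe(
            prompt=prompt,
            negative_prompt="Low Quality",
            num_inference_steps=10,  # Number of diffusion steps
            guidance_scale=20.0,
            audio_end_in_s=1,
            num_waveforms_per_prompt=1,
            generator=generator,
        )

        # output.audios[0] is a (channels, samples) tensor; transpose and
        # write it as a WAV at the pipeline's native sampling rate.
        audio = output.audios[0].T.float().cpu().numpy()
        buffer = io.BytesIO()
        sf.write(buffer, audio, pipe.vae.sampling_rate, format="WAV")
        buffer.seek(0)
        return send_file(buffer, mimetype="audio/wav", download_name="output.wav")
    except Exception as exc:  # error response shape is an assumption
        return jsonify({"error": str(exc)}), 500


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)

The main effect of the change is that StableAudioPipeline.from_pretrained and the move to the device happen once at startup rather than on every POST, which removes the per-request model loading cost.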