vilarin committed
Commit 0916cce • 1 Parent(s): b3e3306

Update app.py

Files changed (1): app.py (+43, -127)
app.py CHANGED
@@ -1,21 +1,18 @@
 import gradio as gr
 import torch
-from diffusers import StableDiffusionXLPipeline, AutoencoderKL, KDPM2AncestralDiscreteScheduler, UNet2DConditionModel
+from diffusers import StableAudioPipeline
 from huggingface_hub import hf_hub_download
 import spaces
-from PIL import Image
-import requests
 from translatepy import Translator
 import numpy as np
 import random
-
+import soundfile as sf
 
 translator = Translator()
 
 # Constants
-model = "Corcelio/mobius"
-vae_model = "madebyollin/sdxl-vae-fp16-fix"
-MAX_SEED = np.iinfo(np.int32).max
+model = "stabilityai/stable-audio-open-1.0"
+# MAX_SEED = np.iinfo(np.int32).max
 
 CSS = """
 .gradio-container {
@@ -41,10 +38,9 @@ vae = AutoencoderKL.from_pretrained(
 
 # Ensure model and scheduler are initialized in GPU-enabled function
 if torch.cuda.is_available():
-    unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=torch.float16).to("cuda")
-    pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae, unet=unet, torch_dtype=torch.float16).to("cuda")
-
-    pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    pipe = StableAudioPipeline.from_pretrained(
+        model,
+        torch_dtype=torch.float16).to("cuda")
 
 
 # Function
@@ -52,131 +48,51 @@ pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.conf
 def generate_image(
     prompt,
     negative="low quality",
-    width=1024,
-    height=1024,
-    seed: int = -1,
-    nums=1,
-    scale=1.5,
-    steps=30,
-    clip=3):
+    second: float = 10.0):
 
-    if seed == -1:
-        seed = random.randint(0, MAX_SEED)
-    seed = int(seed)
-    generator = torch.Generator().manual_seed(seed)
+    # if seed == -1:
+    #     seed = random.randint(0, MAX_SEED)
+    # seed = int(seed)
+    # generator = torch.Generator().manual_seed(seed)
 
     prompt = str(translator.translate(prompt, 'English'))
 
     print(f'prompt:{prompt}')
 
-
-    image = pipe(
+    audio = pipe(
         prompt,
         negative_prompt=negative,
-        width=width,
-        height=height,
-        guidance_scale=scale,
-        generator=generator,
-        num_inference_steps=steps,
-        num_images_per_prompt=nums,
-        clip_skip=clip,
-    ).images
-    return image, seed
-
-
-examples = [
-    "a cat eating a piece of cheese",
-    "a ROBOT riding a BLUE horse on Mars, photorealistic",
-    "Ironman VS Hulk, ultrarealistic",
-    "a CUTE robot artist painting on an easel",
-    "Astronaut in a jungle, cold color palette, oil pastel, detailed, 8k",
-    "An alien holding sign board contain word 'Flash', futuristic, neonpunk",
-    "Kids going to school, Anime style"
-]
+        audio_end_in_s=second,
+    ).audios
 
+    os.makedirs("outputs", exist_ok=True)
+    base_count = len(glob(os.path.join("outputs", "*.wav")))
+    audio_path = os.path.join("outputs", f"{base_count:06d}.wav")
+
+    sf.write(audio_path, audio[0].T.float().cpu().numpy(), pipe.vae.sampling_rate)
+
+    return audio_path
 
 # Gradio Interface
 
-with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
-    gr.HTML("<h1><center>Mobius💠</center></h1>")
-    gr.HTML("<p><center><a href='https://huggingface.co/Corcelio/mobius'>mobius</a> text-to-image generation</center><br><center>Adding default prompts to enhance.</center></p>")
-    with gr.Group():
-        with gr.Row():
-            prompt = gr.Textbox(label='Enter Your Prompt(Multi-Languages)', value="best quality, HD, aesthetic", scale=6)
-            submit = gr.Button(scale=1, variant='primary')
-        img = gr.Gallery(label='Mobius Generated Image', columns=1, preview=True)
-    with gr.Accordion("Advanced Options", open=False):
-        with gr.Row():
-            negative = gr.Textbox(label="Negative prompt", value="low quality, ugly, blurry, poor face, bad anatomy")
-        with gr.Row():
-            width = gr.Slider(
-                label="Width",
-                minimum=512,
-                maximum=1280,
-                step=8,
-                value=1024,
-            )
-            height = gr.Slider(
-                label="Height",
-                minimum=512,
-                maximum=1280,
-                step=8,
-                value=1024,
-            )
-        with gr.Row():
-            seed = gr.Slider(
-                label="Seed (-1 Get Random)",
-                minimum=-1,
-                maximum=MAX_SEED,
-                step=1,
-                value=-1,
-                scale=2,
-            )
-            nums = gr.Slider(
-                label="Image Numbers",
-                minimum=1,
-                maximum=4,
-                step=1,
-                value=1,
-                scale=1,
-            )
-        with gr.Row():
-            scale = gr.Slider(
-                label="Guidance",
-                minimum=3.5,
-                maximum=7,
-                step=0.1,
-                value=7,
-            )
-            steps = gr.Slider(
-                label="Steps",
-                minimum=1,
-                maximum=50,
-                step=1,
-                value=30,
-            )
-            clip = gr.Slider(
-                label="Clip Skip",
-                minimum=1,
-                maximum=10,
-                step=1,
-                value=3,
-            )
-    gr.Examples(
-        examples=examples,
-        inputs=prompt,
-        outputs=[img, seed],
-        fn=generate_image,
-        cache_examples="lazy",
-    )
-
-    prompt.submit(fn=generate_image,
-                  inputs=[prompt, negative, width, height, seed, nums, scale, steps, clip],
-                  outputs=[img, seed],
-                  )
-    submit.click(fn=generate_image,
-                 inputs=[prompt, negative, width, height, seed, nums, scale, steps, clip],
-                 outputs=[img, seed],
-                 )
-
-demo.queue().launch()
+with gr.Blocks(theme='soft', css=CSS, title="Stable Audio Open") as iface:
+    with gr.Accordion(""):
+        gr.Markdown(DESCRIPTION)
+    with gr.Row():
+        output = gr.Audio(label="Podcast", type="filepath", interactive=False, autoplay=True, elem_classes="audio")  # Audio output component
+    with gr.Row():
+        prompt = gr.Textbox(label="Prompt", placeholder="1000 BPM percussive sound of water drops")
+    with gr.Row():
+        negative = gr.Textbox(label="Negative prompt", placeholder="Low quality")
+        second = gr.Slider(5.0, 60.0, value=10.0, label="Second", step=0.1)
+    with gr.Row():
+        submit_btn = gr.Button("🚀 Send")  # Create a submit button
+        clear_btn = gr.ClearButton(output, value="🗑️ Clear")  # Create a clear button
+
+    # Set up the event listeners
+    submit_btn.click(generate_image, inputs=[prompt, negative, second], outputs=output)
+
+
+#gr.close_all()
+
+iface.queue().launch(show_api=False)  # Launch the Gradio interface
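
For reference, the new audio path in isolation: a minimal sketch of the flow the updated generate_image follows, assuming a CUDA device and access to the stabilityai/stable-audio-open-1.0 weights. It also pulls in the os and glob imports that the saving step relies on; the prompt string is just the placeholder from the UI and is illustrative only.

import os
from glob import glob

import soundfile as sf
import torch
from diffusers import StableAudioPipeline

# Load the pipeline in half precision on the GPU, as the updated app.py does.
pipe = StableAudioPipeline.from_pretrained(
    "stabilityai/stable-audio-open-1.0",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "1000 BPM percussive sound of water drops"

# Generate ten seconds of audio; .audios is a batch, so take the first waveform.
audio = pipe(
    prompt,
    negative_prompt="low quality",
    audio_end_in_s=10.0,
).audios

# Save the result the same way the Space does: sequentially numbered WAV files.
os.makedirs("outputs", exist_ok=True)
base_count = len(glob(os.path.join("outputs", "*.wav")))
audio_path = os.path.join("outputs", f"{base_count:06d}.wav")
sf.write(audio_path, audio[0].T.float().cpu().numpy(), pipe.vae.sampling_rate)
print(audio_path)

Counting existing files in outputs/ keeps filenames unique across calls without holding any state in the Gradio app itself.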
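
The commit keeps the old seeding logic only as comments. Diffusers pipelines accept a torch.Generator, so if reproducible output is wanted the call in the sketch above could be seeded roughly as follows; the seed value is illustrative and pipe/prompt come from the previous snippet.

# Continues the sketch above: seed the sampler for reproducible output.
generator = torch.Generator("cuda").manual_seed(42)

audio = pipe(
    prompt,
    negative_prompt="low quality",
    audio_end_in_s=10.0,
    generator=generator,
).audios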