TheVeshup committed
Commit 3741df5 · verified · 1 Parent(s): 72d303e

Update app.py

Files changed (1):
  1. app.py +11 -161
app.py CHANGED
@@ -1,184 +1,34 @@
+import os
 import gradio as gr
 import numpy as np
 import random
-
-import spaces #[uncomment to use ZeroGPU]
+import spaces # ZeroGPU integration
 from diffusers import DiffusionPipeline
 import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/stable-diffusion-3.5-large" # Replace to the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-
-@spaces.GPU #[uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
-):
-    # Seed Handling
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    # Generate Image
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css = """
-/* CSS Styling (remains unchanged from earlier examples) */
-"""
-
-# Higher Defaults for Advanced Settings
-DEFAULT_STEPS = 50
-DEFAULT_GUIDANCE = 7.5
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("<div id='header'><h1 id='title'>Veshon: Veshup's Image Generation AI</h1><p id='subtitle'>Create stunning images with just a prompt. Powered by cutting-edge AI technology.</p></div>")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Your Creative Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt here...",
-                container=False,
-            )
-
-            run_button = gr.Button("Generate Image", scale=0, variant="primary", elem_classes="gradio-button")
-
-        result = gr.Image(label="Generated Image", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative Prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt if needed",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=768, # Higher default resolution
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=768, # Higher default resolution
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance Scale",
-                    minimum=0.0,
-                    maximum=15.0,
-                    step=0.1,
-                    value=DEFAULT_GUIDANCE, # Higher guidance by default
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of Inference Steps",
-                    minimum=1,
-                    maximum=150, # Increased maximum steps
-                    step=1,
-                    value=DEFAULT_STEPS, # Higher inference steps for quality
-                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
-    )
-
-if __name__ == "__main__":
-    demo.launch()
-import gradio as gr
-import numpy as np
-import random
-
-import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
-import torch
+# Get Hugging Face token from environment variable
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None
+if not HF_TOKEN:
+    raise ValueError("Hugging Face token not found. Please set the 'HF_TOKEN' environment variable.")
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
+model_repo_id = "stabilityai/sdxl-turbo" # Replace with the model you would like to use
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+pipe = DiffusionPipeline.from_pretrained(
+    model_repo_id, torch_dtype=torch_dtype, use_auth_token=HF_TOKEN
+)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 
-@spaces.GPU #[uncomment to use ZeroGPU]
+@spaces.GPU # ZeroGPU decorator
 def infer(
     prompt,
     negative_prompt,
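
A note on the token lookup added above: os.environ.get() already returns None when the variable is unset, so the conditional expression on the HF_TOKEN line is redundant. A minimal equivalent sketch:

import os

# os.environ.get() yields None for a missing variable; no ternary needed.
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("Hugging Face token not found. Please set the 'HF_TOKEN' environment variable.")

On a Hugging Face Space, HF_TOKEN is typically supplied as a repository secret rather than hard-coded.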
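
On the authenticated load: use_auth_token is accepted by older diffusers releases but is deprecated in recent ones, which take token instead. A hedged sketch of the same load, assuming a recent diffusers version; sdxl-turbo itself is served publicly, so the token matters mainly once the repo id is swapped for a gated model such as the Stable Diffusion 3.5 weights removed above:

from diffusers import DiffusionPipeline
import torch

# Assumes a recent diffusers release where `token` replaces the deprecated
# `use_auth_token` keyword; the value is forwarded to the Hub download client.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float16,
    token=HF_TOKEN,
)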
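
On @spaces.GPU: on a ZeroGPU Space the decorator requests GPU time for each call and releases it afterwards; outside ZeroGPU it has no effect. A minimal sketch, assuming the spaces package available on Hugging Face Spaces (the optional duration argument is part of its public API; the 60-second figure is illustrative):

import spaces

@spaces.GPU(duration=60)  # request a GPU for up to ~60 s per call
def infer(prompt):
    # GPU-bound work, such as the pipe(...) call above, belongs inside
    # the decorated function so it runs while the GPU is attached.
    return pipe(prompt=prompt).images[0]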