el-el-san committed
Commit 1bc9f68
1 Parent(s): f48b076

Update app.py

Files changed (1)
  1. app.py +11 -58
app.py CHANGED
@@ -3,33 +3,16 @@ import numpy as np
 import PIL.Image
 from PIL import Image
 import random
-from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, StableDiffusionXLPipeline, AutoencoderKL
+from diffusers import StableDiffusionXLPipeline, AutoencoderKL
 from diffusers import DDIMScheduler, EulerAncestralDiscreteScheduler
-#from diffusers.utils import load_image
-
 import cv2
 import torch
 import spaces
 
-
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-controlnet = ControlNetModel.from_pretrained(
-    #"2vXpSwA7/test_controlnet2/CN-anytest_v4-marged_am_dim256.safetensors",
-    "xinsir/controlnet-scribble-sdxl-1.0",
-    torch_dtype=torch.float16
-    #from_tf=False,
-    #variant="safetensors"
-)
-
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
-#pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
-#    "yodayo-ai/holodayo-xl-2.1",
-#    controlnet=controlnet,
-#    vae=vae,
-#    torch_dtype=torch.float16,
-#)
 pipe = StableDiffusionXLPipeline.from_pretrained(
     "yodayo-ai/holodayo-xl-2.1",
     vae=vae,
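
Note: the hunk cuts off inside the retained pipe = StableDiffusionXLPipeline.from_pretrained( call. For review context, a minimal sketch of the full post-commit loading section — the torch_dtype continuation, the scheduler swap, and the .to(device) call are assumptions, since they fall outside the hunk:

import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# fp16-safe SDXL VAE, as loaded in the commit
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "yodayo-ai/holodayo-xl-2.1",
    vae=vae,
    torch_dtype=torch.float16,  # assumption: continuation of the truncated call
)
# assumption: app.py imports EulerAncestralDiscreteScheduler, so it is presumably applied
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)  # assumption: the module-level device is reused here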
@@ -44,48 +27,22 @@ MAX_IMAGE_SIZE = 1216
 
 
 @spaces.GPU
-#def infer(use_image, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, image: PIL.Image.Image = None) -> PIL.Image.Image:
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-    # Check if the input image is a valid PIL Image and is not empty
-    use_image = False
-    #image = None
-
-    #if use_image :# and image is not None :
-    #    width, height = image['composite'].size
-    #    ratio = np.sqrt(1024. * 1024. / (width * height))
-    #    new_width, new_height = int(width * ratio), int(height * ratio)
-    #    image = image['composite'].resize((new_width, new_height))
-    #    print(image)
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
-    if use_image:
-        print("use_image")
-        #output_image = pipe(
-        #    prompt=prompt + ", masterpiece, best quality, very aesthetic, absurdres",
-        #    negative_prompt=negative_prompt,
-        #    image=image,
-        #    controlnet_conditioning_scale=1.0,
-        #    guidance_scale=guidance_scale,
-        #    num_inference_steps=num_inference_steps,
-        #    width=new_width,
-        #    height=new_height,
-        #    generator=generator
-        #).images[0]
-    else:
-        # If no valid image is provided, generate an image based only on the text prompt
-        output_image = pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            guidance_scale=guidance_scale,
-            num_inference_steps=num_inference_steps,
-            width=width,
-            height=height,
-            generator=generator
-        ).images[0]
+    output_image = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator
+    ).images[0]
 
     return output_image
 
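With the dead use_image branch gone, infer is a straight text-to-image call that keeps only the seed plumbing. A hedged usage sketch, assuming app.py's module scope (pipe, MAX_SEED) is already loaded — every argument value below is a placeholder, not taken from the commit:

# hypothetical direct call to the simplified infer(); values are illustrative only
image = infer(
    prompt="a watercolor lighthouse at dusk",   # placeholder prompt
    negative_prompt="lowres, blurry",           # placeholder
    seed=42,
    randomize_seed=False,   # True would draw a fresh seed via random.randint(0, MAX_SEED)
    width=1024,
    height=1024,
    guidance_scale=7.0,
    num_inference_steps=28,
)
image.save("out.png")  # pipe(...).images[0] is a PIL.Image, so infer returns one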
@@ -116,10 +73,7 @@ with gr.Blocks(css=css) as demo:
 
         run_button = gr.Button("Run", scale=0)
 
-        #image = gr.ImageEditor(type="pil", image_mode="L", crop_size=(512, 512))
         result = gr.Image(label="Result", show_label=False)
-
-        #use_image = gr.Checkbox(label="Use image", value=True)
 
         with gr.Accordion("Advanced Settings", open=False):
 
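With the gr.ImageEditor and the "Use image" checkbox removed, the UI around these context lines reduces to a prompt field, the Run button, the result image, and the settings accordion. A rough sketch of that structure, assuming the stock Gradio text-to-image layout (the gr.Text prompt field and the Row/Column nesting are not shown in the hunk):

import gradio as gr

with gr.Blocks() as demo:   # app.py passes css=css here
    with gr.Column():
        with gr.Row():
            prompt = gr.Text(label="Prompt", scale=1)   # assumed; outside the hunk
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            ...  # seed / randomize / width / height / guidance / steps controls live here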
@@ -176,9 +130,8 @@ with gr.Blocks(css=css) as demo:
 
     run_button.click(#lambda x: None, inputs=None, outputs=result).then(
         fn=infer,
-        #inputs=[use_image, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,image],
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs=[result]
     )
 
-demo.queue().launch(show_error=True)
+demo.queue().launch()
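
One subtlety in the retained wiring: the # in run_button.click(#lambda x: None, inputs=None, outputs=result).then( comments out the rest of that physical line, so inside the still-open parenthesis the call is effectively just run_button.click(fn=infer, ...). An equivalent, less surprising spelling:

run_button.click(
    fn=infer,
    inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
    outputs=[result],
)

demo.queue().launch()  # the commit also drops show_error=True from launch()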
 