el-el-san committed (verified)
Commit eadb51f · 1 Parent(s): 55f1281

Update app.py

Files changed (1):
  1. app.py +35 -21
app.py CHANGED
@@ -39,36 +39,50 @@ MAX_IMAGE_SIZE = 1216
 
 
 @spaces.GPU
-def infer(image: PIL.Image.Image, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps) -> PIL.Image.Image:
+def infer(image, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps) -> PIL.Image.Image:
+    # Check if the input image is a valid PIL Image and is not empty
+    if isinstance(image, PIL.Image.Image) and image.size != (0, 0):
+        use_image = True
+        width, height = image.size
+        ratio = np.sqrt(1024. * 1024. / (width * height))
+        new_width, new_height = int(width * ratio), int(height * ratio)
+        image = image.resize((new_width, new_height))
+        print(image)
+    else:
+        use_image = False
 
-    width, height = image['composite'].size
-    ratio = np.sqrt(1024. * 1024. / (width * height))
-    new_width, new_height = int(width * ratio), int(height * ratio)
-    image = image['composite'].resize((new_width, new_height))
-
-    print(image)
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    controlnet_img = image
-
     generator = torch.Generator().manual_seed(seed)
 
-    output_image = pipe(
-        prompt=prompt + ", masterpiece, best quality, very aesthetic, absurdres",
-        negative_prompt=negative_prompt,
-        image=image,
-        controlnet_conditioning_scale=1.0,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=new_width,
-        height=new_height,
-        generator=generator
-    ).images[0]
+    if use_image:
+        output_image = pipe(
+            prompt=prompt + ", masterpiece, best quality, very aesthetic, absurdres",
+            negative_prompt=negative_prompt,
+            image=image,
+            controlnet_conditioning_scale=1.0,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=new_width,
+            height=new_height,
+            generator=generator
+        ).images[0]
+    else:
+        # If no valid image is provided, generate an image based only on the text prompt
+        output_image = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
 
     return output_image
 
+
 css = """
 #col-container {
     margin: 0 auto;
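
A note on the resizing step the new image branch keeps from the old code: ratio = np.sqrt(1024. * 1024. / (width * height)) rescales the uploaded image so its pixel area lands near 1024x1024 while preserving the aspect ratio, and the resulting new_width/new_height are what get passed to the pipeline. The following is a minimal, self-contained sketch of just that step; the helper name resize_to_target_area and the example dimensions are illustrative and not part of app.py.

import numpy as np
from PIL import Image

def resize_to_target_area(image: Image.Image, target_area: int = 1024 * 1024) -> Image.Image:
    # Scale factor that maps the current area (width * height) to roughly target_area;
    # applying the same factor to both sides keeps the aspect ratio unchanged.
    width, height = image.size
    ratio = np.sqrt(target_area / (width * height))
    new_width, new_height = int(width * ratio), int(height * ratio)
    return image.resize((new_width, new_height))

# Example: a 2000x1500 (3 MP) input comes out at roughly 1182x886 (~1.05 MP).
print(resize_to_target_area(Image.new("RGB", (2000, 1500))).size)

Since int() truncates, the rescaled area can land slightly under the target; the diff keeps that behavior unchanged.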