SkalskiP committed
Commit d888127 · 1 Parent(s): bb81176
Files changed (1):
  1. app.py +27 -21
app.py CHANGED
@@ -23,8 +23,8 @@ for taking it to the next level by enabling inpainting with the FLUX.
 
 MAX_SEED = np.iinfo(np.int32).max
 IMAGE_SIZE = 1024
-# DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-DEVICE = torch.device("cuda")
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+# DEVICE = torch.device("cpu")
 
 torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
 if torch.cuda.get_device_properties(0).major >= 8:
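
The new DEVICE line falls back to CPU when no GPU is present, while the setup kept as context in this hunk (the CUDA autocast and the get_device_properties(0) query) still assumes CUDA. A minimal device-aware sketch of that setup, not part of this commit:

import torch

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Only enter the CUDA autocast context and query GPU properties when a GPU exists;
# torch.cuda.get_device_properties(0) raises on a CPU-only machine.
if DEVICE.type == "cuda":
    torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
    if torch.cuda.get_device_properties(0).major >= 8:
        ...  # Ampere-or-newer tweaks; the body of this branch is outside the hunk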
@@ -57,6 +57,12 @@ def resize_image_dimensions(
     return new_width, new_height
 
 
+def is_image_empty(image: Image.Image) -> bool:
+    gray_img = image.convert("L")
+    pixels = list(gray_img.getdata())
+    return all(pixel == 0 for pixel in pixels)
+
+
 @spaces.GPU(duration=150)
 @torch.inference_mode()
 @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
@@ -81,11 +87,11 @@ def process(
         gr.Info("Please upload an image.")
         return None, None
 
-    if not mask and not segmentation_prompt_text:
+    if is_image_empty(mask) and not segmentation_prompt_text:
         gr.Info("Please draw a mask or enter a segmentation prompt.")
         return None, None
 
-    if mask and segmentation_prompt_text:
+    if not is_image_empty(mask) and segmentation_prompt_text:
         gr.Info("Both mask and segmentation prompt are provided. Please provide only "
                 "one.")
         return None, None
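
Both checks rely on the new is_image_empty helper: a mask canvas the user never drew on arrives as an all-black image and is now treated as "no mask". A standalone PIL-only sketch of that behaviour (the synthetic masks are illustrative, not from the app):

from PIL import Image, ImageDraw

def is_image_empty(image: Image.Image) -> bool:
    # Same helper as above: a mask counts as empty when every pixel is black.
    gray_img = image.convert("L")
    pixels = list(gray_img.getdata())
    return all(pixel == 0 for pixel in pixels)

blank_mask = Image.new("RGB", (64, 64), "black")                      # untouched canvas
drawn_mask = blank_mask.copy()
ImageDraw.Draw(drawn_mask).rectangle((16, 16, 48, 48), fill="white")  # user-drawn region

print(is_image_empty(blank_mask))  # True  -> falls back to the segmentation prompt check
print(is_image_empty(drawn_mask))  # False -> conflicts if a segmentation prompt is also set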
@@ -122,23 +128,23 @@ def process(
     mask = mask.resize((width, height), Image.LANCZOS)
     mask = mask.filter(ImageFilter.GaussianBlur(radius=10))
 
-    return image, mask
-
-    # if randomize_seed_checkbox:
-    #     seed_slicer = random.randint(0, MAX_SEED)
-    # generator = torch.Generator().manual_seed(seed_slicer)
-    # result = FLUX_INPAINTING_PIPELINE(
-    #     prompt=inpainting_prompt_text,
-    #     image=image,
-    #     mask_image=mask,
-    #     width=width,
-    #     height=height,
-    #     strength=strength_slider,
-    #     generator=generator,
-    #     num_inference_steps=num_inference_steps_slider
-    # ).images[0]
-    # print('INFERENCE DONE')
-    # return result, mask
+    # return image, mask
+
+    if randomize_seed_checkbox:
+        seed_slicer = random.randint(0, MAX_SEED)
+    generator = torch.Generator().manual_seed(seed_slicer)
+    result = FLUX_INPAINTING_PIPELINE(
+        prompt=inpainting_prompt_text,
+        image=image,
+        mask_image=mask,
+        width=width,
+        height=height,
+        strength=strength_slider,
+        generator=generator,
+        num_inference_steps=num_inference_steps_slider
+    ).images[0]
+    print('INFERENCE DONE')
+    return result, mask
 
 
 with gr.Blocks() as demo:
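
The re-enabled block above is the actual inference path. FLUX_INPAINTING_PIPELINE is constructed elsewhere in app.py and is not shown in this diff; the sketch below assumes it is diffusers' FluxInpaintPipeline loaded from black-forest-labs/FLUX.1-dev, so treat the construction and the example values as illustrative only:

import random

import numpy as np
import torch
from diffusers import FluxInpaintPipeline
from PIL import Image

MAX_SEED = np.iinfo(np.int32).max

# Assumed construction; the commit only shows the call, not how the pipeline is built.
pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

image = Image.open("input.png").convert("RGB")
mask = Image.open("mask.png").convert("L")

seed = random.randint(0, MAX_SEED)              # mirrors the randomize-seed branch
generator = torch.Generator().manual_seed(seed)

result = pipe(
    prompt="a red sports car",                  # inpainting_prompt_text in the app
    image=image,
    mask_image=mask,
    width=image.width,
    height=image.height,
    strength=0.85,                              # strength_slider
    generator=generator,
    num_inference_steps=30,                     # num_inference_steps_slider
).images[0]
result.save("result.png")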
 