hatmanstack committed on
Commit
911f98c
1 Parent(s): fbc9e28

GuardRails

Browse files
Files changed (4) hide show
  1. app.py +30 -7
  2. functions.py +27 -3
  3. generate.py +110 -7
  4. requirements.txt +2 -1
app.py CHANGED
@@ -44,10 +44,11 @@ with gr.Blocks() as demo:
44
  """)
45
  prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
46
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
 
47
  output = gr.Image()
48
  with gr.Accordion("Advanced Options", open=False):
49
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
50
- gr.Button("Generate").click(text_to_image, inputs=[prompt, negative_text, height, width, quality, cfg_scale, seed], outputs=output)
51
 
52
  with gr.Tab("Inpainting"):
53
  with gr.Column():
@@ -65,11 +66,12 @@ with gr.Blocks() as demo:
65
  mask_prompt = gr.Textbox(label="Mask Prompt", placeholder="Describe regions to edit", max_lines=1)
66
  with gr.Accordion("Mask Image", open=False):
67
  mask_image = gr.Image(type='pil', label="Mask Image")
 
68
  output = gr.Image()
69
  with gr.Accordion("Advanced Options", open=False):
70
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
71
 
72
- gr.Button("Generate").click(inpainting, inputs=[image, mask_prompt, mask_image, prompt, negative_text, height, width, quality, cfg_scale, seed], outputs=output)
73
 
74
  with gr.Tab("Outpainting"):
75
  with gr.Column():
@@ -88,12 +90,13 @@ with gr.Blocks() as demo:
88
  mask_prompt = gr.Textbox(label="Mask Prompt", placeholder="Describe regions to edit", max_lines=1)
89
  with gr.Accordion("Mask Image", open=False):
90
  mask_image = gr.Image(type='pil', label="Mask Image")
 
91
  output = gr.Image()
92
  with gr.Accordion("Advanced Options", open=False):
93
  outpainting_mode = gr.Radio(choices=["DEFAULT", "PRECISE"], value="DEFAULT", label="Outpainting Mode")
94
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
95
 
96
- gr.Button("Generate").click(outpainting, inputs=[image, mask_prompt, mask_image, prompt, negative_text, outpainting_mode, height, width, quality, cfg_scale, seed], outputs=output)
97
 
98
  with gr.Tab("Image Variation"):
99
  with gr.Column():
@@ -106,12 +109,13 @@ with gr.Blocks() as demo:
106
  with gr.Accordion("Optional Prompt", open=False):
107
  prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
108
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
 
109
  output = gr.Image()
110
  with gr.Accordion("Advanced Options", open=False):
111
  similarity_strength = gr.Slider(minimum=0.2, maximum=1.0, step=0.1, value=0.7, label="Similarity Strength")
112
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
113
 
114
- gr.Button("Generate").click(image_variation, inputs=[images, prompt, negative_text, similarity_strength, height, width, quality, cfg_scale, seed], outputs=output)
115
 
116
  with gr.Tab("Image Conditioning"):
117
  with gr.Column():
@@ -125,12 +129,13 @@ with gr.Blocks() as demo:
125
  condition_image = gr.Image(type='pil', label="Condition Image")
126
  prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
127
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
 
128
  output = gr.Image()
129
  with gr.Accordion("Advanced Options", open=False):
130
  control_mode = gr.Radio(choices=["CANNY_EDGE", "SEGMENTATION"], value="CANNY_EDGE", label="Control Mode")
131
  control_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Control Strength")
132
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
133
- gr.Button("Generate").click(image_conditioning, inputs=[condition_image, prompt, negative_text, control_mode, control_strength, height, width, quality, cfg_scale, seed], outputs=output)
134
 
135
  with gr.Tab("Color Guided Content"):
136
  with gr.Column():
@@ -145,10 +150,11 @@ with gr.Blocks() as demo:
145
  with gr.Accordion("Optional Prompt", open=False):
146
  prompt = gr.Textbox(label="Text", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
147
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
 
148
  output = gr.Image()
149
  with gr.Accordion("Advanced Options", open=False):
150
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
151
- gr.Button("Generate").click(color_guided_content, inputs=[prompt, reference_image, negative_text, colors, height, width, quality, cfg_scale, seed], outputs=output)
152
 
153
  with gr.Tab("Background Removal"):
154
  with gr.Column():
@@ -158,13 +164,30 @@ with gr.Blocks() as demo:
158
  </div>
159
  """)
160
  image = gr.Image(type='pil', label="Input Image")
 
161
  output = gr.Image()
162
- gr.Button("Generate").click(background_removal, inputs=image, outputs=output)
163
 
164
  with gr.Accordion("Tips", open=False):
165
  gr.Markdown("On Inference Speed: Resolution (width/height), and quality all have an impact on Inference Speed.")
166
  gr.Markdown("On Negation: For example, consider the prompt \"a rainy city street at night with no people\". The model might interpret \"people\" as a directive of what to include instead of omit. To generate better results, you could use the prompt \"a rainy city street at night\" with a negative prompt \"people\".")
167
  gr.Markdown("On Prompt Length: When diffusion models were first introduced, they could process only 77 tokens. While new techniques have extended this limit, they remain bound by their training data. AWS Nova Canvas limits input by character length instead, ensuring no characters beyond the set limit are considered in the generated model.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
  if __name__ == "__main__":
169
  demo.launch()
170
 
 
44
  """)
45
  prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
46
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
47
+ error_box = gr.Markdown(visible=False, label="Error")
48
  output = gr.Image()
49
  with gr.Accordion("Advanced Options", open=False):
50
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
51
+ gr.Button("Generate").click(text_to_image, inputs=[prompt, negative_text, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
52
 
53
  with gr.Tab("Inpainting"):
54
  with gr.Column():
 
66
  mask_prompt = gr.Textbox(label="Mask Prompt", placeholder="Describe regions to edit", max_lines=1)
67
  with gr.Accordion("Mask Image", open=False):
68
  mask_image = gr.Image(type='pil', label="Mask Image")
69
+ error_box = gr.Markdown(visible=False, label="Error")
70
  output = gr.Image()
71
  with gr.Accordion("Advanced Options", open=False):
72
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
73
 
74
+ gr.Button("Generate").click(inpainting, inputs=[image, mask_prompt, mask_image, prompt, negative_text, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
75
 
76
  with gr.Tab("Outpainting"):
77
  with gr.Column():
 
90
  mask_prompt = gr.Textbox(label="Mask Prompt", placeholder="Describe regions to edit", max_lines=1)
91
  with gr.Accordion("Mask Image", open=False):
92
  mask_image = gr.Image(type='pil', label="Mask Image")
93
+ error_box = gr.Markdown(visible=False, label="Error")
94
  output = gr.Image()
95
  with gr.Accordion("Advanced Options", open=False):
96
  outpainting_mode = gr.Radio(choices=["DEFAULT", "PRECISE"], value="DEFAULT", label="Outpainting Mode")
97
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
98
 
99
+ gr.Button("Generate").click(outpainting, inputs=[image, mask_prompt, mask_image, prompt, negative_text, outpainting_mode, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
100
 
101
  with gr.Tab("Image Variation"):
102
  with gr.Column():
 
109
  with gr.Accordion("Optional Prompt", open=False):
110
  prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
111
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
112
+ error_box = gr.Markdown(visible=False, label="Error")
113
  output = gr.Image()
114
  with gr.Accordion("Advanced Options", open=False):
115
  similarity_strength = gr.Slider(minimum=0.2, maximum=1.0, step=0.1, value=0.7, label="Similarity Strength")
116
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
117
 
118
+ gr.Button("Generate").click(image_variation, inputs=[images, prompt, negative_text, similarity_strength, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
119
 
120
  with gr.Tab("Image Conditioning"):
121
  with gr.Column():
 
129
  condition_image = gr.Image(type='pil', label="Condition Image")
130
  prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
131
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
132
+ error_box = gr.Markdown(visible=False, label="Error")
133
  output = gr.Image()
134
  with gr.Accordion("Advanced Options", open=False):
135
  control_mode = gr.Radio(choices=["CANNY_EDGE", "SEGMENTATION"], value="CANNY_EDGE", label="Control Mode")
136
  control_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Control Strength")
137
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
138
+ gr.Button("Generate").click(image_conditioning, inputs=[condition_image, prompt, negative_text, control_mode, control_strength, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
139
 
140
  with gr.Tab("Color Guided Content"):
141
  with gr.Column():
 
150
  with gr.Accordion("Optional Prompt", open=False):
151
  prompt = gr.Textbox(label="Text", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
152
  gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
153
+ error_box = gr.Markdown(visible=False, label="Error")
154
  output = gr.Image()
155
  with gr.Accordion("Advanced Options", open=False):
156
  negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
157
+ gr.Button("Generate").click(color_guided_content, inputs=[prompt, reference_image, negative_text, colors, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
158
 
159
  with gr.Tab("Background Removal"):
160
  with gr.Column():
 
164
  </div>
165
  """)
166
  image = gr.Image(type='pil', label="Input Image")
167
+ error_box = gr.Markdown(visible=False, label="Error")
168
  output = gr.Image()
169
+ gr.Button("Generate").click(background_removal, inputs=image, outputs=[output, error_box])
170
 
171
  with gr.Accordion("Tips", open=False):
172
  gr.Markdown("On Inference Speed: Resolution (width/height), and quality all have an impact on Inference Speed.")
173
  gr.Markdown("On Negation: For example, consider the prompt \"a rainy city street at night with no people\". The model might interpret \"people\" as a directive of what to include instead of omit. To generate better results, you could use the prompt \"a rainy city street at night\" with a negative prompt \"people\".")
174
  gr.Markdown("On Prompt Length: When diffusion models were first introduced, they could process only 77 tokens. While new techniques have extended this limit, they remain bound by their training data. AWS Nova Canvas limits input by character length instead, ensuring no characters beyond the set limit are considered in the generated model.")
175
+
176
+ gr.Markdown("""
177
+ <div style="text-align: center;">
178
+ <h1>Sample Prompts and Results</h1>
179
+ </div>
180
+
181
+
182
+ | Example | Prompt |
183
+ |:-------:|:-------|
184
+ | <img src='examples/sample2.png' width='200'> | A whimsical outdoor scene where vibrant flowers and sprawling vines, crafted from an array of colorful fruit leathers and intricately designed candies, flutter with delicate, lifelike butterflies made from translucent, shimmering sweets. Each petal and leaf glistens with a soft, sugary sheen, casting playful reflections. The butterflies, with their candy wings adorned in fruity patterns, flit about, creating a magical, edible landscape that delights the senses. |
185
+ | <img src='examples/sample4.png' width='200'> | A rugged adventurer's ensemble, crafted for the wild, featuring a khaki jacket adorned with numerous functional pockets, a sun-bleached pith hat with a wide brim, sturdy canvas trousers with reinforced knees, and a pair of weathered leather boots with high-traction soles. Accented with a brass compass pendant and a leather utility belt laden with small tools, the outfit is completed by a pair of aviator sunglasses and a weathered map tucked into a side pocket. |
186
+
187
+ """)
188
+
189
+
190
+
191
  if __name__ == "__main__":
192
  demo.launch()
193
 
functions.py CHANGED
@@ -1,13 +1,21 @@
1
  import json
2
  import io
3
  import random
 
4
  from PIL import Image
5
  from generate import *
6
  from typing import Dict, Any
7
 
8
  def display_image(image_bytes):
9
- image = Image.open(io.BytesIO(image_bytes))
10
- return image
 
 
 
 
 
 
 
11
 
12
  def process_optional_params(**kwargs) -> Dict[str, Any]:
13
  return {k: v for k, v in kwargs.items() if v is not None}
@@ -60,7 +68,9 @@ def text_to_image(prompt, negative_text=None, height=1024, width=1024, quality="
60
 
61
  def inpainting(image, mask_prompt=None, mask_image=None, text=None, negative_text=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
62
  images = process_images(primary=image, secondary=None)
63
-
 
 
64
  # Prepare the inPaintingParams dictionary
65
  if mask_prompt and mask_image:
66
  raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
@@ -80,6 +90,9 @@ def inpainting(image, mask_prompt=None, mask_image=None, text=None, negative_tex
80
 
81
  def outpainting(image, mask_prompt=None, mask_image=None, text=None, negative_text=None, outpainting_mode="DEFAULT", height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
82
  images = process_images(primary=image, secondary=None)
 
 
 
83
 
84
  if mask_prompt and mask_image:
85
  raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
@@ -117,6 +130,9 @@ def image_variation(images, text=None, negative_text=None, similarity_strength=0
117
 
118
  def image_conditioning(condition_image, text, negative_text=None, control_mode="CANNY_EDGE", control_strength=0.7, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
119
  condition_image_encoded = process_images(primary=condition_image)
 
 
 
120
  # Prepare the textToImageParams dictionary
121
  text_to_image_params = {
122
  "text": text,
@@ -131,6 +147,10 @@ def image_conditioning(condition_image, text, negative_text=None, control_mode="
131
  def color_guided_content(text=None, reference_image=None, negative_text=None, colors=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
132
  # Encode the reference image if provided
133
  reference_image_encoded = process_images(primary=reference_image)
 
 
 
 
134
  if not colors:
135
  colors = "#FF5733,#33FF57,#3357FF,#FF33A1,#33FFF5,#FF8C33,#8C33FF,#33FF8C,#FF3333,#33A1FF"
136
 
@@ -146,6 +166,10 @@ def color_guided_content(text=None, reference_image=None, negative_text=None, co
146
 
147
  def background_removal(image):
148
  input_image = process_and_encode_image(image)
 
 
 
 
149
  body = json.dumps({
150
  "taskType": "BACKGROUND_REMOVAL",
151
  "backgroundRemovalParams": {"image": input_image}
 
1
  import json
2
  import io
3
  import random
4
+ import gradio as gr
5
  from PIL import Image
6
  from generate import *
7
  from typing import Dict, Any
8
 
9
def display_image(image_bytes):
    """Convert a handler result into a (image, error-box update) pair for Gradio.

    A ``str`` payload is treated as an error message and surfaced in the
    error box; raw bytes are decoded into a PIL image with the error box
    hidden; a falsy payload clears both outputs.
    """
    if isinstance(image_bytes, str):
        # Error message from the backend: show it, no image.
        return None, gr.update(visible=True, value=image_bytes)
    if image_bytes:
        # Raw image bytes: decode for display and hide any prior error.
        return Image.open(io.BytesIO(image_bytes)), gr.update(visible=False)
    # Nothing to display.
    return None, gr.update(visible=False)
19
 
20
  def process_optional_params(**kwargs) -> Dict[str, Any]:
21
  return {k: v for k, v in kwargs.items() if v is not None}
 
68
 
69
  def inpainting(image, mask_prompt=None, mask_image=None, text=None, negative_text=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
70
  images = process_images(primary=image, secondary=None)
71
+ for value in images.values():
72
+ if isinstance(value, str) and "Not Appropriate" in value:
73
+ return None, gr.update(visible=True, value="Image <b>Not Appropriate</b>")
74
  # Prepare the inPaintingParams dictionary
75
  if mask_prompt and mask_image:
76
  raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
 
90
 
91
  def outpainting(image, mask_prompt=None, mask_image=None, text=None, negative_text=None, outpainting_mode="DEFAULT", height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
92
  images = process_images(primary=image, secondary=None)
93
+ for value in images.values():
94
+ if isinstance(value, str) and "Not Appropriate" in value:
95
+ return None, gr.update(visible=True, value="Image <b>Not Appropriate</b>")
96
 
97
  if mask_prompt and mask_image:
98
  raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
 
130
 
131
  def image_conditioning(condition_image, text, negative_text=None, control_mode="CANNY_EDGE", control_strength=0.7, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
132
  condition_image_encoded = process_images(primary=condition_image)
133
+ for value in condition_image_encoded.values():
134
+ if isinstance(value, str) and "Not Appropriate" in value:
135
+ return None, gr.update(visible=True, value="Image <b>Not Appropriate</b>")
136
  # Prepare the textToImageParams dictionary
137
  text_to_image_params = {
138
  "text": text,
 
147
  def color_guided_content(text=None, reference_image=None, negative_text=None, colors=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
148
  # Encode the reference image if provided
149
  reference_image_encoded = process_images(primary=reference_image)
150
+ for value in reference_image_encoded.values():
151
+ if isinstance(value, str) and "Not Appropriate" in value:
152
+ return None, gr.update(visible=True, value="Image <b>Not Appropriate</b>")
153
+
154
  if not colors:
155
  colors = "#FF5733,#33FF57,#3357FF,#FF33A1,#33FFF5,#FF8C33,#8C33FF,#33FF8C,#FF3333,#33A1FF"
156
 
 
166
 
167
  def background_removal(image):
168
  input_image = process_and_encode_image(image)
169
+ for value in input_image.values():
170
+ if isinstance(value, str) and "Not Appropriate" in value:
171
+ return None, gr.update(visible=True, value="Image <b>Not Appropriate</b>")
172
+
173
  body = json.dumps({
174
  "taskType": "BACKGROUND_REMOVAL",
175
  "backgroundRemovalParams": {"image": input_image}
generate.py CHANGED
@@ -4,6 +4,8 @@ import boto3
4
  import json
5
  import logging
6
  import io
 
 
7
  from datetime import datetime
8
  from dotenv import load_dotenv
9
  from PIL import Image
@@ -46,6 +48,8 @@ config = ImageConfig()
46
  model_id = 'amazon.nova-canvas-v1:0'
47
  aws_id = os.getenv('AWS_ID')
48
  aws_secret = os.getenv('AWS_SECRET')
 
 
49
  nova_image_bucket='nova-image-data'
50
  bucket_region='us-west-2'
51
 
@@ -59,6 +63,42 @@ class ImageProcessor:
59
  raise ValueError("Input image is required.")
60
  return Image.open(image) if not isinstance(image, Image.Image) else image
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  def _convert_color_mode(self):
63
  """Handle color mode conversion."""
64
  if self.image.mode not in ('RGB', 'RGBA'):
@@ -106,6 +146,7 @@ class ImageProcessor:
106
  ._convert_color_mode()
107
  ._resize_for_pixels(max_pixels)
108
  ._ensure_dimensions(min_size, max_size)
 
109
  .encode())
110
 
111
  # Function to generate an image using Amazon Nova Canvas model
@@ -203,18 +244,80 @@ class BedrockClient:
203
 
204
  raise ImageError("Unexpected response format.")
205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206
  def process_and_encode_image(image, **kwargs):
207
  """Process and encode image with default parameters."""
208
- return ImageProcessor(image).process(**kwargs)
 
 
 
 
209
 
210
  def generate_image(body):
211
  """Generate image using Bedrock service."""
212
- client = BedrockClient(
213
- aws_id=os.getenv('AWS_ID'),
214
- aws_secret=os.getenv('AWS_SECRET'),
215
- model_id='amazon.nova-canvas-v1:0'
216
- )
217
- return client.generate_image(body)
 
 
 
 
 
218
 
219
  def generate_prompt(body):
220
  client = BedrockClient(
 
4
  import json
5
  import logging
6
  import io
7
+ import time
8
+ import requests
9
  from datetime import datetime
10
  from dotenv import load_dotenv
11
  from PIL import Image
 
48
  model_id = 'amazon.nova-canvas-v1:0'
49
  aws_id = os.getenv('AWS_ID')
50
  aws_secret = os.getenv('AWS_SECRET')
51
+ token = os.environ.get("HF_TOKEN")
52
+ headers = {"Authorization": f"Bearer {token}", "x-use-cache": "0", 'Content-Type': 'application/json'}
53
  nova_image_bucket='nova-image-data'
54
  bucket_region='us-west-2'
55
 
 
63
  raise ValueError("Input image is required.")
64
  return Image.open(image) if not isinstance(image, Image.Image) else image
65
 
66
+ def _check_nsfw(self, attempts=1):
67
+ """Check if image is NSFW using Hugging Face API."""
68
+ try:
69
+ # Save current image temporarily
70
+ temp_buffer = io.BytesIO()
71
+ self.image.save(temp_buffer, format='PNG')
72
+ temp_buffer.seek(0)
73
+
74
+ API_URL = "https://api-inference.huggingface.co/models/Falconsai/nsfw_image_detection"
75
+ response = requests.request("POST", API_URL, headers=headers, data=temp_buffer.getvalue())
76
+ decoded_response = response.content.decode("utf-8")
77
+
78
+ json_response = json.loads(decoded_response)
79
+
80
+ if "error" in json_response:
81
+ time.sleep(json_response["estimated_time"])
82
+ return self._check_nsfw(attempts+1)
83
+
84
+ scores = {item['label']: item['score'] for item in json_response}
85
+ nsfw_score = scores.get('nsfw', 0)
86
+ print(f"NSFW Score: {nsfw_score}")
87
+
88
+ if nsfw_score > 0.1:
89
+ raise ImageError("Image <b>Not Appropriate</b>")
90
+
91
+ return self
92
+
93
+ except json.JSONDecodeError as e:
94
+ print(f'JSON Decoding Error: {e}')
95
+ raise ImageError("NSFW check failed")
96
+ except Exception as e:
97
+ print(f'NSFW Check Error: {e}')
98
+ if attempts > 30:
99
+ raise ImageError("NSFW check failed after multiple attempts")
100
+ return self._check_nsfw(attempts+1)
101
+
102
  def _convert_color_mode(self):
103
  """Handle color mode conversion."""
104
  if self.image.mode not in ('RGB', 'RGBA'):
 
146
  ._convert_color_mode()
147
  ._resize_for_pixels(max_pixels)
148
  ._ensure_dimensions(min_size, max_size)
149
+ ._check_nsfw() # Add NSFW check before encoding
150
  .encode())
151
 
152
  # Function to generate an image using Amazon Nova Canvas model
 
244
 
245
  raise ImageError("Unexpected response format.")
246
 
247
def check_rate_limit(body):
    """Enforce a rolling per-quality rate limit, persisted as JSON in S3.

    Within a 20-minute window, at most 2 "premium" and 4 "standard"
    generations are allowed.

    Args:
        body: JSON request string; quality is read from
            imageGenerationConfig.quality (defaults to "standard").

    Raises:
        ImageError: when the applicable limit is exceeded, or the rate-limit
            file cannot be read for a reason other than not existing yet.
    """
    quality = json.loads(body).get('imageGenerationConfig', {}).get('quality', 'standard')

    s3_client = boto3.client(
        service_name='s3',
        aws_access_key_id=os.getenv('AWS_ID'),
        aws_secret_access_key=os.getenv('AWS_SECRET'),
        region_name=bucket_region
    )

    try:
        response = s3_client.get_object(
            Bucket=nova_image_bucket,
            Key='rate-limit/jsonData.json'
        )
        rate_data = json.loads(response['Body'].read().decode('utf-8'))
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchKey':
            # First run: no rate-limit file yet.
            rate_data = {'premium': [], 'standard': []}
        else:
            raise ImageError(f"Failed to check rate limit: {str(e)}")

    # Keep only requests from the last 20 minutes (1200 s).
    # (Fixed: the previous comment claimed a one-minute window.)
    current_time = datetime.now().timestamp()
    window_start = current_time - 1200
    rate_data['premium'] = [t for t in rate_data['premium'] if t > window_start]
    rate_data['standard'] = [t for t in rate_data['standard'] if t > window_start]

    # Apply the limit for the requested tier; anything not "premium" counts
    # as standard, matching the original else-branch behavior.
    limits = {'premium': 2, 'standard': 4}
    tier = quality if quality in limits else 'standard'
    if len(rate_data[tier]) >= limits[tier]:
        raise ImageError(
            f"<div style='text-align: center;'>{tier.capitalize()} rate limit exceeded. "
            "Check back later or use the "
            "<a href='https://docs.aws.amazon.com/bedrock/latest/userguide/playgrounds.html'>Bedrock Playground</a>.</div>"
        )
    rate_data[tier].append(current_time)

    # NOTE(review): this S3 read-modify-write is not atomic; concurrent
    # requests can race and slightly exceed the limit — confirm acceptable.
    s3_client.put_object(
        Bucket=nova_image_bucket,
        Key='rate-limit/jsonData.json',
        Body=json.dumps(rate_data),
        ContentType='application/json'
    )
298
+
299
+
300
def process_and_encode_image(image, **kwargs):
    """Process and encode *image* with default parameters.

    On an ImageError (e.g. an NSFW rejection raised during processing) the
    error text is returned as a string instead of raising, so callers can
    surface it in the UI.
    """
    try:
        return ImageProcessor(image).process(**kwargs)
    except ImageError as exc:
        return str(exc)
307
 
308
def generate_image(body):
    """Generate an image using the Bedrock Nova Canvas service.

    The rate limit is checked first; on any ImageError (rate limit hit or
    generation failure) the error text is returned as a string so the UI
    can display it instead of crashing.
    """
    try:
        check_rate_limit(body)
        bedrock = BedrockClient(
            aws_id=os.getenv('AWS_ID'),
            aws_secret=os.getenv('AWS_SECRET'),
            model_id='amazon.nova-canvas-v1:0'
        )
        return bedrock.generate_image(body)
    except ImageError as exc:
        return str(exc)
320
+
321
 
322
  def generate_prompt(body):
323
  client = BedrockClient(
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  Pillow
2
  boto3
3
  python-dotenv
4
- gradio
 
 
1
  Pillow
2
  boto3
3
  python-dotenv
4
+ gradio
5
+ requests