import gradio as gr
from urllib.parse import urlparse
import requests
import time
import os
import re
from gradio_client import Client
import torch
from transformers import pipeline

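# Curated, safe-for-work prompt choices offered in the shared public Space, where free-form prompts are disabled.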
safe_prompts = [
    "a person, dark blue suit, black tie, white pocket",
    "a person, light gray suit, navy tie, white shirt",
    "a person, beige blazer, white turtleneck, brown slacks",
    "a person, dark green jacket, patterned scarf, black trousers",
    "a person, formal black suit, red tie, white shirt",
    "a person, navy blazer, light blue shirt, no tie",
    "a person, charcoal suit, silver tie, matching vest",
    "a person, tan trench coat, plaid scarf, gloves",
    "a person, white shirt, navy vest, black pants",
    "a person, casual sweater, collared shirt, khaki pants",
    "a person, elegant tuxedo, bow tie, white gloves",
    "a person, dark brown suit, olive tie, pocket square",
    "a person, white t-shirt, denim jacket, black jeans",
    "a person, long red coat, black scarf, winter boots",
    "a person, floral summer dress, sun hat, sandals",
    "a person, athletic hoodie, running pants, sneakers",
    "a person, yellow raincoat, rubber boots, umbrella",
    "a person, black leather jacket, white t-shirt, dark jeans",
    "a person, pastel sweater, pleated skirt, ankle boots",
    "a person, traditional kimono, obi belt, white socks",
    "a person, business dress, black heels, pearl necklace",
    "a person, beige cardigan, striped shirt, blue jeans",
    "a person, safari jacket, khaki shorts, hiking boots",
    "a person, black sweater, long coat, gray trousers"
]

additional_safe_prompts = [
    "a person, white blouse, navy skirt, black flats",
    "a person, denim overalls, striped shirt, sneakers",
    "a person, green parka, wool scarf, winter boots",
    "a person, silk shirt, high-waisted trousers, loafers",
    "a person, traditional sari, gold bangles, sandals",
    "a person, lab coat, dress shirt, slacks",
    "a person, chef uniform, apron, white hat",
    "a person, flight attendant uniform, scarf, heels",
    "a person, construction vest, hard hat, work boots",
    "a person, navy sweater, corduroy pants, brown boots",
    "a person, red hoodie, jeans, running shoes",
    "a person, black tunic, leggings, sandals",
    "a person, plaid shirt, jeans, cowboy boots",
    "a person, white jumpsuit, belt, ankle boots",
    "a person, vintage dress, pearl earrings, pumps",
    "a person, medical scrubs, sneakers, ID badge",
    "a person, blue windbreaker, track pants, sneakers",
    "a person, bohemian blouse, long skirt, sandals",
    "a person, blazer, graphic tee, ripped jeans",
    "a person, puffer jacket, beanie, hiking shoes"
]

more_safe_prompts = [
    "a person, olive green jumpsuit, white sneakers, aviator sunglasses",
    "a person, oversized sweater, leggings, knee-high boots",
    "a person, red blazer, white blouse, black trousers",
    "a person, short-sleeve shirt, cargo pants, hiking boots",
    "a person, pinstripe suit, maroon tie, dress shoes",
    "a person, checkered dress, denim jacket, ankle boots",
    "a person, hooded parka, snow pants, winter gloves",
    "a person, silk scarf, long coat, leather gloves",
    "a person, floral print shirt, beige trousers, sandals",
    "a person, sports jersey, athletic shorts, running shoes",
    "a person, fisherman sweater, beanie, waterproof boots",
    "a person, vest, flannel shirt, hiking pants",
    "a person, polo shirt, chinos, loafers",
    "a person, elegant gown, clutch bag, heels",
    "a person, lightweight jacket, white jeans, sneakers",
    "a person, black hoodie, cargo pants, sneakers",
    "a person, striped sweater, denim skirt, tights",
    "a person, leather trench coat, turtleneck, boots",
    "a person, yellow sundress, straw hat, sandals",
    "a person, traditional hanbok, embroidered top, ribbon belt",
    "a person, navy cardigan, floral blouse, jeans",
    "a person, formal coat, scarf, dress shoes",
    "a person, sweater vest, dress shirt, pleated pants",
    "a person, short-sleeved dress, flats, handbag",
    "a person, zip-up fleece, track pants, sneakers",
    "a person, apron, collared shirt, khakis",
    "a person, cultural dashiki, fitted pants, sandals",
    "a person, business jacket, turtleneck, slacks",
    "a person, rain poncho, hiking pants, waterproof shoes",
    "a person, dark red blazer, black shirt, brown pants"
]

safe_prompts += additional_safe_prompts
safe_prompts += more_safe_prompts


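# Zephyr-7B-beta is used as a lightweight moderation model; its system prompt is read from the SAFETY_PROMPT environment variable.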
pipe_safety = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")

agent_maker_sys = os.environ.get("SAFETY_PROMPT")

instruction = f"""
<|system|>
{agent_maker_sys}</s>
<|user|>
"""

def safety_check(user_prompt):
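    """Ask the moderation model to judge `user_prompt` and return its cleaned text verdict."""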
    
    prompt = f"{instruction.strip()}\n'{user_prompt}'</s>"   
    print(f"""

    USER PROMPT: {user_prompt}
    """)
    
    outputs = pipe_safety(prompt, max_new_tokens=256, do_sample=True, temperature=0.3, top_k=50, top_p=0.95)
    

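    # Strip the echoed system/user portion of the generation so only the model's verdict remains.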
    pattern = r'\<\|system\|\>(.*?)\<\|assistant\|\>'
    cleaned_text = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)
    
    print(f"""

    SAFETY COUNCIL: {cleaned_text}
    
    """)
    
    return cleaned_text.lstrip("\n")


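# Restrict the UI when running in the original shared Space; duplicated Spaces get full access.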
is_shared_ui = "fffiloni/consistent-character" in os.environ.get("SPACE_ID", "")
    
from utils.gradio_helpers import parse_outputs, process_outputs

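# Input names, in the order expected by the Cog prediction payload.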
names = ['prompt', 'negative_prompt', 'subject', 'number_of_outputs', 'number_of_images_per_pose', 'randomise_poses', 'output_format', 'output_quality', 'seed']

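# Forward the UI inputs to the local Cog container and poll until the prediction completes.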
def predict(request: gr.Request, *args, progress=gr.Progress(track_tqdm=True)):
    print(f"""
        USER PROMPT: {args[0]}
        """)
    if args[0] is None or args[0] == '':
        raise gr.Error("You forgot to provide a prompt.")
    
    try:
        if is_shared_ui:

            is_safe = safety_check(args[0])
            print(is_safe)
    
            match = re.search(r'\bYes\b', is_safe)
    
            if match:
                status = 'Yes'
            else:
                status = None
        else:
            status = None
    
        if status == "Yes" :
            raise gr.Error("Do not ask for such things.")
        else:
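            # Build the JSON payload for the local Cog /predictions endpoint;
            # file inputs are exposed to it through Gradio's file-serving route.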
    
            headers = {'Content-Type': 'application/json'}
    
            payload = {"input": {}}
        
        
            base_url = "http://0.0.0.0:7860"
            for i, key in enumerate(names):
                value = args[i]
                if value and (os.path.exists(str(value))):
                    value = f"{base_url}/gradio_api/file=" + value
                if value is not None and value != "":
                    payload["input"][key] = value
    
            response = requests.post("http://0.0.0.0:5000/predictions", headers=headers, json=payload)
    
        
            if response.status_code == 201:
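                # 201 Created: poll the prediction URL until it succeeds or fails.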
                follow_up_url = response.json()["urls"]["get"]
                response = requests.get(follow_up_url, headers=headers)
                while response.json()["status"] != "succeeded":
                    if response.json()["status"] == "failed":
                        raise gr.Error("The submission failed!")
                    response = requests.get(follow_up_url, headers=headers)
                    time.sleep(1)
            if response.status_code == 200:
                json_response = response.json()
                # If the output component is JSON, return the entire output response
                if outputs[0].get_config()["name"] == "json":
                    return json_response["output"]
                predict_outputs = parse_outputs(json_response["output"])
                processed_outputs = process_outputs(predict_outputs)        
                return tuple(processed_outputs) if len(processed_outputs) > 1 else processed_outputs[0]
            else:
                if response.status_code == 409:
                    raise gr.Error("Sorry, the Cog image is still processing. Try again in a bit.")
                raise gr.Error(f"The submission failed! Error: {response.status_code}")

    except Exception as e:
        # Handle any other type of error
        raise gr.Error(f"An error occurred: {e}")

title = "Demo for consistent-character cog image by fofr"
description = "Create images of a given character in different poses • running cog image by fofr"

css="""
#col-container{
    margin: 0 auto;
    max-width: 1400px;
    text-align: left;
}
"""
with gr.Blocks(css=css) as app:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Consistent Character Workflow")
        gr.Markdown("### Create images of a given character in different poses • running cog image by fofr")
        
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href="https://huggingface.co/spaces/fffiloni/consistent-character?duplicate=true">
				<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
			</a>
            <p>to skip the queue and use custom prompts</p>
        </div>
        """)

        with gr.Row():
            with gr.Column(scale=2):
                if is_shared_ui:
                    prompt = gr.Dropdown(
                        label="Prompt", info='''Duplicate the space to you personal account for custom prompt''',
                        choices=safe_prompts,
                        value="a person, dark blue suit, black tie, white pocket",
                        interactive=True,
                        allow_custom_value=False
                    )
                else:
                    prompt = gr.Textbox(
                        label="Prompt", info='''Describe the subject. Include clothes and hairstyle for more consistency.''',
                        value="a person, darkblue suit, black tie, white pocket",
                        interactive=True
                    )
        
                subject = gr.Image(
                    label="Subject", type="filepath"
                )

                submit_btn = gr.Button("Submit")

                with gr.Accordion(label="Advanced Settings", open=False):
                    
                    negative_prompt = gr.Textbox(
                        label="Negative Prompt", info='''Things you do not want to see in your image''',
                        value="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry",
                        interactive=not is_shared_ui
                    )

                    with gr.Row():

                        number_of_outputs = gr.Slider(
                            label="Number Of Outputs", info='''The number of images to generate.''', value=4,
                            minimum=1, maximum=4, step=1,
                        )
                        
                        number_of_images_per_pose = gr.Slider(
                            label="Number Of Images Per Pose", info='''The number of images to generate for each pose.''', value=1,
                            minimum=1, maximum=4, step=1,
                        )

                    with gr.Row():
                        
                        randomise_poses = gr.Checkbox(
                            label="Randomise Poses", info='''Randomise the poses used.''', value=True
                        )
                        
                        output_format = gr.Dropdown(
                            choices=['webp', 'jpg', 'png'], label="output_format", info='''Format of the output images''', value="webp"
                        )
                    
                    with gr.Row():
                        
                        output_quality = gr.Number(
                            label="Output Quality", info='''Quality of the output images, from 0 to 100. 100 is best quality, 0 is lowest quality.''', value=80
                        )
                        
                        seed = gr.Number(
                            label="Seed", info='''Set a seed for reproducibility. Random by default.''', value=None
                        )

            with gr.Column(scale=3):
                consistent_results = gr.Gallery(label="Consistent Results")

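    # Positional inputs must match the order of `names` used when building the Cog payload.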
    inputs = [prompt, negative_prompt, subject, number_of_outputs, number_of_images_per_pose, randomise_poses, output_format, output_quality, seed]
    outputs = [consistent_results]

    submit_btn.click(
        fn = predict,
        inputs = inputs,
        outputs = outputs,
        show_api = False
    )

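# Queue requests and launch, keeping the API surface closed for this demo.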
app.queue(max_size=12, api_open=False).launch(share=False, show_api=False, show_error=True)