bram-w committed
Commit c2305a3
1 Parent(s): 390d3d4

description

Files changed (1)
  1. app.py +39 -5
app.py CHANGED
@@ -69,17 +69,14 @@ def edict(x, source_text, edit_text,
 examples = [
     ['square_ims/american_gothic.jpg', 'A painting of two people frowning', 'A painting of two people smiling', 0.5, 3],
     ['square_ims/colloseum.jpg', 'An old ruined building', 'A new modern office building', 0.8, 3],
-    ['square_ims/hf.png', 'Emoji', 'Emoji with a mustache', 0.8, 3],
 ]

-for dog_i in [1, 2]:
-    for breed in ['Golden Retriever', 'Chihuahua', 'Dalmatian']:
-        examples.append([f'square_ims/imagenet_dog_{dog_i}.jpg', 'A dog', f'A {breed}', 0.8, 3])

 examples.append(['square_ims/scream.jpg', 'A painting of someone screaming', 'A painting of an alien', 0.5, 3])
 examples.append(['square_ims/yosemite.jpg', 'Granite forest valley', 'Granite desert valley', 0.8, 3])
 examples.append(['square_ims/einstein.jpg', 'Mouth open', 'Mouth closed', 0.8, 3])
 examples.append(['square_ims/einstein.jpg', 'A man', 'A man in K.I.S.S. facepaint', 0.8, 3])
+"""
 examples.extend([
     ['square_ims/imagenet_cake_2.jpg', 'A cupcake', 'A Chinese New Year cupcake', 0.8, 3],
     ['square_ims/imagenet_cake_2.jpg', 'A cupcake', 'A Union Jack cupcake', 0.8, 3],
@@ -89,6 +86,40 @@ examples.extend([
     ['square_ims/imagenet_cake_2.jpg', 'A cupcake', 'A hedgehog cupcake', 0.8, 3],
     ['square_ims/imagenet_cake_2.jpg', 'A cupcake', 'A rose cupcake', 0.8, 3],
 ])
+"""
+
+for dog_i in [1, 2]:
+    for breed in ['Golden Retriever', 'Chihuahua', 'Dalmatian']:
+        examples.append([f'square_ims/imagenet_dog_{dog_i}.jpg', 'A dog', f'A {breed}', 0.8, 3])
+
+
+description = 'A gradio demo for [EDICT](https://arxiv.org/abs/2211.12446), CVPR23'
+description = gr.Markdown(description)
+
+article = """
+
+### Prompting Style
+
+As with many text-to-image methods, the prompting style of EDICT can make a big difference. When in doubt, experiment! Some guidance that we've found:
+* Parallel the Original and Edit description construction as much as possible. Inserting/editing single words is often enough to effect a change while maintaining a lot of the original structure.
+* Words that affect the entire setting (e.g. "A photo of" vs. "A painting of") can make a big difference. Playing around with them can help a lot.
+
+### Parameters
+Both `edit_strength` and `guidance_scale` have qualitatively similar properties: the higher the value, the more the image will change. We suggest
+* Increasing/decreasing `edit_strength` first, particularly to alter/preserve more of the original structure/content
+* Then changing `guidance_scale` to make the change in the edited region more or less pronounced.
+
+Usually we find changing `edit_strength` to be enough, but feel free to play around (and report any interesting results)!
+
+
+Having difficulty coming up with a caption? Try [BLIP](https://huggingface.co/spaces/Salesforce/BLIP2) to automatically generate one!
+
+As with most StableDiffusion approaches, faces/text are often problematic to render, especially if they're small. Having these in the foreground will help keep them cleaner.
+
+A returned black image means that the [Safety Checker](https://huggingface.co/CompVis/stable-diffusion-safety-checker) triggered on the photo. This happens in odd cases sometimes (it often rejects
+the huggingface logo or variations), but we need to keep it in for obvious reasons.
+"""
+article = gr.Markdown(article)
+
 iface = gr.Interface(fn=edict, inputs=["image",
     gr.Textbox(label="Original Description"),
@@ -99,5 +130,8 @@ iface = gr.Interface(fn=edict, inputs=["image",
     gr.Slider(0, 10, value=3, step=0.5),
     ],
     examples = examples,
-    outputs="image")
+    outputs="image",
+    description=description,
+    article=article,
+    cache_examples=False)
 iface.launch()
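For reference, here is a minimal, self-contained sketch of the interface wiring this commit arrives at. It is not the Space's actual app.py: the real `edict` editing function is defined earlier in the file and is stubbed out here, the input components elided from the hunks above are filled in with assumed labels and ranges, and `description`/`article` are passed as plain markdown strings, which `gr.Interface` accepts directly.

```python
# Minimal sketch of the post-commit layout (not the full app.py).
# Assumptions: the stub `edict` signature mirrors the five Interface inputs,
# the "Edit Description" textbox and edit_strength slider are guesses for the
# inputs elided in the diff, and the square_ims/ images from the Space repo
# must exist locally for the examples to load.
import gradio as gr


def edict(x, source_text, edit_text, edit_strength, guidance_scale):
    # Stand-in for the real EDICT editing routine defined earlier in app.py;
    # it simply returns the input image unchanged.
    return x


examples = [
    ['square_ims/american_gothic.jpg', 'A painting of two people frowning', 'A painting of two people smiling', 0.5, 3],
    ['square_ims/colloseum.jpg', 'An old ruined building', 'A new modern office building', 0.8, 3],
]
for dog_i in [1, 2]:
    for breed in ['Golden Retriever', 'Chihuahua', 'Dalmatian']:
        examples.append([f'square_ims/imagenet_dog_{dog_i}.jpg', 'A dog', f'A {breed}', 0.8, 3])

# gr.Interface renders `description` and `article` as markdown itself,
# so plain strings are enough here.
description = 'A gradio demo for [EDICT](https://arxiv.org/abs/2211.12446), CVPR23'
article = "### Prompting Style\nKeep the Original and Edit descriptions as parallel as possible."

iface = gr.Interface(
    fn=edict,
    inputs=[
        "image",
        gr.Textbox(label="Original Description"),
        gr.Textbox(label="Edit Description"),   # assumed label (elided in the diff)
        gr.Slider(0, 1, value=0.8, step=0.05),  # assumed edit_strength control (elided in the diff)
        gr.Slider(0, 10, value=3, step=0.5),    # guidance scale
    ],
    outputs="image",
    examples=examples,
    description=description,
    article=article,
    cache_examples=False,  # do not pre-run edict on the examples at startup
)

iface.launch()
```

With `cache_examples=False`, gradio does not run the editing function over every example when the app starts, so adding more examples does not slow down the Space's boot.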