Cossale committed
Commit dc63c14 · verified · 1 Parent(s): 1766719

remove examples

Files changed (1)
  1. app.py +0 -235
app.py CHANGED
@@ -41,221 +41,6 @@ def generate_image(text, img1, img2, img3, height, width, guidance_scale, img_gu
     img = output[0]
     return img
 
-
-
-def get_example():
-    case = [
-        [
-            "A curly-haired man in a red shirt is drinking tea.",
-            None,
-            None,
-            None,
-            1024,
-            1024,
-            2.5,
-            1.6,
-            0,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "The woman in <img><|image_1|></img> waves her hand happily in the crowd",
-            "./imgs/test_cases/zhang.png",
-            None,
-            None,
-            1024,
-            1024,
-            2.5,
-            1.9,
-            128,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "A man in a black shirt is reading a book. The man is the right man in <img><|image_1|></img>.",
-            "./imgs/test_cases/two_man.jpg",
-            None,
-            None,
-            1024,
-            1024,
-            2.5,
-            1.6,
-            0,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "Two woman are raising fried chicken legs in a bar. A woman is <img><|image_1|></img>. Another woman is <img><|image_2|></img>.",
-            "./imgs/test_cases/mckenna.jpg",
-            "./imgs/test_cases/Amanda.jpg",
-            None,
-            1024,
-            1024,
-            2.5,
-            1.8,
-            65,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "A man and a short-haired woman with a wrinkled face are standing in front of a bookshelf in a library. The man is the man in the middle of <img><|image_1|></img>, and the woman is oldest woman in <img><|image_2|></img>",
-            "./imgs/test_cases/1.jpg",
-            "./imgs/test_cases/2.jpg",
-            None,
-            1024,
-            1024,
-            2.5,
-            1.6,
-            60,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "A man and a woman are sitting at a classroom desk. The man is the man with yellow hair in <img><|image_1|></img>. The woman is the woman on the left of <img><|image_2|></img>",
-            "./imgs/test_cases/3.jpg",
-            "./imgs/test_cases/4.jpg",
-            None,
-            1024,
-            1024,
-            2.5,
-            1.8,
-            66,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "The flower <img><|image_1|></img> is placed in the vase which is in the middle of <img><|image_2|></img> on a wooden table of a living room",
-            "./imgs/test_cases/rose.jpg",
-            "./imgs/test_cases/vase.jpg",
-            None,
-            1024,
-            1024,
-            2.5,
-            1.6,
-            0,
-            1024,
-            False,
-            False,
-        ],
-        [
-            "<img><|image_1|><img>\n Remove the woman's earrings. Replace the mug with a clear glass filled with sparkling iced cola.",
-            "./imgs/demo_cases/t2i_woman_with_book.png",
-            None,
-            None,
-            None,
-            None,
-            2.5,
-            1.6,
-            222,
-            1024,
-            False,
-            True,
-        ],
-        [
-            "Detect the skeleton of human in this image: <img><|image_1|></img>.",
-            "./imgs/test_cases/control.jpg",
-            None,
-            None,
-            1024,
-            1024,
-            2.0,
-            1.6,
-            0,
-            1024,
-            False,
-            True,
-        ],
-        [
-            "Generate a new photo using the following picture and text as conditions: <img><|image_1|><img>\n A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
-            "./imgs/demo_cases/skeletal.png",
-            None,
-            None,
-            1024,
-            1024,
-            2,
-            1.6,
-            999,
-            1024,
-            False,
-            True,
-        ],
-        [
-            "Following the pose of this image <img><|image_1|><img>, generate a new photo: A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
-            "./imgs/demo_cases/edit.png",
-            None,
-            None,
-            1024,
-            1024,
-            2.0,
-            1.6,
-            123,
-            1024,
-            False,
-            True,
-        ],
-        [
-            "Following the depth mapping of this image <img><|image_1|><img>, generate a new photo: A young girl is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.",
-            "./imgs/demo_cases/edit.png",
-            None,
-            None,
-            1024,
-            1024,
-            2.0,
-            1.6,
-            1,
-            1024,
-            False,
-            True,
-        ],
-        [
-            "<img><|image_1|></img> What item can be used to see the current time? Please highlight it in blue.",
-            "./imgs/test_cases/watch.jpg",
-            None,
-            None,
-            1024,
-            1024,
-            2.5,
-            1.6,
-            666,
-            1024,
-            False,
-            True,
-        ],
-        [
-            "According to the following examples, generate an output for the input.\nInput: <img><|image_1|></img>\nOutput: <img><|image_2|></img>\n\nInput: <img><|image_3|></img>\nOutput: ",
-            "./imgs/test_cases/icl1.jpg",
-            "./imgs/test_cases/icl2.jpg",
-            "./imgs/test_cases/icl3.jpg",
-            224,
-            224,
-            2.5,
-            1.6,
-            1,
-            768,
-            False,
-            False,
-        ],
-    ]
-    return case
-
-def run_for_examples(text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale, seed, max_input_image_size, randomize_seed, use_input_image_size_as_output):
-    # Set default values inside the function
-    inference_steps = 50
-    separate_cfg_infer = True
-    offload_model = False
-
-    return generate_image(
-        text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale,
-        inference_steps, seed, separate_cfg_infer, offload_model,
-        use_input_image_size_as_output, max_input_image_size, randomize_seed
-    )
-
 description = """
 OmniGen is a unified image generation model that you can use to perform various tasks, including but not limited to text-to-image generation, subject-driven generation, Identity-Preserving Generation, and image-conditioned generation.
 For multi-modal to image generation, you should pass a string as `prompt`, and a list of image paths as `input_images`. The placeholder in the prompt should be in the format of `<img><|image_*|></img>` (for the first image, the placeholder is <img><|image_1|></img>; for the second image, the placeholder is <img><|image_2|></img>).
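The unchanged `description` string above spells out the convention these examples relied on: the prompt refers to each attached image through a placeholder of the form `<img><|image_N|></img>`, while the images themselves are passed as separate inputs. As a rough illustration of how one of the removed rows maps onto the app's `generate_image` helper, here is a hedged sketch (not part of this commit): the argument order follows the removed `run_for_examples` wrapper, the values mirror the second example row, and the snippet assumes it runs inside app.py where `generate_image` is defined.

# Sketch only: replay one removed example row directly against generate_image.
# Assumes generate_image from app.py is in scope; run_for_examples fixed
# inference_steps=50, separate_cfg_infer=True, offload_model=False.
example_prompt = "The woman in <img><|image_1|></img> waves her hand happily in the crowd"
result = generate_image(
    example_prompt,                 # text
    "./imgs/test_cases/zhang.png",  # img1, referenced as <|image_1|>
    None,                           # img2 (unused)
    None,                           # img3 (unused)
    1024,                           # height
    1024,                           # width
    2.5,                            # guidance_scale
    1.9,                            # img_guidance_scale
    50,                             # inference_steps
    128,                            # seed
    True,                           # separate_cfg_infer
    False,                          # offload_model
    False,                          # use_input_image_size_as_output
    1024,                           # max_input_image_size
    False,                          # randomize_seed
)

The second hunk, below, removes the UI wiring that exposed these rows as clickable examples.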
@@ -379,26 +164,6 @@ with gr.Blocks() as demo:
             outputs=output_image,
         )
 
-        gr.Examples(
-            examples=get_example(),
-            fn=run_for_examples,
-            inputs=[
-                prompt_input,
-                image_input_1,
-                image_input_2,
-                image_input_3,
-                height_input,
-                width_input,
-                guidance_scale_input,
-                img_guidance_scale_input,
-                seed_input,
-                max_input_image_size,
-                randomize_seed,
-                use_input_image_size_as_output,
-            ],
-            outputs=output_image,
-        )
-
     gr.Markdown(article)
 
 # launch
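For reference, the `gr.Examples` block deleted above is what surfaced those rows in the Gradio interface: each row supplies one value per listed input component, and clicking it pre-fills the form and can run `fn` to populate the output. A minimal, self-contained sketch of the same pattern follows; the `echo` function and component names are hypothetical stand-ins, not code from app.py.

# Minimal sketch of the gr.Examples pattern removed by this commit
# (hypothetical toy components, not the app's actual code).
import gradio as gr

def echo(text, count):
    # Toy stand-in for run_for_examples / generate_image.
    return text * int(count)

with gr.Blocks() as demo:
    text_in = gr.Textbox(label="Text")
    count_in = gr.Slider(1, 5, step=1, label="Count")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(echo, inputs=[text_in, count_in], outputs=out)

    # Analogue of the removed block: one list per example row,
    # one value per input component.
    gr.Examples(
        examples=[["hello ", 2], ["gradio ", 3]],
        fn=echo,
        inputs=[text_in, count_in],
        outputs=out,
    )

if __name__ == "__main__":
    demo.launch()

Dropping that block removes only the clickable example rows; the rest of the layout and the generate button are untouched, which matches the +0 -235 stat for app.py.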
 