apolinario committed
Commit 4df993c · Parent(s): e4c75aa

Add footer
app.py
CHANGED
@@ -102,7 +102,8 @@ with gr.Blocks() as mindseye:
         # image = gr.outputs.Image()
         with gr.TabItem("Gallery output"):
             gallery = gr.Gallery(label="Individual images")
-
+        with gr.Row():
+            gr.Markdown("<h4 style='font-size: 110%;margin-top:.5em'>Biases acknowledgment</h4><div>Despite how impressive turning text into images is, beware of the fact that this model may output content that reinforces or exacerbates societal biases. According to the <a href='https://arxiv.org/abs/2112.10752' target='_blank'>Latent Diffusion paper</a>: <i>\"Deep learning modules tend to reproduce or exacerbate biases that are already present in the data\"</i>. The model was trained on both the ImageNet dataset and an undisclosed dataset by OpenAI.</div><h4 style='font-size: 110%;margin-top:1em'>Who owns the images produced by this demo?</h4><div>Definitely not me! Probably you do. I say probably because the copyright discussion about AI-generated art is ongoing, so <a href='https://www.theverge.com/2022/2/21/22944335/us-copyright-office-reject-ai-generated-art-recent-entrance-to-paradise' target='_blank'>it may be the case that everything produced here falls automatically into the public domain</a>. Either way, it is yours or in the public domain.</div>")
     get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=gallery)
     get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=gallery)
     get_image_vqgan.click(text2image_vqgan, inputs=[text,width_vq,height_vq,style,steps_vq,flavor],outputs=gallery)
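For context, here is a minimal, self-contained sketch of the layout pattern this commit introduces: a gr.Markdown footer wrapped in its own gr.Row beneath the output tabs. This is an illustrative reconstruction, not the app's actual code; the generate handler, the single Generate button, and the demo Blocks are hypothetical stand-ins for the app's real text2image_* backends and its mindseye Blocks.

import gradio as gr

# Hypothetical stand-in for the app's text2image_* backends; it
# returns an empty gallery so the sketch runs end to end.
def generate(prompt):
    return []

with gr.Blocks() as demo:
    text = gr.Textbox(label="Prompt")
    generate_btn = gr.Button("Generate")
    with gr.Tabs():
        with gr.TabItem("Gallery output"):
            gallery = gr.Gallery(label="Individual images")
    # The commit's footer pattern: a Row holding one Markdown block,
    # so the notice spans the full width below the tabs.
    with gr.Row():
        gr.Markdown("<h4>Biases acknowledgment</h4>"
                    "<div>Model outputs may reflect biases in the training data.</div>")
    generate_btn.click(generate, inputs=[text], outputs=gallery)

demo.launch()

Placing the Markdown inside a Row, rather than directly under Blocks, matches the diff and keeps the footer aligned with the app's other full-width rows.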