Update app.py
app.py CHANGED
@@ -2,11 +2,9 @@ from diffusers import StableDiffusionPipeline, DiffusionPipeline
 import torch
 import random
 from datetime import datetime
-from flask import Flask, render_template_string, send_file
-import io
 from PIL import Image
 
-resolution = (
+resolution = (512, 512)  # Image resolution (width, height)
 num_steps = 20
 guidance_scale = 7.5
 neg_prompt = "blurry"
@@ -18,48 +16,18 @@ pipe = DiffusionPipeline.from_pretrained(model_id)
 device = "cuda" if torch.cuda.is_available() else "cpu"
 pipe = pipe.to(device)
 
-app = Flask(__name__)
 
 # Function to generate an image
-def generate_image(prompt, seed, steps
+def generate_image(prompt, neg_prompt, seed, steps):
     generator = torch.manual_seed(seed)
     image = pipe(prompt, height=resolution[1], width=resolution[0], num_inference_steps=steps, guidance_scale=guidance_scale, generator=generator, negative_prompt=neg_prompt).images[0]
     return image
 
-
-
-
-
-
-    image = generate_image(prompt, seed, num_steps, neg_prompt)
+demo = gr.Interface(
+    fn=generate_image,
+    inputs=["text", "text", "slider", "slider"],
+    outputs=[gr.Image()],
+)
 
-
-    img_io = io.BytesIO()
-    image.save(img_io, 'JPEG', quality=70)
-    img_io.seek(0)
+demo.launch()
 
-    # Generate the HTML page
-    html = """
-    <!doctype html>
-    <title>Generated Image</title>
-    <h1>Generated Image</h1>
-    <img src="/image" alt="Generated Image">
-    """
-    return render_template_string(html)
-
-@app.route('/image')
-def image():
-    # Generate an image
-    prompt = "A beautiful landscape"
-    seed = random.randint(1, 1000000)
-    image = generate_image(prompt, seed, num_steps, neg_prompt)
-
-    # Save the image to a buffer
-    img_io = io.BytesIO()
-    image.save(img_io, 'JPEG', quality=70)
-    img_io.seek(0)
-
-    return send_file(img_io, mimetype='image/jpeg')
-
-if __name__ == '__main__':
-    app.run(debug=True)
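The changed lines above do not show everything the new Gradio version needs: neither an import of gradio nor the model_id / DiffusionPipeline.from_pretrained(...) lines appear inside the hunks, and the bare "slider" input shortcuts fall back to Gradio's default slider range, which is far too narrow for a seed value. Below is a minimal sketch of how the complete app.py could look under those assumptions, using a placeholder checkpoint name (the Space's real model_id is not shown in this diff) and explicit gr.Textbox / gr.Slider / gr.Image components in place of the string shortcuts. It is an illustration, not the Space's actual code.

import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Placeholder checkpoint name for illustration; the real model_id is defined
# outside the changed lines and is not shown in the diff.
model_id = "runwayml/stable-diffusion-v1-5"

resolution = (512, 512)   # image resolution (width, height)
guidance_scale = 7.5

pipe = DiffusionPipeline.from_pretrained(model_id)
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)

def generate_image(prompt, neg_prompt, seed, steps):
    # Sliders deliver plain numbers, so cast before seeding the generator.
    generator = torch.manual_seed(int(seed))
    image = pipe(
        prompt,
        height=resolution[1],
        width=resolution[0],
        num_inference_steps=int(steps),
        guidance_scale=guidance_scale,
        generator=generator,
        negative_prompt=neg_prompt,
    ).images[0]
    return image

demo = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative prompt", value="blurry"),
        gr.Slider(minimum=0, maximum=1_000_000, step=1, value=42, label="Seed"),
        gr.Slider(minimum=1, maximum=50, step=1, value=20, label="Steps"),
    ],
    outputs=gr.Image(type="pil", label="Generated image"),
)

if __name__ == "__main__":
    demo.launch()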
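Note that the committed code keeps the inputs as the string shortcuts ["text", "text", "slider", "slider"]; those are valid Gradio components, but they give the two sliders generic labels and default ranges, and they pass the seed and step count as plain numbers, which is why the sketch above casts them to int before calling torch.manual_seed and the pipeline. If an import of gradio as gr is not already present in an unchanged part of app.py (it does not appear in these hunks), the added gr.Interface(...) call will fail with a NameError at startup.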