Spaces:
Runtime error
Runtime error
Commit
·
1b101d7
1
Parent(s):
ac3245b
Added code for log prints and added session image count
Browse files
app.py
CHANGED
@@ -1,7 +1,5 @@
|
|
1 |
#@title Prepare the Concepts Library to be used
|
2 |
|
3 |
-
|
4 |
-
|
5 |
import requests
|
6 |
import os
|
7 |
import gradio as gr
|
@@ -12,6 +10,9 @@ from diffusers import StableDiffusionPipeline
|
|
12 |
from huggingface_hub import HfApi
|
13 |
from transformers import CLIPTextModel, CLIPTokenizer
|
14 |
import html
|
|
|
|
|
|
|
15 |
|
16 |
community_icon_html = ""
|
17 |
|
@@ -126,9 +127,21 @@ def image_prompt(prompt, guidance, steps, seed, height, width):
|
|
126 |
if square_pixels > 640000:
|
127 |
height = 640000 // width
|
128 |
generator = torch.Generator(device="cuda").manual_seed(int(seed))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
129 |
return (
|
130 |
-
pipe(prompt=prompt, guidance_scale=guidance, num_inference_steps=steps, generator=generator, height=
|
131 |
-
f"prompt
|
132 |
)
|
133 |
|
134 |
|
@@ -259,11 +272,23 @@ def simple_image_prompt(prompt, dropdown, size_dropdown):
|
|
259 |
|
260 |
steps = 30
|
261 |
|
|
|
|
|
|
|
262 |
prompt = prompt + DROPDOWNS[dropdown]
|
263 |
generator = torch.Generator(device="cuda").manual_seed(int(seed))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
264 |
return (
|
265 |
-
pipe(prompt=prompt, guidance_scale=guidance, num_inference_steps=steps, generator=generator, height=
|
266 |
-
f"prompt
|
267 |
)
|
268 |
|
269 |
|
|
|
1 |
#@title Prepare the Concepts Library to be used
|
2 |
|
|
|
|
|
3 |
import requests
|
4 |
import os
|
5 |
import gradio as gr
|
|
|
10 |
from huggingface_hub import HfApi
|
11 |
from transformers import CLIPTextModel, CLIPTokenizer
|
12 |
import html
|
13 |
+
import datetime
|
14 |
+
|
15 |
+
image_count = 0
|
16 |
|
17 |
community_icon_html = ""
|
18 |
|
|
|
127 |
if square_pixels > 640000:
|
128 |
height = 640000 // width
|
129 |
generator = torch.Generator(device="cuda").manual_seed(int(seed))
|
130 |
+
|
131 |
+
height=int((height // 8) * 8)
|
132 |
+
width=int((width // 8) * 8)
|
133 |
+
|
134 |
+
image_count += 1
|
135 |
+
curr_time = datetime.datetime.now()
|
136 |
+
|
137 |
+
print("----- advanced tab prompt ------------------------------")
|
138 |
+
print(f"prompt: {prompt}, size: {width}px x {height}px, guidance: {guidance}, steps: {steps}, seed: {int(seed)}")
|
139 |
+
print(f"image_count: {image_count}, datetime: `{curr_time}`")
|
140 |
+
print("-------------------------------------------------------")
|
141 |
+
|
142 |
return (
|
143 |
+
pipe(prompt=prompt, guidance_scale=guidance, num_inference_steps=steps, generator=generator, height=height, width=width).images[0],
|
144 |
+
f"prompt: '{prompt}', seed = {int(seed)},\nheight: {height}px, width: {width}px,\nguidance: {guidance}, steps: {steps}"
|
145 |
)
|
146 |
|
147 |
|
|
|
272 |
|
273 |
steps = 30
|
274 |
|
275 |
+
height=int((height // 8) * 8)
|
276 |
+
width=int((width // 8) * 8)
|
277 |
+
|
278 |
prompt = prompt + DROPDOWNS[dropdown]
|
279 |
generator = torch.Generator(device="cuda").manual_seed(int(seed))
|
280 |
+
|
281 |
+
image_count += 1
|
282 |
+
curr_time = datetime.datetime.now()
|
283 |
+
|
284 |
+
print("----- welcome / beta tab prompt ------------------------------")
|
285 |
+
print(f"prompt: {prompt}, size: {width}px x {height}px, guidance: {guidance}, steps: {steps}, seed: {int(seed)}")
|
286 |
+
print(f"image_count: {image_count}, datetime: `{curr_time}`")
|
287 |
+
print("-------------------------------------------------------")
|
288 |
+
|
289 |
return (
|
290 |
+
pipe(prompt=prompt, guidance_scale=guidance, num_inference_steps=steps, generator=generator, height=height, width=width).images[0],
|
291 |
+
f"prompt: '{prompt}', seed = {int(seed)},\nheight: {height}px, width: {width}px,\nguidance: {guidance}, steps: {steps}"
|
292 |
)
|
293 |
|
294 |
|