Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -3,30 +3,32 @@ import torch
 from diffusers import AutoPipelineForText2Image
 import base64
 from io import BytesIO
-
+from generate_propmts.py import generate_prompt
 # Load the model once outside of the function
 model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
 
-def generate_image(
+def generate_image(text, sentence_mapping, character_dict, selected_style):
     try:
+        prompt,_ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
         image = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
-
+        buffered = BytesIO()
+        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+        if isinstance(result, img_str):
+            image_bytes = base64.b64decode(result)
+            return image_bytes
     except Exception as e:
-        return None
+        return None
 
 def inference(prompt):
-
+    # Dictionary to store images results
+    images = {}
+    print(f"Received grouped_sentences: {grouped_sentences}")
     # Debugging statement
-
-
-
-
-
-
-    buffered = BytesIO()
-    image.save(buffered, format="PNG")
-    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-    return img_str
+    with concurrent.images.ThreadPoolExecutor() as executor:
+        for paragraph_number, sentences in grouped_sentences.items():
+            combined_sentence = " ".join(sentences)
+            images[paragraph_number] = executor.submit(generate_image, combined_sentence, sentence_mapping, general_descriptions, selected_style)
+    repr images
 
 gradio_interface = gr.Interface(
     fn=inference,