Update app.py
Browse files
app.py
CHANGED
@@ -7,37 +7,24 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
7 |
import torch
|
8 |
from PIL import Image
|
9 |
from transformers import pipeline
|
|
|
10 |
|
11 |
# Load the pipeline for text generation
|
12 |
-
|
13 |
"text-generation",
|
14 |
model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
|
15 |
tokenizer="gpt2"
|
16 |
)
|
17 |
|
18 |
-
#
|
19 |
-
history = []
|
20 |
-
|
21 |
-
# Function to generate text based on input prompt and record the history
|
22 |
-
def generate_text(prompt):
|
23 |
-
generated_text = pipe(prompt, max_length=77)[0]["generated_text"]
|
24 |
-
# Append the generated prompt and its result to the history list
|
25 |
-
history.append({"prompt": prompt, "generated_text": generated_text})
|
26 |
-
return generated_text
|
27 |
-
|
28 |
-
# Create a Gradio interface with history recording
|
29 |
-
iface = gr.Interface(
|
30 |
-
fn=generate_text,
|
31 |
-
inputs=gr.Textbox(lines=5, label="Prompt"),
|
32 |
-
outputs=gr.Textbox(label="Output", show_copy_button=True),
|
33 |
-
title="AI Art Prompt Generator",
|
34 |
-
description="Art Prompt Generator is a user-friendly interface designed to optimize input for AI Art Generator or Creator. For faster generation speeds, it's recommended to load the model locally with GPUs, as the online demo at Hugging Face Spaces utilizes CPU, resulting in slower processing times.",
|
35 |
-
api_name="predict"
|
36 |
-
)
|
37 |
-
# Load tokenizer and model
|
38 |
tokenizer = AutoTokenizer.from_pretrained("stablediffusionapi/juggernaut-xl-v8")
|
39 |
model = AutoModelForCausalLM.from_pretrained("stablediffusionapi/juggernaut-xl-v8")
|
40 |
|
|
|
|
|
|
|
|
|
|
|
41 |
def generate_image(text):
|
42 |
# Tokenize input text
|
43 |
input_ids = tokenizer.encode(text, return_tensors="pt")
|
@@ -55,13 +42,13 @@ def generate_image(text):
|
|
55 |
|
56 |
# Create Gradio interface
|
57 |
iface = gr.Interface(
|
58 |
-
fn=generate_image,
|
59 |
-
inputs=
|
60 |
-
outputs="image",
|
61 |
-
title="
|
62 |
-
description="
|
63 |
theme="huggingface"
|
64 |
)
|
65 |
|
66 |
# Launch the interface
|
67 |
-
iface.launch(
|
|
|
7 |
import torch
|
8 |
from PIL import Image
|
9 |
from transformers import pipeline
|
10 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
11 |
|
12 |
# Load the pipeline for text generation
|
13 |
+
# Text-generation pipeline: a GPT-2 fine-tune that expands a short user
# prompt into a detailed Stable Diffusion art prompt.
text_generator = pipeline(
    task="text-generation",
    model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
    tokenizer="gpt2",
)
|
18 |
|
19 |
+
# Load tokenizer and model for image generation
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
# Load tokenizer and model for image generation.
# NOTE(review): "stablediffusionapi/juggernaut-xl-v8" is a Stable Diffusion XL
# image checkpoint, not a causal language model — loading it through
# AutoModelForCausalLM is unlikely to succeed; SDXL checkpoints are normally
# loaded via diffusers (StableDiffusionXLPipeline). Confirm the intended model.
tokenizer = AutoTokenizer.from_pretrained("stablediffusionapi/juggernaut-xl-v8")
model = AutoModelForCausalLM.from_pretrained("stablediffusionapi/juggernaut-xl-v8")
|
22 |
|
23 |
+
# Function to generate text based on input prompt
|
24 |
+
def generate_text(prompt):
    """Expand *prompt* into a detailed art prompt using the GPT-2 generator.

    Returns the first generated sequence (capped at 77 tokens, the CLIP
    text-encoder limit used by Stable Diffusion).
    """
    candidates = text_generator(prompt, max_length=77)
    return candidates[0]["generated_text"]
|
26 |
+
|
27 |
+
# Function to generate image based on input text
|
28 |
def generate_image(text):
|
29 |
# Tokenize input text
|
30 |
input_ids = tokenizer.encode(text, return_tensors="pt")
|
|
|
42 |
|
43 |
# Create Gradio interface
|
44 |
def _generate_prompt_and_image(prompt_text, image_text):
    """Run both generators for the Gradio interface.

    gr.Interface requires a single callable for ``fn`` — passing the list
    ``[generate_text, generate_image]`` (as the original did) is invalid.
    This dispatcher maps the two textbox inputs onto the two generators and
    returns one value per declared output.
    """
    return generate_text(prompt_text), generate_image(image_text)


# Create Gradio interface
iface = gr.Interface(
    fn=_generate_prompt_and_image,
    inputs=["textbox", "textbox"],
    outputs=["textbox", "image"],
    title="AI Art Prompt Generator",
    description="Art Prompt Generator is a user-friendly interface designed to optimize input for AI Art Generator or Creator. For faster generation speeds, it's recommended to load the model locally with GPUs, as the online demo at Hugging Face Spaces utilizes CPU, resulting in slower processing times.",
    theme="huggingface"
)
|
52 |
|
53 |
# Launch the interface
|
54 |
+
# Start the Gradio server for the interface (blocks until stopped).
iface.launch()
|