import gradio as gr
from transformers import pipeline

# Load the pipeline for text generation
pipe = pipeline(
    "text-generation",
    model="Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator",
    tokenizer="gpt2"
)

# Initialize a list to store the history of generated prompts
history = []

# Function to generate two prompt variations for the input and record the history
def generate_text(prompt):
    # Sample two candidate prompts so each output box receives its own variation
    results = pipe(prompt, max_length=77, num_return_sequences=2, do_sample=True)
    output_1 = results[0]["generated_text"]
    output_2 = results[1]["generated_text"]
    # Append the input prompt and its results to the history list
    history.append({"prompt": prompt, "generated_text": [output_1, output_2]})
    return output_1, output_2

# Create a Gradio interface with history recording
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, label="Prompt"),
    outputs=[
        gr.Textbox(label="Output 1", show_copy_button=True),
        gr.Textbox(label="Output 2", show_copy_button=True),
    ],
    title="AI Art Prompt Generator",
    description="Art Prompt Generator is a user-friendly interface for turning a short idea into optimized prompts for AI art generators. For faster generation, load the model locally on a GPU; the online demo on Hugging Face Spaces runs on CPU and is therefore slower.",
    api_name="predict"
)

# Launch the interface
iface.launch(show_api=True)
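
# Example usage (a sketch, not part of the original app): because the interface
# exposes a named "predict" endpoint and launches with show_api=True, it can be
# queried from another process with gradio_client. The URL below assumes
# Gradio's default local address and port.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   output_1, output_2 = client.predict("a castle in the clouds", api_name="/predict")
#   print(output_1)
#   print(output_2)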