Huzaifa367 committed (verified)
Commit 8eb9d7a · 1 Parent(s): a835444

Update pages/textimage.py

Files changed (1)
  1. pages/textimage.py +75 -50
pages/textimage.py CHANGED
@@ -1,69 +1,94 @@
  import streamlit as st
- import os
- import random
- import spaces
- import numpy as np
  import torch
- from typing import Tuple
- from datetime import datetime
- from diffusers import PixArtAlphaPipeline, LCMScheduler
+ import numpy as np
+ from PIL import Image
+ import random
+ import uuid
+ from diffusers import PixArtAlphaPipeline

- # Check if CUDA is available
+ # Check for CUDA availability
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

- # Define Hugging Face API details
- API_URL = "https://api-inference.huggingface.co/models/Huzaifa367/chat-summarizer"
- API_TOKEN = os.getenv("AUTH_TOKEN")
- HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
+ # Load the PixArtAlphaPipeline
+ if torch.cuda.is_available():
+     pipe = PixArtAlphaPipeline.from_pretrained(
+         "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+     )
+     pipe.to(device)
+     st.write("Model loaded successfully!")
+ else:
+     st.error("This demo requires GPU support, which is not available on this system.")
+
+ # Constants
+ MAX_SEED = np.iinfo(np.int32).max
+
+ # Function to save image and return the path
+ def save_image(img):
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+ # Main function for image generation
+ def generate_image(prompt, style, use_negative_prompt, negative_prompt, seed, width, height, inference_steps):
+     generator = torch.Generator().manual_seed(seed)

- # Initialize PixArtAlphaPipeline
- pipe = PixArtAlphaPipeline.from_pretrained(
-     "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
-     torch_dtype=torch.float16,
-     use_safetensors=True,
-     device=device
- )
+     # Apply the selected style
+     if style == "(No style)":
+         prompt_text = prompt
+     else:
+         prompt_text, _ = apply_style(style, prompt, negative_prompt)

- # Function to generate image based on prompt
- def generate_image(prompt: str) -> Tuple[str, int]:
-     seed = random.randint(0, np.iinfo(np.int32).max)
+     # Generate the image
      images = pipe(
-         prompt=prompt,
-         width=1024,
-         height=1024,
-         num_inference_steps=4,
-         generator=torch.Generator().manual_seed(seed),
+         prompt=prompt_text,
+         negative_prompt=None,
+         width=width,
+         height=height,
+         guidance_scale=0,
+         num_inference_steps=inference_steps,
+         generator=generator,
          num_images_per_prompt=1,
          use_resolution_binning=True,
          output_type="pil",
      ).images

-     # Save image and return path and seed
-     image_path = save_image(images[0])
-     return image_path, seed
+     # Save the image and display
+     if images:
+         img_path = save_image(images[0])
+         img = Image.open(img_path)
+         st.image(img, caption="Generated Image", use_column_width=True)
+         st.success("Image generated successfully!")
+     else:
+         st.error("Failed to generate image. Please try again.")

- # Function to save image and return path
- def save_image(img):
-     unique_name = str(uuid.uuid4()) + ".png"
-     img.save(unique_name)
-     return unique_name
+ # Helper function to apply selected style
+ def apply_style(style_name, positive, negative):
+     # Define styles dictionary (similar to your Gradio code)
+     styles = {
+         "(No style)": (positive, ""),
+         "Cinematic": ("cinematic still " + positive, "anime, cartoon, ..."),
+         "Realistic": ("Photorealistic " + positive, "drawing, painting, ..."),
+         # Add other styles here...
+     }
+     return styles.get(style_name, styles["(No style)"])
+
+ # Streamlit UI
+ st.title("Instant Image Generator")

- # Streamlit app
- def main():
-     st.set_page_config(layout="wide")
-     st.title("Instant Image Generator")
+ prompt = st.text_input("Prompt", "Enter your prompt")

-     # Prompt input
-     prompt = st.text_area("Prompt", "Enter your prompt here...")
+ style_names = ["(No style)", "Cinematic", "Realistic"] # Add other styles here...
+ style = st.selectbox("Image Style", style_names)

-     # Generate button
-     if st.button("Generate Image"):
-         if prompt:
-             # Generate image based on prompt
-             image_path, seed = generate_image(prompt)
+ use_negative_prompt = st.checkbox("Use negative prompt")
+ negative_prompt = st.text_input("Negative prompt", "")

-             # Display the generated image
-             st.image(image_path, use_column_width=True, caption=f"Seed: {seed}")
+ seed = st.slider("Seed", 0, MAX_SEED, 0)
+ width = st.slider("Width", 256, 4192, 1024, step=32)
+ height = st.slider("Height", 256, 4192, 1024, step=32)
+ inference_steps = st.slider("Steps", 4, 20, 4)

- if __name__ == "__main__":
-     main()
+ if st.button("Generate Image"):
+     generate_image(prompt, style, use_negative_prompt, negative_prompt, seed, width, height, inference_steps)
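
The updated page drives the PixArt-LCM checkpoint with guidance-free, few-step sampling. As a rough illustration of that generation path outside the Streamlit UI, the following minimal sketch reuses the same settings as the new version (model ID, float16 weights, guidance_scale=0, 4 inference steps, 1024x1024, a random 32-bit seed). It assumes a CUDA GPU with enough VRAM; the prompt text and output filename are illustrative placeholders, not part of this commit.

# Minimal sketch of the same generation path without Streamlit.
# Assumptions: CUDA GPU available, diffusers with PixArtAlphaPipeline installed;
# prompt and filename below are placeholders.
import random
import uuid

import numpy as np
import torch
from diffusers import PixArtAlphaPipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to(device)

prompt = "cinematic still of a lighthouse at dawn"  # placeholder prompt
seed = random.randint(0, np.iinfo(np.int32).max)    # same 32-bit range as MAX_SEED

image = pipe(
    prompt=prompt,
    width=1024,
    height=1024,
    guidance_scale=0,        # LCM checkpoints are sampled without classifier-free guidance
    num_inference_steps=4,   # a handful of steps is enough for the LCM variant
    generator=torch.Generator().manual_seed(seed),
    num_images_per_prompt=1,
    output_type="pil",
).images[0]

out_path = f"{uuid.uuid4()}.png"
image.save(out_path)
print(f"Saved {out_path} (seed {seed})")

If the environment matches the Space's, this mirrors what generate_image does when the "Generate Image" button is pressed, minus the style prefixing and Streamlit display.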