santosh175 committed
Commit 0e1fe38 · verified · 1 parent: 5b0d949

Upload 3 files

Files changed (3)
  1. app.py +61 -0
  2. app_logic.py +84 -0
  3. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,61 @@
+ import streamlit as st
+ from app_logic import text2image
+
+ from io import BytesIO
+
+
+ def app():
+     st.header("Text-to-image Web App")
+     st.subheader("Powered by Hugging Face")
+     user_input = st.text_area(
+         "Enter your text prompt below and click the button to submit."
+     )
+
+     option = st.selectbox(
+         "Select model (in order of processing time)",
+         (
+             "mukaist/DALLE-4K",
+             "prithivMLmods/Canopus-Realism-LoRA",
+             "black-forest-labs/FLUX.1-dev",
+             "SG161222/RealVisXL_V4.0_Lightning",
+             "prompthero/openjourney",
+             "stabilityai/stable-diffusion-2-1",
+             "runwayml/stable-diffusion-v1-5",
+             "SG161222/RealVisXL_V3.0",
+             "CompVis/stable-diffusion-v1-4",
+         ),
+     )
+
+     # The form only wraps the submit button; the prompt and model choice
+     # are read from the widgets above when it is submitted.
+     with st.form("my_form"):
+         submit = st.form_submit_button(label="Submit text prompt")
+
+     if submit:
+         with st.spinner(text="Generating image ... This may take a while."):
+             im, start, end = text2image(prompt=user_input, repo_id=option)
+
+         # Serialize the PIL image so it can be offered as a download.
+         buf = BytesIO()
+         im.save(buf, format="PNG")
+         byte_im = buf.getvalue()
+
+         hours, rem = divmod(end - start, 3600)
+         minutes, seconds = divmod(rem, 60)
+
+         st.success(
+             "Processing time: {:0>2}:{:0>2}:{:05.2f}.".format(
+                 int(hours), int(minutes), seconds
+             )
+         )
+
+         st.image(im)
+
+         st.download_button(
+             label="Click here to download",
+             data=byte_im,
+             file_name="generated_image.png",
+             mime="image/png",
+         )
+
+
+ if __name__ == "__main__":
+     app()
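
A quick way to exercise the backend without launching Streamlit is a short script along these lines (a sketch, not part of the commit; the file name is hypothetical, it assumes HUGGINGFACEHUB_API_TOKEN holds a valid token, and it picks one of the models offered in the selectbox above):

    # smoke_test.py - hypothetical helper, run from the repo root
    from app_logic import text2image

    image, start, end = text2image(
        prompt="a watercolor painting of a lighthouse at dawn",
        repo_id="stabilityai/stable-diffusion-2-1",
    )
    print(f"Generated a {image.size} image in {end - start:.1f}s")
    image.save("smoke_test.png")

The app itself is started with streamlit run app.py.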
app_logic.py ADDED
@@ -0,0 +1,84 @@
+ from typing import Literal
+ from diffusers import StableDiffusionPipeline
+ import torch
+ import time
+
+ import os
+ import io
+ import requests
+ from PIL import Image
+
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+ # from huggingface_hub import hf_hub_download
+
+
+ seed = 2024
+ generator = torch.manual_seed(seed)
+
+ # Settings for the (currently disabled) local diffusers pipeline below.
+ NUM_ITERS_TO_RUN = 1
+ NUM_INFERENCE_STEPS = 25
+ NUM_IMAGES_PER_PROMPT = 1
+
+
+ # Add your Hugging Face Hub token here (do not commit a real token).
+ os.environ['HUGGINGFACEHUB_API_TOKEN'] = "Your_hub_API_KEY"
+
+
+ def text2image(
+     prompt: str,
+     repo_id: Literal[
+         "mukaist/DALLE-4K",
+         "prithivMLmods/Canopus-Realism-LoRA",
+         "black-forest-labs/FLUX.1-dev",
+         "SG161222/RealVisXL_V4.0_Lightning",
+         "prompthero/openjourney",
+         "stabilityai/stable-diffusion-2-1",
+         "runwayml/stable-diffusion-v1-5",
+         "SG161222/RealVisXL_V3.0",
+         "CompVis/stable-diffusion-v1-4",
+     ],
+ ):
+     start = time.time()
+
+     # Generate the image remotely via the Hugging Face Inference API.
+     HF_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
+     API_URL = f"https://api-inference.huggingface.co/models/{repo_id}"
+     headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+     payload = {"inputs": prompt}
+     response = requests.post(API_URL, headers=headers, json=payload)
+     image_bytes = response.content
+     image = Image.open(io.BytesIO(image_bytes))
+     # Plain resize to 2048x2048; this enlarges the image but adds no detail.
+     upscaled_image = image.resize((2048, 2048))
+
+     # Local diffusers pipelines, kept for reference but not executed.
+     '''if torch.cuda.is_available():
+         print("Using GPU")
+         pipeline = StableDiffusionPipeline.from_pretrained(
+             repo_id,
+             torch_dtype=torch.float16,
+             use_safetensors=True,
+         ).to("cuda")
+
+         pipe = StableDiffusionXLPipeline.from_pretrained(
+             repo_id,
+             torch_dtype=torch.float16,
+             use_safetensors=True,
+         )
+         pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+         pipe.load_lora_weights(
+             "prithivMLmods/Canopus-Realism-LoRA",
+             weight_name="Canopus-Realism-LoRA.safetensors",
+             adapter_name="rlms",
+         )
+         pipe.set_adapters("rlms")
+         pipe.to("cuda")
+     else:
+         print("Using CPU")
+         pipeline = StableDiffusionPipeline.from_pretrained(
+             repo_id,
+             torch_dtype=torch.float32,
+             use_safetensors=True,
+         )
+
+     for _ in range(NUM_ITERS_TO_RUN):
+         images = pipeline(
+             prompt,
+             num_inference_steps=NUM_INFERENCE_STEPS,
+             generator=generator,
+             num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
+         ).images'''
+     end = time.time()
+     return upscaled_image, start, end
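
One caveat with the API call above: when the Inference API returns an error (for example, the model is still loading or the token is invalid), response.content is a JSON error body rather than image bytes, so Image.open fails with an unhelpful decoding error. A more defensive variant could look like the sketch below (an illustration with hypothetical names, not part of the commit):

    import io
    import os
    import requests
    from PIL import Image

    def query_inference_api(prompt: str, repo_id: str) -> Image.Image:
        # Hypothetical defensive wrapper around the same endpoint used above.
        token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
        url = f"https://api-inference.huggingface.co/models/{repo_id}"
        response = requests.post(
            url,
            headers={"Authorization": f"Bearer {token}"},
            json={"inputs": prompt},
            timeout=300,
        )
        response.raise_for_status()  # surface 4xx/5xx as exceptions
        if "image" not in response.headers.get("content-type", ""):
            # Error responses come back as JSON (e.g. "model ... is loading").
            raise RuntimeError(f"Unexpected response: {response.text[:200]}")
        return Image.open(io.BytesIO(response.content))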
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ diffusers==0.30.0
+ Pillow==10.4.0
+ Requests==2.32.3
+ streamlit==1.37.1
+ torch==2.4.0+cu124
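
Note: torch==2.4.0+cu124 is a CUDA-specific local version; "+cuXXX" wheels are typically served from the PyTorch download index rather than plain PyPI, so a bare pip install -r requirements.txt may fail to resolve this pin. Since the local diffusers pipeline in app_logic.py is commented out and generation goes through the Inference API, a plain torch==2.4.0 would likely suffice here.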