Andre Embury committed
Commit 52e3636 · unverified · 1 Parent(s): 8297189

Change to a Tile Upscaler


Changes the model to a Tile upscaler.

Files changed (5)
  1. .tool-versions +1 -0
  2. README.md +6 -5
  3. __pycache__/app.cpython-313.pyc +0 -0
  4. app.py +220 -281
  5. requirements.txt +14 -12
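At a high level, the rewritten app.py below swaps the SDXL ControlNet-Union canny setup for a Stable Diffusion 1.5 img2img pipeline conditioned on the ControlNet v1.1 tile model: the input is first upscaled with Real-ESRGAN, then passed as both the img2img image and the tile control image. The following is a minimal sketch of that flow, condensed from the new app.py in this diff; the plain LANCZOS resize stands in for the Real-ESRGAN step, "input.png" is a placeholder file name, and the model paths assume the files fetched by download_models().

# Condensed sketch of the tile-upscale flow in the new app.py (not the full app;
# the real code upscales with Real-ESRGAN and snaps sizes to multiples of 64).
import torch
from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler
from PIL import Image

controlnet = ControlNetModel.from_single_file(
    "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
    "models/models/Stable-diffusion/juggernaut_reborn.safetensors",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# The pre-upscaled image doubles as the img2img input and the tile control image.
condition = Image.open("input.png").convert("RGB").resize((1024, 1024), Image.LANCZOS)
result = pipe(
    prompt="masterpiece, best quality, highres",
    image=condition,
    control_image=condition,
    strength=0.4,            # UI default for Strength
    num_inference_steps=20,  # UI default for inference steps
    guidance_scale=3,        # UI default for Guidance Scale
).images[0]
result.save("upscaled.png")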
.tool-versions ADDED
@@ -0,0 +1 @@
+ python 3.13.1
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
- title: Hackathon 2025
- emoji: 🖼
  colorFrom: purple
- colorTo: red
  sdk: gradio
- sdk_version: 5.25.2
  app_file: app.py
- pinned: false
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

  ---
+ title: Tile Upscaler
+ emoji: 🚀
  colorFrom: purple
+ colorTo: blue
  sdk: gradio
+ sdk_version: 4.44.1
  app_file: app.py
+ pinned: true
+ license: apache-2.0
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/app.cpython-313.pyc ADDED
Binary file (6.97 kB).
 
app.py CHANGED
@@ -1,294 +1,233 @@
- import gradio as gr
- import numpy as np
-
- # import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import (
-     # StableDiffusionControlNetImg2ImgPipeline,
-     ControlNetModel,
-     ControlNetUnionModel,
-     StableDiffusionXLControlNetPipeline,
- )
- import torch

  import requests
- from fastapi import FastAPI, HTTPException
- from PIL import Image
- from controlnet_aux import CannyDetector
-
- from diffusers import AutoencoderKL
- from diffusers import (
-     EulerAncestralDiscreteScheduler,
-     StableDiffusionXLControlNetUnionPipeline,
- )
- import cv2
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- # model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
- model_repo_id = "runwayml/stable-diffusion-v1-5"
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- # controlnet = ControlNetModel.from_pretrained(
- #     "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32
- # )
-
- # controlnet = ControlNetModel.from_pretrained(
- #     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
- # )
-
- # pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- # pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
- #     model_repo_id,
- #     controlnet=controlnet,
- #     torch_dtype=torch_dtype,
- # ).to(device)
- # pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
- #     "stabilityai/stable-diffusion-xl-base-1.0",
- #     controlnet=controlnet,
- #     torch_dtype=torch.float16,
- #     variant="fp16",
- #     use_safetensors=True,
- # ).to(device)
- # # pipe = pipe.to(device)
- # canny = CannyDetector()
-
-
- eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
-     "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
- )
-
- # when test with other base model, you need to change the vae also.
- vae = AutoencoderKL.from_pretrained(
-     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
- )
-
- controlnet_model = ControlNetUnionModel.from_pretrained(
-     "xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True
- )
-
- # controlnet_union_model = ControlNetUnionModel([controlnet_model])
-
- pipe = StableDiffusionXLControlNetUnionPipeline.from_pretrained(
-     "stabilityai/stable-diffusion-xl-base-1.0",
-     controlnet=controlnet_model,
-     vae=vae,
-     torch_dtype=torch.float16,
-     scheduler=eulera_scheduler,
-     control_mode=[0],
- )
-
- pipe = pipe.to(device)
-
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     image_url,
-     # negative_prompt,
-     # seed,
-     # randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     # if randomize_seed:
-     #     seed = random.randint(0, MAX_SEED)
-
-     # generator = torch.Generator().manual_seed(seed)
-
-     # image = pipe(
-     #     prompt=prompt,
-     #     negative_prompt=negative_prompt,
-     #     guidance_scale=guidance_scale,
-     #     num_inference_steps=num_inference_steps,
-     #     width=width,
-     #     height=height,
-     #     generator=generator,
-     # ).images[0]
-
-     # return image, seed
-
-     width = int(width)
-     height = int(height)

-     try:
-         resp = requests.get(image_url)
-         resp.raise_for_status()
-     except Exception as e:
-         raise HTTPException(400, f"Could not download image: {e}")

-     # img = Image.open(io.BytesIO(resp.content)).convert("RGB")
-     img = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
-     # img = img.resize((req.width, req.height))
-     # img = img.resize((width, height))
-
-     # control_net_image = canny(img).resize((width, height))
-
-     img_np = np.array(img)
-
-     controlnet_img = cv2.resize(img_np, (width, height))
-
-     controlnet_img = cv2.Canny(controlnet_img, 100, 200)
-     controlnet_img = HWC3(controlnet_img)
-     controlnet_img = Image.fromarray(controlnet_img)
-
-     prompt = (
-         "redraw the logo from scratch, clean sharp vector-style, "
-         # + STYLE_PROMPTS[req.style_preset]
-     )
-
-     output = pipe(
-         prompt=prompt,
-         negative_prompt=NEGATIVE,
-         # image=img,
-         control_image=controlnet_img,
-         # strength=req.strength,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         height=height,
-         width=width,
-     ).images[0]

-     return output


- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]

- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
  """

- NEGATIVE = "blurry, distorted, messy, gradients, background noise"
- WIDTH = 512
- HEIGHT = 512
-
-
- def HWC3(x):
-     assert x.dtype == np.uint8
-     if x.ndim == 2:
-         x = x[:, :, None]
-     assert x.ndim == 3
-     H, W, C = x.shape
-     assert C == 1 or C == 3 or C == 4
-     if C == 3:
-         return x
-     if C == 1:
-         return np.concatenate([x, x, x], axis=2)
-     if C == 4:
-         color = x[:, :, 0:3].astype(np.float32)
-         alpha = x[:, :, 3:4].astype(np.float32) / 255.0
-         y = color * alpha + 255.0 * (1.0 - alpha)
-         y = y.clip(0, 255).astype(np.uint8)
-         return y
-
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             image_url = gr.Text(
-                 label="Image URL",
-                 show_label=False,
-                 # max_lines=1,
-                 placeholder="Provide a image URL",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Label(
-                 label="Negative prompts",
-                 # max_lines=1,
-                 value=NEGATIVE,
-                 visible=True,
-             )
-
-             # seed = gr.Slider(
-             #     label="Seed",
-             #     minimum=0,
-             #     maximum=MAX_SEED,
-             #     step=1,
-             #     value=0,
-             # )
-
-             # randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Label(
-                     label="Width",
-                     value=WIDTH,
-                     # minimum=256,
-                     # maximum=MAX_IMAGE_SIZE,
-                     # step=32,
-                     # value=1024, # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Label(
-                     label="Height",
-                     value=HEIGHT,
-                     # minimum=256,
-                     # maximum=MAX_IMAGE_SIZE,
-                     # step=32,
-                     # value=1024, # Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=8.5, # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=25, # Replace with defaults that work for your model
-                 )
-
-         # gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, image_url.submit],
-         fn=infer,
-         inputs=[
-             image_url,
-             # negative_prompt,
-             # seed,
-             # randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[
-             result,
-             # seed,
          ],
      )

- if __name__ == "__main__":
-     demo.launch()
 
+ import spaces

+ import os
  import requests
+ import time

+ import subprocess
+ subprocess.run("pip install git+https://github.com/inference-sh/Real-ESRGAN.git --no-deps", shell=True)

+ import torch

+ from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
+ from diffusers.models import AutoencoderKL
+ from diffusers.models.attention_processor import AttnProcessor2_0

+ from PIL import Image
+ import cv2
+ import numpy as np

+ from RealESRGAN import RealESRGAN

+ import gradio as gr
+ from gradio_imageslider import ImageSlider
+
+ from huggingface_hub import hf_hub_download
+
+ USE_TORCH_COMPILE = False
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ def download_models():
+     models = {
+         "MODEL": ("dantea1118/juggernaut_reborn", "juggernaut_reborn.safetensors", "models/models/Stable-diffusion"),
+         "UPSCALER_X2": ("ai-forever/Real-ESRGAN", "RealESRGAN_x2.pth", "models/upscalers/"),
+         "UPSCALER_X4": ("ai-forever/Real-ESRGAN", "RealESRGAN_x4.pth", "models/upscalers/"),
+         "NEGATIVE_1": ("philz1337x/embeddings", "verybadimagenegative_v1.3.pt", "models/embeddings"),
+         "NEGATIVE_2": ("philz1337x/embeddings", "JuggernautNegative-neg.pt", "models/embeddings"),
+         "LORA_1": ("philz1337x/loras", "SDXLrender_v2.0.safetensors", "models/Lora"),
+         "LORA_2": ("philz1337x/loras", "more_details.safetensors", "models/Lora"),
+         "CONTROLNET": ("lllyasviel/ControlNet-v1-1", "control_v11f1e_sd15_tile.pth", "models/ControlNet"),
+         "VAE": ("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors", "models/VAE"),
+     }
+
+     for model, (repo_id, filename, local_dir) in models.items():
+         hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
+
+ download_models()
+
+ def timer_func(func):
+     def wrapper(*args, **kwargs):
+         start_time = time.time()
+         result = func(*args, **kwargs)
+         end_time = time.time()
+         print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
+         return result
+     return wrapper
+
+ class LazyLoadPipeline:
+     def __init__(self):
+         self.pipe = None
+
+     @timer_func
+     def load(self):
+         if self.pipe is None:
+             print("Starting to load the pipeline...")
+             self.pipe = self.setup_pipeline()
+             print(f"Moving pipeline to device: {device}")
+             self.pipe.to(device)
+             if USE_TORCH_COMPILE:
+                 print("Compiling the model...")
+                 self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+     @timer_func
+     def setup_pipeline(self):
+         print("Setting up the pipeline...")
+         controlnet = ControlNetModel.from_single_file(
+             "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
+         )
+         # safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
+         model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
+         pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
+             model_path,
+             controlnet=controlnet,
+             torch_dtype=torch.float16,
+             use_safetensors=True,
+             # safety_checker=safety_checker
+         )
+         vae = AutoencoderKL.from_single_file(
+             "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
+             torch_dtype=torch.float16
+         )
+         pipe.vae = vae
+         pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
+         pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
+         pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
+         pipe.fuse_lora(lora_scale=0.5)
+         pipe.load_lora_weights("models/Lora/more_details.safetensors")
+         pipe.fuse_lora(lora_scale=1.)
+         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+         pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
+         return pipe
+
+     def __call__(self, *args, **kwargs):
+         return self.pipe(*args, **kwargs)
+
+ class LazyRealESRGAN:
+     def __init__(self, device, scale):
+         self.device = device
+         self.scale = scale
+         self.model = None
+
+     def load_model(self):
+         if self.model is None:
+             self.model = RealESRGAN(self.device, scale=self.scale)
+             self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
+     def predict(self, img):
+         self.load_model()
+         return self.model.predict(img)
+
+ lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
+ lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
+
+ @timer_func
+ def resize_and_upscale(input_image, resolution):
+     scale = 2 if resolution <= 2048 else 4
+     input_image = input_image.convert("RGB")
+     W, H = input_image.size
+     k = float(resolution) / min(H, W)
+     H = int(round(H * k / 64.0)) * 64
+     W = int(round(W * k / 64.0)) * 64
+     img = input_image.resize((W, H), resample=Image.LANCZOS)
+     if scale == 2:
+         img = lazy_realesrgan_x2.predict(img)
+     else:
+         img = lazy_realesrgan_x4.predict(img)
+     return img
+
+ @timer_func
+ def create_hdr_effect(original_image, hdr):
+     if hdr == 0:
+         return original_image
+     cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
+     factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
+                1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
+                1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
+     images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
+     merge_mertens = cv2.createMergeMertens()
+     hdr_image = merge_mertens.process(images)
+     hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
+     return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
+
+ lazy_pipe = LazyLoadPipeline()
+ lazy_pipe.load()
+
+ def prepare_image(input_image, resolution, hdr):
+     condition_image = resize_and_upscale(input_image, resolution)
+     condition_image = create_hdr_effect(condition_image, hdr)
+     return condition_image
+
+ @spaces.GPU
+ @timer_func
+ def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
+     print("Starting image processing...")
+     torch.cuda.empty_cache()
+
+     condition_image = prepare_image(input_image, resolution, hdr)
+
+     prompt = "masterpiece, best quality, highres"
+     negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
+
+     options = {
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "image": condition_image,
+         "control_image": condition_image,
+         "width": condition_image.size[0],
+         "height": condition_image.size[1],
+         "strength": strength,
+         "num_inference_steps": num_inference_steps,
+         "guidance_scale": guidance_scale,
+         "generator": torch.Generator(device=device).manual_seed(0),
+     }
+
+     print("Running inference...")
+     result = lazy_pipe(**options).images[0]
+     print("Image processing completed successfully")
+
+     # Convert input_image and result to numpy arrays
+     input_array = np.array(input_image)
+     result_array = np.array(result)
+
+     return [input_array, result_array]
+
+ title = """<h1 align="center">Image Upscaler with Tile Controlnet</h1>
+ <p align="center">The main ideas come from</p>
+ <p><center>
+ <a href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[philz1337x]</a>
+ <a href="https://github.com/BatouResearch/controlnet-tile-upscale" target="_blank">[Pau-Lozano]</a>
+ </center></p>
  """

+ with gr.Blocks() as demo:
+     gr.HTML(title)
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.Image(type="pil", label="Input Image")
+             run_button = gr.Button("Enhance Image")
+         with gr.Column():
+             output_slider = ImageSlider(label="Before / After", type="numpy")
+     with gr.Accordion("Advanced Options", open=False):
+         resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
+         num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
+         strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
+         hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
+         guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
+
+     run_button.click(fn=gradio_process_image,
+                      inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
+                      outputs=output_slider)
+
+     # Add examples with all required inputs
+     gr.Examples(
+         examples=[
          ],
+         inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
+         outputs=output_slider,
+         fn=gradio_process_image,
+         cache_examples=True,
      )

+ demo.launch(share=True)
 
requirements.txt CHANGED
@@ -1,13 +1,15 @@
- accelerate
  diffusers
- invisible_watermark
- torch
- transformers
- # xformers
- fastapi
- uvicorn
- pydantic
- requests
- Pillow
- controlnet-aux
- gradio

+ opencv-python
+ spaces
  diffusers
+ torch==2.4.0
+ torchvision
+ pipeline
+ transformers<=4.49.0
+ accelerate
+ safetensors
+ spaces
+ peft
+ gradio
+ pillow
+ gradio-imageslider
+ pydantic==2.10.6