Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ import random
 import spaces
 import gradio as gr
 import torch
-from PIL import Image
+from PIL import Image
 from diffusers import StableDiffusionInstructPix2PixPipeline
 from huggingface_hub import InferenceClient
 
@@ -45,31 +45,6 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, 999999)
     return seed
 
-def resize_image(image, output_size=(512, 512)):
-    # Calculate aspect ratios
-    target_aspect = output_size[0] / output_size[1]  # Aspect ratio of the desired size
-    image_aspect = image.width / image.height  # Aspect ratio of the original image
-
-    # Resize then crop if the original image is larger
-    if image_aspect > target_aspect:
-        new_height = output_size[1]
-        new_width = int(new_height * image_aspect)
-        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-        left = (new_width - output_size[0]) / 2
-        top = 0
-        right = (new_width + output_size[0]) / 2
-        bottom = output_size[1]
-    else:
-        new_width = output_size[0]
-        new_height = int(new_width / image_aspect)
-        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-        left = 0
-        top = (new_height - output_size[1]) / 2
-        right = output_size[0]
-        bottom = (new_height + output_size[1]) / 2
-    cropped_image = resized_image.crop((left, top, right, bottom))
-    return cropped_image
-
 pipe2 = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None).to("cuda")
 
 @spaces.GPU(duration=30, queue=False)
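Note (not part of the commit): the removed resize_image helper scaled the input to cover a 512x512 target while preserving aspect ratio, then center-cropped it. If that preprocessing is still needed after this change, Pillow's ImageOps.fit performs the same resize-and-center-crop in a single call. The sketch below is only an illustration of that equivalence; the function name and the file path in the usage comment are hypothetical.

# Minimal sketch, assuming the same 512x512 target the removed helper used.
from PIL import Image, ImageOps

def center_crop_resize(image: Image.Image, output_size=(512, 512)) -> Image.Image:
    # ImageOps.fit resizes to fill output_size (preserving aspect ratio)
    # and crops around the given center point, matching the old helper.
    return ImageOps.fit(image, output_size, method=Image.LANCZOS, centering=(0.5, 0.5))

# Hypothetical usage:
# img = center_crop_resize(Image.open("input.png"))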