Junaid423 committed
Commit 2afb799
Parent: a834328

created app.py

Files changed (1):
  app.py +91 -0
app.py ADDED
@@ -0,0 +1,91 @@
+ import math
+ import numpy as np
+ import torch
+ import gradio as gr
+ from PIL import Image
+ from diffusers import AutoencoderKL, UNet2DConditionModel, DDPMScheduler
+ from transformers import CLIPVisionModelWithProjection, CLIPFeatureExtractor
+ from pipeline_zero1to3_stable import Zero1to3StableDiffusionPipeline, CCProjection
+
+ # Image preprocessing: resize to the model's resolution and normalize
+
+ def preprocess_image(input_im):
+     '''
+     :param input_im: PIL Image.
+     :return: (H, W, 3) float32 array with values in [0, 1].
+     '''
+     input_im = input_im.convert('RGB')
+     input_im = input_im.resize([256, 256], Image.Resampling.LANCZOS)
+     input_im = np.asarray(input_im, dtype=np.float32) / 255.0
+     # The alpha-based background masking below is disabled; after
+     # convert('RGB') there is no alpha channel left to threshold.
+     # input_im[input_im[:, :, -1] <= 0.9] = [1., 1., 1.]
+     return input_im
+
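+ # A sketch of the masking that preprocess_image leaves disabled, for RGBA
+ # inputs (an assumption about the original intent, unused by this app):
+ # composite the image over white wherever alpha marks the background.
+ #   rgba = np.asarray(pil_img.convert('RGBA'), dtype=np.float32) / 255.0
+ #   alpha = rgba[:, :, 3:4]
+ #   rgb = rgba[:, :, :3] * alpha + (1.0 - alpha)
+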
+ # Load the pretrained model components
+ model_id = "mirza152/zero123-face"
+ cc_projection = CCProjection.from_pretrained(model_id, subfolder="cc_projection", use_safetensors=True)
+ unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet", use_safetensors=True)
+ feature_extractor = CLIPFeatureExtractor.from_pretrained(model_id, subfolder="feature_extractor")
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(model_id, subfolder="image_encoder")
+ scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
+ vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", use_safetensors=True)
+
+ # Assemble the pipeline from the loaded components
+ pipe = Zero1to3StableDiffusionPipeline(
+     unet=unet,
+     cc_projection=cc_projection,
+     vae=vae,
+     scheduler=scheduler,
+     feature_extractor=feature_extractor,
+     image_encoder=image_encoder,
+     safety_checker=None,
+ )
+ # Reduce peak memory use during decoding and attention
+ pipe.enable_vae_tiling()
+ pipe.enable_attention_slicing()
+
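+ # Move the pipeline to a GPU when one is available (assumes the custom
+ # pipeline follows the standard diffusers .to(device) convention; it
+ # otherwise stays on the CPU, which is very slow for diffusion).
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipe = pipe.to(device)
+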
+ # Generate novel views of the input image and assemble them into a GIF
+ def process_image(input_image):
+     input_image = preprocess_image(input_image)
+     H, W = input_image.shape[:2]
+     input_image = Image.fromarray((input_image * 255.0).astype(np.uint8))
+     total_frames = 8
+     input_images = [input_image] * total_frames
+     pitch_range, yaw_range = 0.20, 0.20
+     avg_polar, avg_azimuth = 1.52, 1.57
+     all_poses = []
+
+     # One query pose per frame: sweep the camera on a small ellipse
+     # around the mean viewpoint
+     for frame_idx in range(total_frames):
+         theta_target = math.pi / 2 + yaw_range * math.sin(2 * math.pi * frame_idx / total_frames)
+         polar = avg_polar - theta_target
+         azimuth_cond = math.pi / 2 - 0.05 + pitch_range * math.cos(2 * math.pi * frame_idx / total_frames)
+         azimuth = avg_azimuth - azimuth_cond
+         query_pose = torch.tensor([polar, math.sin(azimuth), math.cos(azimuth), math.pi / 2 - avg_azimuth])
+         all_poses.append(query_pose)
+
+     query_poses = torch.stack(all_poses)
+     # Note: num_inference_steps=1 is unusually low; increase it for
+     # better quality at the cost of speed
+     images = pipe(input_imgs=input_images, prompt_imgs=input_images, poses=query_poses, height=H, width=W,
+                   guidance_scale=4, num_images_per_prompt=1, num_inference_steps=1).images
+
+     # Save the generated frames as an animated GIF (100 ms per frame, looping)
+     gif_path = "output.gif"
+     images[0].save(gif_path, save_all=True, append_images=images[1:], duration=100, loop=0)
+     return gif_path
+
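+ # The 4-vector built above appears to follow the zero123 relative-pose
+ # encoding [d_polar, sin(d_azimuth), cos(d_azimuth), radius-like offset]
+ # (an interpretation; this file does not state the convention). Worked
+ # example at frame_idx = 0: theta_target = pi/2, so d_polar = 1.52 - pi/2
+ # ≈ -0.051; azimuth = 1.57 - (pi/2 - 0.05 + 0.2) ≈ -0.151; the last entry
+ # is the constant pi/2 - 1.57 ≈ 0.0008.
+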
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=process_image,
+     inputs=gr.Image(type="pil", label="Input Image"),
+     outputs=gr.Image(type="filepath", label="Output GIF"),
+     title="Image to GIF Pipeline",
+     description="Upload an image to generate a GIF.",
+     allow_flagging="never",
+ )
+ iface.launch()
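
A minimal launch variant for sharing the demo, assuming the standard Gradio API (queue() serializes GPU-bound requests; share=True exposes a temporary public URL):

    iface.queue()             # process one GPU-bound request at a time
    iface.launch(share=True)  # also serve through a temporary public link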