Yash911 committed
Commit 7a729e8
1 Parent(s): 438cad1

Update app.py

Files changed (1):
  1. app.py +10 -160
app.py CHANGED
@@ -67,170 +67,20 @@ cloudinary.config(
 #
 
 
-#@title Only click "show code" and mess with this if you want to set your computer on fire
-
-#!pip install git+https://github.com/openai/glide-text2im
-
-from PIL import Image
-from IPython.display import display
-import torch as th
-
-from glide_text2im.download import load_checkpoint
-from glide_text2im.model_creation import (
-    create_model_and_diffusion,
-    model_and_diffusion_defaults,
-    model_and_diffusion_defaults_upsampler
-)
-
-# This notebook supports both CPU and GPU.
-# On CPU, generating one sample may take on the order of 20 minutes.
-# On a GPU, it should be under a minute.
-
-has_cuda = th.cuda.is_available()
-device = th.device('cpu' if not has_cuda else 'cuda')
-
-# Create base model.
-options = model_and_diffusion_defaults()
-options['use_fp16'] = has_cuda
-options['timestep_respacing'] = '100'  # use 100 diffusion steps for fast sampling
-model, diffusion = create_model_and_diffusion(**options)
-model.eval()
-if has_cuda:
-    model.convert_to_fp16()
-model.to(device)
-model.load_state_dict(load_checkpoint('base', device))
-print('total base parameters', sum(x.numel() for x in model.parameters()))
-
-# Create upsampler model.
-options_up = model_and_diffusion_defaults_upsampler()
-options_up['use_fp16'] = has_cuda
-options_up['timestep_respacing'] = 'fast27'  # use 27 diffusion steps for very fast sampling
-model_up, diffusion_up = create_model_and_diffusion(**options_up)
-model_up.eval()
-if has_cuda:
-    model_up.convert_to_fp16()
-model_up.to(device)
-model_up.load_state_dict(load_checkpoint('upsample', device))
-print('total upsampler parameters', sum(x.numel() for x in model_up.parameters()))
-
-def show_images(batch: th.Tensor):
-    """Display a batch of images inline."""
-    scaled = ((batch + 1) * 127.5).round().clamp(0, 255).to(th.uint8).cpu()
-    reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3])
-    display(Image.fromarray(reshaped.numpy()))
+# Set up Hugging Face API endpoint
+API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+headers = {"Authorization": "Bearer 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"}
 
 def query_model_with_image(image_description):
-    # Sampling parameters
-    # image_description = "dog in the field" #@param {type:"string"}
-    # image_description = ""
-    batch_size = 1 #@param {type:"integer"}
-    guidance_scale = 8.0
-
-    # Tune this parameter to control the sharpness of 256x256 images.
-    # A value of 1.0 is sharper, but sometimes results in grainy artifacts.
-    upsample_temp = 0.997
-
-    ##############################
-    # Sample from the base model #
-    ##############################
-
-    # Create the text tokens to feed to the model.
-    tokens = model.tokenizer.encode(image_description)
-    tokens, mask = model.tokenizer.padded_tokens_and_mask(
-        tokens, options['text_ctx']
-    )
-
-    # Create the classifier-free guidance tokens (empty)
-    full_batch_size = batch_size * 2
-    uncond_tokens, uncond_mask = model.tokenizer.padded_tokens_and_mask(
-        [], options['text_ctx']
-    )
-
-    # Pack the tokens together into model kwargs.
-    model_kwargs = dict(
-        tokens=th.tensor(
-            [tokens] * batch_size + [uncond_tokens] * batch_size, device=device
-        ),
-        mask=th.tensor(
-            [mask] * batch_size + [uncond_mask] * batch_size,
-            dtype=th.bool,
-            device=device,
-        ),
-    )
-
-    # Create a classifier-free guidance sampling function
-    def model_fn(x_t, ts, **kwargs):
-        half = x_t[: len(x_t) // 2]
-        combined = th.cat([half, half], dim=0)
-        model_out = model(combined, ts, **kwargs)
-        eps, rest = model_out[:, :3], model_out[:, 3:]
-        cond_eps, uncond_eps = th.split(eps, len(eps) // 2, dim=0)
-        half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
-        eps = th.cat([half_eps, half_eps], dim=0)
-        return th.cat([eps, rest], dim=1)
-
-    # Sample from the base model.
-    model.del_cache()
-    samples = diffusion.p_sample_loop(
-        model_fn,
-        (full_batch_size, 3, options["image_size"], options["image_size"]),
-        device=device,
-        clip_denoised=True,
-        progress=True,
-        model_kwargs=model_kwargs,
-        cond_fn=None,
-    )[:batch_size]
-    model.del_cache()
-
-    # Show the output
-    show_images(samples)
-
-    ##############################
-    # Upsample the 64x64 samples #
-    ##############################
-
-    tokens = model_up.tokenizer.encode(image_description)
-    tokens, mask = model_up.tokenizer.padded_tokens_and_mask(
-        tokens, options_up['text_ctx']
-    )
-
-    # Create the model conditioning dict.
-    model_kwargs = dict(
-        # Low-res image to upsample.
-        low_res=((samples + 1) * 127.5).round() / 127.5 - 1,
-
-        # Text tokens
-        tokens=th.tensor(
-            [tokens] * batch_size, device=device
-        ),
-        mask=th.tensor(
-            [mask] * batch_size,
-            dtype=th.bool,
-            device=device,
-        ),
-    )
-
-    # Sample from the upsampler model.
-    model_up.del_cache()
-    up_shape = (batch_size, 3, options_up["image_size"], options_up["image_size"])
-    image = diffusion_up.ddim_sample_loop(
-        model_up,
-        up_shape,
-        noise=th.randn(up_shape, device=device) * upsample_temp,
-        device=device,
-        clip_denoised=True,
-        progress=True,
-        model_kwargs=model_kwargs,
-        cond_fn=None,
-    )[:batch_size]
-    model_up.del_cache()
-
-    # Show the output
-    show_images(image)
-    return image
+    payload = {
+        "inputs": image_description
+    }
+    response = requests.post(API_URL, headers=headers, json=payload)
+    image_bytes = response.content
+
+    image = Image.open(io.BytesIO(image_bytes))
+    return image
 
 
 def upload_to_cloudinary(image, prompt_text):
     image_data = io.BytesIO()
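For context on the deleted code: model_fn implements classifier-free guidance. The batch is doubled (full_batch_size = batch_size * 2) so a single forward pass predicts noise for both the text-conditioned half and the unconditioned half, and the two predictions are then blended. A minimal sketch of just the core update, pulled out of the removed code for readability:

import torch as th

def guided_eps(cond_eps: th.Tensor, uncond_eps: th.Tensor,
               guidance_scale: float) -> th.Tensor:
    # Push the noise prediction away from the unconditional one; a scale
    # above 1 (the removed code used 8.0) trades diversity for prompt fidelity.
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)

At guidance_scale = 1.0 this reduces to the plain conditional prediction. The removed pipeline then fed the guided 64x64 samples to a second model that upsamples them to 256x256 in 27 DDIM steps ('fast27').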
 
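One subtle removed line is the upsampler conditioning low_res=((samples + 1) * 127.5).round() / 127.5 - 1: it round-trips the base samples through the 256-level uint8 grid, presumably so the upsampler sees the same quantization as the uint8 training images. The same idea in isolation, as a hypothetical helper that is not in the original code:

import torch as th

def requantize(x: th.Tensor) -> th.Tensor:
    # Map [-1, 1] onto the 256-level uint8 grid and back, mimicking an
    # image save/load cycle without leaving floating point.
    return ((x + 1) * 127.5).round() / 127.5 - 1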
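The replacement code assumes requests, io, and PIL.Image are already imported near the top of app.py, outside this hunk, and it hardcodes the bearer token in the source. A self-contained sketch of the same call, with the token read from an environment variable (HF_TOKEN here is an assumed name) and basic error handling layered on top, neither of which is part of the commit:

import io
import os

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"

def query_model_with_image(image_description: str, timeout: float = 120.0) -> Image.Image:
    # Assumes the token is supplied via the environment, not hardcoded.
    headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
    response = requests.post(
        API_URL,
        headers=headers,
        json={"inputs": image_description},
        timeout=timeout,
    )
    # On success the endpoint returns raw image bytes; failures (typically
    # including a 503 while the model is cold-loading) come back as JSON text.
    if response.status_code != 200:
        raise RuntimeError(f"Inference API error {response.status_code}: {response.text}")
    return Image.open(io.BytesIO(response.content))

Compared with the deleted GLIDE pipeline, nothing is generated locally anymore, so the Space no longer needs a GPU or the roughly 20-minute CPU sampling budget; latency and availability now depend entirely on the hosted endpoint.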