Update app.py
app.py CHANGED
@@ -252,42 +252,43 @@ def regenerate(state, image_process_mode):
 @spaces.GPU
 def get_interm_outs(state):
     prompt = state.get_prompt()
+    print(prompt)
     images = state.get_images(return_pil=True)
     #prompt, image_args = process_image(prompt, images)

-    if images is not None and len(images) > 0:
-        if len(images) > 0:
-            if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
-                raise ValueError("Number of images does not match number of <image> tokens in prompt")
+    # if images is not None and len(images) > 0:
+    #     if len(images) > 0:
+    #         if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
+    #             raise ValueError("Number of images does not match number of <image> tokens in prompt")

-            #images = [load_image_from_base64(image) for image in images]
-            image_sizes = [image.size for image in images]
-            inp_images = process_images(images, image_processor, model.config)
-
-            if type(inp_images) is list:
-                inp_images = [image.to(model.device, dtype=torch.float16) for image in images]
-            else:
-                inp_images = inp_images.to(model.device, dtype=torch.float16)
-        else:
-            inp_images = None
-            image_sizes = None
-        image_args = {"images": inp_images, "image_sizes": image_sizes}
-    else:
-        inp_images = None
-        image_args = {}
-
-    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
-
-    interm_outs = model.get_visual_interpretations(
-        input_ids,
-        **image_args
-    )
+    #         #images = [load_image_from_base64(image) for image in images]
+    #         image_sizes = [image.size for image in images]
+    #         inp_images = process_images(images, image_processor, model.config)
+
+    #         if type(inp_images) is list:
+    #             inp_images = [image.to(model.device, dtype=torch.float16) for image in images]
+    #         else:
+    #             inp_images = inp_images.to(model.device, dtype=torch.float16)
+    #     else:
+    #         inp_images = None
+    #         image_sizes = None
+    #     image_args = {"images": inp_images, "image_sizes": image_sizes}
+    # else:
+    #     inp_images = None
+    #     image_args = {}
+
+    # input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
+
+    # interm_outs = model.get_visual_interpretations(
+    #     input_ids,
+    #     **image_args
+    # )

-    depth_outs = get_depth_images(interm_outs, image_sizes[0])
-    seg_outs = get_seg_images(interm_outs, images[0])
-    gen_outs = get_gen_images(interm_outs)
+    # depth_outs = get_depth_images(interm_outs, image_sizes[0])
+    # seg_outs = get_seg_images(interm_outs, images[0])
+    # gen_outs = get_gen_images(interm_outs)

-    return
+    return images[0], images[0], images[0]


 @spaces.GPU
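The stubbed return path above indexes images[0] unconditionally, so get_interm_outs will raise an IndexError whenever the conversation state holds no images. A minimal sketch of a guarded variant of the same stub (the early return with None placeholders is an assumption for illustration, not part of this commit):

# Hypothetical guarded version of the debug stub; not part of this commit.
@spaces.GPU
def get_interm_outs(state):
    prompt = state.get_prompt()
    print(prompt)
    images = state.get_images(return_pil=True)

    # Guard against an empty image list before indexing images[0].
    if not images:
        return None, None, None

    # Placeholder outputs while the visual-interpretation path stays commented out.
    return images[0], images[0], images[0]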