praeclarumjj3 committed
Commit 15f569c · verified · 1 Parent(s): 15341f5

Update app.py

Files changed (1)
  1. app.py +31 -30
app.py CHANGED
@@ -252,42 +252,43 @@ def regenerate(state, image_process_mode):
 @spaces.GPU
 def get_interm_outs(state):
     prompt = state.get_prompt()
+    print(prompt)
     images = state.get_images(return_pil=True)
     #prompt, image_args = process_image(prompt, images)

-    if images is not None and len(images) > 0:
-        if len(images) > 0:
-            if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
-                raise ValueError("Number of images does not match number of <image> tokens in prompt")
+    # if images is not None and len(images) > 0:
+    #     if len(images) > 0:
+    #         if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
+    #             raise ValueError("Number of images does not match number of <image> tokens in prompt")

-            #images = [load_image_from_base64(image) for image in images]
-            image_sizes = [image.size for image in images]
-            inp_images = process_images(images, image_processor, model.config)
-
-            if type(inp_images) is list:
-                inp_images = [image.to(model.device, dtype=torch.float16) for image in images]
-            else:
-                inp_images = inp_images.to(model.device, dtype=torch.float16)
-        else:
-            inp_images = None
-            image_sizes = None
-        image_args = {"images": inp_images, "image_sizes": image_sizes}
-    else:
-        inp_images = None
-        image_args = {}
-
-    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
-
-    interm_outs = model.get_visual_interpretations(
-        input_ids,
-        **image_args
-    )
+    #         #images = [load_image_from_base64(image) for image in images]
+    #         image_sizes = [image.size for image in images]
+    #         inp_images = process_images(images, image_processor, model.config)
+
+    #         if type(inp_images) is list:
+    #             inp_images = [image.to(model.device, dtype=torch.float16) for image in images]
+    #         else:
+    #             inp_images = inp_images.to(model.device, dtype=torch.float16)
+    #     else:
+    #         inp_images = None
+    #         image_sizes = None
+    #     image_args = {"images": inp_images, "image_sizes": image_sizes}
+    # else:
+    #     inp_images = None
+    #     image_args = {}
+
+    # input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
+
+    # interm_outs = model.get_visual_interpretations(
+    #     input_ids,
+    #     **image_args
+    # )

-    depth_outs = get_depth_images(interm_outs, image_sizes[0])
-    seg_outs = get_seg_images(interm_outs, images[0])
-    gen_outs = get_gen_images(interm_outs)
+    # depth_outs = get_depth_images(interm_outs, image_sizes[0])
+    # seg_outs = get_seg_images(interm_outs, images[0])
+    # gen_outs = get_gen_images(interm_outs)

-    return depth_outs, seg_outs, gen_outs
+    return images[0], images[0], images[0]


 @spaces.GPU
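
For context, the net effect of this change is that get_interm_outs no longer runs the visual-interpretation pass: it prints the prompt and returns the first uploaded image for the depth, segmentation, and generation output slots alike. Below is a minimal, self-contained sketch of that behavior, assuming a hypothetical StubState stand-in for the conversation state and omitting the @spaces.GPU decorator; the names StubState and get_interm_outs_stubbed are illustrative and not part of app.py.

# Hypothetical sketch; StubState and get_interm_outs_stubbed are illustrative
# names, not identifiers from the repository.
from PIL import Image

class StubState:
    """Minimal stand-in for the Gradio conversation state used by the demo."""
    def __init__(self, prompt, images):
        self._prompt = prompt
        self._images = images

    def get_prompt(self):
        return self._prompt

    def get_images(self, return_pil=True):
        return self._images

def get_interm_outs_stubbed(state):
    # Mirrors the committed change: print the prompt, skip the model call,
    # and hand back the first uploaded image for all three output slots.
    prompt = state.get_prompt()
    print(prompt)
    images = state.get_images(return_pil=True)
    return images[0], images[0], images[0]

state = StubState("Describe this image. <image>", [Image.new("RGB", (64, 64))])
depth_out, seg_out, gen_out = get_interm_outs_stubbed(state)
assert depth_out is seg_out is gen_out  # every slot receives images[0]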