seawolf2357 committed on
Commit a18e824 · verified · 1 Parent(s): 4e071c6

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -38,11 +38,12 @@ def modify_caption(caption: str) -> str:
 
 def create_captions_rich(image: Image.Image) -> str:
     prompt = "caption en"
-    image_tensor = processor(image, return_tensors="pt").pixel_values.to("cpu")
+    # Preprocess the image data and pass it to the processor
+    image_tensor = processor(images=image, return_tensors="pt").pixel_values.to("cpu")
     model_inputs = processor(text=prompt, images=image_tensor, return_tensors="pt").to("cpu")
     input_len = model_inputs["input_ids"].shape[-1]
 
-    with torch.inference_mode():
+    with torch.no_grad():
         generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
         generation = generation[0][input_len:]
         decoded = processor.decode(generation, skip_special_tokens=True)
@@ -92,7 +93,7 @@ async def process_image(image_url, message):
 
 async def download_image(url):
     response = requests.get(url)
-    image = Image.open(io.BytesIO(response.content))
+    image = Image.open(io.BytesIO(response.content)).convert("RGB")  # Convert the image to RGB
     return image
 
 if __name__ == "__main__":
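
For context, a minimal sketch of how the two functions touched by this commit are typically wired together. It assumes a PaliGemma-style processor and model loaded via transformers (the checkpoint id below is a placeholder, not taken from this repo), passes the raw PIL image straight to the processor instead of re-feeding pixel_values, and shows download_image synchronously for brevity. Treat it as an illustration of the pattern, not the app's exact code.

# Usage sketch (assumptions: PaliGemma-style checkpoint; placeholder model id;
# synchronous download instead of the app's async wrapper).
import io
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

model_id = "google/paligemma-3b-mix-224"  # placeholder, not taken from this commit
processor = AutoProcessor.from_pretrained(model_id)
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id).to("cpu").eval()

def create_captions_rich(image: Image.Image) -> str:
    prompt = "caption en"
    # Hand the raw PIL image to the processor; it performs the pixel preprocessing itself
    model_inputs = processor(text=prompt, images=image, return_tensors="pt").to("cpu")
    input_len = model_inputs["input_ids"].shape[-1]
    with torch.no_grad():  # disable gradient tracking for inference, as in the commit
        generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
    generation = generation[0][input_len:]  # keep only the newly generated tokens
    return processor.decode(generation, skip_special_tokens=True)

def download_image(url: str) -> Image.Image:
    response = requests.get(url)
    # Force RGB so the processor never receives palette or RGBA images
    return Image.open(io.BytesIO(response.content)).convert("RGB")

if __name__ == "__main__":
    caption = create_captions_rich(download_image("https://example.com/cat.jpg"))
    print(caption)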