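"""Evaluate a TinyChart model on ChartQA-style data.

Shards the test JSON across --num_chunks parallel runs, generates one answer
per question, and writes JSONL records with the question, ground-truth answer,
and model answer.
"""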
import argparse
import time
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from tinychart.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from tinychart.conversation import conv_templates, SeparatorStyle
from tinychart.model.builder import load_pretrained_model
from tinychart.utils import disable_torch_init
from tinychart.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path, KeywordsStoppingCriteria
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import math


def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks."""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division, so no item is dropped
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    chunks = split_list(lst, n)
    return chunks[k]
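
# Example: split_list(list(range(10)), 3) uses chunk_size = ceil(10/3) = 4, giving
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]; get_chunk(list(range(10)), 3, 1) is [4, 5, 6, 7].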


class EvalDataset(Dataset):
    """Wraps the evaluation items so prompt tokenization and image preprocessing
    can run inside DataLoader worker processes."""

    def __init__(self, data_items, image_folder, tokenizer, image_processor, model_config):
        self.data_items = data_items
        self.image_folder = image_folder
        self.tokenizer = tokenizer
        self.image_processor = image_processor
        self.model_config = model_config

    def __getitem__(self, index):
        line = self.data_items[index]
        image_file = line["image"]
        qs = line["conversations"][0]["value"]
        # if self.model_config.mm_use_im_start_end:
        #     qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        # else:
        #     qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        # Build the chat-style prompt for the selected conversation template.
        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB')
        image_tensor = process_images([image], self.image_processor, self.model_config)[0]

        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
        return input_ids, image_tensor, image.size

    def __len__(self):
        return len(self.data_items)


def collate_fn(batch):
    input_ids, image_tensors, image_sizes = zip(*batch)
    # Stacking assumes equal sequence lengths; this holds because batch_size is fixed at 1.
    input_ids = torch.stack(input_ids, dim=0)
    image_tensors = torch.stack(image_tensors, dim=0)
    return input_ids, image_tensors, image_sizes


# Build a DataLoader so image decoding and tokenization overlap with GPU generation.
def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4):
    assert batch_size == 1, "batch_size must be 1"
    dataset = EvalDataset(questions, image_folder, tokenizer, image_processor, model_config)
    data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False, collate_fn=collate_fn)
    return data_loader


def eval_model(args):
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # Load the evaluation items and keep only this process's shard.
    all_data = json.load(open(args.data_path, "r"))
    all_data = get_chunk(all_data, args.num_chunks, args.chunk_idx)

    answers_file = os.path.expanduser(args.output_path)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
        args.conv_mode = args.conv_mode + '_mmtag'
        print(f'It seems that this is a plain model that is not using an mmtag prompt; auto-switching to {args.conv_mode}.')

    data_loader = create_data_loader(all_data, args.image_folder, tokenizer, image_processor, model.config)

    for (input_ids, image_tensor, image_sizes), line in tqdm(zip(data_loader, all_data), total=len(all_data)):
        idx = line["id"]
        cur_prompt = line["conversations"][0]["value"]
        input_ids = input_ids.to(device='cuda', non_blocking=True)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True),
                pad_token_id=tokenizer.pad_token_id,
                # Greedy decoding when temperature is 0, sampling otherwise.
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                max_new_tokens=args.max_new_tokens,
                min_new_tokens=args.min_new_tokens,
                use_cache=True)

        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
        ans_id = shortuuid.uuid()

        # One JSONL record per question; flush immediately so interrupted runs keep partial results.
        ans_file.write(json.dumps({"id": idx,
                                   "answer_id": ans_id,
                                   "question": cur_prompt,
                                   "gt_answer": line["conversations"][1]["value"],
                                   "model_answer": outputs}) + "\n")
        ans_file.flush()
    ans_file.close()
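

# Typical sharded launch ("eval.py" is a stand-in for this file's actual name):
#   CUDA_VISIBLE_DEVICES=0 python eval.py --num_chunks 2 --chunk_idx 0 --output_path ./output/answers_0.jsonl
#   CUDA_VISIBLE_DEVICES=1 python eval.py --num_chunks 2 --chunk_idx 1 --output_path ./output/answers_1.jsonl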
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model_base", type=str, default=None)
    parser.add_argument("--image_folder", type=str, default="")
    parser.add_argument("--data_path", type=str, default="./data/test_chartqa+cot_shuffle.json")
    parser.add_argument("--output_path", type=str, default="./output/")
    parser.add_argument("--conv_mode", type=str, default="phi")
    parser.add_argument("--num_chunks", type=int, default=1)
    parser.add_argument("--chunk_idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.0)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    parser.add_argument("--max_new_tokens", type=int, default=1024)
    parser.add_argument("--min_new_tokens", type=int, default=0)
    args = parser.parse_args()

    eval_model(args)