# OpenMM_Medical / baichuan.py

import os
import json

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

from .base import BaseModel
from ..smp import *
from ..dataset import DATASET_TYPE

os.environ["TOKENIZERS_PARALLELISM"] = "false"


def load_model_tokenizer(checkpoint_path):
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_path, trust_remote_code=True,
    )
    device_map = 'auto'
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        device_map=device_map,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )
    return model, tokenizer
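
# Usage sketch (hypothetical checkpoint path). The checkpoint is expected to
# ship the custom Baichuan multimodal modeling code, hence trust_remote_code=True
# above:
#   model, tokenizer = load_model_tokenizer('/path/to/baichuan-checkpoint')
#   model.eval()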


class Baichuan(BaseModel):

    INSTALL_REQ = False
    INTERLEAVE = False

    def __init__(self, sft=True, model_path=None):
        assert model_path is not None
        self.device = "cuda"
        self.model_path = model_path
        self.model, self.tokenizer = load_model_tokenizer(model_path)
        self.model.bind_processor(self.tokenizer, training=False)
        torch.cuda.empty_cache()

        self.use_reserve_qa_prompt = sft
        self.reserve_qa_start_prompt = "<C_Q>"
        self.reserve_qa_end_prompt = "<C_A>"
        self.task_prompt = ""

        self.options_system_prompt = ('Carefully read the following question and select the letter corresponding '
                                      'to the correct answer. Highlight the applicable choices without giving '
                                      'explanations. ')
        self.wo_options_system_prompt = 'Carefully read the following question. Answer the question directly. '
        self.detail_system_prompt = 'Answer this question in detail and step by step. '
        self.vqa_prompt = 'Answer the question using a single word or phrase. '

    def generate_inner(self, message, dataset=None):
        image_str, question = '', ''
        for s in message:
            if s['type'] == 'image':
                # If the substring after the last '.' is longer than two
                # characters, the value is treated as a local file path;
                # otherwise it is assumed to be base64-encoded image data.
                if len(s["value"].split(".")[-1]) > 2:
                    image_dict = {"local": s["value"]}
                else:
                    image_dict = {"base64": s["value"]}
                image_str += f"<img_start_baichuan>{json.dumps(image_dict)}<img_end_baichuan>\n"
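                # e.g. a local file serializes into the prompt as:
                #   <img_start_baichuan>{"local": "/path/to/img.png"}<img_end_baichuan>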
            elif s['type'] == 'text':
                question += s['value']

        # sft version: <C_Q>...<C_A>
        if self.use_reserve_qa_prompt:
            prompt = "{}{}{}{}{}".format(
                self.reserve_qa_start_prompt, image_str, question,
                self.task_prompt, self.reserve_qa_end_prompt,
            )
        else:
            prompt = "{}{}{}".format(image_str, question, self.task_prompt)
print("****************************** prompt ******************************")
print(prompt)
print("********************************************************************")

        with torch.inference_mode():
            ret = self.model.processor(prompt)
            input_ids = ret.input_ids
            try:
                outputs = self.model.generate(
                    inputs=torch.LongTensor([input_ids]).cuda(),
                    images=[torch.tensor(img, dtype=torch.float32).cuda() for img in ret.images]
                    if ret.images is not None else None,
                    patch_nums=ret.patch_nums,
                    images_grid=ret.images_grid,
                    max_new_tokens=1024, do_sample=False, top_k=5, top_p=0.85, temperature=0,
                    num_return_sequences=1, repetition_penalty=1.05,
                    use_cache=False,
                )
                # Strip the prompt tokens and decode only the newly generated text.
                ret = self.tokenizer.batch_decode(
                    outputs[:, torch.LongTensor([input_ids]).to(self.device).shape[1]:],
                    skip_special_tokens=True,
                )[0].strip()
            except Exception as e:
                print(e)
                ret = ""
        response = ret

        print("=========================================== response ===========================================")
        print(f"\033[32m{response}\033[0m")
        print("================================================================================================")
        return response

    def use_custom_prompt(self, dataset):
        if dataset is not None and listinstr(['M3GIA'], dataset):
            return False
        if listinstr(['MCQ', 'VQA'], DATASET_TYPE(dataset)):
            return True
        elif dataset is not None and listinstr(['HallusionBench'], dataset):
            return True
        return False
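
    # e.g. MCQ benchmarks such as MMBench take the custom prompt built below,
    # while M3GIA is explicitly routed to the default prompt pipeline.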

    def build_prompt(self, line, dataset=None):
        if isinstance(line, int):
            line = self.data.iloc[line]

        tgt_path = self.dump_image(line, dataset)
        system_prompt = ''
        question = line['question']

        if DATASET_TYPE(dataset) == 'MCQ':
            options = {
                cand: line[cand]
                for cand in string.ascii_uppercase
                if cand in line and not pd.isna(line[cand])
            }
            options_prompt = 'Options:\n'
            for key, item in options.items():
                options_prompt += f'{key}. {item}\n'
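            # e.g. options {'A': 'cat', 'B': 'dog'} render as:
            #   Options:
            #   A. cat
            #   B. dog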
            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
            prompt = ''
            if hint is not None:
                prompt += f'Hint: {hint}\n'
            prompt += f'Question: {question}\n'
            if len(options):
                prompt += options_prompt
                if 'MMBench' in dataset:
                    prompt += 'Please select the correct answer from the options above. \n'
                else:
                    system_prompt = self.options_system_prompt + '\nPlease just indicate your choice.'
            else:
                system_prompt = self.wo_options_system_prompt
            if 'MMMU' in dataset:  # Corner Case
                prompt = system_prompt + '\n' + prompt
                system_prompt = ''
        elif dataset is not None and listinstr(['HallusionBench'], dataset):
            question = line['question'] + ' Yes or No?'
            prompt = question
        elif dataset is not None and listinstr(['MME'], dataset):
            question = line['question'] + ' Yes or No?'
            prompt = question
        elif dataset is not None and listinstr(['OCRBench'], dataset):
            system_prompt = self.vqa_prompt
            question = line['question']
            prompt = question
        elif DATASET_TYPE(dataset) == 'VQA':
            if listinstr(['LLaVABench', 'MMLongBench_DOC'], dataset):
                system_prompt = ''
                prompt = question
            elif listinstr(['MMVet'], dataset):
                system_prompt = self.detail_system_prompt
                prompt = question
            elif listinstr(['ChartQA'], dataset):
                system_prompt = 'Please answer the question using a single word. '
                prompt = question
            else:
                system_prompt = self.vqa_prompt
                prompt = question

        msgs = []
        if system_prompt:
            msgs.append(dict(type='text', value=system_prompt))
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            # Append (rather than reassign) so the system prompt, if any, is kept.
            msgs.append(dict(type='image', value=tgt_path))
        msgs.append(dict(type='text', value=prompt))
        return msgs
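

# End-to-end sketch (hypothetical paths; `message` follows the interleaved
# [{'type': 'image' | 'text', 'value': ...}] convention that generate_inner
# consumes):
#   model = Baichuan(model_path='/path/to/baichuan-checkpoint')
#   response = model.generate_inner([
#       dict(type='image', value='/path/to/scan.png'),
#       dict(type='text', value='Describe the image.'),
#   ])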