import json
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from .base import BaseModel
from ..smp import *
from ..dataset import DATASET_TYPE

os.environ["TOKENIZERS_PARALLELISM"] = "false"


def load_model_tokenizer(checkpoint_path):
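    """Load a Baichuan multimodal checkpoint (remote code) and its tokenizer,
    sharding the model across available devices via device_map='auto'."""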
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_path, trust_remote_code=True,
    )
    device_map = 'auto'
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        device_map=device_map,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )
    return model, tokenizer


class Baichuan(BaseModel):
    INSTALL_REQ = False
    INTERLEAVE = False

    def __init__(self, sft=True, model_path=None):
        assert model_path is not None
        self.device = "cuda"
        self.model_path = model_path

        self.model, self.tokenizer = load_model_tokenizer(model_path)
        self.model.bind_processor(self.tokenizer, training=False)

        torch.cuda.empty_cache()

        self.use_reserve_qa_prompt = sft
        self.reserve_qa_start_prompt = "<C_Q>"
        self.reserve_qa_end_prompt = "<C_A>"

        self.task_prompt=""
        self.options_system_prompt = ('Carefully read the following question and select the letter corresponding '
                                      'to the correct answer. Highlight the applicable choices without giving '
                                      'explanations. ')
        self.wo_options_system_prompt = 'Carefully read the following question. Answer the question directly. '
        self.detail_system_prompt = 'Answer this question in detail and step by step. '
        self.vqa_prompt = 'Answer the question using a single word or phrase. '


    def generate_inner(self, message, dataset=None):
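        # `message` is a list of {'type': 'image' | 'text', 'value': ...} dicts.
        # Images are serialized into <img_start_baichuan>{json}<img_end_baichuan>
        # tags for the processor bound in __init__; text segments are concatenated.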
        image_str, question = '', ''
        for s in message:
            if s['type'] == 'image':
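                # Heuristic: a file-extension-like tail (last dot-separated
                # segment longer than 2 chars, e.g. '.jpg') marks a local path;
                # anything else is assumed to be raw base64 image data.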
                if len(s["value"].split(".")[-1]) > 2:
                    image_dict = {"local": s["value"]}
                else:
                    image_dict = {"base64": s["value"]}
                image_str += f"<img_start_baichuan>{json.dumps(image_dict)}<img_end_baichuan>\n"
            elif s['type'] == 'text':
                question += s['value']

        # sft version: <C_Q>...<C_A>
        if self.use_reserve_qa_prompt:
            prompt = "{}{}{}{}{}".format(self.reserve_qa_start_prompt, image_str, question, self.task_prompt, self.reserve_qa_end_prompt)
        else:
            prompt = "{}{}{}".format(image_str, question, self.task_prompt)
            
        print("****************************** prompt ******************************")
        print(prompt)
        print("********************************************************************")

        with torch.inference_mode():
            ret = self.model.processor(prompt)
            input_ids = ret.input_ids
            try:
                ret = self.model.generate(
                    inputs=torch.LongTensor([input_ids]).cuda(),
                    images=[torch.tensor(img, dtype=torch.float32).cuda() for img in ret.images] if ret.images is not None else None,
                    patch_nums=ret.patch_nums,
                    images_grid=ret.images_grid,
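                    # Greedy decoding: with do_sample=False, the top_k / top_p /
                    # temperature settings below should have no effect on token selection.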
                    max_new_tokens=1024, do_sample=False, top_k=5, top_p=0.85, temperature=0,
                    num_return_sequences=1, repetition_penalty=1.05,
                    use_cache=False
                )
                ret = self.tokenizer.batch_decode(ret[:, len(input_ids):], skip_special_tokens=True)[0].strip()
            except Exception as e:
                print(e)
                ret = ""
            
        response = ret

        print("=========================================== response ===========================================")
        print(f"\033[32m{response}\033[0m")
        print("================================================================================================")
        return response


    def use_custom_prompt(self, dataset):
        if dataset is None:
            return False
        if listinstr(['M3GIA'], dataset):
            return False
        if listinstr(['MCQ', 'VQA'], DATASET_TYPE(dataset)):
            return True
        if listinstr(['HallusionBench'], dataset):
            return True
        return False


    def build_prompt(self, line, dataset=None):
        if isinstance(line, int):
            line = self.data.iloc[line]

        tgt_path = self.dump_image(line, dataset)
        system_prompt = ''

        question = line['question']
        if DATASET_TYPE(dataset) == 'MCQ':
            options = {
                cand: line[cand]
                for cand in string.ascii_uppercase
                if cand in line and not pd.isna(line[cand])
            }
            options_prompt = 'Options:\n'
            for key, item in options.items():
                options_prompt += f'{key}. {item}\n'
            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
            prompt = ''
            if hint is not None:
                prompt += f'Hint: {hint}\n'
            prompt += f'Question: {question}\n'
            if len(options):
                prompt += options_prompt
                if 'MMBench' in dataset:
                    prompt += 'Please select the correct answer from the options above. \n'
                else:
                    system_prompt = self.options_system_prompt + '\nPlease just indicate your choice.'
            else:
                system_prompt = self.wo_options_system_prompt
            if 'MMMU' in dataset:  # Corner Case
                prompt = system_prompt + '\n' + prompt
                system_prompt = ''
        elif dataset is not None and listinstr(['HallusionBench', 'MME'], dataset):
            prompt = line['question'] + ' Yes or No?'
        elif dataset is not None and listinstr(['OCRBench'], dataset):
            system_prompt = self.vqa_prompt
            question = line['question']
            prompt = question
        elif DATASET_TYPE(dataset) == 'VQA':
            if listinstr(['LLaVABench', 'MMLongBench_DOC'], dataset):
                system_prompt = ''
                prompt = question
            elif listinstr(['MMVet'], dataset):
                system_prompt = self.detail_system_prompt
                prompt = question
            elif listinstr(['ChartQA'], dataset):
                system_prompt = 'Please answer the question using a single word. '
                prompt = question
            else:
                system_prompt = self.vqa_prompt
                prompt = question

        msgs = []
        if system_prompt:
            msgs.append(dict(type='text', value=system_prompt))
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            # Append rather than reassign, so the system prompt (if any) is kept.
            msgs.append(dict(type='image', value=tgt_path))
        msgs.append(dict(type='text', value=prompt))

        return msgs
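

if __name__ == '__main__':
    # Minimal smoke-test sketch, not part of the evaluation pipeline.
    # Both paths below are placeholders: substitute a real Baichuan
    # multimodal checkpoint (one shipping the custom processor expected
    # by bind_processor) and an actual image file.
    model = Baichuan(sft=True, model_path='/path/to/baichuan-checkpoint')
    message = [
        dict(type='image', value='/path/to/example.jpg'),
        dict(type='text', value='Describe this image.'),
    ]
    print(model.generate_inner(message))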