taroshi committed
Commit 6da846d · verified · 1 Parent(s): f32d987

Upload modeling_internvl_chat.py with huggingface_hub

Files changed (1)
modeling_internvl_chat.py +348 -0
modeling_internvl_chat.py ADDED
@@ -0,0 +1,348 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import warnings
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
                          Qwen2ForCausalLM)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging

from .configuration_internvl_chat import InternVLChatConfig
from .conversation import get_conv_template
from .modeling_intern_vit import InternVisionModel, has_flash_attn

logger = logging.get_logger(__name__)

def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))
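# Illustrative use (hypothetical values, not taken from this repo):
# version_cmp('4.40.0', '4.37.0', 'ge') -> True. packaging.version compares
# release components numerically, so '4.9' ranks below '4.37', unlike a
# plain string comparison.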


class InternVLChatModel(PreTrainedModel):
    config_class = InternVLChatConfig
    main_input_name = 'pixel_values'
    base_model_prefix = 'language_model'
    _supports_flash_attn_2 = True
    _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'Qwen2DecoderLayer']

    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.37.0', 'ge')
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.select_layer = config.select_layer
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
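        # Worked example with typical InternVL2 values (assumed here, not read
        # from this checkpoint's config): image_size=448, patch_size=14,
        # downsample_ratio=0.5 gives
        # (448 // 14) ** 2 * 0.5 ** 2 = 1024 * 0.25 = 256 tokens per tile.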
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        use_flash_attn = use_flash_attn if has_flash_attn else False
        config.vision_config.use_flash_attn = True if use_flash_attn else False
        config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'ps_version: {self.ps_version}')
        if vision_model is not None:
            self.vision_model = vision_model
        else:
            self.vision_model = InternVisionModel(config.vision_config)
        if language_model is not None:
            self.language_model = language_model
        else:
            if config.llm_config.architectures[0] == 'LlamaForCausalLM':
                self.language_model = LlamaForCausalLM(config.llm_config)
            elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
                self.language_model = Qwen2ForCausalLM(config.llm_config)
            else:
                raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.mlp1 = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )
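        # mlp1 projects pixel-shuffled ViT features into the LLM embedding space.
        # With downsample_ratio=0.5, int(1 / 0.5) ** 2 = 4 neighboring patch
        # features are stacked along channels, hence the vit_hidden_size * 4 width.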

        self.img_context_token_id = None
        self.conv_template = get_conv_template(self.template)
        self.system_message = self.conv_template.system_message

    def forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        image_flags = image_flags.squeeze(-1)
        input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()

        vit_embeds = self.extract_feature(pixel_values)
        vit_embeds = vit_embeds[image_flags == 1]
        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
            print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        input_ids = input_ids.reshape(B * N)
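        # Overwrite every <IMG_CONTEXT> placeholder embedding with a ViT feature
        # vector. The `* 0.0 +` form keeps the text-embedding lookup in the
        # autograd graph (so the embedding table still receives gradients); the
        # except branch tolerates a placeholder/feature count mismatch by truncating.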
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = selected.sum()
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pixel_shuffle(self, x, scale_factor=0.5):
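        # Space-to-depth downsampling: trade spatial resolution for channel width.
        # E.g. (hypothetical shapes) a (n, 32, 32, C) grid with scale_factor=0.5
        # becomes (n, 16, 16, 4 * C): 4x fewer tokens, 4x wider features.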
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

    def extract_feature(self, pixel_values):
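        # Shape walk-through under the typical 448px / patch-14 / ratio-0.5 setup
        # (assumed, not read from the config): ViT output (B, 1025, C) -> drop CLS
        # -> (B, 1024, C) -> grid (B, 32, 32, C) -> pixel_shuffle (B, 16, 16, 4C)
        # -> flatten (B, 256, 4C) -> mlp1 (B, 256, llm_hidden_size).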
        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True).last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True).hidden_states[self.select_layer]
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template = get_conv_template(self.template)
            template.system_message = self.system_message
            template.append_message(template.roles[0], question)
            template.append_message(template.roles[1], None)
            query = template.get_prompt()

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(template.sep.strip())[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
             verbose=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template = get_conv_template(self.template)
        template.system_message = self.system_message
        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template.append_message(template.roles[0], old_question)
            template.append_message(template.roles[1], old_answer)
        template.append_message(template.roles[0], question)
        template.append_message(template.roles[1], None)
        query = template.get_prompt()

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].to(self.device)
        attention_mask = model_inputs['attention_mask'].to(self.device)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(template.sep.strip())[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.LongTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            **generate_kwargs,
        )

        return outputs
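
A minimal usage sketch, assuming the checkpoint is loaded with trust_remote_code=True so this class is resolved. The path, the 448x448 tile size, and the random pixel tensor are placeholders; real inputs need this model family's tiling and normalization preprocessing.

import torch
from transformers import AutoModel, AutoTokenizer

path = 'path/to/this/checkpoint'  # hypothetical local path or repo id
model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16,
                                  trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)

# Stand-in for preprocessed image tiles, shape (num_tiles, 3, H, W).
pixel_values = torch.rand(1, 3, 448, 448, dtype=torch.bfloat16).cuda()
generation_config = dict(max_new_tokens=64, do_sample=False)

response = model.chat(tokenizer, pixel_values, '<image>\nDescribe this image.',
                      generation_config)
print(response)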