CCRss committed on
Commit
6b54a1a
·
verified ·
1 Parent(s): 064fdc1

Upload configuration_internvl_chat.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. configuration_internvl_chat.py +114 -0
configuration_internvl_chat.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import copy
8
+
9
+ from transformers import AutoConfig, LlamaConfig, Qwen2Config
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ from .configuration_intern_vit import InternVisionConfig
14
+
15
+ from transformers import WhisperConfig
16
+ logger = logging.get_logger(__name__)
17
+
18
+
19
class InternVLChatConfig(PretrainedConfig):
    """Composite configuration for the InternVL chat model.

    Bundles three sub-configurations:

    * ``vision_config`` -- the InternViT backbone (:class:`InternVisionConfig`).
    * ``llm_config`` -- the language model, chosen by the ``architectures``
      entry (``LlamaConfig`` or ``Qwen2Config``).
    * ``audio_config`` -- a Whisper audio-encoder config, stored as a plain
      dict (defaults derived from ``openai/whisper-large-v3``).

    The remaining keyword arguments control LoRA usage, dynamic tiling of
    input images, and pixel-shuffle behavior; they are stored verbatim as
    attributes.
    """

    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            audio_config=None,  # added audio config
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version='v1',
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        """Build the composite config; missing sub-configs get defaults.

        Args:
            vision_config: dict of InternVisionConfig kwargs, or None for defaults.
            audio_config: None (load Whisper-large-v3 defaults), a dict of
                overrides applied on top of those defaults, or a
                ``PretrainedConfig`` instance.
            llm_config: dict of LLM config kwargs; its ``architectures[0]``
                selects between ``LlamaConfig`` and ``Qwen2Config``.

        Raises:
            ValueError: if the LLM architecture is unsupported/missing, or
                ``audio_config`` has an unsupported type.
        """
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            # Fixed log message: the default architecture is Qwen2, not Llama
            # (the previous message incorrectly referenced `LlamaConfig`).
            logger.info('llm_config is None. Initializing the llm config with default values (`Qwen2Config`).')

        self.vision_config = InternVisionConfig(**vision_config)

        # Guard against a missing or empty 'architectures' entry: the old
        # code did `llm_config.get('architectures')[0]`, which raises an
        # opaque TypeError on None instead of the intended ValueError.
        architectures = llm_config.get('architectures') or [None]
        arch = architectures[0]
        if arch == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif arch == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(arch))

        # Audio config initialization. Stored as a plain dict so it
        # serializes cleanly alongside the other attributes.
        # NOTE(review): `from_pretrained` may hit the network / local cache
        # at config-construction time -- confirm this is acceptable.
        if audio_config is None:
            logger.info('audio_config is None. Loading default Whisper-large-v3 config.')
            self.audio_config = WhisperConfig.from_pretrained("openai/whisper-large-v3").to_dict()
        elif isinstance(audio_config, dict):
            # Start from the Whisper-large-v3 defaults and overlay the
            # caller-provided values.
            base_config = WhisperConfig.from_pretrained("openai/whisper-large-v3")
            for key, value in audio_config.items():
                setattr(base_config, key, value)
            self.audio_config = base_config.to_dict()
        elif isinstance(audio_config, PretrainedConfig):
            self.audio_config = audio_config.to_dict()
        else:
            raise ValueError(f"Unsupported audio_config type: {type(audio_config)}")

        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default
        [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up
            this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        # Nested configs are objects in __dict__; replace with their dict form.
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        # NOTE: the remaining scalar attributes (use_backbone_lora, template,
        # ps_version, ...) and audio_config (already a dict) are included by
        # the deepcopy above; re-assigning them was redundant. In particular,
        # the old `output['audio_config'] = self.audio_config` replaced the
        # deep copy with a reference to the live dict, so mutating the
        # returned dict would silently mutate this config -- fixed by
        # keeping the deep-copied value.
        return output