Update README.md
- README.md +6 -6
- config.json +0 -55
README.md
CHANGED
@@ -178,7 +178,7 @@ from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2_5-38B-MPO-AWQ'
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
-pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192, tp=2))
 response = pipe(('describe this image', image))
 print(response.text)
 ```
@@ -195,7 +195,7 @@ from lmdeploy.vl import load_image
 from lmdeploy.vl.constants import IMAGE_TOKEN
 
 model = 'OpenGVLab/InternVL2_5-38B-MPO-AWQ'
-pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192, tp=2))
 
 image_urls=[
     'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
@@ -217,7 +217,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2_5-38B-MPO-AWQ'
-pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192, tp=2))
 
 image_urls=[
     "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
@@ -237,7 +237,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, GenerationConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2_5-38B-MPO-AWQ'
-pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192))
+pipe = pipeline(model, backend_config=TurbomindEngineConfig(session_len=8192, tp=2))
 
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
 gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)
@@ -252,7 +252,7 @@ print(sess.response.text)
 LMDeploy's `api_server` enables models to be easily packed into services with a single command. The provided RESTful APIs are compatible with OpenAI's interfaces. Below are an example of service startup:
 
 ```shell
-lmdeploy serve api_server OpenGVLab/InternVL2_5-38B-MPO-AWQ --server-port 23333
+lmdeploy serve api_server OpenGVLab/InternVL2_5-38B-MPO-AWQ --server-port 23333 --tp 2
 ```
 
 To use the OpenAI-style interface, you need to install OpenAI:
@@ -291,7 +291,7 @@ print(response)
 
 ## License
 
-This project is released under the MIT License. This project uses the pre-trained Qwen2.5-
+This project is released under the MIT License. This project uses the pre-trained Qwen2.5-32B-Instruct as a component, which is licensed under the Apache License 2.0.
 
 ## Citation
 
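The README hunks above only touch the serving side: the pipeline examples gain `tp=2` and the `api_server` command gains `--tp 2`, splitting the model across two GPUs with tensor parallelism. For the "OpenAI-style interface" the README refers to, a minimal client-side sketch might look like the following; the endpoint address, API-key placeholder, and message content are illustrative assumptions, not part of this commit:

```python
from openai import OpenAI

# Assumes the api_server from the hunk above is running locally on port 23333.
client = OpenAI(api_key='YOUR_API_KEY', base_url='http://0.0.0.0:23333/v1')

# The served model name can be discovered from the /v1/models endpoint.
model_name = client.models.list().data[0].id

response = client.chat.completions.create(
    model=model_name,
    messages=[{
        'role': 'user',
        'content': [
            {'type': 'text', 'text': 'describe this image'},
            {'type': 'image_url',
             'image_url': {'url': 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg'}},
        ],
    }],
    temperature=0.8,
    top_p=0.8)
print(response.choices[0].message.content)
```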
config.json
CHANGED
@@ -110,91 +110,36 @@
   "select_layer": -1,
   "template": "internvl2_5",
   "torch_dtype": "float16",
-  "transformers_version": null,
   "use_backbone_lora": 0,
   "use_llm_lora": 0,
   "use_thumbnail": true,
   "vision_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
     "architectures": [
       "InternVisionModel"
     ],
     "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
     "drop_path_rate": 0.0,
     "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
     "hidden_act": "gelu",
     "hidden_size": 3200,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
     "image_size": 448,
     "initializer_factor": 0.1,
     "initializer_range": 1e-10,
     "intermediate_size": 12800,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
     "layer_norm_eps": 1e-06,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
     "model_type": "intern_vit_6b",
-    "no_repeat_ngram_size": 0,
     "norm_type": "rms_norm",
     "num_attention_heads": 25,
-    "num_beam_groups": 1,
-    "num_beams": 1,
     "num_channels": 3,
     "num_hidden_layers": 45,
-    "num_return_sequences": 1,
     "output_attentions": false,
     "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
     "patch_size": 14,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
     "qk_normalization": true,
     "qkv_bias": false,
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
     "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
     "torch_dtype": "bfloat16",
-    "torchscript": false,
     "transformers_version": "4.45.1",
-    "typical_p": 1.0,
     "use_bfloat16": true,
     "use_flash_attn": true
   }
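The config.json hunk only prunes keys from `vision_config` that are generic `transformers` configuration and generation defaults (`top_k`, `num_beams`, `id2label`, and so on); none of the pruned entries is a vision-model parameter, and every value the model actually consumes is unchanged. A small sketch, assuming a hypothetical local checkout of this repository at `./InternVL2_5-38B-MPO-AWQ`, to confirm the slimmed file still loads and the pruned keys simply fall back to library defaults:

```python
import json

from transformers import AutoConfig

# Hypothetical local path to a checkout of this repository (an assumption).
repo_path = './InternVL2_5-38B-MPO-AWQ'

# The raw JSON no longer carries the pruned generation-style keys.
with open(f'{repo_path}/config.json') as f:
    vision_cfg = json.load(f)['vision_config']
print('top_k' in vision_cfg, 'num_beams' in vision_cfg)  # expected: False False

# Loading through transformers still works; absent keys take their defaults.
config = AutoConfig.from_pretrained(repo_path, trust_remote_code=True)
print(config.vision_config.image_size)  # 448, unchanged by this commit
```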