{ "architectures": [ "InternVLForConditionalGeneration" ], "downsample_ratio": 0.5, "image_seq_length": 256, "image_token_index": 151667, "model_type": "internvl", "projector_hidden_act": "gelu", "text_config": { "_attn_implementation_autoset": true, "architectures": [ "Qwen2ForCausalLM" ], "bos_token_id": 151643, "eos_token_id": 151645, "hidden_size": 896, "intermediate_size": 4864, "max_window_layers": 21, "model_type": "qwen2", "num_attention_heads": 14, "num_hidden_layers": 24, "num_key_value_heads": 2, "rope_theta": 1000000.0, "sliding_window": 32768, "torch_dtype": "bfloat16", "vocab_size": 151674 }, "torch_dtype": "bfloat16", "transformers_version": "4.50.0.dev0", "vision_config": { "_attn_implementation_autoset": true, "architectures": [ "InternVisionModel" ], "attention_dropout": 0.0, "drop_path_rate": 0.0, "dropout": 0.0, "initializer_factor": 1.0, "layer_scale_init_value": 0.1, "model_type": "internvl_vision", "torch_dtype": "bfloat16" }, "vision_feature_layer": -1 }