{
  "_name_or_path": "./FLMR",
  "architectures": [
    "FLMRModelForRetrieval"
  ],
  "auto_map": {
    "AutoConfig": "configuration_flmr.FLMRConfig",
    "AutoModel": "modeling_flmr.FLMRModelForRetrieval"
  },
  "context_concat_output_from_text_encoder": true,
  "context_concat_output_from_vision_encoder": false,
  "dim": 128,
  "initializer_range": 0.02,
  "load_cpu_extension": false,
  "mapping_network_prefix_length": 32,
  "mask_instruction_token": null,
  "mask_punctuation": true,
  "model_type": "flmr",
  "query_concat_output_from_text_encoder": true,
  "query_concat_output_from_vision_encoder": true,
  "separate_query_and_context_text_encoder": false,
  "separate_query_and_context_vision_encoder": false,
  "text_config": {
    "architectures": [
      "BertForMaskedLM"
    ],
    "gradient_checkpointing": false,
    "model_type": "flmr_text_model",
    "use_cache": true,
    "vocab_size": 30531
  },
  "torch_dtype": "float32",
  "transformer_mapping_config_base": null,
  "transformer_mapping_cross_attention_length": 32,
  "transformer_mapping_num_hidden_layers": null,
  "transformers_version": "4.37.2",
  "use_transformer_mapping_network": false,
  "use_vision_encoder": true,
  "vision_config": {
    "dropout": 0.0,
    "model_type": "flmr_vision_model"
  },
  "vision_model_version": "openai/clip-vit-base-patch32"
}
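A minimal loading sketch, assuming this file is saved as config.json in a local checkpoint directory (here "./FLMR", as _name_or_path suggests) that also contains the configuration_flmr.py and modeling_flmr.py files referenced by "auto_map". The directory path is an assumption; only the field names come from the config above.

from transformers import AutoConfig, AutoModel

# trust_remote_code=True is required because "auto_map" points at custom model
# code shipped alongside the checkpoint, not at classes built into transformers.
config = AutoConfig.from_pretrained("./FLMR", trust_remote_code=True)

print(config.model_type)             # "flmr"
print(config.dim)                    # 128, the late-interaction embedding size
print(config.vision_model_version)   # "openai/clip-vit-base-patch32"

# Instantiates FLMRModelForRetrieval via the same auto_map indirection.
model = AutoModel.from_pretrained("./FLMR", trust_remote_code=True)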