[
  {
    "text_encoder_name": "text_encoder_0",
    "condition_adapter_name": "condition_adapter_0",
    "condition_type": "clip-vit-large-patch14_text",
    "pretrained_model_name_or_path": "openai/clip-vit-large-patch14",
    "condition_max_length": 77,
    "condition_dim": 768,
    "cross_attention_dim": 768
  }
]
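
A minimal sketch of how the fields in this entry could map onto a Hugging Face CLIP text encoder; the helper name `load_condition_encoder` and the use of `transformers` here are assumptions for illustration, not the repository's own loader.

```python
import json
from transformers import CLIPTextModel, CLIPTokenizer

def load_condition_encoder(config_path: str):
    """Read the first condition-encoder entry and build the CLIP text encoder it describes.
    Hypothetical helper; the actual project may wire these fields differently."""
    with open(config_path) as f:
        entry = json.load(f)[0]  # the config is a JSON list of encoder entries

    # "pretrained_model_name_or_path" points at openai/clip-vit-large-patch14
    tokenizer = CLIPTokenizer.from_pretrained(entry["pretrained_model_name_or_path"])
    text_encoder = CLIPTextModel.from_pretrained(entry["pretrained_model_name_or_path"])

    # Sanity-check the declared dimensions against the loaded model:
    # ViT-L/14 text hidden size is 768 and its context length is 77 tokens.
    assert text_encoder.config.hidden_size == entry["condition_dim"]
    assert tokenizer.model_max_length == entry["condition_max_length"]
    return tokenizer, text_encoder
```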