{
"_class_name": "AutoencoderKL",
"_commit_hash": null,
"_diffusers_version": "0.23.0",
"_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--ThinkDiffusion--ThinkDiffusionXL/snapshots/e31c4693b7f87128fee02fa6b16c4dc2ada13721/vae",
"act_fn": "silu",
"block_out_channels": [
128,
256,
512,
512
],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D"
],
"force_upcast": true,
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 2,
"neuron": {
"auto_cast": "all",
"auto_cast_type": "bf16",
"compiler_type": "neuronx-cc",
"compiler_version": "2.12.54.0+f631c2365",
"disable_fallback": false,
"disable_fast_relayout": false,
"dynamic_batch_size": false,
"input_names": [
"latent_sample"
],
"model_type": "vae-decoder",
"optlevel": "2",
"output_attentions": false,
"output_hidden_states": false,
"output_names": [
"sample"
],
"static_batch_size": 1,
"static_height": 128,
"static_num_beams": 1,
"static_num_channels": 4,
"static_width": 128
},
"norm_num_groups": 32,
"out_channels": 3,
"sample_size": 1024,
"scaling_factor": 0.13025,
"task": "semantic-segmentation",
"transformers_version": null,
"up_block_types": [
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D"
]
}