{
"type": "act",
"n_obs_steps": 1,
"normalization_mapping": {
"VISUAL": "MEAN_STD",
"STATE": "MEAN_STD",
"ACTION": "MEAN_STD"
},
"input_features": {
"observation.images.cam_high": {
"type": "VISUAL",
"shape": [
3,
480,
848
]
},
"observation.images.cam_left_wrist": {
"type": "VISUAL",
"shape": [
3,
480,
848
]
},
"observation.images.cam_right_wrist": {
"type": "VISUAL",
"shape": [
3,
480,
848
]
},
"observation.state": {
"type": "STATE",
"shape": [
14
]
}
},
"output_features": {
"action": {
"type": "ACTION",
"shape": [
14
]
}
},
"chunk_size": 100,
"n_action_steps": 100,
"vision_backbone": "resnet18",
"pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
"replace_final_stride_with_dilation": 0,
"pre_norm": false,
"dim_model": 512,
"n_heads": 8,
"dim_feedforward": 3200,
"feedforward_activation": "relu",
"n_encoder_layers": 4,
"n_decoder_layers": 1,
"use_vae": true,
"latent_dim": 32,
"n_vae_encoder_layers": 4,
"temporal_ensemble_coeff": null,
"dropout": 0.1,
"kl_weight": 10.0,
"optimizer_lr": 1e-05,
"optimizer_weight_decay": 0.0001,
"optimizer_lr_backbone": 1e-05
}