{
  "beta_end": 0.02,
  "beta_schedule": "squaredcos_cap_v2",
  "beta_start": 0.0001,
  "clip_sample": true,
  "clip_sample_range": 1.0,
  "crop_is_random": true,
  "crop_shape": [
    140,
    215
  ],
  "diffusion_step_embed_dim": 128,
  "do_mask_loss_for_padding": false,
  "down_dims": [
    512,
    1024,
    2048
  ],
  "horizon": 8,
  "input_normalization_modes": {
    "observation.images.bottom_left": "mean_std",
    "observation.images.bottom_right": "mean_std",
    "observation.images.top_left": "mean_std",
    "observation.images.top_right": "mean_std",
    "observation.state": "mean_std"
  },
  "input_shapes": {
    "observation.images.bottom_left": [
      3,
      160,
      240
    ],
    "observation.images.bottom_right": [
      3,
      160,
      240
    ],
    "observation.images.top_left": [
      3,
      160,
      240
    ],
    "observation.images.top_right": [
      3,
      160,
      240
    ],
    "observation.state": [
      1
    ]
  },
  "kernel_size": 5,
  "n_action_steps": 8,
  "n_groups": 8,
  "n_obs_steps": 1,
  "noise_scheduler_type": "DDIM",
  "num_inference_steps": 10,
  "num_train_timesteps": 100,
  "output_normalization_modes": {
    "action": "min_max"
  },
  "output_shapes": {
    "action": [
      8
    ]
  },
  "prediction_type": "epsilon",
  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
  "spatial_softmax_num_keypoints": 32,
  "use_film_scale_modulation": true,
  "use_group_norm": false,
  "vision_backbone": "resnet18"
}