diogormalmeida committed on
Commit
d039aea
·
verified ·
1 Parent(s): a9c8ac8

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. config.json +70 -0
  2. model.safetensors +3 -0
  3. train_config.json +175 -0
config.json ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "type": "act",
3
+ "n_obs_steps": 1,
4
+ "normalization_mapping": {
5
+ "VISUAL": "MEAN_STD",
6
+ "STATE": "MEAN_STD",
7
+ "ACTION": "MEAN_STD"
8
+ },
9
+ "input_features": {
10
+ "observation.images.cam_high": {
11
+ "type": "VISUAL",
12
+ "shape": [
13
+ 3,
14
+ 480,
15
+ 848
16
+ ]
17
+ },
18
+ "observation.images.cam_left_wrist": {
19
+ "type": "VISUAL",
20
+ "shape": [
21
+ 3,
22
+ 480,
23
+ 848
24
+ ]
25
+ },
26
+ "observation.images.cam_right_wrist": {
27
+ "type": "VISUAL",
28
+ "shape": [
29
+ 3,
30
+ 480,
31
+ 848
32
+ ]
33
+ },
34
+ "observation.state": {
35
+ "type": "STATE",
36
+ "shape": [
37
+ 14
38
+ ]
39
+ }
40
+ },
41
+ "output_features": {
42
+ "action": {
43
+ "type": "ACTION",
44
+ "shape": [
45
+ 14
46
+ ]
47
+ }
48
+ },
49
+ "chunk_size": 100,
50
+ "n_action_steps": 100,
51
+ "vision_backbone": "resnet18",
52
+ "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
53
+ "replace_final_stride_with_dilation": 0,
54
+ "pre_norm": false,
55
+ "dim_model": 512,
56
+ "n_heads": 8,
57
+ "dim_feedforward": 3200,
58
+ "feedforward_activation": "relu",
59
+ "n_encoder_layers": 4,
60
+ "n_decoder_layers": 1,
61
+ "use_vae": true,
62
+ "latent_dim": 32,
63
+ "n_vae_encoder_layers": 4,
64
+ "temporal_ensemble_coeff": null,
65
+ "dropout": 0.1,
66
+ "kl_weight": 10.0,
67
+ "optimizer_lr": 1e-05,
68
+ "optimizer_weight_decay": 0.0001,
69
+ "optimizer_lr_backbone": 1e-05
70
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4a61b4661a13ebf2050f4c8757e23c92d7f89ee4dd34ccec204369759da0242
3
+ size 206767160
train_config.json ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dataset": {
3
+ "repo_id": "HumanoidTeam/many_maltesers_task",
4
+ "episodes": null,
5
+ "image_transforms": {
6
+ "enable": false,
7
+ "max_num_transforms": 3,
8
+ "random_order": false,
9
+ "tfs": {
10
+ "brightness": {
11
+ "weight": 1.0,
12
+ "type": "ColorJitter",
13
+ "kwargs": {
14
+ "brightness": [
15
+ 0.8,
16
+ 1.2
17
+ ]
18
+ }
19
+ },
20
+ "contrast": {
21
+ "weight": 1.0,
22
+ "type": "ColorJitter",
23
+ "kwargs": {
24
+ "contrast": [
25
+ 0.8,
26
+ 1.2
27
+ ]
28
+ }
29
+ },
30
+ "saturation": {
31
+ "weight": 1.0,
32
+ "type": "ColorJitter",
33
+ "kwargs": {
34
+ "saturation": [
35
+ 0.5,
36
+ 1.5
37
+ ]
38
+ }
39
+ },
40
+ "hue": {
41
+ "weight": 1.0,
42
+ "type": "ColorJitter",
43
+ "kwargs": {
44
+ "hue": [
45
+ -0.05,
46
+ 0.05
47
+ ]
48
+ }
49
+ },
50
+ "sharpness": {
51
+ "weight": 1.0,
52
+ "type": "SharpnessJitter",
53
+ "kwargs": {
54
+ "sharpness": [
55
+ 0.5,
56
+ 1.5
57
+ ]
58
+ }
59
+ }
60
+ }
61
+ },
62
+ "local_files_only": false,
63
+ "use_imagenet_stats": true,
64
+ "video_backend": "pyav"
65
+ },
66
+ "env": null,
67
+ "policy": {
68
+ "type": "act",
69
+ "n_obs_steps": 1,
70
+ "normalization_mapping": {
71
+ "VISUAL": "MEAN_STD",
72
+ "STATE": "MEAN_STD",
73
+ "ACTION": "MEAN_STD"
74
+ },
75
+ "input_features": {
76
+ "observation.images.cam_high": {
77
+ "type": "VISUAL",
78
+ "shape": [
79
+ 3,
80
+ 480,
81
+ 848
82
+ ]
83
+ },
84
+ "observation.images.cam_left_wrist": {
85
+ "type": "VISUAL",
86
+ "shape": [
87
+ 3,
88
+ 480,
89
+ 848
90
+ ]
91
+ },
92
+ "observation.images.cam_right_wrist": {
93
+ "type": "VISUAL",
94
+ "shape": [
95
+ 3,
96
+ 480,
97
+ 848
98
+ ]
99
+ },
100
+ "observation.state": {
101
+ "type": "STATE",
102
+ "shape": [
103
+ 14
104
+ ]
105
+ }
106
+ },
107
+ "output_features": {
108
+ "action": {
109
+ "type": "ACTION",
110
+ "shape": [
111
+ 14
112
+ ]
113
+ }
114
+ },
115
+ "chunk_size": 100,
116
+ "n_action_steps": 100,
117
+ "vision_backbone": "resnet18",
118
+ "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
119
+ "replace_final_stride_with_dilation": 0,
120
+ "pre_norm": false,
121
+ "dim_model": 512,
122
+ "n_heads": 8,
123
+ "dim_feedforward": 3200,
124
+ "feedforward_activation": "relu",
125
+ "n_encoder_layers": 4,
126
+ "n_decoder_layers": 1,
127
+ "use_vae": true,
128
+ "latent_dim": 32,
129
+ "n_vae_encoder_layers": 4,
130
+ "temporal_ensemble_coeff": null,
131
+ "dropout": 0.1,
132
+ "kl_weight": 10.0,
133
+ "optimizer_lr": 1e-05,
134
+ "optimizer_weight_decay": 0.0001,
135
+ "optimizer_lr_backbone": 1e-05
136
+ },
137
+ "output_dir": "outputs/train/2025-02-25/16-32-35_act",
138
+ "job_name": "act",
139
+ "resume": false,
140
+ "device": "cuda",
141
+ "use_amp": false,
142
+ "seed": 1000,
143
+ "num_workers": 4,
144
+ "batch_size": 16,
145
+ "steps": 1000000,
146
+ "eval_freq": 20000,
147
+ "log_freq": 200,
148
+ "save_checkpoint": true,
149
+ "save_freq": 5000,
150
+ "use_policy_training_preset": true,
151
+ "optimizer": {
152
+ "type": "adamw",
153
+ "lr": 1e-05,
154
+ "weight_decay": 0.0001,
155
+ "grad_clip_norm": 10.0,
156
+ "betas": [
157
+ 0.9,
158
+ 0.999
159
+ ],
160
+ "eps": 1e-08
161
+ },
162
+ "scheduler": null,
163
+ "eval": {
164
+ "n_episodes": 50,
165
+ "batch_size": 50,
166
+ "use_async_envs": false
167
+ },
168
+ "wandb": {
169
+ "enable": true,
170
+ "disable_artifact": false,
171
+ "project": "tesco_demo",
172
+ "entity": null,
173
+ "notes": null
174
+ }
175
+ }