Carl Qi committed
Commit 5b490d3 · Parent: 15eb40b

add kitchen related models

kitchen/dlp_kitchen_dataset_40kp_64kpp_4zdim.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08b9eedcedce0f2f647178d41e61578afc391180d018dbbbfb665344f413351f
+ size 772268935
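
The binaries in this commit are tracked with Git LFS, so the repository stores only small pointer files (a version line, a sha256 oid, and a byte size); the actual data is fetched separately, e.g. with `git lfs pull`. As a minimal sketch, assuming the dataset file has already been downloaded to the path above, a local copy can be checked against its pointer:

```python
import hashlib
import os

# Values copied from the LFS pointer above.
path = "kitchen/dlp_kitchen_dataset_40kp_64kpp_4zdim.pkl"
expected_oid = "08b9eedcedce0f2f647178d41e61578afc391180d018dbbbfb665344f413351f"
expected_size = 772268935

# Stream the file through sha256 so the ~772 MB pickle is never held in memory at once.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("LFS object verified")
```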
latent_rep_chkpts/dlp_kitchen/hparams.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "ds": "franka_kitchen",
+ "batch_size": 64,
+ "lr": 0.0002,
+ "kp_activation": "tanh",
+ "pad_mode": "replicate",
+ "num_epochs": 250,
+ "n_kp": 1,
+ "recon_loss_type": "mse",
+ "sigma": 1.0,
+ "beta_kl": 0.1,
+ "beta_rec": 1.0,
+ "patch_size": 16,
+ "topk": 10,
+ "n_kp_enc": 40,
+ "eval_epoch_freq": 1,
+ "learned_feature_dim": 4,
+ "bg_learned_feature_dim": 1,
+ "n_kp_prior": 64,
+ "weight_decay": 0.0,
+ "kp_range": [
+ -1,
+ 1
+ ],
+ "run_prefix": "_40kp_64kpp_4zdim",
+ "warmup_epoch": 1,
+ "iou_thresh": 0.15,
+ "anchor_s": 0.25,
+ "kl_balance": 0.001,
+ "milestones": [
+ 20,
+ 40,
+ 80
+ ],
+ "image_size": 128,
+ "cdim": 3,
+ "enc_channels": [
+ 32,
+ 64,
+ 128
+ ],
+ "prior_channels": [
+ 16,
+ 32,
+ 64
+ ],
+ "scale_std": 0.3,
+ "offset_std": 0.2,
+ "obj_on_alpha": 0.1,
+ "obj_on_beta": 0.1,
+ "use_correlation_heatmaps": false
+ }
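
The hyperparameters above describe the DLP model trained on `franka_kitchen`: 40 posterior keypoints (`n_kp_enc`), 64 prior keypoint proposals (`n_kp_prior`), and a 4-dimensional learned feature per keypoint (`learned_feature_dim`), which is what the `_40kp_64kpp_4zdim` run prefix encodes. A minimal sketch for reading them back:

```python
import json

with open("latent_rep_chkpts/dlp_kitchen/hparams.json") as f:
    hparams = json.load(f)

# The run prefix encodes the three settings that distinguish this checkpoint.
print(hparams["n_kp_enc"], hparams["n_kp_prior"], hparams["learned_feature_dim"])
# 40 64 4  -> "_40kp_64kpp_4zdim"
print(hparams["image_size"], hparams["anchor_s"])  # 128, 0.25 (glimpse scale)
```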
latent_rep_chkpts/dlp_kitchen/saves/dlp_kitchen.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7aa98e16ee9794e948e8a373afad0e738b059e52057472e2bf32c7a31e4b308f
+ size 55887527
latent_rep_chkpts/dlp_kitchen/saves/franka_kitchen_dlp_40kp_64kpp_4zdim.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd8dfde0a1a0e8f12f5dc20dd7ef849260543503e2a858a0ee9ed8a297200739
+ size 55886353
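
The two `.pth` files above are the trained DLP weights (the `franka_kitchen_dlp_40kp_64kpp_4zdim.pth` name mirrors the run prefix in hparams.json). Mapping them onto a model instance depends on the repo's DLP class, which is not part of this commit, so the sketch below only loads and inspects the checkpoint; the handling of the checkpoint structure is an assumption:

```python
import torch

ckpt_path = "latent_rep_chkpts/dlp_kitchen/saves/franka_kitchen_dlp_40kp_64kpp_4zdim.pth"
# Load on CPU; the checkpoint may be a raw state_dict or a dict wrapping one
# (e.g. under a "model" key) -- both cases are handled defensively here.
ckpt = torch.load(ckpt_path, map_location="cpu")
state_dict = ckpt.get("model", ckpt) if isinstance(ckpt, dict) else ckpt.state_dict()

# Print a few parameter names/shapes to sanity-check the download.
for name, value in list(state_dict.items())[:5]:
    shape = tuple(value.shape) if hasattr(value, "shape") else type(value).__name__
    print(name, shape)
```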
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/completion_idx_800.json ADDED
Binary file (983 Bytes).
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/dataset_config.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04b75b06377d4239aa177e0f4ab0b906a43f526919bd699f1ea760ff778ac5e8
+ size 498
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/diff.txt ADDED
@@ -0,0 +1,261 @@
+ diff --git a/analylize_trajectory.ipynb b/analylize_trajectory.ipynb
+ index ee01840..6ac8c75 100644
+ --- a/analylize_trajectory.ipynb
+ +++ b/analylize_trajectory.ipynb
+ @@ -2,7 +2,7 @@
+ "cells": [
+ {
+ "cell_type": "code",
+ - "execution_count": 2,
+ + "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ @@ -180,19 +180,20 @@
+ },
+ {
+ "cell_type": "code",
+ - "execution_count": 5,
+ + "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ - "t: 0 target_object: 51 max_obj_idxes: [20, 51, 100, 154, 202]\n",
+ - "t: 5 target_object: 7 max_obj_idxes: [7, 54, 106, 150, 212]\n",
+ - "t: 10 target_object: 1 max_obj_idxes: [1, 63, 111, 157, 215]\n",
+ - "t: 15 target_object: 1 max_obj_idxes: [1, 71, 111, 169, 203]\n",
+ - "t: 20 target_object: 10 max_obj_idxes: [10, 56, 122, 168, 212]\n",
+ - "t: 25 target_object: 4 max_obj_idxes: [4, 71, 115, 170, 220]\n"
+ + "t: 0 target_object: 51 max_obj_idxes: [20, 13, 16, 10, 11, 51, 62, 70, 63, 57, 100, 109, 112, 119, 117, 154, 171, 161, 149, 151, 202, 212, 201, 200, 207] hit_percentage: 0.32\n",
+ + "t: 5 target_object: 7 max_obj_idxes: [7, 17, 8, 4, 23, 54, 65, 63, 62, 69, 106, 110, 112, 99, 113, 150, 161, 158, 163, 166, 204, 207, 210, 201, 211] hit_percentage: 0.28\n",
+ + "t: 10 target_object: 1 max_obj_idxes: [1, 23, 6, 4, 8, 63, 57, 54, 70, 66, 111, 101, 104, 120, 100, 157, 165, 148, 170, 166, 215, 212, 201, 207, 204] hit_percentage: 0.28\n",
+ + "t: 15 target_object: 1 max_obj_idxes: [1, 8, 21, 3, 16, 71, 54, 51, 65, 69, 111, 107, 103, 106, 100, 169, 163, 151, 159, 162, 203, 218, 205, 215, 207] hit_percentage: 0.6\n",
+ + "t: 20 target_object: 10 max_obj_idxes: [10, 16, 14, 23, 15, 56, 57, 50, 55, 64, 122, 106, 100, 103, 116, 168, 159, 148, 155, 153, 212, 202, 200, 198, 201] hit_percentage: 0.36\n",
+ + "t: 25 target_object: 4 max_obj_idxes: [4, 5, 10, 6, 1, 71, 58, 65, 51, 73, 115, 111, 113, 107, 112, 170, 160, 148, 155, 167, 218, 220, 210, 211, 203] hit_percentage: 0.24\n",
+ + "overall_hit_percentage: 0.3466666666666667\n"
+ ]
+ }
+ ],
+ @@ -213,21 +214,29 @@
+ " new_idx = i * 24 + j\n",
+ " return new_idx\n",
+ "\n",
+ + "overall_hit_percentage = []\n",
+ "for t in [0, 5, 10, 15, 20, 25]:\n",
+ " with open(f'logs/panda_push/plans/3C_plan_3C_pintlarge_dlp_analysis_H5_T100/step_latest/particles_{t}.pkl', 'rb') as f:\n",
+ " particles = pickle.load(f)\n",
+ " particles = particles[0, :, :240]\n",
+ " particles = particles.reshape(-1, 10)[:, 2:]\n",
+ - " particle_similarity = cdist(particles, particles, metric='cosine')\n",
+ + " particle_similarity = cdist(particles, particles, metric='euclidean')\n",
+ " particle_similarity = 1 - particle_similarity\n",
+ "\n",
+ "\n",
+ " target_obj = objects_of_interest_dict[t][0]\n",
+ " target_particle = convert_obj_idx_to_particle_idx(target_obj)\n",
+ - " max_particle_idxes = np.argmax(particle_similarity[target_particle].reshape(5, -1), axis=1)\n",
+ - " max_particle_idxes = [i*24 + j for i, j in enumerate(max_particle_idxes)]\n",
+ + " # max_particle_idxes = np.argmax(particle_similarity[target_particle].reshape(5, -1), axis=1)\n",
+ + " top_k_particle_indices = np.argsort(particle_similarity[target_particle].reshape(5, -1), axis=-1)[:, ::-1][:, :5]\n",
+ + " max_particle_idxes = []\n",
+ + " for i in range(5):\n",
+ + " for idx in top_k_particle_indices[i]:\n",
+ + " max_particle_idxes.append(i*24 + idx)\n",
+ + " # max_particle_idxes = [i*24 + j for i, j in enumerate(max_particle_idxes)]\n",
+ " max_obj_idxes = [convert_particle_idx_to_obj_idx(idx) for idx in max_particle_idxes]\n",
+ - " print(\"t:\", t, \"target_object:\", target_obj, \"max_obj_idxes:\", max_obj_idxes)\n",
+ + " hit_percentage = np.mean([1 if obj_idx in objects_of_interest_dict[t] else 0 for obj_idx in max_obj_idxes])\n",
+ + " print(\"t:\", t, \"target_object:\", target_obj, \"max_obj_idxes:\", max_obj_idxes, \"hit_percentage:\", hit_percentage)\n",
+ + " overall_hit_percentage.append(hit_percentage)\n",
+ " n_particles = 120\n",
+ "\n",
+ " # fig, ax = plt.subplots()\n",
+ @@ -238,33 +247,16 @@
+ " # ax.imshow(particle_similarity)\n",
+ " # ax.set_title(f\"Particle Similarity\", fontsize=12)\n",
+ " # ax.set_xticks(range(n_particles), [f\"{i}\" for i in range(n_particles)])\n",
+ - " # ax.set_yticks(range(n_particles), [f\"{i}\" for i in range(n_particles)])"
+ + " # ax.set_yticks(range(n_particles), [f\"{i}\" for i in range(n_particles)])\n",
+ + "print(\"overall_hit_percentage:\", np.mean(overall_hit_percentage))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ - "execution_count": 38,
+ + "execution_count": null,
+ "metadata": {},
+ - "outputs": [
+ - {
+ - "data": {
+ - "text/plain": [
+ - "(5, 24)"
+ - ]
+ - },
+ - "execution_count": 38,
+ - "metadata": {},
+ - "output_type": "execute_result"
+ - }
+ - ],
+ - "source": [
+ - "t: 0 target_object: 51 max_obj_idxes: [20, 51, 100, 154, 202]\n",
+ - "t: 5 target_object: 7 max_obj_idxes: [7, 54, 106, 150, 204]\n",
+ - "t: 10 target_object: 1 max_obj_idxes: [1, 63, 111, 157, 215]\n",
+ - "t: 15 target_object: 1 max_obj_idxes: [1, 71, 111, 169, 203]\n",
+ - "t: 20 target_object: 10 max_obj_idxes: [10, 56, 122, 168, 212]\n",
+ - "t: 25 target_object: 4 max_obj_idxes: [4, 71, 115, 170, 218]"
+ - ]
+ + "outputs": [],
+ + "source": []
+ },
+ {
+ "cell_type": "code",
+ diff --git a/config/TrainDLPConfig.yaml b/config/TrainDLPConfig.yaml
+ index 6cfcd34..528c308 100644
+ --- a/config/TrainDLPConfig.yaml
+ +++ b/config/TrainDLPConfig.yaml
+ @@ -23,14 +23,14 @@ beta_kl: 0.1 # original
+ beta_rec: 1.0
+ scale_std: 0.3 # default
+ offset_std: 0.2 # default
+ -n_kp_enc: 20 # total kp to output from the encoder / filter from prior
+ -n_kp_prior: 32
+ -patch_size: 16 # prior patch size need to be lower than posterior patch size: posterior is (image size * anchor_s)
+ +n_kp_enc: 50 # total kp to output from the encoder / filter from prior
+ +n_kp_prior: 128
+ +patch_size: 8 # prior patch size need to be lower than posterior patch size: posterior is (image size * anchor_s)
+ learned_feature_dim: 4 # latent visual features for each kp (excluding bg)
+ bg_learned_feature_dim: 1
+ topk: 10 # display top-10 kp with the smallest variance
+ recon_loss_type: "mse"
+ -anchor_s: 0.25 # reduce this to 0.125 for small glimpses
+ +anchor_s: 0.125 # reduce this to 0.125 for small glimpses
+ kl_balance: 0.001
+
+
+ diff --git a/diffuser/config/pandapush_pint.py b/diffuser/config/pandapush_pint.py
+ index e18ff84..e96a7ee 100644
+ --- a/diffuser/config/pandapush_pint.py
+ +++ b/diffuser/config/pandapush_pint.py
+ @@ -32,8 +32,8 @@ mode_to_args = {
+ 'device': 'cuda:1',
+ 'droupout': 0.0,
+ 'renderer': 'utils.ParticleRenderer',
+ - 'eval_freq': 200,
+ - 'n_train_steps': 1e6,
+ + 'eval_freq': 800,
+ + 'n_train_steps': 9e5,
+ },
+ '1C_dlp_pusht': {'env_config_dir': 'config/push_t_old',
+ 'features_dim': 10,
+ @@ -62,7 +62,7 @@ mode_to_args = {
+ '3C_dlp_pusht': {'env_config_dir': 'config/push_t',
+ 'features_dim': 12,
+ 'multiview': True,
+ - 'n_diffusion_steps': 5,
+ + 'n_diffusion_steps': 50,
+ 'model': 'models.AdaLNPINTDenoiser',
+ 'particle_normalizer': 'ParticleLimitsNormalizer',
+ 'horizon': 5,
+ @@ -70,7 +70,12 @@ mode_to_args = {
+ 'device': 'cuda:1',
+ 'droupout': 0.0,
+ 'renderer': 'utils.ParticleRenderer',
+ - 'vis_freq': 999,
+ + 'vis_freq': 20,
+ + 'hidden_dim': 512, # 512
+ + 'projection_dim': 512, # 512
+ + 'n_heads': 8, # 4, 8
+ + 'n_layers': 12, # 4, 6, # 12
+ + 'n_saves': 5,
+ },
+ '1C_state': {'env_config_dir': 'config/n_cubes_state',
+ 'features_dim': 4,
+ diff --git a/diffuser/config/plan_config/plan_pandapush_pint.py b/diffuser/config/plan_config/plan_pandapush_pint.py
+ index d742936..937c936 100644
+ --- a/diffuser/config/plan_config/plan_pandapush_pint.py
+ +++ b/diffuser/config/plan_config/plan_pandapush_pint.py
+ @@ -51,7 +51,8 @@ mode_to_args = {
+ 'n_diffusion_steps': 5,
+ 'horizon': 5,
+ 'device': 'cuda:1',
+ - 'diffusion_loadpath': 'diffusion/PushT_3C_dlp_adalnpint_new_H5_T5',
+ + # 'diffusion_loadpath': 'diffusion/PushT_3C_dlp_adalnpint_new_H5_T5',
+ + 'diffusion_loadpath': 'diffusion/PushT_3C_dlp_pintlarge_H5_T5',
+ # 'diffusion_loadpath': 'diffusion/PushT_1C_dlp_eit_H1_T5',
+ # 'vis_freq': 999,
+ # 'policy': 'sampling.GoalConditionedBCPolicy',
+ diff --git a/diffuser/scripts/train_kitchen.py b/diffuser/scripts/train_kitchen.py
+ index bb2f5de..3af843c 100644
+ --- a/diffuser/scripts/train_kitchen.py
+ +++ b/diffuser/scripts/train_kitchen.py
+ @@ -250,4 +250,4 @@ for i in range(n_epochs):
+
+ ## TODO: add evaluation code here:
+ if i % args.eval_freq == 0:
+ - evaluate_kitchen(policy, env, latent_rep_model, goal_fn, plan_args, video, i, plan_args.savepath, num_evals=40)
+ + evaluate_kitchen(policy, env, latent_rep_model, goal_fn, plan_args, video, i, plan_args.savepath, num_evals=100)
+ diff --git a/run_batch_plan.sh b/run_batch_plan.sh
+ index 47315f9..ba59d13 100644
+ --- a/run_batch_plan.sh
+ +++ b/run_batch_plan.sh
+ @@ -1,10 +1,13 @@
+ #!/bin/bash
+
+ -seeds=(42 188 288 388 488)
+ -gpu_numbers=(1 2 3 1 2)
+ +# seeds=(42 188 288 388 488)
+ +# gpu_numbers=(3 2 3 1 2)
+
+ -for i in "${!seeds[@]}"; do
+ - seed=${seeds[$i]}
+ - gpu_number=${gpu_numbers[$i]}
+ - CUDA_VISIBLE_DEVICES=0,$gpu_number python diffuser/scripts/plan_gc_pandapush.py --planning_only --config config.plan_config.plan_pandapush_pint --seed $seed --num_entity 1 --push_t --exp_note pint_action &
+ -done
+
+ +# for i in "${!seeds[@]}"; do
+ +# seed=${seeds[$i]}
+ +# gpu_number=${gpu_numbers[$i]}
+ +# CUDA_VISIBLE_DEVICES=0,$gpu_number python diffuser/scripts/plan_gc_pandapush.py --vis_traj_wandb --planning_only --config config.plan_config.plan_pandapush_pint --seed $seed --num_entity 6 --push_t --exp_note adalnpint &
+ +# done
+ +
+ +CUDA_VISIBLE_DEVICES=0,1 python diffuser/scripts/plan_gc_pandapush.py --vis_traj_wandb --planning_only --config config.plan_config.plan_pandapush_pint --seed 188 --num_entity 4 --push_t --exp_note pintlarge_3color &
+ +# CUDA_VISIBLE_DEVICES=0,3 python diffuser/scripts/plan_gc_pandapush.py --vis_traj_wandb --planning_only --config config.plan_config.plan_pandapush_pint --seed 188 --num_entity 4 --push_t --exp_note pintlarge_2color
+
+ diff --git a/run_batch_train.sh b/run_batch_train.sh
+ index 4af8c5a..f51b78b 100644
+ --- a/run_batch_train.sh
+ +++ b/run_batch_train.sh
+ @@ -1,10 +1,10 @@
+ #!/bin/bash
+
+ -seeds=(42 188 288 388 488)
+ -gpu_numbers=(1 2 3 1 2)
+ +seeds=(188 288 388 488)
+ +gpu_numbers=(1 2 3 3)
+
+ for i in "${!seeds[@]}"; do
+ seed=${seeds[$i]}
+ gpu_number=${gpu_numbers[$i]}
+ - CUDA_VISIBLE_DEVICES=0,$gpu_number python diffuser/scripts/train.py --seed $seed --num_entity 3 --input_type vqvae --exp_note adalnpint &
+ + CUDA_VISIBLE_DEVICES=0,$gpu_number python diffuser/scripts/train_kitchen.py --seed $seed --num_entity 1 --kitchen --exp_note 40kp_0.25anchor_s &
+ done
+
+ diff --git a/vq_bet_official/examples/configs/train_pandapush.yaml b/vq_bet_official/examples/configs/train_pandapush.yaml
+ index 45b6976..ac8559a 100644
+ --- a/vq_bet_official/examples/configs/train_pandapush.yaml
+ +++ b/vq_bet_official/examples/configs/train_pandapush.yaml
+ @@ -16,8 +16,9 @@ sequentially_select: false
+ # vqvae_load_dir: "vq_bet_official/checkpoints/pandapush/2024-09-25/21-18-19/silvery-wind-20/trained_vqvae.pt" # 2C
+ # vqvae_load_dir: "vq_bet_official/checkpoints/pandapush/2024-09-25/21-35-15/apricot-haze-25/trained_vqvae.pt" # 3C
+ # vqvae_load_dir: "vq_bet_official/checkpoints/pandapush/2024-09-26/00-37-23/floral-frost-33/trained_vqvae.pt" # 1T
+ -vqvae_load_dir: "vq_bet_official/checkpoints/pandapush/2024-09-26/15-23-53/classic-capybara-39/trained_vqvae.pt"
+ -num_entity: 2
+ +# vqvae_load_dir: "vq_bet_official/checkpoints/pandapush/2024-09-26/15-23-53/classic-capybara-39/trained_vqvae.pt"
+ +vqvae_load_dir: "vq_bet_official/checkpoints/pandapush/2024-09-26/22-36-48/woven-music-45/trained_vqvae.pt"
+ +num_entity: 3
+ env_config_dir: config/vqvae_push_t
+ # env_config_dir: config/n_cubes_raw
+ push_t: true
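
The notebook change at the top of diff.txt above replaces a single arg-max particle match with a top-k lookup per camera view and reports a hit percentage against the objects of interest. Below is a self-contained sketch of that computation on synthetic data; the 5-view x 24-particle layout, the identity particle-to-object mapping, and the `objects_of_interest` set are illustrative assumptions mirroring the diff, not code from the repo:

```python
import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
n_views, per_view, feat_dim, k = 5, 24, 8, 5
particles = rng.normal(size=(n_views * per_view, feat_dim))   # stand-in latent particles

# Similarity as in the updated notebook cell: 1 - pairwise euclidean distance.
similarity = 1.0 - cdist(particles, particles, metric="euclidean")

def particle_to_obj(idx: int) -> int:
    # Placeholder mapping; convert_particle_idx_to_obj_idx in the notebook is repo-specific.
    return idx

target_particle = 3
objects_of_interest = {3, 27, 51, 75, 99}   # e.g. the same object seen from each view

# Top-k most similar particles within each view, then map back to object indices.
per_view_sim = similarity[target_particle].reshape(n_views, per_view)
top_k = np.argsort(per_view_sim, axis=-1)[:, ::-1][:, :k]
matched_objs = [particle_to_obj(v * per_view + j) for v in range(n_views) for j in top_k[v]]

hit_percentage = np.mean([obj in objects_of_interest for obj in matched_objs])
print("hit_percentage:", hit_percentage)
```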
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/diffusion_config.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a093c84c6e03ffb58464bffdaf3c2a41da282703b7c8fd20bb16d36599decbf
+ size 341
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/eval_800_0.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30a460bc9515a5911ab5b577ae473e6343843af02757e9ab638207c3643b6fda
+ size 2938530
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/model_config.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1485a1fc1ce71da742f9629ae7e82142e1eeac50568d7787622b31ca7c3a7f2f
+ size 329
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/render_config.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:534ea1b7376c27b5bc94d8185cafddfb86361d6151d6a266ae9aaac98a5ab7f3
+ size 157
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/state_810000.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1989c9f09d77db7583ad8dc61dc1ed45388fb99a5153ce52a30ee7aa344d19c0
+ size 64892725
pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5/trainer_config.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2497149f77e0498c9f1b096ddc5ceebb4b0cf7619a05e89358d0d21463767aba
+ size 401
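
The `*_config.pkl` files under the diffusion checkpoint directory are pickled Python objects, so loading them generally requires the repo's own classes to be importable (pickle re-instantiates whatever classes were saved). A minimal sketch, assuming the repository root is on `sys.path`:

```python
import pickle

base = "pretrained_models/kitchen/diffusion/kitchen_1C_dlp_40kp_0.25anchor_s_H5_T5"
for name in ("dataset_config", "diffusion_config", "model_config", "render_config", "trainer_config"):
    with open(f"{base}/{name}.pkl", "rb") as f:
        cfg = pickle.load(f)   # may fail if the repo's config classes are not importable
    print(name, type(cfg).__name__)
```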