Upload folder using huggingface_hub
- README.md +12 -0
- config.json +73 -0
- meta.json +1 -0
- model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,12 @@
+---
+library_name: lerobot
+tags:
+- diffusion-policy
+- model_hub_mixin
+- pytorch_model_hub_mixin
+- robotics
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: https://github.com/huggingface/lerobot
+- Docs: [More Information Needed]
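
Because the checkpoint was pushed through the mixin, the uploaded files can be pulled back down with plain `huggingface_hub` calls. A minimal sketch, assuming `huggingface_hub` and `safetensors` are installed; the repository id is a placeholder, not something stated in this upload:

```python
# Sketch: download the files from this upload and load the raw weights.
# "your-org/your-model-repo" is a placeholder repo id (assumption, not from the upload).
import json

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

repo_id = "your-org/your-model-repo"

config_path = hf_hub_download(repo_id, "config.json")        # policy config shown below
weights_path = hf_hub_download(repo_id, "model.safetensors")  # LFS-backed weights

with open(config_path) as f:
    config = json.load(f)

state_dict = load_file(weights_path)  # dict of tensor name -> torch.Tensor
print(config["vision_backbone"], len(state_dict), "tensors")
```

In recent lerobot versions the same thing is usually done in one step via the policy class's `from_pretrained` method inherited from the mixin, though the exact import path depends on the lerobot version.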
config.json
ADDED
@@ -0,0 +1,73 @@
+{
+    "beta_end": 0.02,
+    "beta_schedule": "squaredcos_cap_v2",
+    "beta_start": 0.0001,
+    "clip_sample": true,
+    "clip_sample_range": 1.0,
+    "crop_is_random": true,
+    "crop_shape": [
+        140,
+        215
+    ],
+    "diffusion_step_embed_dim": 128,
+    "do_mask_loss_for_padding": false,
+    "down_dims": [
+        512,
+        1024,
+        2048
+    ],
+    "horizon": 8,
+    "input_normalization_modes": {
+        "observation.images.bottom_left": "mean_std",
+        "observation.images.bottom_right": "mean_std",
+        "observation.images.top_left": "mean_std",
+        "observation.images.top_right": "mean_std",
+        "observation.state": "mean_std"
+    },
+    "input_shapes": {
+        "observation.images.bottom_left": [
+            3,
+            160,
+            240
+        ],
+        "observation.images.bottom_right": [
+            3,
+            160,
+            240
+        ],
+        "observation.images.top_left": [
+            3,
+            160,
+            240
+        ],
+        "observation.images.top_right": [
+            3,
+            160,
+            240
+        ],
+        "observation.state": [
+            1
+        ]
+    },
+    "kernel_size": 5,
+    "n_action_steps": 8,
+    "n_groups": 8,
+    "n_obs_steps": 1,
+    "noise_scheduler_type": "DDIM",
+    "num_inference_steps": 10,
+    "num_train_timesteps": 100,
+    "output_normalization_modes": {
+        "action": "min_max"
+    },
+    "output_shapes": {
+        "action": [
+            8
+        ]
+    },
+    "prediction_type": "epsilon",
+    "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+    "spatial_softmax_num_keypoints": 32,
+    "use_film_scale_modulation": true,
+    "use_group_norm": false,
+    "vision_backbone": "resnet18"
+}
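
The scheduler-related fields (`noise_scheduler_type`, `num_train_timesteps`, `beta_*`, `clip_sample*`, `prediction_type`) mirror the constructor arguments of a DDIM scheduler from the `diffusers` library, which lerobot's diffusion policy builds on. A sketch of how they might be instantiated for the 10-step inference setting above; reading the config as a plain dict is an assumption about usage, not lerobot's own loading code:

```python
# Sketch: build a DDIM scheduler from the scheduler fields in config.json.
import json

from diffusers import DDIMScheduler  # assumes the diffusers package is installed

with open("config.json") as f:
    cfg = json.load(f)

scheduler = DDIMScheduler(
    num_train_timesteps=cfg["num_train_timesteps"],  # 100
    beta_start=cfg["beta_start"],                    # 0.0001
    beta_end=cfg["beta_end"],                        # 0.02
    beta_schedule=cfg["beta_schedule"],              # "squaredcos_cap_v2"
    clip_sample=cfg["clip_sample"],                  # True
    clip_sample_range=cfg["clip_sample_range"],      # 1.0
    prediction_type=cfg["prediction_type"],          # "epsilon": the network predicts noise
)
scheduler.set_timesteps(cfg["num_inference_steps"])  # 10 denoising steps at inference time
print(scheduler.timesteps)
```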
meta.json
ADDED
@@ -0,0 +1 @@
+{"loss": 0.014469992201775313, "time": 1729895477.2541552, "epoch": 41.07142857142857, "step": 1150, "params": {"epochs": 600, "root": "episodes", "repo_id": "Frontier-Machines/can_in_box-20241025_135037", "frequency": 10, "img_width": 240, "img_height": 160, "gripper_dof": 1, "position_dof": 3, "rotation_dof": 4, "batch_size": 200, "log_freq": 50, "checkpoint_freq": 50, "observation_length": 1, "inference_steps": 10}}
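
meta.json is a small training-telemetry record: the running loss, wall-clock timestamp, epoch/step counters, and the training parameters. A quick sketch of reading it and cross-checking it against config.json; the breakdown of the 8-dim action into position + rotation + gripper is an inference from the `*_dof` fields, not something stated explicitly:

```python
# Sketch: inspect the training metadata and sanity-check it against config.json.
import json

with open("meta.json") as f:
    meta = json.load(f)
with open("config.json") as f:
    cfg = json.load(f)

params = meta["params"]
print(f"loss {meta['loss']:.4f} at step {meta['step']} (epoch {meta['epoch']:.1f})")

# The 8-dim action in config.json plausibly decomposes as position (3) +
# rotation quaternion (4) + gripper (1) -- an inference, not a documented fact.
assert params["position_dof"] + params["rotation_dof"] + params["gripper_dof"] \
    == cfg["output_shapes"]["action"][0]

# Image sizes in meta.json match the camera input shapes (3, 160, 240).
assert [3, params["img_height"], params["img_width"]] \
    == cfg["input_shapes"]["observation.images.top_left"]
```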
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b28e8cfa4d9f9abc49080eb910c39f1b4618079186a1216eda71e8694fdc324
+size 1065331128
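
This entry is a Git LFS pointer, not the weights themselves: the actual ~1 GB safetensors file is stored by LFS and identified by the SHA-256 above. A sketch for verifying a downloaded copy against the pointer; the local path is an assumption:

```python
# Sketch: check a downloaded model.safetensors against the LFS pointer's oid and size.
import hashlib
import os

path = "model.safetensors"  # assumed local path of the downloaded weights
expected_oid = "9b28e8cfa4d9f9abc49080eb910c39f1b4618079186a1216eda71e8694fdc324"
expected_size = 1_065_331_128  # bytes, from the pointer file

assert os.path.getsize(path) == expected_size, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "checksum mismatch"
print("model.safetensors matches the LFS pointer")
```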