Image-to-3D
Diffusers
Safetensors
MIDIPipeline
huanngzh committed on
Commit
194c4af
·
verified ·
1 Parent(s): 6edfd19

Upload folder using huggingface_hub

Browse files
feature_extractor_1/preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 224,
4
+ "width": 224
5
+ },
6
+ "do_center_crop": true,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "image_mean": [
12
+ 0.48145466,
13
+ 0.4578275,
14
+ 0.40821073
15
+ ],
16
+ "image_processor_type": "CLIPImageProcessor",
17
+ "image_std": [
18
+ 0.26862954,
19
+ 0.26130258,
20
+ 0.27577711
21
+ ],
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "shortest_edge": 224
26
+ }
27
+ }
feature_extractor_2/preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 512,
4
+ "width": 512
5
+ },
6
+ "do_center_crop": true,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "image_mean": [
12
+ 0.485,
13
+ 0.456,
14
+ 0.406
15
+ ],
16
+ "image_processor_type": "BitImageProcessor",
17
+ "image_std": [
18
+ 0.229,
19
+ 0.224,
20
+ 0.225
21
+ ],
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "shortest_edge": 512
26
+ }
27
+ }
image_encoder_1/config.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "CLIPVisionModelWithProjection"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "dropout": 0.0,
7
+ "hidden_act": "quick_gelu",
8
+ "hidden_size": 1024,
9
+ "image_size": 224,
10
+ "initializer_factor": 1.0,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "layer_norm_eps": 1e-05,
14
+ "model_type": "clip_vision_model",
15
+ "num_attention_heads": 16,
16
+ "num_channels": 3,
17
+ "num_hidden_layers": 24,
18
+ "patch_size": 14,
19
+ "projection_dim": 768,
20
+ "torch_dtype": "bfloat16",
21
+ "transformers_version": "4.45.2"
22
+ }
image_encoder_1/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4b33d864f89a793357a768cb07d0dc18d6a14e6664f4110a0d535ca9ba78da8
3
+ size 607980488
image_encoder_2/config.json ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "apply_layernorm": true,
3
+ "architectures": [
4
+ "Dinov2Model"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "drop_path_rate": 0.0,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 1024,
11
+ "image_size": 518,
12
+ "initializer_range": 0.02,
13
+ "layer_norm_eps": 1e-06,
14
+ "layerscale_value": 1.0,
15
+ "mlp_ratio": 4,
16
+ "model_type": "dinov2",
17
+ "num_attention_heads": 16,
18
+ "num_channels": 7,
19
+ "num_hidden_layers": 24,
20
+ "out_features": [
21
+ "stage24"
22
+ ],
23
+ "out_indices": [
24
+ 24
25
+ ],
26
+ "patch_size": 14,
27
+ "qkv_bias": true,
28
+ "reshape_hidden_states": true,
29
+ "stage_names": [
30
+ "stem",
31
+ "stage1",
32
+ "stage2",
33
+ "stage3",
34
+ "stage4",
35
+ "stage5",
36
+ "stage6",
37
+ "stage7",
38
+ "stage8",
39
+ "stage9",
40
+ "stage10",
41
+ "stage11",
42
+ "stage12",
43
+ "stage13",
44
+ "stage14",
45
+ "stage15",
46
+ "stage16",
47
+ "stage17",
48
+ "stage18",
49
+ "stage19",
50
+ "stage20",
51
+ "stage21",
52
+ "stage22",
53
+ "stage23",
54
+ "stage24"
55
+ ],
56
+ "torch_dtype": "bfloat16",
57
+ "transformers_version": "4.45.2",
58
+ "use_swiglu_ffn": false
59
+ }
image_encoder_2/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a354885ca4601d0eb89aa2ec4f5a32a56a9e7a92c725c6faadb2f783b0bb65ba
3
+ size 610391440
model_index.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "MIDIPipeline",
3
+ "_diffusers_version": "0.32.2",
4
+ "feature_extractor_1": [
5
+ "transformers",
6
+ "CLIPImageProcessor"
7
+ ],
8
+ "feature_extractor_2": [
9
+ "transformers",
10
+ "BitImageProcessor"
11
+ ],
12
+ "image_encoder_1": [
13
+ "transformers",
14
+ "CLIPVisionModelWithProjection"
15
+ ],
16
+ "image_encoder_2": [
17
+ "transformers",
18
+ "Dinov2Model"
19
+ ],
20
+ "scheduler": [
21
+ "midi.schedulers.scheduling_rectified_flow",
22
+ "RectifiedFlowScheduler"
23
+ ],
24
+ "transformer": [
25
+ "midi.models.transformers.triposg_transformer",
26
+ "TripoSGDiTModel"
27
+ ],
28
+ "vae": [
29
+ "midi.models.autoencoders.autoencoder_kl_triposg",
30
+ "TripoSGVAEModel"
31
+ ]
32
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "RectifiedFlowScheduler",
3
+ "_diffusers_version": "0.32.2",
4
+ "num_train_timesteps": 1000,
5
+ "shift": 2,
6
+ "use_dynamic_shifting": false
7
+ }
transformer/config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "TripoSGDiTModel",
3
+ "_diffusers_version": "0.32.2",
4
+ "cross_attention_2_dim": 1024,
5
+ "cross_attention_dim": 768,
6
+ "in_channels": 64,
7
+ "num_attention_heads": 16,
8
+ "num_layers": 21,
9
+ "width": 2048
10
+ }
transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92c7177fa8f3a4b70b29b12c36725fee71c10b404f5cbe4edcdf1daa3c713b1b
3
+ size 3363898656
vae/config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "TripoSGVAEModel",
3
+ "_diffusers_version": "0.32.2",
4
+ "embed_frequency": 8,
5
+ "embed_include_pi": false,
6
+ "embedding_type": "frequency",
7
+ "in_channels": 3,
8
+ "latent_channels": 64,
9
+ "num_attention_heads": 8,
10
+ "num_layers_decoder": 16,
11
+ "num_layers_encoder": 8,
12
+ "width_decoder": 1024,
13
+ "width_encoder": 512
14
+ }
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:826352fb8e5dd13d190cbb679c790f6caf7ed4048c949c00e326cb203e30e56d
3
+ size 485361730