jingya huang committed on
Commit 88c383c
1 Parent(s): 2f0da2c

add artifacts

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.neuron filter=lfs diff=lfs merge=lfs -text
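
Since compiled *.neuron artifacts run to hundreds of megabytes or more (see the model.neuron pointer files below), this new rule routes them through Git LFS: the repo stores small pointer files while the Hub serves the binaries. A minimal sketch of fetching one such artifact, assuming the huggingface_hub client; the repo id is hypothetical:

```python
# A minimal sketch, assuming huggingface_hub is installed; the repo id is
# hypothetical -- substitute this repository's actual id.
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer to the full binary artifact
# and caches it locally.
path = hf_hub_download(
    repo_id="jingya/sdxl-base-neuronx",  # hypothetical
    filename="unet/model.neuron",
)
print(path)
```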
model_index.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "_class_name": "StableDiffusionXLImg2ImgPipeline",
+   "_diffusers_version": "0.26.2",
+   "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "force_zeros_for_empty_prompt": true,
+   "image_encoder": [
+     null,
+     null
+   ],
+   "requires_aesthetics_score": false,
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "text_encoder_2": [
+     "transformers",
+     "CLIPTextModelWithProjection"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "tokenizer_2": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
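
This model_index.json wires together an SDXL img2img pipeline whose sub-models are precompiled for AWS Neuron (the model.neuron artifacts added below). A minimal sketch of how such a repo is typically loaded with the optimum-neuron library on an Inferentia2 instance; the repo id and image URL are hypothetical:

```python
# A minimal sketch, assuming optimum-neuron on an inf2/trn1 instance;
# the repo id and image URL are hypothetical.
from optimum.neuron import NeuronStableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

# Loads the precompiled .neuron graphs directly, so nothing is compiled at
# load time; inputs must match the static shapes recorded in each
# sub-model's "neuron" config block.
pipe = NeuronStableDiffusionXLImg2ImgPipeline.from_pretrained(
    "jingya/sdxl-base-neuronx"  # hypothetical
)

init_image = load_image("https://example.com/sketch.png")  # hypothetical
image = pipe(
    prompt="a photo of an astronaut riding a horse",
    image=init_image,
).images[0]
image.save("out.png")
```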
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "_class_name": "EulerDiscreteScheduler",
+   "_diffusers_version": "0.26.2",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "interpolation_type": "linear",
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "sigma_max": null,
+   "sigma_min": null,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "timestep_spacing": "leading",
+   "timestep_type": "discrete",
+   "trained_betas": null,
+   "use_karras_sigmas": false
+ }
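
The scheduler is stock diffusers configuration (nothing Neuron-specific runs in it), so it can be inspected on its own. A sketch, assuming the standard diffusers API and a hypothetical repo id:

```python
# A minimal sketch, assuming the standard diffusers API; repo id hypothetical.
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler.from_pretrained(
    "jingya/sdxl-base-neuronx",  # hypothetical
    subfolder="scheduler",
)
print(scheduler.config.num_train_timesteps)  # 1000, as in the config above
```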
text_encoder/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_commit_hash": "462165984030d82259a11f4367a4eed129e94a7b",
+   "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/text_encoder",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip-text-model",
+   "neuron": {
+     "auto_cast": "matmul",
+     "auto_cast_type": "bf16",
+     "compiler_type": "neuronx-cc",
+     "compiler_version": "2.12.68.0+4480452af",
+     "dynamic_batch_size": false,
+     "input_names": [
+       "input_ids"
+     ],
+     "output_names": [
+       "last_hidden_state",
+       "pooler_output",
+       "hidden_states"
+     ],
+     "static_batch_size": 1,
+     "static_sequence_length": 77
+   },
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_hidden_states": true,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "task": "feature-extraction",
+   "torch_dtype": "float16",
+   "torchscript": true,
+   "transformers_version": "4.32.0.dev0",
+   "vocab_size": 49408
+ }
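
The "neuron" block records what was fixed at compile time: static batch size 1 and static sequence length 77. Any prompt therefore has to be padded or truncated to exactly 77 tokens before it can pass through the compiled graph. A minimal sketch of that constraint, with a hypothetical repo id:

```python
# A minimal sketch of matching the static shapes above
# (static_batch_size=1, static_sequence_length=77); repo id hypothetical.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "jingya/sdxl-base-neuronx",  # hypothetical
    subfolder="tokenizer",
)
enc = tokenizer(
    "a photo of an astronaut riding a horse",
    padding="max_length",  # pad to the fixed length the graph was traced with
    max_length=77,
    truncation=True,
    return_tensors="pt",
)
assert enc.input_ids.shape == (1, 77)  # any other shape misses the compiled graph
```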
text_encoder/model.neuron ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fe3de7569b601039daa5e1fa1ead9620ca7e0b321df3ddf7ddce53aeccb67c0
+ size 375823927
text_encoder_2/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_commit_hash": "462165984030d82259a11f4367a4eed129e94a7b",
+   "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/text_encoder_2",
+   "architectures": [
+     "CLIPTextModelWithProjection"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip-text-model",
+   "neuron": {
+     "auto_cast": "matmul",
+     "auto_cast_type": "bf16",
+     "compiler_type": "neuronx-cc",
+     "compiler_version": "2.12.68.0+4480452af",
+     "dynamic_batch_size": false,
+     "input_names": [
+       "input_ids"
+     ],
+     "output_names": [
+       "text_embeds",
+       "last_hidden_state",
+       "hidden_states"
+     ],
+     "static_batch_size": 1,
+     "static_sequence_length": 77
+   },
+   "num_attention_heads": 20,
+   "num_hidden_layers": 32,
+   "output_hidden_states": true,
+   "pad_token_id": 1,
+   "projection_dim": 1280,
+   "task": "feature-extraction",
+   "torch_dtype": "float16",
+   "torchscript": true,
+   "transformers_version": "4.32.0.dev0",
+   "vocab_size": 49408
+ }
text_encoder_2/model.neuron ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c04452be5b6018781cc96e81b98ee6a64dcde6bf92a544cd099f75895ba152b8
+ size 1788872895
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "!",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "!",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,104 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_commit_hash": null,
+   "_diffusers_version": "0.26.2",
+   "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/unet",
+   "_use_default_values": [
+     "attention_type",
+     "dropout",
+     "reverse_transformer_layers_per_block"
+   ],
+   "act_fn": "silu",
+   "addition_embed_type": "text_time",
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": 256,
+   "attention_head_dim": [
+     5,
+     10,
+     20
+   ],
+   "attention_type": "default",
+   "block_out_channels": [
+     320,
+     640,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 2048,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "model_type": "unet",
+   "neuron": {
+     "auto_cast": "matmul",
+     "auto_cast_type": "bf16",
+     "compiler_type": "neuronx-cc",
+     "compiler_version": "2.12.68.0+4480452af",
+     "dynamic_batch_size": false,
+     "input_names": [
+       "sample",
+       "timestep",
+       "encoder_hidden_states",
+       "text_embeds",
+       "time_ids"
+     ],
+     "output_names": [
+       "sample"
+     ],
+     "static_batch_size": 1,
+     "static_height": 128,
+     "static_num_channels": 4,
+     "static_sequence_length": 77,
+     "static_width": 128
+   },
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": 2816,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "reverse_transformer_layers_per_block": null,
+   "sample_size": 128,
+   "task": "semantic-segmentation",
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": [
+     1,
+     2,
+     10
+   ],
+   "transformers_version": null,
+   "up_block_types": [
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "UpBlock2D"
+   ],
+   "upcast_attention": null,
+   "use_linear_projection": true
+ }
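
Note that the UNet's static_height and static_width of 128 are latent-space dimensions, not pixels: the SDXL VAE downsamples by a factor of 8, which lines up with the 1024x1024 pixel shapes in vae_encoder/config.json below. A quick sanity check of that bookkeeping:

```python
# A minimal sketch of the shape relation between the configs in this commit.
unet_static_latent = 128                      # "static_height"/"static_width" above
num_vae_blocks = 4                            # len(block_out_channels) in the VAE configs
vae_scale_factor = 2 ** (num_vae_blocks - 1)  # three 2x downsamples -> 8
print(unet_static_latent * vae_scale_factor)  # 1024, the compiled pixel resolution
```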
unet/model.neuron ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e43e58b15d9de316cabccad378a5205d2fcf1b9ac38d34df64d522d811454c8e
+ size 4175315275
vae_decoder/config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_commit_hash": null,
+   "_diffusers_version": "0.26.2",
+   "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "model_type": "vae-decoder",
+   "neuron": {
+     "auto_cast": "matmul",
+     "auto_cast_type": "bf16",
+     "compiler_type": "neuronx-cc",
+     "compiler_version": "2.12.68.0+4480452af",
+     "dynamic_batch_size": false,
+     "input_names": [
+       "latent_sample"
+     ],
+     "output_names": [
+       "sample"
+     ],
+     "static_batch_size": 1,
+     "static_height": 128,
+     "static_num_channels": 4,
+     "static_width": 128
+   },
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 1024,
+   "scaling_factor": 0.13025,
+   "task": "semantic-segmentation",
+   "transformers_version": null,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
vae_decoder/model.neuron ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50668ee9cbcb715962fd3e799d46ad63e898043de6b8a22a6dad52059187199b
+ size 824602243
vae_encoder/config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_commit_hash": null,
+   "_diffusers_version": "0.26.2",
+   "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "model_type": "vae-encoder",
+   "neuron": {
+     "auto_cast": "matmul",
+     "auto_cast_type": "bf16",
+     "compiler_type": "neuronx-cc",
+     "compiler_version": "2.12.68.0+4480452af",
+     "dynamic_batch_size": false,
+     "input_names": [
+       "sample"
+     ],
+     "output_names": [
+       "latent_sample"
+     ],
+     "static_batch_size": 1,
+     "static_height": 1024,
+     "static_num_channels": 3,
+     "static_width": 1024
+   },
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 1024,
+   "scaling_factor": 0.13025,
+   "task": "semantic-segmentation",
+   "transformers_version": null,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
vae_encoder/model.neuron ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20099325a5d5d21b3015ce17b7259a71e3cef09b9876376f016b06f6219db3eb
+ size 425662083