artspark committed on
Commit
bc29254
·
1 Parent(s): 54febca
model_index.json CHANGED
@@ -1,10 +1,11 @@
1
  {
2
  "_class_name": "StableDiffusionXLPipeline",
3
- "_diffusers_version": "0.19.3",
 
4
  "force_zeros_for_empty_prompt": true,
5
  "scheduler": [
6
  "diffusers",
7
- "EulerDiscreteScheduler"
8
  ],
9
  "text_encoder": [
10
  "transformers",
 
1
  {
2
  "_class_name": "StableDiffusionXLPipeline",
3
+ "_diffusers_version": "0.20.0",
4
+ "_name_or_path": "./dsxl",
5
  "force_zeros_for_empty_prompt": true,
6
  "scheduler": [
7
  "diffusers",
8
+ "EulerAncestralDiscreteScheduler"
9
  ],
10
  "text_encoder": [
11
  "transformers",
scheduler/scheduler_config.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
- "_class_name": "EulerDiscreteScheduler",
3
- "_diffusers_version": "0.19.3",
4
  "beta_end": 0.012,
5
  "beta_schedule": "scaled_linear",
6
  "beta_start": 0.00085,
 
1
  {
2
+ "_class_name": "EulerAncestralDiscreteScheduler",
3
+ "_diffusers_version": "0.20.0",
4
  "beta_end": 0.012,
5
  "beta_schedule": "scaled_linear",
6
  "beta_start": 0.00085,
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "openai/clip-vit-large-patch14",
3
  "architectures": [
4
  "CLIPTextModel"
5
  ],
@@ -20,6 +20,6 @@
20
  "pad_token_id": 1,
21
  "projection_dim": 768,
22
  "torch_dtype": "float16",
23
- "transformers_version": "4.31.0",
24
  "vocab_size": 49408
25
  }
 
1
  {
2
+ "_name_or_path": "./dsxl/text_encoder",
3
  "architectures": [
4
  "CLIPTextModel"
5
  ],
 
20
  "pad_token_id": 1,
21
  "projection_dim": 768,
22
  "torch_dtype": "float16",
23
+ "transformers_version": "4.29.1",
24
  "vocab_size": 49408
25
  }
text_encoder_2/config.json CHANGED
@@ -1,4 +1,5 @@
1
  {
 
2
  "architectures": [
3
  "CLIPTextModelWithProjection"
4
  ],
@@ -19,6 +20,6 @@
19
  "pad_token_id": 1,
20
  "projection_dim": 1280,
21
  "torch_dtype": "float16",
22
- "transformers_version": "4.31.0",
23
  "vocab_size": 49408
24
  }
 
1
  {
2
+ "_name_or_path": "./dsxl/text_encoder_2",
3
  "architectures": [
4
  "CLIPTextModelWithProjection"
5
  ],
 
20
  "pad_token_id": 1,
21
  "projection_dim": 1280,
22
  "torch_dtype": "float16",
23
+ "transformers_version": "4.29.1",
24
  "vocab_size": 49408
25
  }
unet/config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "_class_name": "UNet2DConditionModel",
3
- "_diffusers_version": "0.19.3",
 
4
  "act_fn": "silu",
5
  "addition_embed_type": "text_time",
6
  "addition_embed_type_num_heads": 64,
@@ -10,6 +11,7 @@
10
  10,
11
  20
12
  ],
 
13
  "block_out_channels": [
14
  320,
15
  640,
 
1
  {
2
  "_class_name": "UNet2DConditionModel",
3
+ "_diffusers_version": "0.20.0",
4
+ "_name_or_path": "./dsxl/unet",
5
  "act_fn": "silu",
6
  "addition_embed_type": "text_time",
7
  "addition_embed_type_num_heads": 64,
 
11
  10,
12
  20
13
  ],
14
+ "attention_type": "default",
15
  "block_out_channels": [
16
  320,
17
  640,
vae/config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "_class_name": "AutoencoderKL",
3
- "_diffusers_version": "0.19.3",
 
4
  "act_fn": "silu",
5
  "block_out_channels": [
6
  128,
 
1
  {
2
  "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.20.0",
4
+ "_name_or_path": "./dsxl/vae",
5
  "act_fn": "silu",
6
  "block_out_channels": [
7
  128,