Upload folder using huggingface_hub
#1 opened by dn6 (HF staff)
README.md CHANGED
@@ -6,7 +6,7 @@ tags:
   - image-to-video
 ---
 
-Unofficial Diffusers-format weights for https://huggingface.co/Lightricks/LTX-Video (version 0.9.0).
+Unofficial Diffusers-format weights for https://huggingface.co/Lightricks/LTX-Video (version 0.9.1).
 
 Text-to-Video:
 
@@ -15,7 +15,7 @@ import torch
 from diffusers import LTXPipeline
 from diffusers.utils import export_to_video
 
-pipe = LTXPipeline.from_pretrained("a-r-r-o-w/LTX-Video-diffusers", torch_dtype=torch.bfloat16)
+pipe = LTXPipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.1-diffusers", torch_dtype=torch.bfloat16)
 pipe.to("cuda")
 
 prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage"
@@ -28,6 +28,8 @@ video = pipe(
     height=480,
     num_frames=161,
     num_inference_steps=50,
+    decode_timestep=0.03,
+    decode_noise_scale=0.025,
 ).frames[0]
 export_to_video(video, "output.mp4", fps=24)
 ```
@@ -39,7 +41,7 @@ import torch
 from diffusers import LTXImageToVideoPipeline
 from diffusers.utils import export_to_video, load_image
 
-pipe = LTXImageToVideoPipeline.from_pretrained("a-r-r-o-w/LTX-Video-diffusers", torch_dtype=torch.bfloat16)
+pipe = LTXImageToVideoPipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.1-diffusers", torch_dtype=torch.bfloat16)
 pipe.to("cuda")
 
 image = load_image(
@@ -56,6 +58,8 @@ video = pipe(
     height=480,
     num_frames=161,
     num_inference_steps=50,
+    decode_timestep=0.03,
+    decode_noise_scale=0.025,
 ).frames[0]
 export_to_video(video, "output.mp4", fps=24)
 ```
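Taken together, the README changes point the examples at the 0.9.1 repo and thread two new arguments through the VAE decode step. As a sanity check, here is a minimal, self-contained sketch of the updated text-to-video snippet; arguments not visible in the hunks above (e.g. `width`) are left at the pipeline defaults, and the prompt is shortened.

```python
import torch
from diffusers import LTXPipeline
from diffusers.utils import export_to_video

# Repo id and dtype taken from the updated README
pipe = LTXPipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.1-diffusers", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Shortened here; see the README for the full prompt
prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair."

video = pipe(
    prompt=prompt,
    height=480,
    num_frames=161,
    num_inference_steps=50,
    decode_timestep=0.03,      # new in this revision: the 0.9.1 VAE decoder is timestep-conditioned
    decode_noise_scale=0.025,  # new in this revision: noise blended into the latents before decoding
).frames[0]
export_to_video(video, "output.mp4", fps=24)
```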
model_index.json CHANGED
@@ -15,10 +15,10 @@
   ],
   "transformer": [
     "diffusers",
-    "LTXTransformer3DModel"
+    "LTXVideoTransformer3DModel"
   ],
   "vae": [
     "diffusers",
-    "AutoencoderKLLTX"
+    "AutoencoderKLLTXVideo"
   ]
 }
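The renames in `model_index.json` line the components up with the class names exported by Diffusers (`LTXVideoTransformer3DModel`, `AutoencoderKLLTXVideo`). A hedged sketch of loading the subcomponents directly from their subfolders, assuming a Diffusers version that ships these classes (the configs in this repo report `0.32.0.dev0`):

```python
import torch
from diffusers import AutoencoderKLLTXVideo, LTXVideoTransformer3DModel

repo_id = "a-r-r-o-w/LTX-Video-0.9.1-diffusers"  # repo id from the README examples

# Subfolder names follow the component keys in model_index.json
transformer = LTXVideoTransformer3DModel.from_pretrained(
    repo_id, subfolder="transformer", torch_dtype=torch.bfloat16
)
vae = AutoencoderKLLTXVideo.from_pretrained(
    repo_id, subfolder="vae", torch_dtype=torch.bfloat16
)
```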
text_encoder/config.json CHANGED
@@ -26,7 +26,7 @@
   "relative_attention_num_buckets": 32,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.46.2",
+  "transformers_version": "4.48.0.dev0",
   "use_cache": true,
   "vocab_size": 32128
 }
tokenizer/tokenizer_config.json CHANGED
@@ -931,6 +931,7 @@
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "extra_ids": 100,
+  "extra_special_tokens": {},
   "legacy": true,
   "model_max_length": 128,
   "pad_token": "<pad>",
transformer/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_class_name": "LTXTransformer3DModel",
+  "_class_name": "LTXVideoTransformer3DModel",
   "_diffusers_version": "0.32.0.dev0",
   "activation_fn": "gelu-approximate",
   "attention_bias": true,
transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75e214999dbee61d5b8dd3d865dd4927c501f71510ab4efe612db1b50f96b973
+size 3846852608
vae/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_class_name": "AutoencoderKLLTX",
+  "_class_name": "AutoencoderKLLTXVideo",
   "_diffusers_version": "0.32.0.dev0",
   "block_out_channels": [
     128,
@@ -7,7 +7,29 @@
     512,
     512
   ],
+  "decoder_block_out_channels": [
+    256,
+    512,
+    1024
+  ],
   "decoder_causal": false,
+  "decoder_inject_noise": [
+    true,
+    true,
+    true,
+    false
+  ],
+  "decoder_layers_per_block": [
+    5,
+    6,
+    7,
+    8
+  ],
+  "decoder_spatio_temporal_scaling": [
+    true,
+    true,
+    true
+  ],
   "encoder_causal": true,
   "in_channels": 3,
   "latent_channels": 128,
@@ -28,5 +50,16 @@
     true,
     true,
     false
+  ],
+  "timestep_conditioning": true,
+  "upsample_factor": [
+    2,
+    2,
+    2
+  ],
+  "upsample_residual": [
+    true,
+    true,
+    true
   ]
 }
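The new keys describe what changes in the 0.9.1 VAE decoder: a separate decoder channel/layer layout, noise injection in the decoder blocks, residual upsampling, and `timestep_conditioning`, which is what the `decode_timestep` / `decode_noise_scale` arguments added to the README feed into. A small sketch for inspecting these fields after loading, assuming the same repo id as above:

```python
from diffusers import AutoencoderKLLTXVideo

vae = AutoencoderKLLTXVideo.from_pretrained(
    "a-r-r-o-w/LTX-Video-0.9.1-diffusers", subfolder="vae"
)

# Keys added to vae/config.json in this revision
for key in (
    "decoder_block_out_channels",
    "decoder_inject_noise",
    "decoder_layers_per_block",
    "decoder_spatio_temporal_scaling",
    "timestep_conditioning",
    "upsample_factor",
    "upsample_residual",
):
    print(key, "=", vae.config[key])
```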
vae/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:265ca87cb5dff5e37f924286e957324e282fe7710a952a7dafc0df43883e2010
-size 1676798532
+oid sha256:3a70d8d49fb2cc3698ffe9ed0e09fba5cd65d6a7d83fe89320ebc1d6fcc94536
+size 1869989690