Julien Blanchon committed
Commit ebe8085 · 1 Parent(s): 423995d
Files changed (3)
  1. README.md +1 -0
  2. app.py +1 -3
  3. requirements.txt +1 -2
README.md CHANGED
@@ -7,6 +7,7 @@ sdk: gradio
 sdk_version: 5.44.1
 app_file: app.py
 pinned: false
+python_version: 3.10
 ---
 
 <h1 align="center">Transition Models: Rethinking the Generative Learning Objective</h1>
app.py CHANGED
@@ -47,9 +47,7 @@ def load_model_components(device: str = "cuda"):
     from huggingface_hub import hf_hub_download
 
     ckpt_path = hf_hub_download(
-        repo_id="blanchon/TiM-checkpoints",
-        filename="t2i_model.bin",
-        local_dir=Path(__file__).parent,
+        repo_id="blanchon/TiM-checkpoints", filename="t2i_model.bin"
     )
 
     if not Path(config_path).exists():
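
For context, a minimal sketch (not part of the commit) of what the simplified call does: without `local_dir`, `hf_hub_download` stores the file in the shared Hugging Face cache and returns the resolved path, so the checkpoint is no longer written next to `app.py` and restarts reuse the cached download.

```python
from huggingface_hub import hf_hub_download

# With no local_dir argument, the checkpoint is materialized in the default
# HF cache (under HF_HOME, ~/.cache/huggingface by default) and the cached
# file path is returned.
ckpt_path = hf_hub_download(
    repo_id="blanchon/TiM-checkpoints", filename="t2i_model.bin"
)
print(ckpt_path)
```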
requirements.txt CHANGED
@@ -1,6 +1,6 @@
 gradio>=4.0.0
 spaces>=0.28.0
-torch>=2.1.0
+torch==2.8.0
 torchvision
 diffusers
 transformers>=4.25.0
@@ -10,6 +10,5 @@ numpy
 Pillow
 safetensors
 tqdm
-# flash-attn>=2.0.0
 flash-attn @ https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.3.14/flash_attn-2.8.2+cu129torch2.8-cp310-cp310-linux_x86_64.whl
 accelerate
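
Why the pins line up (my reading of the wheel filename, not stated in the commit): the prebuilt flash-attn wheel is tagged `cu129torch2.8-cp310`, i.e. built against CUDA 12.9, torch 2.8, and CPython 3.10. Binary wheels only load against the torch and Python they were compiled for, which explains both `torch==2.8.0` here and the `python_version: 3.10` added to the README front-matter. A hypothetical sanity check along those lines:

```python
# Hypothetical startup check (not in the commit): fail fast if the runtime
# does not match the tags baked into the prebuilt flash-attn wheel.
import sys

import torch

assert sys.version_info[:2] == (3, 10), "flash-attn wheel is cp310-only"
assert torch.__version__.startswith("2.8"), "flash-attn wheel targets torch 2.8"
```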