
black==23.7.0

chardet>=5.1.0

clip @ git+https://github.com/openai/CLIP.git

einops>=0.6.1

fairscale>=0.4.13

fire>=0.5.0

fsspec>=2023.6.0

invisible-watermark>=0.2.0

kornia==0.6.9

matplotlib>=3.7.2

natsort>=8.4.0

numpy>=1.24.4

omegaconf>=2.3.0

onnx<=1.12.0

open-clip-torch>=2.20.0

opencv-python==4.6.0.66

pandas>=2.0.3

pillow>=9.5.0

pudb>=2022.1.3

pytorch-lightning==1.8.5

pyyaml>=6.0.1

scipy>=1.10.1

streamlit>=1.25.0

tensorboardx==2.5.1

timm>=0.9.2

tokenizers==0.12.1

--extra-index-url https://download.pytorch.org/whl/cu117

torch==1.13.1+cu117

torchaudio==0.13.1

torchdata==0.5.1

torchmetrics>=1.0.1

torchvision==0.14.1+cu117

tqdm>=4.65.0

transformers==4.19.1

triton==2.0.0.post1

urllib3<1.27,>=1.25.4

wandb>=0.15.6

webdataset>=0.2.33

wheel>=0.41.0

xformers==0.0.16