Commit eb6be39
Parent(s): 858f7c7
Speed-up by 30% (#136)
add necessary changes (52d10dd449900fc775900aa37dafb64ed75022ea)
Co-authored-by: Patrick von Platen <[email protected]>
- Dockerfile +90 -0
- README.md +1 -2
- model.py +9 -0
- requirements.txt +1 -3
Dockerfile
ADDED
@@ -0,0 +1,90 @@
+# Download LFS content while building in order to make this step cacheable
+# #===== LFS =====
+# FROM alpine/git:2.36.2 AS lfs
+# WORKDIR /app
+# COPY --link .lfs.hf.co .
+# RUN --mount=type=secret,id=SPACE_REPOSITORY,mode=0444,required=true \
+#     git init \
+#     && git remote add origin $(cat /run/secrets/SPACE_REPOSITORY) \
+#     && git add --all \
+#     && git config user.email "[email protected]" \
+#     && git config user.name "Name" \
+#     && git commit -m "lfs" \
+#     && git lfs pull \
+#     && rm -rf .git .gitattributes
+# #===============
+
+FROM nvidia/cuda:11.8.0-runtime-ubuntu18.04
+# BEGIN Static part
+ENV DEBIAN_FRONTEND=noninteractive \
+    TZ=Europe/Paris
+
+RUN apt-get update && apt-get install -y \
+    git \
+    make build-essential libssl-dev zlib1g-dev \
+    libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
+    libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev git-lfs \
+    ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
+    && rm -rf /var/lib/apt/lists/* \
+    && git lfs install
+
+# User
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+WORKDIR /home/user/app
+
+# Pyenv
+RUN curl https://pyenv.run | bash
+ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH
+
+ARG PIP_VERSION=22.3.1
+ARG PYTHON_VERSION=3.10
+# Python
+RUN pyenv install $PYTHON_VERSION && \
+    pyenv global $PYTHON_VERSION && \
+    pyenv rehash && \
+    pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools wheel && \
+    pip install --no-cache-dir \
+    datasets \
+    "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1"
+
+#^ Waiting for https://github.com/huggingface/huggingface_hub/pull/1345/files to be merged
+
+USER root
+# User Debian packages
+## Security warning : Potential user code executed as root (build time)
+RUN --mount=target=/root/packages.txt,source=packages.txt \
+    apt-get update && \
+    xargs -r -a /root/packages.txt apt-get install -y \
+    && rm -rf /var/lib/apt/lists/*
+
+USER user
+
+# Pre requirements (e.g. upgrading pip)
+RUN --mount=target=pre-requirements.txt,source=pre-requirements.txt \
+    pip install --no-cache-dir -r pre-requirements.txt
+
+# Python packages
+RUN pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu117
+RUN --mount=target=requirements.txt,source=requirements.txt \
+    pip install --no-cache-dir -r requirements.txt
+
+ARG SDK=gradio \
+    SDK_VERSION=3.27.0
+RUN pip install --no-cache-dir \
+    ${SDK}==${SDK_VERSION}
+
+# App
+# COPY --link --chown=1000 --from=lfs /app /home/user/app
+COPY --link --chown=1000 ./ /home/user/app
+ENV PYTHONPATH=$HOME/app \
+    PYTHONUNBUFFERED=1 \
+    GRADIO_ALLOW_FLAGGING=never \
+    GRADIO_NUM_PORTS=1 \
+    GRADIO_SERVER_NAME=0.0.0.0 \
+    GRADIO_THEME=huggingface \
+    SYSTEM=spaces
+
+CMD ["python", "app.py"]
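The image installs a PyTorch nightly cu117 wheel on top of the CUDA 11.8 runtime base, which is what makes torch.compile available to model.py below. A minimal sanity check that could be run inside the built container, assuming it is started with GPU access; this snippet is an illustration and not part of the commit:

# Illustration only (not part of the commit): quick runtime check inside the
# built image, assuming the container is started with GPU access.
import torch

print(torch.__version__)          # expected: a 2.x nightly build with +cu117
print(torch.cuda.is_available())  # should be True on a GPU-backed Space
print(hasattr(torch, "compile"))  # torch.compile is what model.py relies on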
README.md
CHANGED
@@ -3,8 +3,7 @@ title: IF
 emoji: 🔥
 colorFrom: pink
 colorTo: red
-sdk:
-sdk_version: 3.27.0
+sdk: docker
 python_version: 3.10.11
 app_file: app.py
 pinned: false
model.py
CHANGED
@@ -59,8 +59,17 @@ class Model:
         if DISABLE_AUTOMATIC_CPU_OFFLOAD:
             self.pipe.to(self.device)
             self.super_res_1_pipe.to(self.device)
+
+            self.pipe.unet.to(memory_format=torch.channels_last)
+            self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+            self.super_res_1_pipe.unet.to(memory_format=torch.channels_last)
+            self.super_res_1_pipe.unet = torch.compile(self.super_res_1_pipe.unet, mode="reduce-overhead", fullgraph=True)
+
             if not DISABLE_SD_X4_UPSCALER:
                 self.super_res_2_pipe.to(self.device)
+                self.super_res_2_pipe.unet.to(memory_format=torch.channels_last)
+                self.super_res_2_pipe.unet = torch.compile(self.super_res_2_pipe.unet, mode="reduce-overhead", fullgraph=True)
         else:
             self.pipe.enable_model_cpu_offload()
             self.super_res_1_pipe.enable_model_cpu_offload()
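The speed-up in this commit comes from the two-step pattern added above: move each pipeline's UNet to channels-last memory format, then wrap it with torch.compile. A minimal standalone sketch of the same pattern, assuming diffusers and a PyTorch 2.x build; the checkpoint name is only an example stand-in and is not taken from this diff:

# Sketch of the pattern added in model.py (illustration, not the Space's code).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0",  # example checkpoint (assumption)
    torch_dtype=torch.float16,
)
pipe.to("cuda")

# channels_last improves memory access for conv-heavy UNets; torch.compile with
# mode="reduce-overhead" trades a slow first call (graph capture + compilation)
# for faster steady-state inference, and fullgraph=True errors out instead of
# silently falling back to eager mode if the UNet cannot be captured whole.
pipe.unet.to(memory_format=torch.channels_last)
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

The first generation after compilation is noticeably slower while the graph is built; the reported ~30% gain presumably refers to subsequent calls that reuse the compiled kernels.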
requirements.txt
CHANGED
@@ -1,7 +1,7 @@
 accelerate==0.18.0
 beautifulsoup4==4.12.2
 bitsandbytes==0.38.1
-diffusers
+git+https://github.com/huggingface/diffusers@fix_torch_compile
 ftfy==6.1.1
 gradio==3.27.0
 huggingface_hub==0.14.1
@@ -10,7 +10,5 @@ Pillow==9.5.0
 safetensors==0.3.0
 sentencepiece==0.1.98
 tokenizers==0.13.3
-torch==2.0.0
-torchvision==0.15.1
 tqdm==4.65.0
 transformers==4.28.1
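The torch and torchvision pins are dropped because the Dockerfile now installs a PyTorch nightly cu117 build directly, and diffusers is pulled from the fix_torch_compile branch, presumably until the corresponding torch.compile fix lands in a release. A small hedged check of the packages this commit touches:

# Illustration only: verify the packages that this commit changes.
import diffusers
import torch

print(diffusers.__version__)  # installed from the fix_torch_compile branch
print(torch.__version__)      # provided by the Dockerfile's nightly cu117 install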