@@ -3,87 +3,116 @@ ARG UID=1000
ARG VERSION=EDGE
ARG RELEASE=0

- FROM python:3.10-slim as build
+ # #######################################
+ # Base stage
+ # #######################################
+ FROM docker.io/library/python:3.11-slim-bullseye AS base
+
+ # RUN mount cache for multi-arch: https://github.com/docker/buildx/issues/549#issuecomment-1788297892
+ ARG TARGETARCH
+ ARG TARGETVARIANT
+
+ WORKDIR /tmp
+
+ ENV NVIDIA_VISIBLE_DEVICES=all
+ ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+
+ # Install CUDA partially
+ # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#debian
+ # Installing the complete CUDA Toolkit system-wide usually adds around 8GB to the image size.
+ # Since most CUDA packages are already installed through pip, there's no need to download the entire toolkit.
+ # Therefore, we opt to install only the essential libraries.
+ # Here is the package list for your reference: https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64
+
+ ADD https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb /tmp/cuda-keyring_x86_64.deb
+ RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
+ --mount=type=cache,id=aptlists-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/lib/apt/lists \
+ dpkg -i cuda-keyring_x86_64.deb && \
+ rm -f cuda-keyring_x86_64.deb && \
+ apt-get update && \
+ apt-get install -y --no-install-recommends \
+ # !If you experience any related issues, replace the following line with `cuda-12-4` to obtain the complete CUDA package.
+ cuda-nvcc-12-4
+
+ ENV PATH="/usr/local/cuda/bin${PATH:+:${PATH}}"
+ ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64
+ ENV CUDA_VERSION=12.4
+ ENV NVIDIA_REQUIRE_CUDA=cuda>=12.4
+ ENV CUDA_HOME=/usr/local/cuda
+
+ # #######################################
+ # Build stage
+ # #######################################
+ FROM base AS build

# RUN mount cache for multi-arch: https://github.com/docker/buildx/issues/549#issuecomment-1788297892
ARG TARGETARCH
ARG TARGETVARIANT

WORKDIR /app

- # Install under /root/.local
- ENV PIP_USER="true"
- ARG PIP_NO_WARN_SCRIPT_LOCATION=0
- ARG PIP_ROOT_USER_ACTION="ignore"
+ # Install uv
+ COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
+
+ ENV UV_PROJECT_ENVIRONMENT=/venv
+ ENV VIRTUAL_ENV=/venv
+ ENV UV_LINK_MODE=copy
+ ENV UV_PYTHON_DOWNLOADS=0
+ ENV UV_INDEX=https://download.pytorch.org/whl/cu124
# Install build dependencies
RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
--mount=type=cache,id=aptlists-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/lib/apt/lists \
apt-get update && apt-get upgrade -y && \
apt-get install -y --no-install-recommends python3-launchpadlib git curl

- # Install PyTorch
- # The versions must align and be in sync with the requirements_linux_docker.txt
- # hadolint ignore=SC2102
- RUN --mount=type=cache,id=pip-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/root/.cache/pip \
- pip install -U --extra-index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://pypi.nvidia.com \
- torch==2.1.2 torchvision==0.16.2 \
- xformers==0.0.23.post1 \
- ninja \
- pip setuptools wheel
-
- # Install requirements
- RUN --mount=type=cache,id=pip-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/root/.cache/pip \
- --mount=source=requirements_linux_docker.txt,target=requirements_linux_docker.txt \
- --mount=source=requirements.txt,target=requirements.txt \
- --mount=source=setup/docker_setup.py,target=setup.py \
- --mount=source=sd-scripts,target=sd-scripts,rw \
- pip install -r requirements_linux_docker.txt -r requirements.txt
+ # Install big dependencies separately for layer caching
+ # !Please note that the version restrictions should be the same as in pyproject.toml
+ # None of the packages listed here should be removed by the next `uv sync` command
+ # If that happens, please update the version restrictions here or update the uv.lock file
+ RUN --mount=type=cache,id=uv-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/root/.cache/uv \
+ uv venv --system-site-packages /venv && \
+ uv pip install --no-deps \
+ # torch (866.2MiB)
+ torch==2.5.1+cu124 \
+ # triton (199.8MiB)
+ triton==3.1.0 \
+ # tensorflow (615.0MiB)
+ tensorflow>=2.16.1 \
+ # onnxruntime-gpu (215.7MiB)
+ onnxruntime-gpu==1.19.2
+
+ # Install dependencies
+ RUN --mount=type=cache,id=uv-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/root/.cache/uv \
+ --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+ --mount=type=bind,source=uv.lock,target=uv.lock \
+ --mount=type=bind,source=sd-scripts,target=sd-scripts,rw \
+ uv sync --frozen --no-dev --no-install-project --no-editable

# Replace pillow with pillow-simd (Only for x86)
ARG TARGETPLATFORM
RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
--mount=type=cache,id=aptlists-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/lib/apt/lists \
if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
apt-get update && apt-get install -y --no-install-recommends zlib1g-dev libjpeg62-turbo-dev build-essential && \
- pip uninstall -y pillow && \
- CC="cc -mavx2" pip install -U --force-reinstall pillow-simd; \
+ uv pip uninstall pillow && \
+ CC="cc -mavx2" uv pip install pillow-simd; \
fi
- FROM python:3.10-slim as final
+ # #######################################
+ # Final stage
+ # #######################################
+ FROM base AS final

ARG TARGETARCH
ARG TARGETVARIANT

- ENV NVIDIA_VISIBLE_DEVICES all
- ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
-
WORKDIR /tmp

- ENV CUDA_VERSION=12.1.1
- ENV NV_CUDA_CUDART_VERSION=12.1.105-1
- ENV NVIDIA_REQUIRE_CUDA=cuda>=12.1
- ENV NV_CUDA_COMPAT_PACKAGE=cuda-compat-12-1
-
- # Install CUDA partially
- ADD https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.0-1_all.deb .
- RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
- --mount=type=cache,id=aptlists-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/lib/apt/lists \
- dpkg -i cuda-keyring_1.0-1_all.deb && \
- rm cuda-keyring_1.0-1_all.deb && \
- sed -i 's/^Components: main$/& contrib/' /etc/apt/sources.list.d/debian.sources && \
- apt-get update && \
- apt-get install -y --no-install-recommends \
- # Installing the whole CUDA typically increases the image size by approximately **8GB**.
- # To decrease the image size, we opt to install only the necessary libraries.
- # Here is the package list for your reference: https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64
- # !If you experience any related issues, replace the following line with `cuda-12-1` to obtain the complete CUDA package.
- cuda-cudart-12-1=${NV_CUDA_CUDART_VERSION} ${NV_CUDA_COMPAT_PACKAGE} libcusparse-12-1 libnvjitlink-12-1
-
# Install runtime dependencies
RUN --mount=type=cache,id=apt-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/cache/apt \
--mount=type=cache,id=aptlists-$TARGETARCH$TARGETVARIANT,sharing=locked,target=/var/lib/apt/lists \
- apt-get update && \
+ apt-get update && apt-get upgrade -y && \
apt-get install -y --no-install-recommends libgl1 libglib2.0-0 libjpeg62 libtcl8.6 libtk8.6 libgoogle-perftools-dev dumb-init

# Fix missing libnvinfer7
@@ -98,20 +127,23 @@ RUN groupadd -g $UID $UID && \
# Create directories with correct permissions
RUN install -d -m 775 -o $UID -g 0 /dataset && \
install -d -m 775 -o $UID -g 0 /licenses && \
- install -d -m 775 -o $UID -g 0 /app
+ install -d -m 775 -o $UID -g 0 /app && \
+ install -d -m 775 -o $UID -g 0 /venv

# Copy licenses (OpenShift Policy)
COPY --link --chmod=775 LICENSE.md /licenses/LICENSE.md

# Copy dependencies and code (and support arbitrary uid for OpenShift best practice)
- COPY --link --chown=$UID:0 --chmod=775 --from=build /root/.local /home/$UID/.local
+ COPY --link --chown=$UID:0 --chmod=775 --from=build /venv /venv
COPY --link --chown=$UID:0 --chmod=775 . /app

- ENV PATH="/usr/local/cuda/lib:/usr/local/cuda/lib64:/home/$UID/.local/bin:$PATH"
- ENV PYTHONPATH="${PYTHONPATH}:/home/$UID/.local/lib/python3.10/site-packages"
- ENV LD_LIBRARY_PATH="/usr/local/cuda/lib:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
+ ENV PATH="/venv/bin${PATH:+:${PATH}}"
+ ENV PYTHONPATH="/venv/lib/python3.11/site-packages"
+
+ ENV LD_LIBRARY_PATH="/venv/lib/python3.11/site-packages/nvidia/cudnn/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
ENV LD_PRELOAD=libtcmalloc.so
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
+
# Rich logging
# https://rich.readthedocs.io/en/stable/console.html#interactive-mode
ENV FORCE_COLOR="true"
@@ -130,7 +162,7 @@ STOPSIGNAL SIGINT
# Use dumb-init as PID 1 to handle signals properly
ENTRYPOINT ["dumb-init", "--"]
- CMD ["python3", "kohya_gui.py", "--listen", "0.0.0.0", "--server_port", "7860", "--headless"]
+ CMD ["python3", "kohya_gui.py", "--listen", "0.0.0.0", "--server_port", "7860", "--headless", "--noverify"]

ARG VERSION
ARG RELEASE
@@ -144,4 +176,4 @@ LABEL name="bmaltais/kohya_ss" \
release=${RELEASE} \
io.k8s.display-name="kohya_ss" \
summary="Kohya's GUI: This repository provides a Gradio GUI for Kohya's Stable Diffusion trainers(https://github.com/kohya-ss/sd-scripts)." \
- description="The GUI allows you to set the training parameters and generate and run the required CLI commands to train the model. This is the docker image for Kohya's GUI. For more information about this tool, please visit the following website: https://github.com/bmaltais/kohya_ss."
+ description="The GUI allows you to set the training parameters and generate and run the required CLI commands to train the model. This is the docker image for Kohya's GUI. For more information about this tool, please visit the following website: https://github.com/bmaltais/kohya_ss."
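For reference, a minimal sketch of how the resulting image might be built and run locally. This is not part of the commit above; it assumes the Dockerfile sits at the repository root, the NVIDIA Container Toolkit is installed on the host, and the image tag `kohya-ss-gui` is an arbitrary placeholder. The port and /dataset mount follow the CMD and directory setup in the Dockerfile.

# BuildKit (buildx) is required for the RUN cache and bind mounts used above
docker buildx build -t kohya-ss-gui .

# Run with GPU access; 7860 matches --server_port in the CMD, /dataset is created in the image
docker run --gpus all -p 7860:7860 -v "$(pwd)/dataset:/dataset" kohya-ss-gui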