Skip to content

Commit 8f7fde5

Browse files
authored
[CI] Update Release Tests (#9274)
* update

* update
1 parent a596726 commit 8f7fde5

File tree

2 files changed

+389
-2
lines changed

2 files changed

+389
-2
lines changed

.github/workflows/push_tests.yml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,6 @@ on:
44
push:
55
branches:
66
- main
7-
- "v*.*.*-release"
8-
- "v*.*.*-patch"
97
paths:
108
- "src/diffusers/**.py"
119
- "examples/**.py"
Lines changed: 389 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,389 @@
1+
# Duplicate workflow to push_tests.yml that is meant to run on release/patch branches as a final check
2+
# Creating a duplicate workflow here is simpler than adding complex path/branch parsing logic to push_tests.yml
3+
# Needs to be updated if push_tests.yml updated
4+
name: (Release) Fast GPU Tests on release/patch branches
5+
6+
on:
7+
push:
8+
branches:
9+
- "v*.*.*-release"
10+
- "v*.*.*-patch"
11+
12+
env:
13+
DIFFUSERS_IS_CI: yes
14+
OMP_NUM_THREADS: 8
15+
MKL_NUM_THREADS: 8
16+
PYTEST_TIMEOUT: 600
17+
PIPELINE_USAGE_CUTOFF: 50000
18+
19+
jobs:
20+
setup_torch_cuda_pipeline_matrix:
21+
name: Setup Torch Pipelines CUDA Slow Tests Matrix
22+
runs-on:
23+
group: aws-general-8-plus
24+
container:
25+
image: diffusers/diffusers-pytorch-cpu
26+
outputs:
27+
pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
28+
steps:
29+
- name: Checkout diffusers
30+
uses: actions/checkout@v3
31+
with:
32+
fetch-depth: 2
33+
- name: Install dependencies
34+
run: |
35+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
36+
python -m uv pip install -e [quality,test]
37+
- name: Environment
38+
run: |
39+
python utils/print_env.py
40+
- name: Fetch Pipeline Matrix
41+
id: fetch_pipeline_matrix
42+
run: |
43+
matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
44+
echo $matrix
45+
echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
46+
- name: Pipeline Tests Artifacts
47+
if: ${{ always() }}
48+
uses: actions/upload-artifact@v2
49+
with:
50+
name: test-pipelines.json
51+
path: reports
52+
53+
torch_pipelines_cuda_tests:
54+
name: Torch Pipelines CUDA Tests
55+
needs: setup_torch_cuda_pipeline_matrix
56+
strategy:
57+
fail-fast: false
58+
max-parallel: 8
59+
matrix:
60+
module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
61+
runs-on:
62+
group: aws-g4dn-2xlarge
63+
container:
64+
image: diffusers/diffusers-pytorch-cuda
65+
options: --shm-size "16gb" --ipc host --gpus 0
66+
steps:
67+
- name: Checkout diffusers
68+
uses: actions/checkout@v3
69+
with:
70+
fetch-depth: 2
71+
- name: NVIDIA-SMI
72+
run: |
73+
nvidia-smi
74+
- name: Install dependencies
75+
run: |
76+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
77+
python -m uv pip install -e [quality,test]
78+
python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
79+
- name: Environment
80+
run: |
81+
python utils/print_env.py
82+
- name: Slow PyTorch CUDA checkpoint tests on Ubuntu
83+
env:
84+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
85+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
86+
CUBLAS_WORKSPACE_CONFIG: :16:8
87+
run: |
88+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
89+
-s -v -k "not Flax and not Onnx" \
90+
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
91+
tests/pipelines/${{ matrix.module }}
92+
- name: Failure short reports
93+
if: ${{ failure() }}
94+
run: |
95+
cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
96+
cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
97+
- name: Test suite reports artifacts
98+
if: ${{ always() }}
99+
uses: actions/upload-artifact@v2
100+
with:
101+
name: pipeline_${{ matrix.module }}_test_reports
102+
path: reports
103+
104+
torch_cuda_tests:
105+
name: Torch CUDA Tests
106+
runs-on:
107+
group: aws-g4dn-2xlarge
108+
container:
109+
image: diffusers/diffusers-pytorch-cuda
110+
options: --shm-size "16gb" --ipc host --gpus 0
111+
defaults:
112+
run:
113+
shell: bash
114+
strategy:
115+
fail-fast: false
116+
max-parallel: 2
117+
matrix:
118+
module: [models, schedulers, lora, others, single_file]
119+
steps:
120+
- name: Checkout diffusers
121+
uses: actions/checkout@v3
122+
with:
123+
fetch-depth: 2
124+
125+
- name: Install dependencies
126+
run: |
127+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
128+
python -m uv pip install -e [quality,test]
129+
python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
130+
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
131+
132+
- name: Environment
133+
run: |
134+
python utils/print_env.py
135+
136+
- name: Run PyTorch CUDA tests
137+
env:
138+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
139+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
140+
CUBLAS_WORKSPACE_CONFIG: :16:8
141+
run: |
142+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
143+
-s -v -k "not Flax and not Onnx" \
144+
--make-reports=tests_torch_cuda \
145+
tests/${{ matrix.module }}
146+
147+
- name: Failure short reports
148+
if: ${{ failure() }}
149+
run: |
150+
cat reports/tests_torch_cuda_stats.txt
151+
cat reports/tests_torch_cuda_failures_short.txt
152+
153+
- name: Test suite reports artifacts
154+
if: ${{ always() }}
155+
uses: actions/upload-artifact@v2
156+
with:
157+
name: torch_cuda_test_reports
158+
path: reports
159+
160+
flax_tpu_tests:
161+
name: Flax TPU Tests
162+
runs-on: docker-tpu
163+
container:
164+
image: diffusers/diffusers-flax-tpu
165+
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
166+
defaults:
167+
run:
168+
shell: bash
169+
steps:
170+
- name: Checkout diffusers
171+
uses: actions/checkout@v3
172+
with:
173+
fetch-depth: 2
174+
175+
- name: Install dependencies
176+
run: |
177+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
178+
python -m uv pip install -e [quality,test]
179+
python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
180+
181+
- name: Environment
182+
run: |
183+
python utils/print_env.py
184+
185+
- name: Run slow Flax TPU tests
186+
env:
187+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
188+
run: |
189+
python -m pytest -n 0 \
190+
-s -v -k "Flax" \
191+
--make-reports=tests_flax_tpu \
192+
tests/
193+
194+
- name: Failure short reports
195+
if: ${{ failure() }}
196+
run: |
197+
cat reports/tests_flax_tpu_stats.txt
198+
cat reports/tests_flax_tpu_failures_short.txt
199+
200+
- name: Test suite reports artifacts
201+
if: ${{ always() }}
202+
uses: actions/upload-artifact@v2
203+
with:
204+
name: flax_tpu_test_reports
205+
path: reports
206+
207+
onnx_cuda_tests:
208+
name: ONNX CUDA Tests
209+
runs-on:
210+
group: aws-g4dn-2xlarge
211+
container:
212+
image: diffusers/diffusers-onnxruntime-cuda
213+
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
214+
defaults:
215+
run:
216+
shell: bash
217+
steps:
218+
- name: Checkout diffusers
219+
uses: actions/checkout@v3
220+
with:
221+
fetch-depth: 2
222+
223+
- name: Install dependencies
224+
run: |
225+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
226+
python -m uv pip install -e [quality,test]
227+
python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
228+
229+
- name: Environment
230+
run: |
231+
python utils/print_env.py
232+
233+
- name: Run slow ONNXRuntime CUDA tests
234+
env:
235+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
236+
run: |
237+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
238+
-s -v -k "Onnx" \
239+
--make-reports=tests_onnx_cuda \
240+
tests/
241+
242+
- name: Failure short reports
243+
if: ${{ failure() }}
244+
run: |
245+
cat reports/tests_onnx_cuda_stats.txt
246+
cat reports/tests_onnx_cuda_failures_short.txt
247+
248+
- name: Test suite reports artifacts
249+
if: ${{ always() }}
250+
uses: actions/upload-artifact@v2
251+
with:
252+
name: onnx_cuda_test_reports
253+
path: reports
254+
255+
run_torch_compile_tests:
256+
name: PyTorch Compile CUDA tests
257+
258+
runs-on:
259+
group: aws-g4dn-2xlarge
260+
261+
container:
262+
image: diffusers/diffusers-pytorch-compile-cuda
263+
options: --gpus 0 --shm-size "16gb" --ipc host
264+
265+
steps:
266+
- name: Checkout diffusers
267+
uses: actions/checkout@v3
268+
with:
269+
fetch-depth: 2
270+
271+
- name: NVIDIA-SMI
272+
run: |
273+
nvidia-smi
274+
- name: Install dependencies
275+
run: |
276+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
277+
python -m uv pip install -e [quality,test,training]
278+
- name: Environment
279+
run: |
280+
python utils/print_env.py
281+
- name: Run example tests on GPU
282+
env:
283+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
284+
RUN_COMPILE: yes
285+
run: |
286+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
287+
- name: Failure short reports
288+
if: ${{ failure() }}
289+
run: cat reports/tests_torch_compile_cuda_failures_short.txt
290+
291+
- name: Test suite reports artifacts
292+
if: ${{ always() }}
293+
uses: actions/upload-artifact@v2
294+
with:
295+
name: torch_compile_test_reports
296+
path: reports
297+
298+
run_xformers_tests:
299+
name: PyTorch xformers CUDA tests
300+
301+
runs-on:
302+
group: aws-g4dn-2xlarge
303+
304+
container:
305+
image: diffusers/diffusers-pytorch-xformers-cuda
306+
options: --gpus 0 --shm-size "16gb" --ipc host
307+
308+
steps:
309+
- name: Checkout diffusers
310+
uses: actions/checkout@v3
311+
with:
312+
fetch-depth: 2
313+
314+
- name: NVIDIA-SMI
315+
run: |
316+
nvidia-smi
317+
- name: Install dependencies
318+
run: |
319+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
320+
python -m uv pip install -e [quality,test,training]
321+
- name: Environment
322+
run: |
323+
python utils/print_env.py
324+
- name: Run example tests on GPU
325+
env:
326+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
327+
run: |
328+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
329+
- name: Failure short reports
330+
if: ${{ failure() }}
331+
run: cat reports/tests_torch_xformers_cuda_failures_short.txt
332+
333+
- name: Test suite reports artifacts
334+
if: ${{ always() }}
335+
uses: actions/upload-artifact@v2
336+
with:
337+
name: torch_xformers_test_reports
338+
path: reports
339+
340+
run_examples_tests:
341+
name: Examples PyTorch CUDA tests on Ubuntu
342+
343+
runs-on:
344+
group: aws-g4dn-2xlarge
345+
346+
container:
347+
image: diffusers/diffusers-pytorch-cuda
348+
options: --gpus 0 --shm-size "16gb" --ipc host
349+
350+
steps:
351+
- name: Checkout diffusers
352+
uses: actions/checkout@v3
353+
with:
354+
fetch-depth: 2
355+
356+
- name: NVIDIA-SMI
357+
run: |
358+
nvidia-smi
359+
360+
- name: Install dependencies
361+
run: |
362+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
363+
python -m uv pip install -e [quality,test,training]
364+
365+
- name: Environment
366+
run: |
367+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
368+
python utils/print_env.py
369+
370+
- name: Run example tests on GPU
371+
env:
372+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
373+
run: |
374+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
375+
python -m uv pip install timm
376+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
377+
378+
- name: Failure short reports
379+
if: ${{ failure() }}
380+
run: |
381+
cat reports/examples_torch_cuda_stats.txt
382+
cat reports/examples_torch_cuda_failures_short.txt
383+
384+
- name: Test suite reports artifacts
385+
if: ${{ always() }}
386+
uses: actions/upload-artifact@v2
387+
with:
388+
name: examples_test_reports
389+
path: reports

0 commit comments

Comments (0)