enable several pipeline integration tests on XPU #11526

Merged · 8 commits · May 12, 2025
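Every file in this diff applies the same two-part change: the CUDA-only `require_torch_gpu` test guard becomes the device-agnostic `require_torch_accelerator`, and the hard-coded `torch.cuda.empty_cache()` calls in `setUp`/`tearDown` become `backend_empty_cache(torch_device)`, so the nightly integration tests can run on Intel XPU as well as NVIDIA GPUs. As a minimal sketch of the idea (an assumption about the helper's behavior, not the verbatim `diffusers.utils.testing_utils` source), `backend_empty_cache` amounts to a per-backend dispatch:

```python
# Sketch only: the real helper lives in diffusers.utils.testing_utils.
import torch


def backend_empty_cache(device: str) -> None:
    """Free cached allocator memory on whichever accelerator backend is active."""
    if device == "cuda":
        torch.cuda.empty_cache()
    elif device == "xpu":
        # Intel GPUs; assumes a PyTorch build with XPU support (2.4+).
        torch.xpu.empty_cache()
    elif device == "mps":
        torch.mps.empty_cache()
    # On "cpu" there is no device cache to clear, so this is a no-op.
```

Because every call site passes the resolved `torch_device`, the same test body works unchanged on CUDA, XPU, or MPS runners.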
15 changes: 11 additions & 4 deletions tests/pipelines/dance_diffusion/test_dance_diffusion.py
@@ -20,7 +20,14 @@
 import torch

 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    enable_full_determinism,
+    nightly,
+    require_torch_accelerator,
+    skip_mps,
+    torch_device,
+)

 from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -116,19 +123,19 @@ def test_inference_batch_single_identical(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class PipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_dance_diffusion(self):
         device = torch_device
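The decorator swap follows the same logic. A minimal sketch, assuming `require_torch_accelerator` simply skips whenever the resolved `torch_device` is plain CPU (unlike `require_torch_gpu`, it does not insist on CUDA specifically):

```python
import unittest

from diffusers.utils.testing_utils import torch_device


def require_torch_accelerator(test_case):
    # Skip unless some accelerator (CUDA, XPU, MPS, ...) was resolved as torch_device.
    return unittest.skipUnless(torch_device != "cpu", "test requires an accelerator")(test_case)
```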
10 changes: 6 additions & 4 deletions tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
@@ -28,13 +28,15 @@
     VQModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
+    torch_device,
 )

 from ..test_pipelines_common import PipelineTesterMixin
@@ -226,19 +228,19 @@


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_kandinsky_controlnet(self):
         expected_image = load_numpy(
12 changes: 7 additions & 5 deletions tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
@@ -29,13 +29,15 @@
     VQModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
+    torch_device,
 )

 from ..test_pipelines_common import PipelineTesterMixin
@@ -233,19 +235,19 @@ def test_float16_inference(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_kandinsky_controlnet_img2img(self):
         expected_image = load_numpy(
@@ -309,4 +311,4 @@ def test_kandinsky_controlnet_img2img(self):
         assert image.shape == (512, 512, 3)

         max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten())
-        assert max_diff < 1e-4
+        assert max_diff < 5e-4
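The only numeric change rides along with the device enablement: the cosine-distance budget for this test loosens from 1e-4 to 5e-4, presumably because XPU and CUDA kernels accumulate slightly different floating-point error. A rough sketch of what the metric computes (assuming the usual definition behind `numpy_cosine_similarity_distance`, not its verbatim source):

```python
import numpy as np


def numpy_cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    # 1 - cosine similarity of the flattened outputs: near 0.0 when the
    # images agree up to scale, growing as they diverge in direction.
    similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return float(1.0 - similarity)
```

A looser threshold keeps the check meaningful while absorbing per-backend drift, rather than pinning the test to one device's exact numerics.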
15 changes: 8 additions & 7 deletions tests/pipelines/latent_diffusion/test_latent_diffusion.py
@@ -22,10 +22,11 @@

 from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     load_numpy,
     nightly,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )

@@ -136,17 +137,17 @@ def test_inference_text2img(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class LDMTextToImagePipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, dtype=torch.float32, seed=0):
         generator = torch.manual_seed(seed)
@@ -177,17 +178,17 @@ def test_ldm_default_ddim(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, dtype=torch.float32, seed=0):
         generator = torch.manual_seed(seed)
14 changes: 10 additions & 4 deletions tests/pipelines/musicldm/test_musicldm.py
@@ -39,7 +39,13 @@
     UNet2DConditionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    enable_full_determinism,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -408,17 +414,17 @@ def test_to_dtype(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class MusicLDMPipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         generator = torch.Generator(device=generator_device).manual_seed(seed)
14 changes: 10 additions & 4 deletions tests/pipelines/shap_e/test_shap_e.py
@@ -21,7 +21,13 @@

 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    load_numpy,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)

 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

@@ -222,19 +228,19 @@ def test_sequential_cpu_offload_forward_pass(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ShapEPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_shap_e(self):
         expected_image = load_numpy(
9 changes: 5 additions & 4 deletions tests/pipelines/shap_e/test_shap_e_img2img.py
@@ -23,11 +23,12 @@
 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )

@@ -250,19 +251,19 @@ def test_sequential_cpu_offload_forward_pass(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_shap_e_img2img(self):
         input_image = load_image(