diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
index 1f60c0b421f3..881946e6a0cc 100644
--- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py
+++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py
@@ -20,7 +20,14 @@
 import torch

 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    enable_full_determinism,
+    nightly,
+    require_torch_accelerator,
+    skip_mps,
+    torch_device,
+)

 from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -116,19 +123,19 @@ def test_inference_batch_single_identical(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class PipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_dance_diffusion(self):
         device = torch_device
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
index 10a95d6177b2..6454152b7abc 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
@@ -28,13 +28,15 @@
     VQModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
+    torch_device,
 )

 from ..test_pipelines_common import PipelineTesterMixin
@@ -226,19 +228,19 @@ def test_inference_batch_single_identical(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_kandinsky_controlnet(self):
         expected_image = load_numpy(
diff --git a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
index 58fbbecc0569..c99b7b738a2d 100644
--- a/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
+++ b/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
@@ -29,13 +29,15 @@
     VQModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
+    torch_device,
 )

 from ..test_pipelines_common import PipelineTesterMixin
@@ -233,19 +235,19 @@ def test_float16_inference(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_kandinsky_controlnet_img2img(self):
         expected_image = load_numpy(
@@ -309,4 +311,4 @@ def test_kandinsky_controlnet_img2img(self):
         assert image.shape == (512, 512, 3)

         max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten())
-        assert max_diff < 1e-4
+        assert max_diff < 5e-4
diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
index e751240e43b0..245116d5fa11 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py
@@ -22,10 +22,11 @@

 from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     load_numpy,
     nightly,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )

@@ -136,17 +137,17 @@ def test_inference_text2img(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class LDMTextToImagePipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, dtype=torch.float32, seed=0):
         generator = torch.manual_seed(seed)
@@ -177,17 +178,17 @@ def test_ldm_default_ddim(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, dtype=torch.float32, seed=0):
         generator = torch.manual_seed(seed)
diff --git a/tests/pipelines/musicldm/test_musicldm.py b/tests/pipelines/musicldm/test_musicldm.py
index bdd536b6ff86..7f553e919c71 100644
--- a/tests/pipelines/musicldm/test_musicldm.py
+++ b/tests/pipelines/musicldm/test_musicldm.py
@@ -39,7 +39,13 @@
     UNet2DConditionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    enable_full_determinism,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -408,17 +414,17 @@ def test_to_dtype(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class MusicLDMPipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         generator = torch.Generator(device=generator_device).manual_seed(seed)
diff --git a/tests/pipelines/shap_e/test_shap_e.py b/tests/pipelines/shap_e/test_shap_e.py
index 6cf643fe47a6..638de7e8cc75 100644
--- a/tests/pipelines/shap_e/test_shap_e.py
+++ b/tests/pipelines/shap_e/test_shap_e.py
@@ -21,7 +21,13 @@

 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
-from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    load_numpy,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)

 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

@@ -222,19 +228,19 @@ def test_sequential_cpu_offload_forward_pass(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ShapEPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_shap_e(self):
         expected_image = load_numpy(
diff --git a/tests/pipelines/shap_e/test_shap_e_img2img.py b/tests/pipelines/shap_e/test_shap_e_img2img.py
index 72eee3e35eb1..ed0a4d47b626 100644
--- a/tests/pipelines/shap_e/test_shap_e_img2img.py
+++ b/tests/pipelines/shap_e/test_shap_e_img2img.py
@@ -23,11 +23,12 @@
 from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
 from diffusers.pipelines.shap_e import ShapERenderer
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )

@@ -250,19 +251,19 @@ def test_sequential_cpu_offload_forward_pass(self):


 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)

     def test_shap_e_img2img(self):
         input_image = load_image(
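Note: every hunk above applies the same device-agnostic pattern: the CUDA-only require_torch_gpu decorator becomes require_torch_accelerator, and direct torch.cuda.empty_cache() calls become backend_empty_cache(torch_device), so the nightly suites can run on non-CUDA accelerators as well. The sketch below illustrates the dispatch idea behind such a helper; it is a hypothetical minimal version assuming each backend exposes empty_cache() on its torch device module, not the actual diffusers implementation.

    # Hypothetical sketch of a device-agnostic cache-clearing helper.
    # Assumes each accelerator backend exposes `empty_cache()` on its
    # torch device module; the real diffusers helper may differ.
    import torch

    def backend_empty_cache(device: str) -> None:
        """Release cached allocator memory on whichever accelerator `device` names."""
        if device == "cuda":
            torch.cuda.empty_cache()
        elif device == "xpu" and hasattr(torch, "xpu"):
            torch.xpu.empty_cache()
        elif device == "mps" and hasattr(torch, "mps"):
            torch.mps.empty_cache()
        # "cpu" has no allocator cache, so there is nothing to clear

Centralizing the dispatch this way keeps the individual test files free of per-backend branching: each setUp/tearDown calls one helper with torch_device, and support for a new accelerator only has to be added in one place.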