From 1177d376dfa586b3c3b8305c68b9bfbd0824ff6a Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Wed, 13 Dec 2023 13:24:28 +0000
Subject: [PATCH] update

---
 tests/lora/test_lora_layers_old_backend.py | 19 +++++++++++++++++--
 tests/lora/test_lora_layers_peft.py        |  4 ++--
 2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index 19505a1d906d..728d75dd4c2b 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -343,6 +343,21 @@ def test_stable_diffusion_attn_processors(self):
         image = sd_pipe(**inputs).images
         assert image.shape == (1, 64, 64, 3)
 
+    @unittest.skipIf(not torch.cuda.is_available() or not is_xformers_available(), reason="xformers requires cuda")
+    def test_stable_diffusion_set_xformers_attn_processors(self):
+        # disable_full_determinism()
+        device = "cuda"  # ensure determinism for the device-dependent torch.Generator
+        components, _ = self.get_dummy_components()
+        sd_pipe = StableDiffusionPipeline(**components)
+        sd_pipe = sd_pipe.to(device)
+        sd_pipe.set_progress_bar_config(disable=None)
+
+        _, _, inputs = self.get_dummy_inputs()
+
+        # run normal sd pipe
+        image = sd_pipe(**inputs).images
+        assert image.shape == (1, 64, 64, 3)
+
         # run lora xformers attention
         attn_processors, _ = create_unet_lora_layers(sd_pipe.unet)
         attn_processors = {
@@ -607,7 +622,7 @@ def test_unload_lora_sd(self):
             orig_image_slice, orig_image_slice_two, atol=1e-3
         ), "Unloading LoRA parameters should lead to results similar to what was obtained with the pipeline without any LoRA parameters."
 
-    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
+    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), "This test is supposed to run on GPU")
     def test_lora_unet_attn_processors_with_xformers(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
             self.create_lora_weight_file(tmpdirname)
@@ -644,7 +659,7 @@ def test_lora_unet_attn_processors_with_xformers(self):
                 if isinstance(module, Attention):
                     self.assertIsInstance(module.processor, XFormersAttnProcessor)
 
-    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
+    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), "This test is supposed to run on GPU")
     def test_lora_save_load_with_xformers(self):
         pipeline_components, lora_components = self.get_dummy_components()
         sd_pipe = StableDiffusionPipeline(**pipeline_components)
diff --git a/tests/lora/test_lora_layers_peft.py b/tests/lora/test_lora_layers_peft.py
index df496a76563a..a0eb9d23bdc8 100644
--- a/tests/lora/test_lora_layers_peft.py
+++ b/tests/lora/test_lora_layers_peft.py
@@ -1864,14 +1864,14 @@ def test_sdxl_1_0_lora_unfusion(self):
         pipe.enable_model_cpu_offload()
 
         images = pipe(
-            "masterpiece, best quality, mountain", output_type="pil", generator=generator, num_inference_steps=3
+            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3
         ).images
         images_with_fusion = images.flatten()
 
         pipe.unfuse_lora()
         generator = torch.Generator("cpu").manual_seed(0)
         images = pipe(
-            "masterpiece, best quality, mountain", output_type="pil", generator=generator, num_inference_steps=3
+            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3
         ).images
         images_without_fusion = images.flatten()
 