From ba3b2724e4ad13375293653978f79fc1c34d1945 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 2 Nov 2023 11:47:56 +0000
Subject: [PATCH 1/2] fix more

---
 tests/pipelines/test_pipelines_common.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 353add3b4d37..3d1333424f51 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -742,14 +742,8 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")
 
-        self.assertTrue(
-            all(
-                v.device == "cpu"
-                for k, v in pipe.components.values()
-                if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
-            ),
-            "CPU offloading should leave all pipeline components on the CPU after inference",
-        )
+        offloaded_modules = [v for k,v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload]
+        self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}"
 
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),

From 06ee4edc6cb88b3b0a99d96bbf7eaf35ef3e65be Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 2 Nov 2023 11:48:54 +0000
Subject: [PATCH 2/2] fix more

---
 tests/pipelines/test_pipelines_common.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 3d1333424f51..3ad1c4f50f1d 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -742,8 +742,14 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")
 
-        offloaded_modules = [v for k,v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload]
-        self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}"
+        offloaded_modules = [
+            v
+            for k, v in pipe.components.items()
+            if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
+        ]
+        self.assertTrue(
+            all(v.device.type == "cpu" for v in offloaded_modules)
+        ), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}"
 
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
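
Note on the assertion introduced by these commits: the f-string sits after the
closing parenthesis of self.assertTrue(...), so the statement evaluates to a
discarded tuple and the string is never attached to the assertion as a failure
message. A minimal sketch of the same check with the string passed as
assertTrue's msg argument (it assumes the surrounding test method's pipe object
and the module's existing torch import, and is not part of the commits above):

    # Collect every component that should have been offloaded back to the CPU.
    offloaded_modules = [
        v
        for k, v in pipe.components.items()
        if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
    ]
    # Pass the diagnostic string as the msg argument so unittest prints it on failure.
    self.assertTrue(
        all(v.device.type == "cpu" for v in offloaded_modules),
        f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}",
    )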