From b643eae08b7f0c8eb69b77bd61e31009bfb325b9 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 1 Sep 2024 01:01:54 -0400
Subject: [PATCH] Make minimum_inference_memory() depend on --reserve-vram

---
 comfy/model_management.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 59dce4dac52..f6a76a5724f 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -370,12 +370,9 @@ def offloaded_memory(loaded_models, device):
         offloaded_mem += m.model_offloaded_memory()
     return offloaded_mem
 
-def minimum_inference_memory():
-    return (1024 * 1024 * 1024) * 1.2
-
-EXTRA_RESERVED_VRAM = 200 * 1024 * 1024
+EXTRA_RESERVED_VRAM = 400 * 1024 * 1024
 if any(platform.win32_ver()):
-    EXTRA_RESERVED_VRAM = 500 * 1024 * 1024 #Windows is higher because of the shared vram issue
+    EXTRA_RESERVED_VRAM = 600 * 1024 * 1024 #Windows is higher because of the shared vram issue
 
 if args.reserve_vram is not None:
     EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024
@@ -384,6 +381,9 @@ def minimum_inference_memory():
 def extra_reserved_memory():
     return EXTRA_RESERVED_VRAM
 
+def minimum_inference_memory():
+    return (1024 * 1024 * 1024) * 0.8 + extra_reserved_memory()
+
 def unload_model_clones(model, unload_weights_only=True, force_unload=True):
     to_unload = []
     for i in range(len(current_loaded_models)):
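
Note: as a rough standalone sketch of the reservation arithmetic introduced by this patch (not part of the patch itself; the reserve_vram_gb parameter stands in for the parsed --reserve-vram argument):

import platform

def extra_reserved_vram(reserve_vram_gb=None):
    # Defaults after this patch: 400 MiB, bumped to 600 MiB on Windows
    # because of the shared VRAM issue noted in the source comment.
    reserved = 400 * 1024 * 1024
    if any(platform.win32_ver()):
        reserved = 600 * 1024 * 1024
    # --reserve-vram is interpreted in GiB and replaces the default entirely.
    if reserve_vram_gb is not None:
        reserved = reserve_vram_gb * 1024 * 1024 * 1024
    return reserved

def minimum_inference_memory(reserve_vram_gb=None):
    # New behavior: a fixed 0.8 GiB floor plus the reserved amount, so a larger
    # --reserve-vram also raises the minimum kept free for inference.
    return (1024 * 1024 * 1024) * 0.8 + extra_reserved_vram(reserve_vram_gb)

if __name__ == "__main__":
    gib = 1024 ** 3
    # ~1.19 GiB on Linux/macOS, ~1.39 GiB on Windows with the new defaults.
    print(minimum_inference_memory() / gib)
    # 2.8 GiB when started with --reserve-vram 2.
    print(minimum_inference_memory(2) / gib)

Compared with the previous fixed 1.2 GiB minimum (with EXTRA_RESERVED_VRAM tracked separately), the inference floor now scales with --reserve-vram, so reserving more VRAM also keeps more headroom free during inference.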