Make minimum_inference_memory() depend on --reserve-vram
comfyanonymous committed Sep 1, 2024
1 parent baa6b4d commit b643eae
Showing 1 changed file with 5 additions and 5 deletions.
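For context, the flag this change keys off is ComfyUI's --reserve-vram launch option; judging by the args.reserve_vram * 1024 * 1024 * 1024 conversion in the diff below, its value is given in gigabytes. A hypothetical invocation reserving 2 GB:

    python main.py --reserve-vram 2.0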
comfy/model_management.py

@@ -370,12 +370,9 @@ def offloaded_memory(loaded_models, device):
             offloaded_mem += m.model_offloaded_memory()
     return offloaded_mem
 
-def minimum_inference_memory():
-    return (1024 * 1024 * 1024) * 1.2
-
-EXTRA_RESERVED_VRAM = 200 * 1024 * 1024
+EXTRA_RESERVED_VRAM = 400 * 1024 * 1024
 if any(platform.win32_ver()):
-    EXTRA_RESERVED_VRAM = 500 * 1024 * 1024 #Windows is higher because of the shared vram issue
+    EXTRA_RESERVED_VRAM = 600 * 1024 * 1024 #Windows is higher because of the shared vram issue
 
 if args.reserve_vram is not None:
     EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024
@@ -384,6 +381,9 @@ def minimum_inference_memory():
 def extra_reserved_memory():
     return EXTRA_RESERVED_VRAM
 
+def minimum_inference_memory():
+    return (1024 * 1024 * 1024) * 0.8 + extra_reserved_memory()
+
 def unload_model_clones(model, unload_weights_only=True, force_unload=True):
     to_unload = []
     for i in range(len(current_loaded_models)):
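To see what the change does to the numbers, here is a minimal standalone sketch of the new logic (an illustrative re-derivation, not the actual comfy.model_management module; reserve_vram_gb stands in for the parsed --reserve-vram value):

    import platform

    def minimum_inference_memory_bytes(reserve_vram_gb=None):
        # Defaults from this commit: 400 MiB reserved, 600 MiB on Windows
        # because of the shared-VRAM issue noted in the diff.
        extra_reserved = 400 * 1024 * 1024
        if any(platform.win32_ver()):
            extra_reserved = 600 * 1024 * 1024
        # --reserve-vram overrides the default outright (value in GB).
        if reserve_vram_gb is not None:
            extra_reserved = int(reserve_vram_gb * 1024 * 1024 * 1024)
        # The minimum is now 0.8 GiB plus whatever is reserved, so raising
        # --reserve-vram also raises the inference floor.
        return int(0.8 * 1024 * 1024 * 1024) + extra_reserved

    print(minimum_inference_memory_bytes() // 2**20, "MiB")    # 1219 MiB on non-Windows
    print(minimum_inference_memory_bytes(2.0) // 2**20, "MiB") # 2867 MiB with --reserve-vram 2.0

The net effect on default setups is roughly neutral (0.8 GiB + 400 MiB ≈ 1219 MiB versus the old flat 1.2 GiB ≈ 1229 MiB), but any extra reservation requested via --reserve-vram now also raises the minimum kept free during inference instead of leaving that floor fixed.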
