Commit

Make --always-offload-from-vram actually work properly, fixes lllyasv…
infinity0 committed Jul 10, 2024
1 parent 5a71495 commit a388003
Showing 1 changed file with 2 additions and 2 deletions.
ldm_patched/modules/model_management.py (2 additions, 2 deletions)
@@ -369,12 +369,12 @@ def free_memory(memory_required, device, keep_loaded=[]):
                 unloaded_model = True
 
     if unloaded_model:
-        soft_empty_cache()
+        soft_empty_cache(force=ALWAYS_VRAM_OFFLOAD)
     else:
         if vram_state != VRAMState.HIGH_VRAM:
             mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True)
             if mem_free_torch > mem_free_total * 0.25:
-                soft_empty_cache()
+                soft_empty_cache(force=ALWAYS_VRAM_OFFLOAD)
 
 def load_models_gpu(models, memory_required=0):
     global vram_state
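For reference, here is a minimal sketch of the behaviour this change relies on. It is an assumption modelled on the upstream ComfyUI-style helper, not the exact code in this tree: soft_empty_cache only releases the CUDA allocator cache when it is forced (or otherwise considers it safe), so free_memory has to forward ALWAYS_VRAM_OFFLOAD for --always-offload-from-vram to take effect here.

import torch

# Stand-in for the module-level constant in model_management.py; it is set
# when the --always-offload-from-vram command-line flag is given (assumption
# for this sketch).
ALWAYS_VRAM_OFFLOAD = True

def soft_empty_cache(force=False):
    # Sketch only: emptying the CUDA cache is relatively costly, so it is
    # gated behind the force flag. The real helper also covers other
    # backends and hardware-specific fast paths; this shows just the part
    # the fix touches.
    if force and torch.cuda.is_available():
        torch.cuda.empty_cache()   # return cached blocks to the driver
        torch.cuda.ipc_collect()   # release inter-process CUDA handles

With the two call sites above changed to soft_empty_cache(force=ALWAYS_VRAM_OFFLOAD), the forced path is taken whenever the flag is active and free_memory decides the cache should be cleared.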
