Skip to content

Commit

Permalink
--disable-smart-memory now unloads everything like it did originally.
Browse files Browse the repository at this point in the history
  • Loading branch information
comfyanonymous committed Dec 23, 2023
1 parent 36a7953 commit a252963
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 0 deletions.
4 changes: 4 additions & 0 deletions comfy/model_management.py
Original file line number Diff line number Diff line change
Expand Up @@ -754,6 +754,10 @@ def soft_empty_cache(force=False):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()

def unload_all_models():
    """Evict every model from the torch device.

    Requests an absurdly large amount of free memory (1e30 bytes) so that
    ``free_memory`` is forced to unload all currently loaded models.
    """
    device = get_torch_device()
    free_memory(1e30, device)


def resolve_lowvram_weight(weight, model, key): #TODO: remove
    """Deprecated no-op kept for compatibility: returns *weight* unchanged.

    The *model* and *key* arguments are ignored; callers should stop using
    this function (see the TODO above).
    """
    return weight

Expand Down
2 changes: 2 additions & 0 deletions execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -382,6 +382,8 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
for x in executed:
self.old_prompt[x] = copy.deepcopy(prompt[x])
self.server.last_node_id = None
if comfy.model_management.DISABLE_SMART_MEMORY:
comfy.model_management.unload_all_models()



Expand Down

0 comments on commit a252963

Please sign in to comment.