Skip to content

Commit

Permalink
Revert "Disable cuda malloc by default."
Browse files (browse the repository at this point in the history)
This reverts commit 50bf66e.
  • Branch information:
comfyanonymous committed Aug 14, 2024
1 parent 33fb282 commit f1d6cef
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 8 deletions.
4 changes: 2 additions & 2 deletions comfy/cli_args.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@ def __call__(self, parser, namespace, values, option_string=None):
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
cm_group = parser.add_mutually_exclusive_group()
cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync.")
cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync (The current default).")
cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")


fp_group = parser.add_mutually_exclusive_group()
Expand Down
8 changes: 2 additions & 6 deletions cuda_malloc.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import importlib.util
from comfy.cli_args import args
import subprocess
import logging

#Can't use pytorch to get the GPU names because the cuda malloc has to be set before the first import.
def get_gpu_names():
Expand Down Expand Up @@ -64,7 +63,7 @@ def cuda_malloc_supported():
return True


if args.cuda_malloc:
if not args.cuda_malloc:
try:
version = ""
torch_spec = importlib.util.find_spec("torch")
Expand All @@ -75,11 +74,8 @@ def cuda_malloc_supported():
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
version = module.__version__
supported = False
if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
supported = cuda_malloc_supported()
if not supported:
logging.warning("WARNING: cuda malloc enabled but not supported.")
args.cuda_malloc = cuda_malloc_supported()
except:
pass

Expand Down

0 comments on commit f1d6cef

Please sign in to comment.