Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-08-02 15:04:50 +08:00
Disable cuda malloc by default.
@@ -2,6 +2,7 @@ import os
 import importlib.util
 from comfy.cli_args import args
 import subprocess
+import logging
 
 #Can't use pytorch to get the GPU names because the cuda malloc has to be set before the first import.
 def get_gpu_names():
@@ -63,7 +64,7 @@ def cuda_malloc_supported():
     return True
 
 
-if not args.cuda_malloc:
+if args.cuda_malloc:
     try:
         version = ""
         torch_spec = importlib.util.find_spec("torch")
@@ -74,8 +75,11 @@ if not args.cuda_malloc:
                 module = importlib.util.module_from_spec(spec)
                 spec.loader.exec_module(module)
                 version = module.__version__
+        supported = False
         if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
-            args.cuda_malloc = cuda_malloc_supported()
+            supported = cuda_malloc_supported()
+        if not supported:
+            logging.warning("WARNING: cuda malloc enabled but not supported.")
     except:
         pass
 
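The effect of the hunks above: cuda malloc is no longer enabled automatically whenever a torch 2.x install is detected; it is only considered when --cuda-malloc is passed explicitly, and a warning is logged if it is requested on a setup where cuda_malloc_supported() says no.

For context, the pre-import version check that the last hunk touches can be reproduced on its own. The sketch below rearranges the same importlib.util calls into a helper; the names detect_torch_version and torch_version_import are illustrative, not taken from the file. It reads torch/version.py straight from disk so the version string is known before the first import of torch, which, per the comment in the first hunk, is when the cuda malloc setting has to already be in place.

import importlib.util
import os

def detect_torch_version():
    """Read torch's version string without importing torch itself."""
    torch_spec = importlib.util.find_spec("torch")
    if torch_spec is None:
        return ""  # torch is not installed
    for folder in torch_spec.submodule_search_locations:
        ver_file = os.path.join(folder, "version.py")
        if os.path.isfile(ver_file):
            # Execute version.py as a standalone module; torch itself is never imported.
            spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return module.__version__
    return ""

if __name__ == "__main__":
    print(detect_torch_version())  # e.g. "2.1.2+cu121"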
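The diff stops short of showing how the allocator is actually switched. In PyTorch generally, the cudaMallocAsync backend is selected through the PYTORCH_CUDA_ALLOC_CONF environment variable, and it only takes effect if set before torch is first imported, which is presumably why this module avoids importing torch at all. A minimal, generic sketch of that mechanism, not code from ComfyUI:

import os

# Must run before the first `import torch`; once torch has been imported and
# CUDA is initialized, the allocator backend for this process is fixed.
current = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "")
if "backend" not in current:
    parts = [p for p in (current, "backend:cudaMallocAsync") if p]
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = ",".join(parts)

import torch  # CUDA allocations now go through cudaMallocAsync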