Auto disable cuda malloc on some GPUs on windows.
main.py: 25 changed lines
@@ -61,30 +61,7 @@ if __name__ == "__main__":
         os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
         print("Set cuda device to:", args.cuda_device)
 
-    if not args.cuda_malloc:
-        try: #if there's a better way to check the torch version without importing it let me know
-            version = ""
-            torch_spec = importlib.util.find_spec("torch")
-            for folder in torch_spec.submodule_search_locations:
-                ver_file = os.path.join(folder, "version.py")
-                if os.path.isfile(ver_file):
-                    spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
-                    module = importlib.util.module_from_spec(spec)
-                    spec.loader.exec_module(module)
-                    version = module.__version__
-            if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
-                args.cuda_malloc = True
-        except:
-            pass
-
-    if args.cuda_malloc and not args.disable_cuda_malloc:
-        env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
-        if env_var is None:
-            env_var = "backend:cudaMallocAsync"
-        else:
-            env_var += ",backend:cudaMallocAsync"
-
-        os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
+    import cuda_malloc
 
 import comfy.utils
 import yaml
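The 24 removed lines are the inline torch-version probe and the PYTORCH_CUDA_ALLOC_CONF setup; after this commit main.py only does `import cuda_malloc`, which lets the decision also consider the installed GPU, per the commit title. The new module itself is not shown in this diff, so what follows is a minimal sketch, assuming a module that reuses the same version probe, adds a hypothetical Windows GPU blocklist, and sets the allocator backend before torch is first imported. PROBLEM_GPU_KEYWORDS, windows_gpu_names(), and the use of wmic are illustrative assumptions, not the actual cuda_malloc.py.

# Sketch only: one way a cuda_malloc-style module could auto-enable
# cudaMallocAsync while skipping problematic GPUs on Windows.
import importlib.util
import os
import platform
import subprocess

# Hypothetical blocklist for illustration; the real module's list may differ.
PROBLEM_GPU_KEYWORDS = {"GeForce GTX 9", "Quadro K"}

def torch_version_without_import():
    """Read torch's version.py from disk so torch itself is not imported yet
    (the allocator env var must be set before the first torch import)."""
    spec = importlib.util.find_spec("torch")
    if spec is None:
        return ""
    for folder in spec.submodule_search_locations:
        ver_file = os.path.join(folder, "version.py")
        if os.path.isfile(ver_file):
            ver_spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
            module = importlib.util.module_from_spec(ver_spec)
            ver_spec.loader.exec_module(module)
            return module.__version__
    return ""

def windows_gpu_names():
    """Query GPU names on Windows (here via wmic); empty set elsewhere or on failure."""
    if platform.system() != "Windows":
        return set()
    try:
        out = subprocess.check_output(
            ["wmic", "path", "win32_VideoController", "get", "name"], text=True)
        return {line.strip() for line in out.splitlines()[1:] if line.strip()}
    except Exception:
        return set()

def should_enable_cuda_malloc():
    version = torch_version_without_import()
    if not version or int(version[0]) < 2:  # default on only for torch 2.0 and up
        return False
    for name in windows_gpu_names():
        if any(bad in name for bad in PROBLEM_GPU_KEYWORDS):
            return False  # auto-disable on flagged GPUs
    return True

if should_enable_cuda_malloc():
    env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF')
    env_var = "backend:cudaMallocAsync" if env_var is None else env_var + ",backend:cudaMallocAsync"
    os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var

The constraint the sketch preserves is ordering: PYTORCH_CUDA_ALLOC_CONF only takes effect if it is set before the first import of torch, which is why the version is read from torch's version.py on disk rather than by importing the package.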