
Add a --reserve-vram argument if you don't want ComfyUI to use all of your VRAM.

--reserve-vram 1.0, for example, will make ComfyUI try to keep 1GB of VRAM free.

This can also be useful if workflows are failing because of OOM errors, but in
that case please report whether --reserve-vram improves your situation.
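
The flag itself is registered in the second changed file, which is not shown in the hunks below. A minimal sketch of what such an argparse registration could look like, assuming an optional float argument (consistent with the args.reserve_vram is not None check and the GB-to-bytes conversion in the diff); the real ComfyUI definition may differ:

import argparse

# Hypothetical sketch only; the actual flag definition lives in ComfyUI's CLI
# argument module and is not part of the hunk shown here.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--reserve-vram",
    type=float,
    default=None,  # None -> fall back to the built-in 200MB (400MB on Windows) reservation
    metavar="GB",
    help="Amount of VRAM in GB to keep free for other applications.",
)

args = parser.parse_args(["--reserve-vram", "1.0"])
print(args.reserve_vram)  # 1.0 -> 1GB reserved (1.0 * 1024 * 1024 * 1024 bytes)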
comfyanonymous
2024-08-19 17:16:18 -04:00
parent 4d341b78e8
commit 045377ea89
2 changed files with 16 additions and 2 deletions


@@ -367,6 +367,17 @@ def offloaded_memory(loaded_models, device):
 def minimum_inference_memory():
     return (1024 * 1024 * 1024) * 1.2
 
+EXTRA_RESERVED_VRAM = 200 * 1024 * 1024
+if any(platform.win32_ver()):
+    EXTRA_RESERVED_VRAM = 400 * 1024 * 1024 #Windows is higher because of the shared vram issue
+
+if args.reserve_vram is not None:
+    EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024
+    logging.debug("Reserving {}MB vram for other applications.".format(EXTRA_RESERVED_VRAM / (1024 * 1024)))
+
+def extra_reserved_memory():
+    return EXTRA_RESERVED_VRAM
+
 def unload_model_clones(model, unload_weights_only=True, force_unload=True):
     to_unload = []
     for i in range(len(current_loaded_models)):
@@ -436,11 +447,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
     global vram_state
 
     inference_memory = minimum_inference_memory()
-    extra_mem = max(inference_memory, memory_required + 300 * 1024 * 1024)
+    extra_mem = max(inference_memory, memory_required + extra_reserved_memory())
     if minimum_memory_required is None:
         minimum_memory_required = extra_mem
     else:
-        minimum_memory_required = max(inference_memory, minimum_memory_required + 300 * 1024 * 1024)
+        minimum_memory_required = max(inference_memory, minimum_memory_required + extra_reserved_memory())
 
     models = set(models)
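
Taken together, the hunks replace the old fixed 300MB padding in load_models_gpu with the platform-aware (and now user-overridable) extra_reserved_memory() value. A standalone sketch of that budgeting arithmetic, using hypothetical helper names (extra_reserved_vram, memory_budget) and plain parameters in place of the module-level args and platform checks:

GiB = 1024 * 1024 * 1024

def extra_reserved_vram(reserve_vram_gb=None, windows=False):
    # Defaults mirror the diff: 200MB, raised to 400MB on Windows because of
    # the shared vram issue; --reserve-vram overrides both.
    reserved = 400 * 1024 * 1024 if windows else 200 * 1024 * 1024
    if reserve_vram_gb is not None:
        reserved = reserve_vram_gb * GiB
    return reserved

def memory_budget(memory_required, reserve_vram_gb=None, windows=False):
    # Mirrors load_models_gpu: never budget below the 1.2GB inference floor.
    inference_memory = 1.2 * GiB
    return max(inference_memory, memory_required + extra_reserved_vram(reserve_vram_gb, windows))

# With --reserve-vram 1.0 and a model needing 6GiB of VRAM, the budget is 7GiB:
print(memory_budget(6 * GiB, reserve_vram_gb=1.0) / GiB)  # 7.0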