Add a /free route to unload models or free all memory.

A POST request to /free with {"unload_models": true} will unload models from VRAM. A POST request to /free with {"free_memory": true} will unload models and free all cached data from the last run workflow.
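For reference, a minimal client-side sketch of calling the new route, using only the Python standard library. The host and port are assumptions (127.0.0.1:8188 is ComfyUI's default listen address); the route only needs a JSON POST body.

    import json
    import urllib.request

    def post_free(payload, host="127.0.0.1", port=8188):
        # POST a JSON body to the /free route; host/port are assumed defaults.
        req = urllib.request.Request(
            "http://{}:{}/free".format(host, port),
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        with urllib.request.urlopen(req) as resp:
            return resp.status

    post_free({"unload_models": True})  # unload models from VRAM
    post_free({"free_memory": True})    # also free cached data from the last run workflow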
 main.py | 15 ++++++++++++++-
--- a/main.py
+++ b/main.py
@@ -97,7 +97,7 @@ def prompt_worker(q, server):
     gc_collect_interval = 10.0
 
     while True:
-        timeout = None
+        timeout = 1000.0
         if need_gc:
             timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)
 
@@ -118,6 +118,19 @@ def prompt_worker(q, server):
             execution_time = current_time - execution_start_time
             print("Prompt executed in {:.2f} seconds".format(execution_time))
 
+        flags = q.get_flags()
+        free_memory = flags.get("free_memory", False)
+
+        if flags.get("unload_models", free_memory):
+            comfy.model_management.unload_all_models()
+            need_gc = True
+            last_gc_collect = 0
+
+        if free_memory:
+            e.reset()
+            need_gc = True
+            last_gc_collect = 0
+
         if need_gc:
             current_time = time.perf_counter()
             if (current_time - last_gc_collect) > gc_collect_interval:
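The worker picks up these requests through the queue's flag mechanism: the server thread presumably records the flags from the POST body, and the loop above reads them once per iteration via q.get_flags(). Only get_flags() appears in the diff; the sketch below is an assumed illustration of that hand-off (the set_flag() helper and the locking are hypothetical, not ComfyUI's actual implementation).

    import threading

    class FlagQueue:
        def __init__(self):
            self._lock = threading.Lock()
            self._flags = {}

        def set_flag(self, name, value):
            # Hypothetical: called from the server thread when /free receives a request.
            with self._lock:
                self._flags[name] = value

        def get_flags(self, reset=True):
            # Called from the worker loop; returns and (by default) clears
            # the pending flags so each request is acted on exactly once.
            with self._lock:
                flags = dict(self._flags)
                if reset:
                    self._flags.clear()
                return flags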