Mirror of https://github.com/comfyanonymous/ComfyUI.git
Add a /free route to unload models or free all memory.
A POST request to /free with {"unload_models": true} will unload models from VRAM. A POST request to /free with {"free_memory": true} will unload models and free all data cached from the most recently run workflow.
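For illustration, here is what the two requests look like from a client, using only the Python standard library; the host and port (127.0.0.1:8188, ComfyUI's usual default) are assumptions, not part of this commit:

    import json
    import urllib.request

    def post_free(payload, host="127.0.0.1", port=8188):
        # POST a JSON body to the new /free route. The default port is
        # ComfyUI's usual listen port and is an assumption here.
        req = urllib.request.Request(
            f"http://{host}:{port}/free",
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        with urllib.request.urlopen(req) as resp:
            return resp.status

    post_free({"unload_models": True})  # unload models from VRAM
    post_free({"free_memory": True})    # also free cached data from the last run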
execution.py (20 changed lines)
@@ -268,11 +268,14 @@ def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item
 
 class PromptExecutor:
     def __init__(self, server):
+        self.server = server
+        self.reset()
+
+    def reset(self):
         self.outputs = {}
         self.object_storage = {}
         self.outputs_ui = {}
         self.old_prompt = {}
-        self.server = server
 
     def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex):
         node_id = error["node_id"]
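This refactor is what gives the new route something to call: state initialization moves out of __init__ into reset(), so cached workflow data can be dropped on demand without rebuilding the executor. A minimal standalone illustration (the class is reduced to the fields visible in the hunk, and the server argument is stubbed):

    class _Executor:
        # Reduced to the fields shown in the hunk above.
        def __init__(self, server):
            self.server = server
            self.reset()

        def reset(self):
            # Forget everything cached from the previous run; the next
            # execution recomputes every node from scratch.
            self.outputs = {}
            self.object_storage = {}
            self.outputs_ui = {}
            self.old_prompt = {}

    e = _Executor(server=None)
    e.outputs["4"] = "cached node output"
    e.reset()
    assert e.outputs == {}  # cache cleared, executor still usable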
@@ -706,6 +709,7 @@ class PromptQueue:
         self.queue = []
         self.currently_running = {}
         self.history = {}
+        self.flags = {}
         server.prompt_queue = self
 
     def put(self, item):
@@ -792,3 +796,17 @@ class PromptQueue:
     def delete_history_item(self, id_to_delete):
         with self.mutex:
             self.history.pop(id_to_delete, None)
+
+    def set_flag(self, name, data):
+        with self.mutex:
+            self.flags[name] = data
+            self.not_empty.notify()
+
+    def get_flags(self, reset=True):
+        with self.mutex:
+            if reset:
+                ret = self.flags
+                self.flags = {}
+                return ret
+            else:
+                return self.flags.copy()
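The route handler and the loop that reacts to these flags live in server.py and main.py, which are not part of this excerpt. A hedged sketch of how the pieces plausibly fit together; every name below is an assumption:

    def unload_all_models():
        # Stand-in for ComfyUI's model-management unload hook (assumed).
        print("unloading models from VRAM")

    def handle_free_request(json_data, prompt_queue):
        # Producer side: an HTTP handler translates the POST body into flags.
        if json_data.get("unload_models", False):
            prompt_queue.set_flag("unload_models", True)
        if json_data.get("free_memory", False):
            prompt_queue.set_flag("free_memory", True)

    def process_flags(prompt_queue, executor):
        # Consumer side: between jobs, atomically take-and-clear the flags.
        flags = prompt_queue.get_flags()
        free_memory = flags.get("free_memory", False)
        # Per the commit message, free_memory implies unloading models too.
        if flags.get("unload_models", free_memory):
            unload_all_models()
        if free_memory:
            executor.reset()  # drop all cached data from the last run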