Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-08-02 23:14:49 +08:00)

Commit: Add ruff rule for empty line with trailing whitespace.
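Every hunk in this commit has the same shape: a blank line containing stray spaces is replaced by a truly empty one. The rule being enabled is presumably ruff's W293 ("whitespace on blank line"). As a minimal illustration of what it flags, here is a small checker of our own, not ruff's implementation:

```python
def blank_lines_with_whitespace(source: str) -> list[int]:
    """Return 1-based numbers of lines that look blank but contain whitespace,
    i.e. what ruff's W293 flags (illustrative sketch, not ruff's code)."""
    return [
        lineno
        for lineno, line in enumerate(source.splitlines(), start=1)
        if line != "" and line.strip() == ""
    ]

print(blank_lines_with_whitespace("a = 1\n   \nb = 2\n"))  # -> [2]
```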
@@ -83,7 +83,7 @@ def set_model_options_pre_cfg_function(model_options, pre_cfg_function, disable_
 
 def create_model_options_clone(orig_model_options: dict):
     return comfy.patcher_extension.copy_nested_dicts(orig_model_options)
-
+
 def create_hook_patches_clone(orig_hook_patches):
     new_hook_patches = {}
     for hook_ref in orig_hook_patches:
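comfy.patcher_extension.copy_nested_dicts is not part of this diff. Assuming it does what its name and call site suggest, the clone semantics would be: dict structure is copied recursively while leaf values (tensors, callables) stay shared, so the clone's nesting can be mutated without touching the original. A sketch of that assumed behavior:

```python
def copy_nested_dicts(d: dict) -> dict:
    """Recursively copy the dict skeleton; non-dict leaves stay shared.
    Sketch of the assumed behavior, not ComfyUI's actual implementation."""
    return {k: copy_nested_dicts(v) if isinstance(v, dict) else v for k, v in d.items()}

orig = {"transformer_options": {"patches": {}}, "disable_cfg1_optimization": False}
clone = copy_nested_dicts(orig)
clone["transformer_options"]["patches"]["x"] = 1
assert "x" not in orig["transformer_options"]["patches"]  # nested dicts are independent
```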
@@ -141,7 +141,7 @@ class AutoPatcherEjector:
         self.was_injected = False
         self.prev_skip_injection = False
         self.skip_and_inject_on_exit_only = skip_and_inject_on_exit_only
-
+
     def __enter__(self):
         self.was_injected = False
         self.prev_skip_injection = self.model.skip_injection
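__enter__ here snapshots model.skip_injection so that __exit__ (outside this hunk) can restore it. A self-contained sketch of that save/restore discipline, with illustrative names of our own rather than ComfyUI's:

```python
class SkipInjectionGuard:
    """Illustrative save/restore context manager in the style of AutoPatcherEjector."""
    def __init__(self, model):
        self.model = model
        self.prev_skip_injection = False

    def __enter__(self):
        self.prev_skip_injection = self.model.skip_injection  # snapshot current flag
        self.model.skip_injection = True
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.model.skip_injection = self.prev_skip_injection  # restore on any exit path
```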
@@ -164,7 +164,7 @@ class MemoryCounter:
         self.value = initial
         self.minimum = minimum
         # TODO: add a safe limit besides 0
-
+
     def use(self, weight: torch.Tensor):
         weight_size = weight.nelement() * weight.element_size()
         if self.is_useable(weight_size):
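MemoryCounter appears to track a shrinking byte budget; weight.nelement() * weight.element_size() is the tensor's size in bytes. A runnable sketch of that idea, with is_useable inferred from its call site here (class and method bodies are ours):

```python
import torch

class ByteBudget:
    """Sketch of the MemoryCounter pattern: a budget that tensor 'uses' draw down."""
    def __init__(self, initial: int, minimum: int = 0):
        self.value = initial
        self.minimum = minimum

    def is_useable(self, used: int) -> bool:
        return self.value - used >= self.minimum

    def use(self, weight: torch.Tensor) -> bool:
        weight_size = weight.nelement() * weight.element_size()  # bytes (fp16 = 2/elem)
        if self.is_useable(weight_size):
            self.value -= weight_size
            return True
        return False

budget = ByteBudget(initial=8 * 1024**2)
print(budget.use(torch.zeros(1024, 1024, dtype=torch.float16)))  # 2 MiB fits -> True
```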
@@ -711,7 +711,7 @@ class ModelPatcher:
                 else:
                     comfy.utils.set_attr_param(self.model, key, bk.weight)
                 self.backup.pop(key)
-
+
             weight_key = "{}.weight".format(n)
             bias_key = "{}.bias".format(n)
             if move_weight:
@@ -789,7 +789,7 @@ class ModelPatcher:
     def add_callback_with_key(self, call_type: str, key: str, callback: Callable):
         c = self.callbacks.setdefault(call_type, {}).setdefault(key, [])
         c.append(callback)
-
+
     def remove_callbacks_with_key(self, call_type: str, key: str):
         c = self.callbacks.get(call_type, {})
         if key in c:
@@ -797,7 +797,7 @@ class ModelPatcher:
 
     def get_callbacks(self, call_type: str, key: str):
         return self.callbacks.get(call_type, {}).get(key, [])
-
+
     def get_all_callbacks(self, call_type: str):
         c_list = []
         for c in self.callbacks.get(call_type, {}).values():
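The callback hunks all rely on one layout: self.callbacks maps a call type to a dict of caller-chosen string keys, each holding a list of callables; the wrapper methods a few hunks below mirror it exactly. A standalone sketch of the pattern:

```python
from typing import Callable

class KeyedRegistry:
    """Two-level registry: call_type -> key -> [callables], as in ModelPatcher."""
    def __init__(self):
        self.callbacks: dict[str, dict[str, list[Callable]]] = {}

    def add(self, call_type: str, key: str, callback: Callable) -> None:
        self.callbacks.setdefault(call_type, {}).setdefault(key, []).append(callback)

    def remove(self, call_type: str, key: str) -> None:
        self.callbacks.get(call_type, {}).pop(key, None)  # no-op if absent

    def get(self, call_type: str, key: str) -> list[Callable]:
        return self.callbacks.get(call_type, {}).get(key, [])

    def get_all(self, call_type: str) -> list[Callable]:
        out: list[Callable] = []
        for cbs in self.callbacks.get(call_type, {}).values():
            out.extend(cbs)
        return out
```

The per-caller string key is what makes remove_callbacks_with_key safe: an extension can later drop exactly what it registered without disturbing other registrants.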
@@ -810,7 +810,7 @@ class ModelPatcher:
     def add_wrapper_with_key(self, wrapper_type: str, key: str, wrapper: Callable):
         w = self.wrappers.setdefault(wrapper_type, {}).setdefault(key, [])
         w.append(wrapper)
-
+
     def remove_wrappers_with_key(self, wrapper_type: str, key: str):
         w = self.wrappers.get(wrapper_type, {})
         if key in w:
@@ -831,7 +831,7 @@ class ModelPatcher:
     def remove_attachments(self, key: str):
         if key in self.attachments:
             self.attachments.pop(key)
-
+
     def get_attachment(self, key: str):
         return self.attachments.get(key, None)
 
@@ -851,7 +851,7 @@ class ModelPatcher:
 
     def get_additional_models_with_key(self, key: str):
         return self.additional_models.get(key, [])
-
+
     def get_additional_models(self):
         all_models = []
         for models in self.additional_models.values():
@@ -906,7 +906,7 @@ class ModelPatcher:
         self.model.current_patcher = self
         for callback in self.get_all_callbacks(CallbacksMP.ON_PRE_RUN):
             callback(self)
-
+
     def prepare_state(self, timestep):
         for callback in self.get_all_callbacks(CallbacksMP.ON_PREPARE_STATE):
             callback(self, timestep)
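pre_run invokes every ON_PRE_RUN callback with the patcher itself as argument, right after publishing itself on model.current_patcher. Assuming CallbacksMP is importable from comfy.patcher_extension (as its unqualified use here suggests), a hypothetical registration from extension code might look like:

```python
def register_pre_run_logger(patcher) -> None:
    """Hypothetical usage: `patcher` is assumed to be a ModelPatcher instance."""
    from comfy.patcher_extension import CallbacksMP  # assumed import path

    def on_pre_run(p):
        # called by pre_run() right after p.model.current_patcher is set
        print("pre-run:", type(p.model).__name__)

    patcher.add_callback_with_key(CallbacksMP.ON_PRE_RUN, "example.pre_run_logger", on_pre_run)
```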
@@ -918,7 +918,7 @@ class ModelPatcher:
 
     def set_hook_mode(self, hook_mode: comfy.hooks.EnumHookMode):
         self.hook_mode = hook_mode
-
+
     def prepare_hook_patches_current_keyframe(self, t: torch.Tensor, hook_group: comfy.hooks.HookGroup):
         curr_t = t[0]
         reset_current_hooks = False
@@ -975,7 +975,7 @@ class ModelPatcher:
                     key = k[0]
                     if len(k) > 2:
                         function = k[2]
-
+
                 if key in model_sd:
                     p.add(k)
                     current_patches: list[tuple] = current_hook_patches.get(key, [])
@@ -1063,7 +1063,7 @@ class ModelPatcher:
     def patch_hook_weight_to_device(self, hooks: comfy.hooks.HookGroup, combined_patches: dict, key: str, original_weights: dict, memory_counter: MemoryCounter):
         if key not in combined_patches:
             return
-
+
         weight, set_func, convert_func = get_key_weight(self.model, key)
         weight: torch.Tensor
         if key not in self.hook_backup:
@@ -1098,7 +1098,7 @@ class ModelPatcher:
         del temp_weight
         del out_weight
         del weight
-
+
     def unpatch_hooks(self) -> None:
         with self.use_ejected():
             if len(self.hook_backup) == 0:
@@ -1107,7 +1107,7 @@ class ModelPatcher:
             keys = list(self.hook_backup.keys())
             for k in keys:
                 comfy.utils.copy_to_param(self.model, k, self.hook_backup[k][0].to(device=self.hook_backup[k][1]))
-
+
             self.hook_backup.clear()
             self.current_hooks = None
 
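unpatch_hooks restores every backed-up weight via copy_to_param and then clears the backup map. A self-contained sketch of that backup-once, patch, restore-all discipline in plain torch (function names and the flat backup dict are ours):

```python
import torch

def patch_with_backup(model: torch.nn.Module, key: str, delta: torch.Tensor,
                      backup: dict[str, torch.Tensor]) -> None:
    """Stash the original weight on first touch, then patch it in place."""
    param = model.get_parameter(key)
    if key not in backup:
        backup[key] = param.detach().clone()  # only the first patch records a backup
    with torch.no_grad():
        param += delta.to(device=param.device, dtype=param.dtype)

def unpatch_all(model: torch.nn.Module, backup: dict[str, torch.Tensor]) -> None:
    """Copy every backup back onto the live parameters, then clear the map."""
    with torch.no_grad():
        for key, orig in backup.items():
            model.get_parameter(key).copy_(orig)
    backup.clear()

model = torch.nn.Linear(4, 4)
backup: dict[str, torch.Tensor] = {}
patch_with_backup(model, "weight", torch.full((4, 4), 0.1), backup)
unpatch_all(model, backup)  # weights are back to their original values
```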