mirror of https://github.com/comfyanonymous/ComfyUI.git synced 2025-08-02 23:14:49 +08:00

Compare commits


31 Commits

Author SHA1 Message Date
Jedrzej Kosinski
e5cac06bbe Merge branch 'master' into v3-definition 2025-07-23 16:32:22 -07:00
Jedrzej Kosinski
f672515ba6 Merge pull request #9030 from comfyanonymous/v3-definition-wip
V3 update - Add 'enable_expand' toggle to Schema
2025-07-23 16:31:00 -07:00
comfyanonymous
d3504e1778 Enable pytorch attention by default for gfx1201 on torch 2.8 (#9029) 2025-07-23 19:21:29 -04:00
Jedrzej Kosinski
2e6ed6a10f Added enable_expand toggle on Schema and corresponding enforcement in EXECUTE_NORMALIZED* functions 2025-07-23 16:18:03 -07:00
Jedrzej Kosinski
32c46c044c Merge pull request #9028 from comfyanonymous/v3-definition-wip
V3 refactor+cleanup - Drop 'V3' from names of classes intended to be commonly used, add '_' to some classes
2025-07-23 15:48:06 -07:00
Jedrzej Kosinski
ddb84a3991 Renamed IO_V3 to _IO_V3 2025-07-23 15:37:43 -07:00
comfyanonymous
a86a58c308 Fix xpu function not implemented p2. (#9027) 2025-07-23 18:18:20 -04:00
comfyanonymous
39dda1d40d Fix xpu function not implemented. (#9026) 2025-07-23 18:10:59 -04:00
Jedrzej Kosinski
6adaf6c776 Renamed ComfyType to _ComfyType 2025-07-23 15:09:22 -07:00
Jedrzej Kosinski
d984cee318 Renamed ComfyNodeV3 to ComfyNode, renamed ComfyNodeInternal to _ComfyNodeInternal 2025-07-23 15:05:58 -07:00
Jedrzej Kosinski
b0f73174b2 Renamed SchemaV3 to Schema 2025-07-23 14:55:53 -07:00
Jedrzej Kosinski
a9f5554342 Remove unnecessary **kwargs in io.py 2025-07-23 14:46:56 -07:00
Jedrzej Kosinski
c6dcf7afd9 Merge pull request #9025 from comfyanonymous/v3-definition-wip
V3 update - remove NumberDisplay.color as it does not exist in the frontend at all currently
2025-07-23 14:43:33 -07:00
Jedrzej Kosinski
b561dfe8b2 Removed NumberDisplay.color, as it does not exist in the frontend 2025-07-23 14:38:33 -07:00
Jedrzej Kosinski
ce1d30e9c3 Merge pull request #9019 from bigcat88/v3/nodes/extras-8-files
[V3] next 8 converted files
2025-07-23 14:26:30 -07:00
Jedrzej Kosinski
e374ee1f1c Merge pull request #9016 from bigcat88/v3/preview-refactor
[V3] Audio-Image Preview refactor
2025-07-23 14:08:23 -07:00
comfyanonymous
5ad33787de Add default device argument. (#9023) 2025-07-23 14:20:49 -04:00
bigcat88
9208b4a7c1 converted to V3 schema 2025-07-23 16:59:05 +03:00
bigcat88
bed60d6ed9 refactored Preview/Save of audios 2025-07-23 10:16:15 +03:00
bigcat88
333d942f30 refactored Preview/Save of images 2025-07-23 06:54:15 +03:00
Simon Lui
255f139863 Add xpu version for async offload and some other things. (#9004) 2025-07-22 15:20:09 -04:00
comfyanonymous
5ac9ec214b Try to fix line endings workflow. (#9001) 2025-07-22 04:07:51 -04:00
comfyanonymous
0aa1c58b04 This is not needed. (#8991) 2025-07-21 16:48:25 -04:00
comfyanonymous
5249e45a1c Add hidream e1.1 example to readme. (#8990) 2025-07-21 15:23:41 -04:00
comfyanonymous
54a45b9967 Replace torchaudio.load with pyav. (#8989) 2025-07-21 14:19:14 -04:00
comfyanonymous
9a470e073e ComfyUI version 0.3.45 2025-07-21 14:05:43 -04:00
ComfyUI Wiki
7d627f764c Update template to 0.1.39 (#8981) 2025-07-20 15:58:35 -04:00
comfyanonymous
a0c0785635 Document what the fast_fp16_accumulation is in the portable. (#8973) 2025-07-20 01:24:09 -04:00
chaObserv
100c2478ea Add SamplingPercentToSigma node (#8963)
It's helpful to adjust start_percent or end_percent based on the corresponding sigma.
2025-07-19 23:09:11 -04:00
ComfyUI Wiki
1da5639e86 Update template to 0.1.37 (#8967) 2025-07-19 06:08:00 -04:00
comfyanonymous
1b96fae1d4 Add nested style of dual cfg to DualCFGGuider node. (#8965) 2025-07-19 04:55:23 -04:00
64 changed files with 1850 additions and 626 deletions

View File

@@ -4,6 +4,9 @@ if you have a NVIDIA gpu:
run_nvidia_gpu.bat
if you want to enable the fast fp16 accumulation (faster for fp16 models with slightly lower quality):
run_nvidia_gpu_fast_fp16_accumulation.bat
To run it in slow CPU mode:

View File

@@ -17,6 +17,7 @@ jobs:
- name: Check for Windows line endings (CRLF)
run: |
# Get the list of changed files in the PR
git merge origin/${{ github.base_ref }} --no-edit
CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}..HEAD)
# Flag to track if CRLF is found

View File

@@ -69,6 +69,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
- Image Editing Models
- [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/)
- [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model)
- [HiDream E1.1](https://comfyanonymous.github.io/ComfyUI_examples/hidream/#hidream-e11)
- Video Models
- [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/)
- [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/)

View File

@@ -49,7 +49,8 @@ parser.add_argument("--temp-directory", type=str, default=None, help="Set the Co
parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory. Overrides --base-directory.")
parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use. All other devices will not be visible.")
parser.add_argument("--default-device", type=int, default=None, metavar="DEFAULT_DEVICE_ID", help="Set the id of the default device, all other devices will stay visible.")
cm_group = parser.add_mutually_exclusive_group()
cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")

View File

@@ -101,7 +101,7 @@ if args.directml is not None:
lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.
try:
import intel_extension_for_pytorch as ipex
import intel_extension_for_pytorch as ipex # noqa: F401
_ = torch.xpu.device_count()
xpu_available = xpu_available or torch.xpu.is_available()
except:
@@ -186,8 +186,9 @@ def get_total_memory(dev=None, torch_total_too=False):
elif is_intel_xpu():
stats = torch.xpu.memory_stats(dev)
mem_reserved = stats['reserved_bytes.all.current']
mem_total_xpu = torch.xpu.get_device_properties(dev).total_memory
mem_total_torch = mem_reserved
mem_total = torch.xpu.get_device_properties(dev).total_memory
mem_total = mem_total_xpu
elif is_ascend_npu():
stats = torch.npu.memory_stats(dev)
mem_reserved = stats['reserved_bytes.all.current']
@@ -307,7 +308,10 @@ try:
logging.info("ROCm version: {}".format(rocm_version))
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx1201 and gfx950
if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950
ENABLE_PYTORCH_ATTENTION = True
if torch_version_numeric >= (2, 8):
if any((a in arch) for a in ["gfx1201"]):
ENABLE_PYTORCH_ATTENTION = True
if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
if any((a in arch) for a in ["gfx1201", "gfx942", "gfx950"]): # TODO: more arches
@@ -876,6 +880,7 @@ def vae_dtype(device=None, allowed_dtypes=[]):
return d
# NOTE: bfloat16 seems to work on AMD for the VAE but is extremely slow in some cases compared to fp32
# slowness still a problem on pytorch nightly 2.9.0.dev20250720+rocm6.4 tested on RDNA3
if d == torch.bfloat16 and (not is_amd()) and should_use_bf16(device):
return d
@@ -929,7 +934,7 @@ def device_supports_non_blocking(device):
if is_device_mps(device):
return False #pytorch bug? mps doesn't support non blocking
if is_intel_xpu():
return False
return True
if args.deterministic: #TODO: figure out why deterministic breaks non blocking from gpu to cpu (previews)
return False
if directml_enabled:
@@ -968,6 +973,8 @@ def get_offload_stream(device):
stream_counter = (stream_counter + 1) % len(ss)
if is_device_cuda(device):
ss[stream_counter].wait_stream(torch.cuda.current_stream())
elif is_device_xpu(device):
ss[stream_counter].wait_stream(torch.xpu.current_stream())
stream_counters[device] = stream_counter
return s
elif is_device_cuda(device):
@@ -979,6 +986,15 @@ def get_offload_stream(device):
stream_counter = (stream_counter + 1) % len(ss)
stream_counters[device] = stream_counter
return s
elif is_device_xpu(device):
ss = []
for k in range(NUM_STREAMS):
ss.append(torch.xpu.Stream(device=device, priority=0))
STREAMS[device] = ss
s = ss[stream_counter]
stream_counter = (stream_counter + 1) % len(ss)
stream_counters[device] = stream_counter
return s
return None
def sync_stream(device, stream):
@@ -986,6 +1002,8 @@ def sync_stream(device, stream):
return
if is_device_cuda(device):
torch.cuda.current_stream().wait_stream(stream)
elif is_device_xpu(device):
torch.xpu.current_stream().wait_stream(stream)
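A minimal usage sketch of the offload-stream API these hunks extend to XPU (the weight tensor, dtype, and target device are hypothetical; the function names come from the hunks above): the side stream is fetched per device, the copy is issued on it, and the compute stream is made to wait before using the result.

# hypothetical caller, pairing get_offload_stream with sync_stream as cast_to expects
stream = get_offload_stream(device)  # round-robin CUDA/XPU side stream, or None
weight_on_gpu = cast_to(weight, dtype=torch.float16, device=device,
                        non_blocking=True, stream=stream)  # async copy on the side stream
sync_stream(device, stream)  # current compute stream waits for the copy to finish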
def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, stream=None):
if device is None or weight.device == device:
@@ -1092,8 +1110,8 @@ def get_free_memory(dev=None, torch_free_too=False):
stats = torch.xpu.memory_stats(dev)
mem_active = stats['active_bytes.all.current']
mem_reserved = stats['reserved_bytes.all.current']
mem_free_torch = mem_reserved - mem_active
mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved
mem_free_torch = mem_reserved - mem_active
mem_free_total = mem_free_xpu + mem_free_torch
elif is_ascend_npu():
stats = torch.npu.memory_stats(dev)
@@ -1142,6 +1160,9 @@ def is_device_cpu(device):
def is_device_mps(device):
return is_device_type(device, 'mps')
def is_device_xpu(device):
return is_device_type(device, 'xpu')
def is_device_cuda(device):
return is_device_type(device, 'cuda')
@@ -1173,7 +1194,10 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
return False
if is_intel_xpu():
return True
if torch_version_numeric < (2, 3):
return True
else:
return torch.xpu.get_device_properties(device).has_fp16
if is_ascend_npu():
return True
@@ -1236,7 +1260,10 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
return False
if is_intel_xpu():
return True
if torch_version_numeric < (2, 6):
return True
else:
return torch.xpu.get_device_capability(device)['has_bfloat16_conversions']
if is_ascend_npu():
return True

View File

@@ -17,7 +17,7 @@ def first_real_override(cls: type, name: str, *, base: type=None) -> Optional[Ca
if base_attr is None:
return None
base_func = base_attr.__func__
for c in cls.mro(): # NodeB, NodeA, ComfyNodeV3, object …
for c in cls.mro(): # NodeB, NodeA, ComfyNode, object …
if c is base: # reached the placeholder we're done
break
if name in c.__dict__: # first class that *defines* the attr
@@ -27,7 +27,7 @@ def first_real_override(cls: type, name: str, *, base: type=None) -> Optional[Ca
return None
class ComfyNodeInternal:
class _ComfyNodeInternal:
"""Class that all V3-based APIs inherit from for ComfyNode.
This is intended to only be referenced within execution.py, as it has to handle all V3 APIs going forward."""

View File

@@ -22,7 +22,7 @@ from comfy.samplers import CFGGuider, Sampler
from comfy.sd import CLIP, VAE
from comfy.sd import StyleModel as StyleModel_
from comfy_api.input import VideoInput
from comfy_api.internal import (ComfyNodeInternal, classproperty, copy_class, first_real_override, is_class,
from comfy_api.internal import (_ComfyNodeInternal, classproperty, copy_class, first_real_override, is_class,
prune_dict, shallow_clone_class)
from comfy_api.v3.resources import Resources, ResourcesLocal
from comfy_execution.graph import ExecutionBlocker
@@ -73,7 +73,6 @@ class RemoteOptions:
class NumberDisplay(str, Enum):
number = "number"
slider = "slider"
color = "color"
class _StringIOType(str):
@@ -86,7 +85,7 @@ class _StringIOType(str):
b = frozenset(value.split(","))
return not (b.issubset(a) or a.issubset(b))
class ComfyType(ABC):
class _ComfyType(ABC):
Type = Any
io_type: str = None
@@ -102,7 +101,7 @@ def comfytype(io_type: str, **kwargs):
- class Output(OutputV3): ...
'''
def decorator(cls: T) -> T:
if isinstance(cls, ComfyType) or issubclass(cls, ComfyType):
if isinstance(cls, _ComfyType) or issubclass(cls, _ComfyType):
# clone Input and Output classes to avoid modifying the original class
new_cls = cls
if hasattr(new_cls, "Input"):
@@ -141,11 +140,11 @@ def Custom(io_type: str) -> type[ComfyTypeIO]:
...
return CustomComfyType
class IO_V3:
class _IO_V3:
'''
Base class for V3 Inputs and Outputs.
'''
Parent: ComfyType = None
Parent: _ComfyType = None
def __init__(self):
pass
@@ -158,7 +157,7 @@ class IO_V3:
def Type(self):
return self.Parent.Type
class InputV3(IO_V3):
class InputV3(_IO_V3):
'''
Base class for a V3 Input.
'''
@@ -207,7 +206,7 @@ class WidgetInputV3(InputV3):
return self.widget_type if self.widget_type is not None else super().get_io_type()
class OutputV3(IO_V3):
class OutputV3(_IO_V3):
def __init__(self, id: str=None, display_name: str=None, tooltip: str=None,
is_output_list=False):
self.id = id
@@ -226,7 +225,7 @@ class OutputV3(IO_V3):
return self.io_type
class ComfyTypeI(ComfyType):
class ComfyTypeI(_ComfyType):
'''ComfyType subclass that only has a default Input class - intended for types that only have Inputs.'''
class Input(InputV3):
...
@@ -662,6 +661,12 @@ class Accumulation(ComfyTypeIO):
class Load3DCamera(ComfyTypeIO):
Type = Any # TODO: figure out type for this; in code, only described as image['camera_info'], gotten from a LOAD_3D or LOAD_3D_ANIMATION type
@comfytype(io_type="PHOTOMAKER")
class Photomaker(ComfyTypeIO):
Type = Any
@comfytype(io_type="POINT")
class Point(ComfyTypeIO):
Type = Any # NOTE: I couldn't find any references in core code to POINT io_type. Does this exist?
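Both registrations above use the minimal declaration pattern for an io type. A hedged sketch of the same pattern with a made-up name ("EXAMPLE" is hypothetical, not a real io_type):

@comfytype(io_type="EXAMPLE")
class Example(ComfyTypeIO):
    Type = Any  # the Python type carried at runtime, if one is known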
@@ -689,7 +694,7 @@ class MultiType:
'''
Input that permits more than one input type; if `id` is an instance of `ComfyType.Input`, then that input will be used to create a widget (if applicable) with overridden values.
'''
def __init__(self, id: str | InputV3, types: list[type[ComfyType] | ComfyType], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None):
def __init__(self, id: str | InputV3, types: list[type[_ComfyType] | _ComfyType], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None):
# if id is an Input, then use that Input with overridden values
self.input_override = None
if isinstance(id, InputV3):
@@ -802,9 +807,9 @@ class ComboDynamicInput(DynamicInput):
@comfytype(io_type="COMFY_MATCHTYPE_V3")
class MatchType(ComfyTypeIO):
class Template:
def __init__(self, template_id: str, allowed_types: ComfyType | list[ComfyType]):
def __init__(self, template_id: str, allowed_types: _ComfyType | list[_ComfyType]):
self.template_id = template_id
self.allowed_types = [allowed_types] if isinstance(allowed_types, ComfyType) else allowed_types
self.allowed_types = [allowed_types] if isinstance(allowed_types, _ComfyType) else allowed_types
def as_dict(self):
return {
@@ -927,7 +932,7 @@ class NodeInfoV3:
@dataclass
class SchemaV3:
class Schema:
"""Definition of V3 node properties."""
node_id: str
@@ -957,8 +962,6 @@ class SchemaV3:
If a node is not connected to any output nodes, that node will not be executed. Usage::
OUTPUT_NODE = True
From the docs:
By default, a node is not considered an output. Set ``OUTPUT_NODE = True`` to specify that it is.
@@ -973,6 +976,8 @@ class SchemaV3:
"""Flags a node as an API node. See: https://docs.comfy.org/tutorials/api-nodes/overview."""
not_idempotent: bool=False
"""Flags a node as not idempotent; when True, the node will run and not reuse the cached outputs when identical inputs are provided on a different node in the graph."""
enable_expand: bool=False
"""Flags a node as expandable, allowing NodeOutput to include 'expand' property."""
def validate(self):
'''Validate the schema:
@@ -1111,7 +1116,7 @@ def add_to_dict_v3(io: InputV3 | OutputV3, d: dict):
class _ComfyNodeBaseInternal(ComfyNodeInternal):
class _ComfyNodeBaseInternal(_ComfyNodeInternal):
"""Common base class for storing internal methods and properties; DO NOT USE for defining nodes."""
RELATIVE_PYTHON_MODULE = None
@@ -1123,8 +1128,8 @@ class _ComfyNodeBaseInternal(ComfyNodeInternal):
@classmethod
@abstractmethod
def define_schema(cls) -> SchemaV3:
"""Override this function with one that returns a SchemaV3 instance."""
def define_schema(cls) -> Schema:
"""Override this function with one that returns a Schema instance."""
raise NotImplementedError
@classmethod
@@ -1186,41 +1191,47 @@ class _ComfyNodeBaseInternal(ComfyNodeInternal):
def EXECUTE_NORMALIZED(cls, *args, **kwargs) -> NodeOutput:
to_return = cls.execute(*args, **kwargs)
if to_return is None:
return NodeOutput()
to_return = NodeOutput()
elif isinstance(to_return, NodeOutput):
return to_return
pass
elif isinstance(to_return, tuple):
return NodeOutput(*to_return)
to_return = NodeOutput(*to_return)
elif isinstance(to_return, dict):
return NodeOutput.from_dict(to_return)
to_return = NodeOutput.from_dict(to_return)
elif isinstance(to_return, ExecutionBlocker):
return NodeOutput(block_execution=to_return.message)
to_return = NodeOutput(block_execution=to_return.message)
else:
raise Exception(f"Invalid return type from node: {type(to_return)}")
if to_return.expand is not None and not cls.SCHEMA.enable_expand:
raise Exception(f"Node {cls.__name__} is not expandable, but expand included in NodeOutput; developer should set enable_expand=True on node's Schema to allow this.")
return to_return
@final
@classmethod
async def EXECUTE_NORMALIZED_ASYNC(cls, *args, **kwargs) -> NodeOutput:
to_return = await cls.execute(*args, **kwargs)
if to_return is None:
return NodeOutput()
to_return = NodeOutput()
elif isinstance(to_return, NodeOutput):
return to_return
pass
elif isinstance(to_return, tuple):
return NodeOutput(*to_return)
to_return = NodeOutput(*to_return)
elif isinstance(to_return, dict):
return NodeOutput.from_dict(to_return)
to_return = NodeOutput.from_dict(to_return)
elif isinstance(to_return, ExecutionBlocker):
return NodeOutput(block_execution=to_return.message)
to_return = NodeOutput(block_execution=to_return.message)
else:
raise Exception(f"Invalid return type from node: {type(to_return)}")
if to_return.expand is not None and not cls.SCHEMA.enable_expand:
raise Exception(f"Node {cls.__name__} is not expandable, but expand included in NodeOutput; developer should set enable_expand=True on node's Schema to allow this.")
return to_return
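To make the new enforcement concrete: a NodeOutput may only carry an 'expand' payload if the node's Schema opts in via enable_expand. A minimal sketch with a hypothetical node (the empty expand dict stands in for an expanded-graph fragment):

class MyExpandingNode(ComfyNode):
    @classmethod
    def define_schema(cls):
        return Schema(node_id="MyExpandingNode", inputs=[], outputs=[],
                      enable_expand=True)  # omitting this makes EXECUTE_NORMALIZED* raise

    @classmethod
    def execute(cls) -> NodeOutput:
        return NodeOutput(expand={})  # hypothetical expanded-graph payload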
@final
@classmethod
def PREPARE_CLASS_CLONE(cls, hidden_inputs: dict) -> type[ComfyNodeV3]:
def PREPARE_CLASS_CLONE(cls, hidden_inputs: dict) -> type[ComfyNode]:
"""Creates clone of real node class to prevent monkey-patching."""
c_type: type[ComfyNodeV3] = cls if is_class(cls) else type(cls)
type_clone: type[ComfyNodeV3] = shallow_clone_class(c_type)
c_type: type[ComfyNode] = cls if is_class(cls) else type(cls)
type_clone: type[ComfyNode] = shallow_clone_class(c_type)
# set hidden
type_clone.hidden = HiddenHolder.from_dict(hidden_inputs)
return type_clone
@@ -1339,7 +1350,7 @@ class _ComfyNodeBaseInternal(ComfyNodeInternal):
@final
@classmethod
def INPUT_TYPES(cls, include_hidden=True, return_schema=False) -> dict[str, dict] | tuple[dict[str, dict], SchemaV3]:
def INPUT_TYPES(cls, include_hidden=True, return_schema=False) -> dict[str, dict] | tuple[dict[str, dict], Schema]:
schema = cls.FINALIZE_SCHEMA()
info = schema.get_v1_info(cls)
input = info.input
@@ -1359,7 +1370,7 @@ class _ComfyNodeBaseInternal(ComfyNodeInternal):
@final
@classmethod
def GET_SCHEMA(cls) -> SchemaV3:
def GET_SCHEMA(cls) -> Schema:
"""Validate node class, finalize schema, validate schema, and set expected class properties."""
cls.VALIDATE_CLASS()
schema = cls.FINALIZE_SCHEMA()
@@ -1403,13 +1414,13 @@ class _ComfyNodeBaseInternal(ComfyNodeInternal):
#############################################
class ComfyNodeV3(_ComfyNodeBaseInternal):
class ComfyNode(_ComfyNodeBaseInternal):
"""Common base class for all V3 nodes."""
@classmethod
@abstractmethod
def define_schema(cls) -> SchemaV3:
"""Override this function with one that returns a SchemaV3 instance."""
def define_schema(cls) -> Schema:
"""Override this function with one that returns a Schema instance."""
raise NotImplementedError
@classmethod
@@ -1448,23 +1459,21 @@ class ComfyNodeV3(_ComfyNodeBaseInternal):
@classmethod
def GET_BASE_CLASS(cls):
"""DO NOT override this class. Will break things in execution.py."""
return ComfyNodeV3
return ComfyNode
class NodeOutput:
'''
Standardized output of a node; can pass in any number of args and/or a UIOutput into 'ui' kwarg.
'''
def __init__(self, *args: Any, ui: _UIOutput | dict=None, expand: dict=None, block_execution: str=None, **kwargs):
def __init__(self, *args: Any, ui: _UIOutput | dict=None, expand: dict=None, block_execution: str=None):
self.args = args
self.ui = ui
self.expand = expand
self.block_execution = block_execution
# self.kwargs = kwargs
@property
def result(self):
# TODO: use kwargs to refer to outputs by id + organize in proper order
return self.args if len(self.args) > 0 else None
@classmethod

View File

@@ -17,7 +17,7 @@ import folder_paths
# used for image preview
from comfy.cli_args import args
from comfy_api.v3.io import ComfyNodeV3, FolderType, Image, _UIOutput
from comfy_api.v3.io import ComfyNode, FolderType, Image, _UIOutput
class SavedResult(dict):
@@ -37,6 +37,30 @@ class SavedResult(dict):
return FolderType(self["type"])
class SavedImages(_UIOutput):
"""A UI output class to represent one or more saved images, potentially animated."""
def __init__(self, results: list[SavedResult], is_animated: bool = False):
super().__init__()
self.results = results
self.is_animated = is_animated
def as_dict(self) -> dict:
data = {"images": self.results}
if self.is_animated:
data["animated"] = (True,)
return data
class SavedAudios(_UIOutput):
"""UI wrapper around one or more audio files on disk (FLAC / MP3 / Opus)."""
def __init__(self, results: list[SavedResult]):
super().__init__()
self.results = results
def as_dict(self) -> dict:
return {"audio": self.results}
def _get_directory_by_folder_type(folder_type: FolderType) -> str:
if folder_type == FolderType.input:
return folder_paths.get_input_directory()
@@ -54,7 +78,7 @@ class ImageSaveHelper:
return PILImage.fromarray(np.clip(255.0 * image_tensor.cpu().numpy(), 0, 255).astype(np.uint8))
@staticmethod
def _create_png_metadata(cls: Type[ComfyNodeV3] | None) -> PngInfo | None:
def _create_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None:
"""Creates a PngInfo object with prompt and extra_pnginfo."""
if args.disable_metadata or cls is None or not cls.hidden:
return None
@@ -67,7 +91,7 @@ class ImageSaveHelper:
return metadata
@staticmethod
def _create_animated_png_metadata(cls: Type[ComfyNodeV3] | None) -> PngInfo | None:
def _create_animated_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None:
"""Creates a PngInfo object with prompt and extra_pnginfo for animated PNGs (APNG)."""
if args.disable_metadata or cls is None or not cls.hidden:
return None
@@ -92,7 +116,7 @@ class ImageSaveHelper:
return metadata
@staticmethod
def _create_webp_metadata(pil_image: PILImage.Image, cls: Type[ComfyNodeV3] | None) -> PILImage.Exif:
def _create_webp_metadata(pil_image: PILImage.Image, cls: Type[ComfyNode] | None) -> PILImage.Exif:
"""Creates EXIF metadata bytes for WebP images."""
exif_data = pil_image.getexif()
if args.disable_metadata or cls is None or cls.hidden is None:
@@ -108,7 +132,7 @@ class ImageSaveHelper:
@staticmethod
def save_images(
images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNodeV3] | None, compress_level = 4,
images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, compress_level = 4,
) -> list[SavedResult]:
"""Saves a batch of images as individual PNG files."""
full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
@@ -125,14 +149,22 @@ class ImageSaveHelper:
counter += 1
return results
@staticmethod
def get_save_images_ui(images, filename_prefix: str, cls: Type[ComfyNode] | None, compress_level=4) -> SavedImages:
"""Saves a batch of images and returns a UI object for the node output."""
return SavedImages(
ImageSaveHelper.save_images(
images,
filename_prefix=filename_prefix,
folder_type=FolderType.output,
cls=cls,
compress_level=compress_level,
)
)
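The helper above is what converted nodes are expected to call from execute; a hedged usage sketch (the node and its inputs are hypothetical), mirroring the SaveAudio pattern further down in this compare:

@classmethod
def execute(cls, images, filename_prefix="ComfyUI") -> io.NodeOutput:
    return io.NodeOutput(
        ui=ui.ImageSaveHelper.get_save_images_ui(images, filename_prefix=filename_prefix, cls=cls)
    )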
@staticmethod
def save_animated_png(
images,
filename_prefix: str,
folder_type: FolderType,
cls: Type[ComfyNodeV3] | None,
fps: float,
compress_level: int
images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, fps: float, compress_level: int
) -> SavedResult:
"""Saves a batch of images as a single animated PNG."""
full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
@@ -152,12 +184,27 @@ class ImageSaveHelper:
)
return SavedResult(file, subfolder, folder_type)
@staticmethod
def get_save_animated_png_ui(
images, filename_prefix: str, cls: Type[ComfyNode] | None, fps: float, compress_level: int
) -> SavedImages:
"""Saves an animated PNG and returns a UI object for the node output."""
result = ImageSaveHelper.save_animated_png(
images,
filename_prefix=filename_prefix,
folder_type=FolderType.output,
cls=cls,
fps=fps,
compress_level=compress_level,
)
return SavedImages([result], is_animated=len(images) > 1)
@staticmethod
def save_animated_webp(
images,
filename_prefix: str,
folder_type: FolderType,
cls: Type[ComfyNodeV3] | None,
cls: Type[ComfyNode] | None,
fps: float,
lossless: bool,
quality: int,
@@ -182,9 +229,158 @@ class ImageSaveHelper:
)
return SavedResult(file, subfolder, folder_type)
@staticmethod
def get_save_animated_webp_ui(
images,
filename_prefix: str,
cls: Type[ComfyNode] | None,
fps: float,
lossless: bool,
quality: int,
method: int,
) -> SavedImages:
"""Saves an animated WebP and returns a UI object for the node output."""
result = ImageSaveHelper.save_animated_webp(
images,
filename_prefix=filename_prefix,
folder_type=FolderType.output,
cls=cls,
fps=fps,
lossless=lossless,
quality=quality,
method=method,
)
return SavedImages([result], is_animated=len(images) > 1)
class AudioSaveHelper:
"""A helper class with static methods to handle audio saving and metadata."""
_OPUS_RATES = [8000, 12000, 16000, 24000, 48000]
@staticmethod
def save_audio(
audio: dict,
filename_prefix: str,
folder_type: FolderType,
cls: Type[ComfyNode] | None,
format: str = "flac",
quality: str = "128k",
) -> list[SavedResult]:
full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
filename_prefix, _get_directory_by_folder_type(folder_type)
)
metadata = {}
if not args.disable_metadata and cls is not None:
if cls.hidden.prompt is not None:
metadata["prompt"] = json.dumps(cls.hidden.prompt)
if cls.hidden.extra_pnginfo is not None:
for x in cls.hidden.extra_pnginfo:
metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])
results = []
for batch_number, waveform in enumerate(audio["waveform"].cpu()):
filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
file = f"{filename_with_batch_num}_{counter:05}_.{format}"
output_path = os.path.join(full_output_folder, file)
# Use original sample rate initially
sample_rate = audio["sample_rate"]
# Handle Opus sample rate requirements
if format == "opus":
if sample_rate > 48000:
sample_rate = 48000
elif sample_rate not in AudioSaveHelper._OPUS_RATES:
# Find the next highest supported rate
for rate in sorted(AudioSaveHelper._OPUS_RATES):
if rate > sample_rate:
sample_rate = rate
break
if sample_rate not in AudioSaveHelper._OPUS_RATES: # Fallback if still not supported
sample_rate = 48000
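# Example: a 44100 Hz input is not in _OPUS_RATES, so the loop above bumps it to the
# next supported rate (48000) and the resample step below converts the waveform.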
# Resample if necessary
if sample_rate != audio["sample_rate"]:
waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)
# Create output with specified format
output_buffer = BytesIO()
output_container = av.open(output_buffer, mode="w", format=format)
# Set metadata on the container
for key, value in metadata.items():
output_container.metadata[key] = value
# Set up the output stream with appropriate properties
if format == "opus":
out_stream = output_container.add_stream("libopus", rate=sample_rate)
if quality == "64k":
out_stream.bit_rate = 64000
elif quality == "96k":
out_stream.bit_rate = 96000
elif quality == "128k":
out_stream.bit_rate = 128000
elif quality == "192k":
out_stream.bit_rate = 192000
elif quality == "320k":
out_stream.bit_rate = 320000
elif format == "mp3":
out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
if quality == "V0":
# TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
out_stream.codec_context.qscale = 1
elif quality == "128k":
out_stream.bit_rate = 128000
elif quality == "320k":
out_stream.bit_rate = 320000
else: # format == "flac":
out_stream = output_container.add_stream("flac", rate=sample_rate)
frame = av.AudioFrame.from_ndarray(
waveform.movedim(0, 1).reshape(1, -1).float().numpy(),
format="flt",
layout="mono" if waveform.shape[0] == 1 else "stereo",
)
frame.sample_rate = sample_rate
frame.pts = 0
output_container.mux(out_stream.encode(frame))
# Flush encoder
output_container.mux(out_stream.encode(None))
# Close containers
output_container.close()
# Write the output to file
output_buffer.seek(0)
with open(output_path, "wb") as f:
f.write(output_buffer.getbuffer())
results.append(SavedResult(file, subfolder, folder_type))
counter += 1
return results
@staticmethod
def get_save_audio_ui(
audio, filename_prefix: str, cls: Type[ComfyNode] | None, format: str = "flac", quality: str = "128k",
) -> SavedAudios:
"""Save and instantly wrap for UI."""
return SavedAudios(
AudioSaveHelper.save_audio(
audio,
filename_prefix=filename_prefix,
folder_type=FolderType.output,
cls=cls,
format=format,
quality=quality,
)
)
class PreviewImage(_UIOutput):
def __init__(self, image: Image.Type, animated: bool=False, cls: ComfyNodeV3=None, **kwargs):
def __init__(self, image: Image.Type, animated: bool = False, cls: Type[ComfyNode] = None, **kwargs):
self.values = ImageSaveHelper.save_images(
image,
filename_prefix="ComfyUI_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for _ in range(5)),
@@ -202,7 +398,7 @@ class PreviewImage(_UIOutput):
class PreviewMask(PreviewImage):
def __init__(self, mask: PreviewMask.Type, animated: bool=False, cls: ComfyNodeV3=None, **kwargs):
def __init__(self, mask: PreviewMask.Type, animated: bool=False, cls: ComfyNode=None, **kwargs):
preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
super().__init__(preview, animated, cls, **kwargs)
@@ -256,108 +452,17 @@ class PreviewMask(PreviewImage):
class PreviewAudio(_UIOutput):
def __init__(self, audio, cls: ComfyNodeV3=None, **kwargs):
quality = "128k"
format = "flac"
filename_prefix = "ComfyUI_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
filename_prefix, folder_paths.get_temp_directory()
def __init__(self, audio: dict, cls: Type[ComfyNode] = None, **kwargs):
self.values = AudioSaveHelper.save_audio(
audio,
filename_prefix="ComfyUI_temp_" + "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5)),
folder_type=FolderType.temp,
cls=cls,
format="flac",
quality="128k",
)
# Prepare metadata dictionary
metadata = {}
if not args.disable_metadata and cls is not None:
if cls.hidden.prompt is not None:
metadata["prompt"] = json.dumps(cls.hidden.prompt)
if cls.hidden.extra_pnginfo is not None:
for x in cls.hidden.extra_pnginfo:
metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])
# Opus supported sample rates
OPUS_RATES = [8000, 12000, 16000, 24000, 48000]
results = []
for (batch_number, waveform) in enumerate(audio["waveform"].cpu()):
filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
file = f"{filename_with_batch_num}_{counter:05}_.{format}"
output_path = os.path.join(full_output_folder, file)
# Use original sample rate initially
sample_rate = audio["sample_rate"]
# Handle Opus sample rate requirements
if format == "opus":
if sample_rate > 48000:
sample_rate = 48000
elif sample_rate not in OPUS_RATES:
# Find the next highest supported rate
for rate in sorted(OPUS_RATES):
if rate > sample_rate:
sample_rate = rate
break
if sample_rate not in OPUS_RATES: # Fallback if still not supported
sample_rate = 48000
# Resample if necessary
if sample_rate != audio["sample_rate"]:
waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)
# Create output with specified format
output_buffer = BytesIO()
output_container = av.open(output_buffer, mode='w', format=format)
# Set metadata on the container
for key, value in metadata.items():
output_container.metadata[key] = value
# Set up the output stream with appropriate properties
if format == "opus":
out_stream = output_container.add_stream("libopus", rate=sample_rate)
if quality == "64k":
out_stream.bit_rate = 64000
elif quality == "96k":
out_stream.bit_rate = 96000
elif quality == "128k":
out_stream.bit_rate = 128000
elif quality == "192k":
out_stream.bit_rate = 192000
elif quality == "320k":
out_stream.bit_rate = 320000
elif format == "mp3":
out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
if quality == "V0":
# TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
out_stream.codec_context.qscale = 1
elif quality == "128k":
out_stream.bit_rate = 128000
elif quality == "320k":
out_stream.bit_rate = 320000
else: # format == "flac":
out_stream = output_container.add_stream("flac", rate=sample_rate)
frame = av.AudioFrame.from_ndarray(waveform.movedim(0, 1).reshape(1, -1).float().numpy(), format='flt',
layout='mono' if waveform.shape[0] == 1 else 'stereo')
frame.sample_rate = sample_rate
frame.pts = 0
output_container.mux(out_stream.encode(frame))
# Flush encoder
output_container.mux(out_stream.encode(None))
# Close containers
output_container.close()
# Write the output to file
output_buffer.seek(0)
with open(output_path, 'wb') as f:
f.write(output_buffer.getbuffer())
results.append(SavedResult(file, subfolder, FolderType.temp))
counter += 1
self.values = results
def as_dict(self):
def as_dict(self) -> dict:
return {"audio": self.values}

View File

@@ -278,6 +278,42 @@ class PreviewAudio(SaveAudio):
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
"""Convert audio to float 32 bits PCM format."""
if wav.dtype.is_floating_point:
return wav
elif wav.dtype == torch.int16:
return wav.float() / (2 ** 15)
elif wav.dtype == torch.int32:
return wav.float() / (2 ** 31)
raise ValueError(f"Unsupported wav dtype: {wav.dtype}")
def load(filepath: str) -> tuple[torch.Tensor, int]:
with av.open(filepath) as af:
if not af.streams.audio:
raise ValueError("No audio stream found in the file.")
stream = af.streams.audio[0]
sr = stream.codec_context.sample_rate
n_channels = stream.channels
frames = []
length = 0
for frame in af.decode(streams=stream.index):
buf = torch.from_numpy(frame.to_ndarray())
if buf.shape[0] != n_channels:
buf = buf.view(-1, n_channels).t()
frames.append(buf)
length += buf.shape[1]
if not frames:
raise ValueError("No audio frames decoded.")
wav = torch.cat(frames, dim=1)
wav = f32_pcm(wav)
return wav, sr
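A quick usage sketch of the new pyav-based loader (the file path is hypothetical), producing the same audio dict the nodes below pass around:

waveform, sample_rate = load("example.flac")  # float32 tensor shaped [channels, samples]
audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}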
class LoadAudio:
@classmethod
def INPUT_TYPES(s):
@@ -292,7 +328,7 @@ class LoadAudio:
def load(self, audio):
audio_path = folder_paths.get_annotated_filepath(audio)
waveform, sample_rate = torchaudio.load(audio_path)
waveform, sample_rate = load(audio_path)
audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
return (audio, )

View File

@@ -301,6 +301,35 @@ class ExtendIntermediateSigmas:
return (extended_sigmas,)
class SamplingPercentToSigma:
@classmethod
def INPUT_TYPES(cls) -> InputTypeDict:
return {
"required": {
"model": (IO.MODEL, {}),
"sampling_percent": (IO.FLOAT, {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.0001}),
"return_actual_sigma": (IO.BOOLEAN, {"default": False, "tooltip": "Return the actual sigma value instead of the value used for interval checks.\nThis only affects results at 0.0 and 1.0."}),
}
}
RETURN_TYPES = (IO.FLOAT,)
RETURN_NAMES = ("sigma_value",)
CATEGORY = "sampling/custom_sampling/sigmas"
FUNCTION = "get_sigma"
def get_sigma(self, model, sampling_percent, return_actual_sigma):
model_sampling = model.get_model_object("model_sampling")
sigma_val = model_sampling.percent_to_sigma(sampling_percent)
if return_actual_sigma:
if sampling_percent == 0.0:
sigma_val = model_sampling.sigma_max.item()
elif sampling_percent == 1.0:
sigma_val = model_sampling.sigma_min.item()
return (sigma_val,)
class KSamplerSelect:
@classmethod
def INPUT_TYPES(s):
@@ -683,9 +712,10 @@ class CFGGuider:
return (guider,)
class Guider_DualCFG(comfy.samplers.CFGGuider):
def set_cfg(self, cfg1, cfg2):
def set_cfg(self, cfg1, cfg2, nested=False):
self.cfg1 = cfg1
self.cfg2 = cfg2
self.nested = nested
def set_conds(self, positive, middle, negative):
middle = node_helpers.conditioning_set_values(middle, {"prompt_type": "negative"})
@@ -695,14 +725,20 @@ class Guider_DualCFG(comfy.samplers.CFGGuider):
negative_cond = self.conds.get("negative", None)
middle_cond = self.conds.get("middle", None)
positive_cond = self.conds.get("positive", None)
if model_options.get("disable_cfg1_optimization", False) == False:
if math.isclose(self.cfg2, 1.0):
negative_cond = None
if math.isclose(self.cfg1, 1.0):
middle_cond = None
out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options)
return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1
if self.nested:
out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options)
pred_text = comfy.samplers.cfg_function(self.inner_model, out[2], out[1], self.cfg1, x, timestep, model_options=model_options, cond=positive_cond, uncond=middle_cond)
return out[0] + self.cfg2 * (pred_text - out[0])
else:
if model_options.get("disable_cfg1_optimization", False) == False:
if math.isclose(self.cfg2, 1.0):
negative_cond = None
if math.isclose(self.cfg1, 1.0):
middle_cond = None
out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options)
return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1
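Spelling out the two combination modes in this hunk, with negative_pred, middle_pred, positive_pred standing in for out[0], out[1], out[2], and cfg_function shorthand for comfy.samplers.cfg_function with its extra arguments elided (a paraphrase of the code above, not new behavior):

# regular: CFG middle against negative with cfg2, then add the positive-vs-middle
# delta scaled by cfg1
pred = cfg_function(middle_pred, negative_pred, cfg2) + (positive_pred - middle_pred) * cfg1
# nested: CFG positive against middle with cfg1, then use that result as the cond of
# an outer CFG against negative with cfg2
pred_text = cfg_function(positive_pred, middle_pred, cfg1)
pred = negative_pred + cfg2 * (pred_text - negative_pred)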
class DualCFGGuider:
@classmethod
@@ -714,6 +750,7 @@ class DualCFGGuider:
"negative": ("CONDITIONING", ),
"cfg_conds": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
"cfg_cond2_negative": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
"style": (["regular", "nested"],),
}
}
@@ -722,10 +759,10 @@ class DualCFGGuider:
FUNCTION = "get_guider"
CATEGORY = "sampling/custom_sampling/guiders"
def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative):
def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative, style):
guider = Guider_DualCFG(model)
guider.set_conds(cond1, cond2, negative)
guider.set_cfg(cfg_conds, cfg_cond2_negative)
guider.set_cfg(cfg_conds, cfg_cond2_negative, nested=(style == "nested"))
return (guider,)
class DisableNoise:
@@ -879,6 +916,7 @@ NODE_CLASS_MAPPINGS = {
"FlipSigmas": FlipSigmas,
"SetFirstSigma": SetFirstSigma,
"ExtendIntermediateSigmas": ExtendIntermediateSigmas,
"SamplingPercentToSigma": SamplingPercentToSigma,
"CFGGuider": CFGGuider,
"DualCFGGuider": DualCFGGuider,

View File

@@ -9,15 +9,11 @@ import asyncio
@io.comfytype(io_type="XYZ")
class XYZ:
class XYZ(io.ComfyTypeIO):
Type = tuple[int,str]
class Input(io.InputV3):
...
class Output(io.OutputV3):
...
class V3TestNode(io.ComfyNodeV3):
class V3TestNode(io.ComfyNode):
# NOTE: this is here just to test that state is not leaking
def __init__(self):
super().__init__()
@@ -25,7 +21,7 @@ class V3TestNode(io.ComfyNodeV3):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="V3_01_TestNode1",
display_name="V3 Test Node",
category="v3 nodes",
@@ -91,10 +87,10 @@ class V3TestNode(io.ComfyNodeV3):
return io.NodeOutput(some_int, image, ui=ui.PreviewImage(image, cls=cls))
class V3LoraLoader(io.ComfyNodeV3):
class V3LoraLoader(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="V3_LoraLoader",
display_name="V3 LoRA Loader",
category="v3 nodes",
@@ -141,10 +137,10 @@ class V3LoraLoader(io.ComfyNodeV3):
return io.NodeOutput(model_lora, clip_lora)
class NInputsTest(io.ComfyNodeV3):
class NInputsTest(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="V3_NInputsTest",
display_name="V3 N Inputs Test",
inputs=[
@@ -183,10 +179,10 @@ class NInputsTest(io.ComfyNodeV3):
return io.NodeOutput(combined_image)
class V3TestSleep(io.ComfyNodeV3):
class V3TestSleep(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="V3_TestSleep",
display_name="V3 Test Sleep",
category="_for_testing",
@@ -218,10 +214,10 @@ class V3TestSleep(io.ComfyNodeV3):
return io.NodeOutput(value)
class V3DummyStart(io.ComfyNodeV3):
class V3DummyStart(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="V3_DummyStart",
display_name="V3 Dummy Start",
category="v3 nodes",
@@ -237,12 +233,12 @@ class V3DummyStart(io.ComfyNodeV3):
return io.NodeOutput(None)
class V3DummyEnd(io.ComfyNodeV3):
class V3DummyEnd(io.ComfyNode):
COOL_VALUE = 123
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="V3_DummyEnd",
display_name="V3 Dummy End",
category="v3 nodes",
@@ -279,7 +275,7 @@ class V3DummyEndInherit(V3DummyEnd):
return super().execute(xyz)
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
V3TestNode,
V3LoraLoader,
NInputsTest,

View File

@@ -7,10 +7,10 @@ import node_helpers
from comfy_api.v3 import io
class TextEncodeAceStepAudio(io.ComfyNodeV3):
class TextEncodeAceStepAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="TextEncodeAceStepAudio_V3",
category="conditioning",
inputs=[
@@ -29,10 +29,10 @@ class TextEncodeAceStepAudio(io.ComfyNodeV3):
return io.NodeOutput(conditioning)
class EmptyAceStepLatentAudio(io.ComfyNodeV3):
class EmptyAceStepLatentAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="EmptyAceStepLatentAudio_V3",
category="latent/audio",
inputs=[
@@ -51,7 +51,7 @@ class EmptyAceStepLatentAudio(io.ComfyNodeV3):
return io.NodeOutput({"samples": latent, "type": "audio"})
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
TextEncodeAceStepAudio,
EmptyAceStepLatentAudio,
]

View File

@@ -41,12 +41,12 @@ def sample_lcm_upscale(
return x
class SamplerLCMUpscale(io.ComfyNodeV3):
class SamplerLCMUpscale(io.ComfyNode):
UPSCALE_METHODS = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="SamplerLCMUpscale_V3",
category="sampling/custom_sampling/samplers",
inputs=[
@@ -99,10 +99,10 @@ def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=No
return x
class SamplerEulerCFGpp(io.ComfyNodeV3):
class SamplerEulerCFGpp(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="SamplerEulerCFGpp_V3",
display_name="SamplerEulerCFG++ _V3",
category="_for_testing",

View File

@@ -47,10 +47,10 @@ def loglinear_interp(t_steps, num_steps):
return np.exp(new_ys)[::-1].copy()
class AlignYourStepsScheduler(io.ComfyNodeV3):
class AlignYourStepsScheduler(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="AlignYourStepsScheduler_V3",
category="sampling/custom_sampling/schedulers",
inputs=[

View File

@@ -10,10 +10,10 @@ def project(v0, v1):
return v0_parallel, v0_orthogonal
class APG(io.ComfyNodeV3):
class APG(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="APG_V3",
display_name="Adaptive Projected Guidance _V3",
category="sampling/custom_sampling",

View File

@@ -17,10 +17,10 @@ def attention_multiply(attn, model, q, k, v, out):
return m
class UNetSelfAttentionMultiply(io.ComfyNodeV3):
class UNetSelfAttentionMultiply(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="UNetSelfAttentionMultiply_V3",
category="_for_testing/attention_experiments",
inputs=[
@@ -39,10 +39,10 @@ class UNetSelfAttentionMultiply(io.ComfyNodeV3):
return io.NodeOutput(attention_multiply("attn1", model, q, k, v, out))
class UNetCrossAttentionMultiply(io.ComfyNodeV3):
class UNetCrossAttentionMultiply(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="UNetCrossAttentionMultiply_V3",
category="_for_testing/attention_experiments",
inputs=[
@@ -61,10 +61,10 @@ class UNetCrossAttentionMultiply(io.ComfyNodeV3):
return io.NodeOutput(attention_multiply("attn2", model, q, k, v, out))
class CLIPAttentionMultiply(io.ComfyNodeV3):
class CLIPAttentionMultiply(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CLIPAttentionMultiply_V3",
category="_for_testing/attention_experiments",
inputs=[
@@ -95,10 +95,10 @@ class CLIPAttentionMultiply(io.ComfyNodeV3):
return io.NodeOutput(m)
class UNetTemporalAttentionMultiply(io.ComfyNodeV3):
class UNetTemporalAttentionMultiply(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="UNetTemporalAttentionMultiply_V3",
category="_for_testing/attention_experiments",
inputs=[

View File

@@ -1,25 +1,21 @@
from __future__ import annotations
import hashlib
import json
import os
from io import BytesIO
import av
import torch
import torchaudio
import comfy.model_management
import folder_paths
import node_helpers
from comfy.cli_args import args
from comfy_api.v3 import io, ui
class ConditioningStableAudio(io.ComfyNodeV3):
class ConditioningStableAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ConditioningStableAudio_V3",
category="conditioning",
inputs=[
@@ -46,10 +42,10 @@ class ConditioningStableAudio(io.ComfyNodeV3):
)
class EmptyLatentAudio(io.ComfyNodeV3):
class EmptyLatentAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="EmptyLatentAudio_V3",
category="latent/audio",
inputs=[
@@ -68,10 +64,10 @@ class EmptyLatentAudio(io.ComfyNodeV3):
return io.NodeOutput({"samples": latent, "type": "audio"})
class LoadAudio(io.ComfyNodeV3):
class LoadAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LoadAudio_V3", # frontend expects "LoadAudio" to work
display_name="Load Audio _V3", # frontend ignores "display_name" for this node
category="audio",
@@ -106,10 +102,10 @@ class LoadAudio(io.ComfyNodeV3):
return True
class PreviewAudio(io.ComfyNodeV3):
class PreviewAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PreviewAudio_V3", # frontend expects "PreviewAudio" to work
display_name="Preview Audio _V3", # frontend ignores "display_name" for this node
category="audio",
@@ -125,10 +121,10 @@ class PreviewAudio(io.ComfyNodeV3):
return io.NodeOutput(ui=ui.PreviewAudio(audio, cls=cls))
class SaveAudioMP3(io.ComfyNodeV3):
class SaveAudioMP3(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveAudioMP3_V3", # frontend expects "SaveAudioMP3" to work
display_name="Save Audio(MP3) _V3", # frontend ignores "display_name" for this node
category="audio",
@@ -142,14 +138,18 @@ class SaveAudioMP3(io.ComfyNodeV3):
)
@classmethod
def execute(self, audio, filename_prefix="ComfyUI", format="mp3", quality="V0") -> io.NodeOutput:
return _save_audio(self, audio, filename_prefix, format, quality)
def execute(cls, audio, filename_prefix="ComfyUI", format="mp3", quality="V0") -> io.NodeOutput:
return io.NodeOutput(
ui=ui.AudioSaveHelper.get_save_audio_ui(
audio, filename_prefix=filename_prefix, cls=cls, format=format, quality=quality
)
)
class SaveAudioOpus(io.ComfyNodeV3):
class SaveAudioOpus(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveAudioOpus_V3", # frontend expects "SaveAudioOpus" to work
display_name="Save Audio(Opus) _V3", # frontend ignores "display_name" for this node
category="audio",
@@ -163,14 +163,18 @@ class SaveAudioOpus(io.ComfyNodeV3):
)
@classmethod
def execute(self, audio, filename_prefix="ComfyUI", format="opus", quality="128k") -> io.NodeOutput:
return _save_audio(self, audio, filename_prefix, format, quality)
def execute(cls, audio, filename_prefix="ComfyUI", format="opus", quality="128k") -> io.NodeOutput:
return io.NodeOutput(
ui=ui.AudioSaveHelper.get_save_audio_ui(
audio, filename_prefix=filename_prefix, cls=cls, format=format, quality=quality
)
)
class SaveAudio(io.ComfyNodeV3):
class SaveAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveAudio_V3", # frontend expects "SaveAudio" to work
display_name="Save Audio _V3", # frontend ignores "display_name" for this node
category="audio",
@@ -184,13 +188,15 @@ class SaveAudio(io.ComfyNodeV3):
@classmethod
def execute(cls, audio, filename_prefix="ComfyUI", format="flac") -> io.NodeOutput:
return _save_audio(cls, audio, filename_prefix, format)
return io.NodeOutput(
ui=ui.AudioSaveHelper.get_save_audio_ui(audio, filename_prefix=filename_prefix, cls=cls, format=format)
)
class VAEDecodeAudio(io.ComfyNodeV3):
class VAEDecodeAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="VAEDecodeAudio_V3",
category="latent/audio",
inputs=[
@@ -209,10 +215,10 @@ class VAEDecodeAudio(io.ComfyNodeV3):
return io.NodeOutput({"waveform": audio, "sample_rate": 44100})
class VAEEncodeAudio(io.ComfyNodeV3):
class VAEEncodeAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="VAEEncodeAudio_V3",
category="latent/audio",
inputs=[
@@ -232,109 +238,7 @@ class VAEEncodeAudio(io.ComfyNodeV3):
return io.NodeOutput({"samples": vae.encode(waveform.movedim(1, -1))})
def _save_audio(cls, audio, filename_prefix="ComfyUI", format="flac", quality="128k") -> io.NodeOutput:
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
filename_prefix, folder_paths.get_output_directory()
)
# Prepare metadata dictionary
metadata = {}
if not args.disable_metadata:
if cls.hidden.prompt is not None:
metadata["prompt"] = json.dumps(cls.hidden.prompt)
if cls.hidden.extra_pnginfo is not None:
for x in cls.hidden.extra_pnginfo:
metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])
# Opus supported sample rates
OPUS_RATES = [8000, 12000, 16000, 24000, 48000]
results = []
for batch_number, waveform in enumerate(audio["waveform"].cpu()):
filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
file = f"{filename_with_batch_num}_{counter:05}_.{format}"
output_path = os.path.join(full_output_folder, file)
# Use original sample rate initially
sample_rate = audio["sample_rate"]
# Handle Opus sample rate requirements
if format == "opus":
if sample_rate > 48000:
sample_rate = 48000
elif sample_rate not in OPUS_RATES:
# Find the next highest supported rate
for rate in sorted(OPUS_RATES):
if rate > sample_rate:
sample_rate = rate
break
if sample_rate not in OPUS_RATES: # Fallback if still not supported
sample_rate = 48000
# Resample if necessary
if sample_rate != audio["sample_rate"]:
waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)
# Create output with specified format
output_buffer = BytesIO()
output_container = av.open(output_buffer, mode="w", format=format)
# Set metadata on the container
for key, value in metadata.items():
output_container.metadata[key] = value
# Set up the output stream with appropriate properties
if format == "opus":
out_stream = output_container.add_stream("libopus", rate=sample_rate)
if quality == "64k":
out_stream.bit_rate = 64000
elif quality == "96k":
out_stream.bit_rate = 96000
elif quality == "128k":
out_stream.bit_rate = 128000
elif quality == "192k":
out_stream.bit_rate = 192000
elif quality == "320k":
out_stream.bit_rate = 320000
elif format == "mp3":
out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
if quality == "V0":
# TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
out_stream.codec_context.qscale = 1
elif quality == "128k":
out_stream.bit_rate = 128000
elif quality == "320k":
out_stream.bit_rate = 320000
else: # format == "flac":
out_stream = output_container.add_stream("flac", rate=sample_rate)
frame = av.AudioFrame.from_ndarray(
waveform.movedim(0, 1).reshape(1, -1).float().numpy(),
format="flt",
layout="mono" if waveform.shape[0] == 1 else "stereo",
)
frame.sample_rate = sample_rate
frame.pts = 0
output_container.mux(out_stream.encode(frame))
# Flush encoder
output_container.mux(out_stream.encode(None))
# Close containers
output_container.close()
# Write the output to file
output_buffer.seek(0)
with open(output_path, "wb") as f:
f.write(output_buffer.getbuffer())
results.append(ui.SavedResult(file, subfolder, io.FolderType.output))
counter += 1
return io.NodeOutput(ui={"audio": results})
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
ConditioningStableAudio,
EmptyLatentAudio,
LoadAudio,

View File

@@ -135,10 +135,10 @@ def get_camera_motion(angle, T, speed, n=81):
return RT
class WanCameraEmbedding(io.ComfyNodeV3):
class WanCameraEmbedding(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanCameraEmbedding_V3",
category="camera",
inputs=[

View File

@@ -6,10 +6,10 @@ import comfy.model_management
from comfy_api.v3 import io
class Canny(io.ComfyNodeV3):
class Canny(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="Canny_V3",
category="image/preprocessors",
inputs=[

View File

@@ -21,10 +21,10 @@ def optimized_scale(positive, negative):
return st_star.reshape([positive.shape[0]] + [1] * (positive.ndim - 1))
class CFGNorm(io.ComfyNodeV3):
class CFGNorm(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CFGNorm_V3",
category="advanced/guidance",
inputs=[
@@ -52,10 +52,10 @@ class CFGNorm(io.ComfyNodeV3):
return io.NodeOutput(m)
class CFGZeroStar(io.ComfyNodeV3):
class CFGZeroStar(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CFGZeroStar_V3",
category="advanced/guidance",
inputs=[

View File

@@ -4,10 +4,10 @@ import nodes
from comfy_api.v3 import io
class CLIPTextEncodeSDXL(io.ComfyNodeV3):
class CLIPTextEncodeSDXL(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="CLIPTextEncodeSDXL_V3",
category="advanced/conditioning",
inputs=[
@@ -48,10 +48,10 @@ class CLIPTextEncodeSDXL(io.ComfyNodeV3):
return io.NodeOutput(conditioning)
class CLIPTextEncodeSDXLRefiner(io.ComfyNodeV3):
class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="CLIPTextEncodeSDXLRefiner_V3",
category="advanced/conditioning",
inputs=[

View File

@@ -112,10 +112,10 @@ def porter_duff_composite(
return out_image, out_alpha
class JoinImageWithAlpha(io.ComfyNodeV3):
class JoinImageWithAlpha(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="JoinImageWithAlpha_V3",
display_name="Join Image with Alpha _V3",
category="mask/compositing",
@@ -138,10 +138,10 @@ class JoinImageWithAlpha(io.ComfyNodeV3):
return io.NodeOutput(torch.stack(out_images))
class PorterDuffImageComposite(io.ComfyNodeV3):
class PorterDuffImageComposite(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PorterDuffImageComposite_V3",
display_name="Porter-Duff Image Composite _V3",
category="mask/compositing",
@@ -199,10 +199,10 @@ class PorterDuffImageComposite(io.ComfyNodeV3):
return io.NodeOutput(torch.stack(out_images), torch.stack(out_alphas))
class SplitImageWithAlpha(io.ComfyNodeV3):
class SplitImageWithAlpha(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SplitImageWithAlpha_V3",
display_name="Split Image with Alpha _V3",
category="mask/compositing",

View File

@@ -3,10 +3,10 @@ from __future__ import annotations
from comfy_api.v3 import io
class CLIPTextEncodeControlnet(io.ComfyNodeV3):
class CLIPTextEncodeControlnet(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CLIPTextEncodeControlnet_V3",
category="_for_testing/conditioning",
inputs=[
@@ -30,10 +30,10 @@ class CLIPTextEncodeControlnet(io.ComfyNodeV3):
return io.NodeOutput(c)
class T5TokenizerOptions(io.ComfyNodeV3):
class T5TokenizerOptions(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="T5TokenizerOptions_V3",
category="_for_testing/conditioning",
inputs=[

View File

@@ -3,10 +3,10 @@ from comfy.cldm.control_types import UNION_CONTROLNET_TYPES
from comfy_api.v3 import io
class ControlNetApplyAdvanced(io.ComfyNodeV3):
class ControlNetApplyAdvanced(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ControlNetApplyAdvanced_V3",
display_name="Apply ControlNet _V3",
category="conditioning/controlnet",
@@ -60,10 +60,10 @@ class ControlNetApplyAdvanced(io.ComfyNodeV3):
return io.NodeOutput(out[0], out[1])
class SetUnionControlNetType(io.ComfyNodeV3):
class SetUnionControlNetType(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SetUnionControlNetType_V3",
category="conditioning/controlnet",
inputs=[
@@ -90,7 +90,7 @@ class SetUnionControlNetType(io.ComfyNodeV3):
class ControlNetInpaintingAliMamaApply(ControlNetApplyAdvanced):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ControlNetInpaintingAliMamaApply_V3",
category="conditioning/controlnet",
inputs=[
@@ -134,7 +134,7 @@ class ControlNetInpaintingAliMamaApply(ControlNetApplyAdvanced):
)
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
ControlNetApplyAdvanced,
SetUnionControlNetType,
ControlNetInpaintingAliMamaApply,

View File

@@ -20,10 +20,10 @@ def vae_encode_with_padding(vae, image, width, height, length, padding=0):
return latent_temp[:, :, :latent_len]
class CosmosImageToVideoLatent(io.ComfyNodeV3):
class CosmosImageToVideoLatent(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CosmosImageToVideoLatent_V3",
category="conditioning/inpaint",
inputs=[
@@ -67,10 +67,10 @@ class CosmosImageToVideoLatent(io.ComfyNodeV3):
return io.NodeOutput(out_latent)
class CosmosPredict2ImageToVideoLatent(io.ComfyNodeV3):
class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CosmosPredict2ImageToVideoLatent_V3",
category="conditioning/inpaint",
inputs=[
@@ -116,10 +116,10 @@ class CosmosPredict2ImageToVideoLatent(io.ComfyNodeV3):
return io.NodeOutput(out_latent)
class EmptyCosmosLatentVideo(io.ComfyNodeV3):
class EmptyCosmosLatentVideo(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.SchemaV3:
return io.SchemaV3(
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="EmptyCosmosLatentVideo_V3",
category="latent/video",
inputs=[

View File

@@ -5,10 +5,10 @@ import torch
from comfy_api.v3 import io
class DifferentialDiffusion(io.ComfyNodeV3):
class DifferentialDiffusion(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="DifferentialDiffusion_V3",
display_name="Differential Diffusion _V3",
category="_for_testing",

View File

@@ -0,0 +1,34 @@
from __future__ import annotations
import node_helpers
from comfy_api.v3 import io
class ReferenceLatent(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ReferenceLatent_V3",
category="advanced/conditioning/edit_models",
description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
inputs=[
io.Conditioning.Input("conditioning"),
io.Latent.Input("latent", optional=True),
],
outputs=[
io.Conditioning.Output(),
]
)
@classmethod
def execute(cls, conditioning, latent=None):
if latent is not None:
conditioning = node_helpers.conditioning_set_values(
conditioning, {"reference_latents": [latent["samples"]]}, append=True
)
return io.NodeOutput(conditioning)
NODES_LIST = [
ReferenceLatent,
]

View File

@@ -25,10 +25,10 @@ PREFERED_KONTEXT_RESOLUTIONS = [
]
class CLIPTextEncodeFlux(io.ComfyNodeV3):
class CLIPTextEncodeFlux(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="CLIPTextEncodeFlux_V3",
category="advanced/conditioning/flux",
inputs=[
@@ -50,10 +50,10 @@ class CLIPTextEncodeFlux(io.ComfyNodeV3):
return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}))
class FluxDisableGuidance(io.ComfyNodeV3):
class FluxDisableGuidance(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FluxDisableGuidance_V3",
category="advanced/conditioning/flux",
description="This node completely disables the guidance embed on Flux and Flux like models",
@@ -71,10 +71,10 @@ class FluxDisableGuidance(io.ComfyNodeV3):
return io.NodeOutput(c)
class FluxGuidance(io.ComfyNodeV3):
class FluxGuidance(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FluxGuidance_V3",
category="advanced/conditioning/flux",
inputs=[
@@ -92,10 +92,10 @@ class FluxGuidance(io.ComfyNodeV3):
return io.NodeOutput(c)
class FluxKontextImageScale(io.ComfyNodeV3):
class FluxKontextImageScale(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FluxKontextImageScale_V3",
category="advanced/conditioning/flux",
description="This node resizes the image to one that is more optimal for flux kontext.",

View File

@@ -28,10 +28,10 @@ def Fourier_filter(x, threshold, scale):
return x_filtered.to(x.dtype)
class FreeU(io.ComfyNodeV3):
class FreeU(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FreeU_V3",
category="model_patches/unet",
inputs=[
@@ -73,10 +73,10 @@ class FreeU(io.ComfyNodeV3):
return io.NodeOutput(m)
class FreeU_V2(io.ComfyNodeV3):
class FreeU_V2(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FreeU_V2_V3",
category="model_patches/unet",
inputs=[

View File

@@ -56,10 +56,10 @@ def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
return x_filtered
class FreSca(io.ComfyNodeV3):
class FreSca(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FreSca_V3",
display_name="FreSca _V3",
category="_for_testing",

View File

@@ -336,10 +336,10 @@ NOISE_LEVELS = {
}
class GITSScheduler(io.ComfyNodeV3):
class GITSScheduler(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="GITSScheduler_V3",
category="sampling/custom_sampling/schedulers",
inputs=[

View File

@@ -0,0 +1,71 @@
from __future__ import annotations
import comfy.model_management
import comfy.sd
import folder_paths
from comfy_api.v3 import io
class CLIPTextEncodeHiDream(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="CLIPTextEncodeHiDream_V3",
category="advanced/conditioning",
inputs=[
io.Clip.Input("clip"),
io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
io.String.Input("clip_g", multiline=True, dynamic_prompts=True),
io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
io.String.Input("llama", multiline=True, dynamic_prompts=True),
],
outputs=[
io.Conditioning.Output(),
]
)
@classmethod
def execute(cls, clip, clip_l, clip_g, t5xxl, llama):
tokens = clip.tokenize(clip_g)
tokens["l"] = clip.tokenize(clip_l)["l"]
tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
tokens["llama"] = clip.tokenize(llama)["llama"]
return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
class QuadrupleCLIPLoader(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="QuadrupleCLIPLoader_V3",
category="advanced/loaders",
description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
inputs=[
io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name3", options=folder_paths.get_filename_list("text_encoders")),
io.Combo.Input("clip_name4", options=folder_paths.get_filename_list("text_encoders")),
],
outputs=[
io.Clip.Output(),
]
)
@classmethod
def execute(cls, clip_name1, clip_name2, clip_name3, clip_name4):
clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
clip_path4 = folder_paths.get_full_path_or_raise("text_encoders", clip_name4)
return io.NodeOutput(
comfy.sd.load_clip(
ckpt_paths=[clip_path1, clip_path2, clip_path3, clip_path4],
embedding_directory=folder_paths.get_folder_paths("embeddings"),
)
)
NODES_LIST = [
CLIPTextEncodeHiDream,
QuadrupleCLIPLoader,
]

View File

@@ -13,10 +13,10 @@ from comfy_api.v3 import io, ui
from server import PromptServer
class GetImageSize(io.ComfyNodeV3):
class GetImageSize(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="GetImageSize_V3",
display_name="Get Image Size _V3",
description="Returns width and height of the image, and passes it through unchanged.",
@@ -46,10 +46,10 @@ class GetImageSize(io.ComfyNodeV3):
return io.NodeOutput(width, height, batch_size)
class ImageAddNoise(io.ComfyNodeV3):
class ImageAddNoise(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageAddNoise_V3",
display_name="Image Add Noise _V3",
category="image",
@@ -79,10 +79,10 @@ class ImageAddNoise(io.ComfyNodeV3):
return io.NodeOutput(s)
class ImageCrop(io.ComfyNodeV3):
class ImageCrop(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageCrop_V3",
display_name="Image Crop _V3",
category="image/transform",
@@ -105,10 +105,10 @@ class ImageCrop(io.ComfyNodeV3):
return io.NodeOutput(image[:, y:to_y, x:to_x, :])
class ImageFlip(io.ComfyNodeV3):
class ImageFlip(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageFlip_V3",
display_name="Image Flip _V3",
category="image/transform",
@@ -129,10 +129,10 @@ class ImageFlip(io.ComfyNodeV3):
return io.NodeOutput(image)
class ImageFromBatch(io.ComfyNodeV3):
class ImageFromBatch(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageFromBatch_V3",
display_name="Image From Batch _V3",
category="image/batch",
@@ -153,10 +153,10 @@ class ImageFromBatch(io.ComfyNodeV3):
return io.NodeOutput(s)
class ImageRotate(io.ComfyNodeV3):
class ImageRotate(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageRotate_V3",
display_name="Image Rotate _V3",
category="image/transform",
@@ -180,12 +180,12 @@ class ImageRotate(io.ComfyNodeV3):
return io.NodeOutput(torch.rot90(image, k=rotate_by, dims=[2, 1]))
class ImageStitch(io.ComfyNodeV3):
class ImageStitch(io.ComfyNode):
"""Upstreamed from https://github.com/kijai/ComfyUI-KJNodes"""
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageStitch_V3",
display_name="Image Stitch _V3",
description="Stitches image2 to image1 in the specified direction. "
@@ -350,10 +350,10 @@ class ImageStitch(io.ComfyNodeV3):
return io.NodeOutput(torch.cat(images, dim=concat_dim))
class LoadImage(io.ComfyNodeV3):
class LoadImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LoadImage_V3",
display_name="Load Image _V3",
category="image",
@@ -438,10 +438,10 @@ class LoadImage(io.ComfyNodeV3):
return True
class LoadImageOutput(io.ComfyNodeV3):
class LoadImageOutput(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LoadImageOutput_V3",
display_name="Load Image (from Outputs) _V3",
description="Load an image from the output folder. "
@@ -527,10 +527,10 @@ class LoadImageOutput(io.ComfyNodeV3):
return True
class PreviewImage(io.ComfyNodeV3):
class PreviewImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PreviewImage_V3",
display_name="Preview Image _V3",
description="Preview the input images.",
@@ -547,10 +547,10 @@ class PreviewImage(io.ComfyNodeV3):
return io.NodeOutput(ui=ui.PreviewImage(images, cls=cls))
class RepeatImageBatch(io.ComfyNodeV3):
class RepeatImageBatch(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="RepeatImageBatch_V3",
display_name="Repeat Image Batch _V3",
category="image/batch",
@@ -566,10 +566,10 @@ class RepeatImageBatch(io.ComfyNodeV3):
return io.NodeOutput(image.repeat((amount, 1, 1, 1)))
class ResizeAndPadImage(io.ComfyNodeV3):
class ResizeAndPadImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ResizeAndPadImage_V3",
display_name="Resize and Pad Image _V3",
category="image/transform",
@@ -611,10 +611,10 @@ class ResizeAndPadImage(io.ComfyNodeV3):
return io.NodeOutput(padded.permute(0, 2, 3, 1))
class SaveAnimatedPNG(io.ComfyNodeV3):
class SaveAnimatedPNG(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveAnimatedPNG_V3",
display_name="Save Animated PNG _V3",
category="image/animation",
@@ -630,23 +630,23 @@ class SaveAnimatedPNG(io.ComfyNodeV3):
@classmethod
def execute(cls, images, fps, compress_level, filename_prefix="ComfyUI") -> io.NodeOutput:
-        result = ui.ImageSaveHelper.save_animated_png(
-            images=images,
-            filename_prefix=filename_prefix,
-            folder_type=io.FolderType.output,
-            cls=cls,
-            fps=fps,
-            compress_level=compress_level,
-        )
-        return io.NodeOutput(ui={"images": [result], "animated": (len(images) != 1,)})
+        return io.NodeOutput(
+            ui=ui.ImageSaveHelper.get_save_animated_png_ui(
+                images=images,
+                filename_prefix=filename_prefix,
+                cls=cls,
+                fps=fps,
+                compress_level=compress_level,
+            )
+        )
class SaveAnimatedWEBP(io.ComfyNodeV3):
class SaveAnimatedWEBP(io.ComfyNode):
COMPRESS_METHODS = {"default": 4, "fastest": 0, "slowest": 6}
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveAnimatedWEBP_V3",
display_name="Save Animated WEBP _V3",
category="image/animation",
@@ -664,23 +664,23 @@ class SaveAnimatedWEBP(io.ComfyNodeV3):
@classmethod
def execute(cls, images, fps, filename_prefix, lossless, quality, method) -> io.NodeOutput:
-        result = ui.ImageSaveHelper.save_animated_webp(
-            images=images,
-            filename_prefix=filename_prefix,
-            folder_type=io.FolderType.output,
-            cls=cls,
-            fps=fps,
-            lossless=lossless,
-            quality=quality,
-            method=cls.COMPRESS_METHODS.get(method)
-        )
-        return io.NodeOutput(ui={"images": [result], "animated": (len(images) != 1,)})
+        return io.NodeOutput(
+            ui=ui.ImageSaveHelper.get_save_animated_webp_ui(
+                images=images,
+                filename_prefix=filename_prefix,
+                cls=cls,
+                fps=fps,
+                lossless=lossless,
+                quality=quality,
+                method=cls.COMPRESS_METHODS.get(method)
+            )
+        )
class SaveImage(io.ComfyNodeV3):
class SaveImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveImage_V3",
display_name="Save Image _V3",
description="Saves the input images to your ComfyUI output directory.",
@@ -703,17 +703,12 @@ class SaveImage(io.ComfyNodeV3):
@classmethod
def execute(cls, images, filename_prefix="ComfyUI") -> io.NodeOutput:
-        results = ui.ImageSaveHelper.save_images(
-            images,
-            filename_prefix=filename_prefix,
-            folder_type=io.FolderType.output,
-            cls=cls,
-            compress_level=4,
-        )
-        return io.NodeOutput(ui={"images": results})
+        return io.NodeOutput(
+            ui=ui.ImageSaveHelper.get_save_images_ui(images, filename_prefix=filename_prefix, cls=cls, compress_level=4)
+        )
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
GetImageSize,
ImageAddNoise,
ImageCrop,

View File

@@ -17,10 +17,10 @@ def reshape_latent_to(target_shape, latent, repeat_batch=True):
return latent
class LatentAdd(io.ComfyNodeV3):
class LatentAdd(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentAdd_V3",
category="latent/advanced",
inputs=[
@@ -44,10 +44,10 @@ class LatentAdd(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class LatentApplyOperation(io.ComfyNodeV3):
class LatentApplyOperation(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentApplyOperation_V3",
category="latent/advanced/operations",
is_experimental=True,
@@ -69,10 +69,10 @@ class LatentApplyOperation(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class LatentApplyOperationCFG(io.ComfyNodeV3):
class LatentApplyOperationCFG(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentApplyOperationCFG_V3",
category="latent/advanced/operations",
is_experimental=True,
@@ -101,10 +101,10 @@ class LatentApplyOperationCFG(io.ComfyNodeV3):
return io.NodeOutput(m)
class LatentBatch(io.ComfyNodeV3):
class LatentBatch(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentBatch_V3",
category="latent/batch",
inputs=[
@@ -130,10 +130,10 @@ class LatentBatch(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class LatentBatchSeedBehavior(io.ComfyNodeV3):
class LatentBatchSeedBehavior(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentBatchSeedBehavior_V3",
category="latent/advanced",
inputs=[
@@ -159,10 +159,10 @@ class LatentBatchSeedBehavior(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class LatentInterpolate(io.ComfyNodeV3):
class LatentInterpolate(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentInterpolate_V3",
category="latent/advanced",
inputs=[
@@ -198,10 +198,10 @@ class LatentInterpolate(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class LatentMultiply(io.ComfyNodeV3):
class LatentMultiply(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentMultiply_V3",
category="latent/advanced",
inputs=[
@@ -222,10 +222,10 @@ class LatentMultiply(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class LatentOperationSharpen(io.ComfyNodeV3):
class LatentOperationSharpen(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentOperationSharpen_V3",
category="latent/advanced/operations",
is_experimental=True,
@@ -264,10 +264,10 @@ class LatentOperationSharpen(io.ComfyNodeV3):
return io.NodeOutput(sharpen)
class LatentOperationTonemapReinhard(io.ComfyNodeV3):
class LatentOperationTonemapReinhard(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentOperationTonemapReinhard_V3",
category="latent/advanced/operations",
is_experimental=True,
@@ -299,10 +299,10 @@ class LatentOperationTonemapReinhard(io.ComfyNodeV3):
return io.NodeOutput(tonemap_reinhard)
class LatentSubtract(io.ComfyNodeV3):
class LatentSubtract(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentSubtract_V3",
category="latent/advanced",
inputs=[

View File

@@ -86,10 +86,10 @@ def preprocess(image: torch.Tensor, crf=29):
return torch.tensor(image_array, dtype=image.dtype, device=image.device) / 255.0
class EmptyLTXVLatentVideo(io.ComfyNodeV3):
class EmptyLTXVLatentVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="EmptyLTXVLatentVideo_V3",
category="latent/video/ltxv",
inputs=[
@@ -112,13 +112,13 @@ class EmptyLTXVLatentVideo(io.ComfyNodeV3):
return io.NodeOutput({"samples": latent})
class LTXVAddGuide(io.ComfyNodeV3):
class LTXVAddGuide(io.ComfyNode):
NUM_PREFIX_FRAMES = 2
PATCHIFIER = SymmetricPatchifier(1)
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LTXVAddGuide_V3",
category="conditioning/video_models",
inputs=[
@@ -275,10 +275,10 @@ class LTXVAddGuide(io.ComfyNodeV3):
return latent_image, noise_mask
class LTXVConditioning(io.ComfyNodeV3):
class LTXVConditioning(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LTXVConditioning_V3",
category="conditioning/video_models",
inputs=[
@@ -299,10 +299,10 @@ class LTXVConditioning(io.ComfyNodeV3):
return io.NodeOutput(positive, negative)
class LTXVCropGuides(io.ComfyNodeV3):
class LTXVCropGuides(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LTXVCropGuides_V3",
category="conditioning/video_models",
inputs=[
@@ -335,10 +335,10 @@ class LTXVCropGuides(io.ComfyNodeV3):
return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})
class LTXVImgToVideo(io.ComfyNodeV3):
class LTXVImgToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LTXVImgToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -383,10 +383,10 @@ class LTXVImgToVideo(io.ComfyNodeV3):
return io.NodeOutput(positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask})
class LTXVPreprocess(io.ComfyNodeV3):
class LTXVPreprocess(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LTXVPreprocess_V3",
category="image",
inputs=[
@@ -408,10 +408,10 @@ class LTXVPreprocess(io.ComfyNodeV3):
return io.NodeOutput(torch.stack(output_images))
class LTXVScheduler(io.ComfyNodeV3):
class LTXVScheduler(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LTXVScheduler_V3",
category="sampling/custom_sampling/schedulers",
inputs=[
@@ -471,10 +471,10 @@ class LTXVScheduler(io.ComfyNodeV3):
return io.NodeOutput(sigmas)
class ModelSamplingLTXV(io.ComfyNodeV3):
class ModelSamplingLTXV(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ModelSamplingLTXV_V3",
category="advanced/model",
inputs=[

View File

@@ -57,10 +57,10 @@ def composite(destination, source, x, y, mask=None, multiplier=8, resize_source=
return destination
class CropMask(io.ComfyNodeV3):
class CropMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="CropMask_V3",
display_name="Crop Mask _V3",
category="mask",
@@ -80,10 +80,10 @@ class CropMask(io.ComfyNodeV3):
return io.NodeOutput(mask[:, y : y + height, x : x + width])
class FeatherMask(io.ComfyNodeV3):
class FeatherMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="FeatherMask_V3",
display_name="Feather Mask _V3",
category="mask",
@@ -125,10 +125,10 @@ class FeatherMask(io.ComfyNodeV3):
return io.NodeOutput(output)
class GrowMask(io.ComfyNodeV3):
class GrowMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="GrowMask_V3",
display_name="Grow Mask _V3",
category="mask",
@@ -158,16 +158,16 @@ class GrowMask(io.ComfyNodeV3):
return io.NodeOutput(torch.stack(out, dim=0))
class ImageColorToMask(io.ComfyNodeV3):
class ImageColorToMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageColorToMask_V3",
display_name="Image Color to Mask _V3",
category="mask",
inputs=[
io.Image.Input("image"),
io.Int.Input("color", default=0, min=0, max=0xFFFFFF, display_mode=io.NumberDisplay.color),
io.Int.Input("color", default=0, min=0, max=0xFFFFFF),
],
outputs=[io.Mask.Output()],
)
@@ -183,10 +183,10 @@ class ImageColorToMask(io.ComfyNodeV3):
return io.NodeOutput(torch.where(temp == color, 1.0, 0).float())
class ImageCompositeMasked(io.ComfyNodeV3):
class ImageCompositeMasked(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageCompositeMasked_V3",
display_name="Image Composite Masked _V3",
category="image",
@@ -209,12 +209,12 @@ class ImageCompositeMasked(io.ComfyNodeV3):
return io.NodeOutput(output)
class ImageToMask(io.ComfyNodeV3):
class ImageToMask(io.ComfyNode):
CHANNELS = ["red", "green", "blue", "alpha"]
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageToMask_V3",
display_name="Convert Image to Mask _V3",
category="mask",
@@ -230,10 +230,10 @@ class ImageToMask(io.ComfyNodeV3):
return io.NodeOutput(image[:, :, :, cls.CHANNELS.index(channel)])
class InvertMask(io.ComfyNodeV3):
class InvertMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="InvertMask_V3",
display_name="Invert Mask _V3",
category="mask",
@@ -248,10 +248,10 @@ class InvertMask(io.ComfyNodeV3):
return io.NodeOutput(1.0 - mask)
class LatentCompositeMasked(io.ComfyNodeV3):
class LatentCompositeMasked(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="LatentCompositeMasked_V3",
display_name="Latent Composite Masked _V3",
category="latent",
@@ -275,10 +275,10 @@ class LatentCompositeMasked(io.ComfyNodeV3):
return io.NodeOutput(output)
class MaskComposite(io.ComfyNodeV3):
class MaskComposite(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="MaskComposite_V3",
display_name="Mask Composite _V3",
category="mask",
@@ -335,7 +335,7 @@ class MaskComposite(io.ComfyNodeV3):
return io.NodeOutput(torch.clamp(output, 0.0, 1.0))
class MaskPreview(io.ComfyNodeV3):
class MaskPreview(io.ComfyNode):
"""Mask Preview - original implement in ComfyUI_essentials.
https://github.com/cubiq/ComfyUI_essentials/blob/9d9f4bedfc9f0321c19faf71855e228c93bd0dc9/mask.py#L81
@@ -344,7 +344,7 @@ class MaskPreview(io.ComfyNodeV3):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="MaskPreview_V3",
display_name="Preview Mask _V3",
category="mask",
@@ -360,10 +360,10 @@ class MaskPreview(io.ComfyNodeV3):
return io.NodeOutput(ui=ui.PreviewMask(masks))
class MaskToImage(io.ComfyNodeV3):
class MaskToImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="MaskToImage_V3",
display_name="Convert Mask to Image _V3",
category="mask",
@@ -378,10 +378,10 @@ class MaskToImage(io.ComfyNodeV3):
return io.NodeOutput(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3))
class SolidMask(io.ComfyNodeV3):
class SolidMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SolidMask_V3",
display_name="Solid Mask _V3",
category="mask",
@@ -398,10 +398,10 @@ class SolidMask(io.ComfyNodeV3):
return io.NodeOutput(torch.full((1, height, width), value, dtype=torch.float32, device="cpu"))
class ThresholdMask(io.ComfyNodeV3):
class ThresholdMask(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ThresholdMask_V3",
display_name="Threshold Mask _V3",
category="mask",
@@ -417,7 +417,7 @@ class ThresholdMask(io.ComfyNodeV3):
return io.NodeOutput((mask > value).float())
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
CropMask,
FeatherMask,
GrowMask,

View File

@@ -0,0 +1,38 @@
from __future__ import annotations
import torch
import comfy.model_management
import nodes
from comfy_api.v3 import io
class EmptyMochiLatentVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="EmptyMochiLatentVideo_V3",
category="latent/video",
inputs=[
io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("length", default=25, min=7, max=nodes.MAX_RESOLUTION, step=6),
io.Int.Input("batch_size", default=1, min=1, max=4096),
],
outputs=[
io.Latent.Output(),
],
)
@classmethod
def execute(cls, width, height, length, batch_size=1):
latent = torch.zeros(
[batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8],
device=comfy.model_management.intermediate_device(),
)
return io.NodeOutput({"samples": latent})
NODES_LIST = [
EmptyMochiLatentVideo,
]
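A quick shape check of the default Mochi latent above (hypothetical standalone run, with the intermediate device replaced by the default CPU tensor):

import torch

width, height, length, batch_size = 848, 480, 25, 1
latent = torch.zeros([batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8])
print(latent.shape)  # torch.Size([1, 12, 5, 60, 106])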

View File

@@ -0,0 +1,387 @@
from __future__ import annotations
import torch
import comfy.latent_formats
import comfy.model_sampling
import comfy.sd
import node_helpers
import nodes
from comfy_api.v3 import io
class LCM(comfy.model_sampling.EPS):
def calculate_denoised(self, sigma, model_output, model_input):
timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
x0 = model_input - model_output * sigma
sigma_data = 0.5
scaled_timestep = timestep * 10.0 #timestep_scaling
c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
return c_out * x0 + c_skip * model_input
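A quick numeric check (plain Python, hypothetical values) of the consistency boundary condition encoded by the c_skip/c_out weights above: at timestep 0, c_skip is 1 and c_out is 0, so the denoised output equals model_input:

sigma_data = 0.5
for timestep in (0.0, 10.0, 999.0):
    scaled_timestep = timestep * 10.0
    c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
    c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
    print(timestep, round(c_skip, 6), round(c_out, 6))  # t=0 -> (1.0, 0.0)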
class ModelSamplingDiscreteDistilled(comfy.model_sampling.ModelSamplingDiscrete):
original_timesteps = 50
def __init__(self, model_config=None, zsnr=None):
super().__init__(model_config, zsnr=zsnr)
self.skip_steps = self.num_timesteps // self.original_timesteps
sigmas_valid = torch.zeros((self.original_timesteps), dtype=torch.float32)
for x in range(self.original_timesteps):
sigmas_valid[self.original_timesteps - 1 - x] = self.sigmas[self.num_timesteps - 1 - x * self.skip_steps]
self.set_sigmas(sigmas_valid)
def timestep(self, sigma):
log_sigma = sigma.log()
dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
return (dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)).to(sigma.device)
def sigma(self, timestep):
t = torch.clamp(
((timestep.float().to(self.log_sigmas.device) - (self.skip_steps - 1)) / self.skip_steps).float(),
min=0,
max=(len(self.sigmas) - 1),
)
low_idx = t.floor().long()
high_idx = t.ceil().long()
w = t.frac()
log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
return log_sigma.exp().to(timestep.device)
class ModelComputeDtype(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelComputeDtype_V3",
category="advanced/debug/model",
inputs=[
io.Model.Input("model"),
io.Combo.Input("dtype", options=["default", "fp32", "fp16", "bf16"]),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, dtype):
m = model.clone()
m.set_model_compute_dtype(node_helpers.string_to_torch_dtype(dtype))
return io.NodeOutput(m)
class ModelSamplingContinuousEDM(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingContinuousEDM_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Combo.Input(
"sampling", options=["v_prediction", "edm", "edm_playground_v2.5", "eps", "cosmos_rflow"]
),
io.Float.Input("sigma_max", default=120.0, min=0.0, max=1000.0, step=0.001, round=False),
io.Float.Input("sigma_min", default=0.002, min=0.0, max=1000.0, step=0.001, round=False),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, sampling, sigma_max, sigma_min):
m = model.clone()
sampling_base = comfy.model_sampling.ModelSamplingContinuousEDM
latent_format = None
sigma_data = 1.0
if sampling == "eps":
sampling_type = comfy.model_sampling.EPS
elif sampling == "edm":
sampling_type = comfy.model_sampling.EDM
sigma_data = 0.5
elif sampling == "v_prediction":
sampling_type = comfy.model_sampling.V_PREDICTION
elif sampling == "edm_playground_v2.5":
sampling_type = comfy.model_sampling.EDM
sigma_data = 0.5
latent_format = comfy.latent_formats.SDXL_Playground_2_5()
elif sampling == "cosmos_rflow":
sampling_type = comfy.model_sampling.COSMOS_RFLOW
sampling_base = comfy.model_sampling.ModelSamplingCosmosRFlow
class ModelSamplingAdvanced(sampling_base, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config)
model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
m.add_object_patch("model_sampling", model_sampling)
if latent_format is not None:
m.add_object_patch("latent_format", latent_format)
return io.NodeOutput(m)
class ModelSamplingContinuousV(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingContinuousV_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Combo.Input("sampling", options=["v_prediction"]),
io.Float.Input("sigma_max", default=500.0, min=0.0, max=1000.0, step=0.001, round=False),
io.Float.Input("sigma_min", default=0.03, min=0.0, max=1000.0, step=0.001, round=False),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, sampling, sigma_max, sigma_min):
m = model.clone()
sigma_data = 1.0
if sampling == "v_prediction":
sampling_type = comfy.model_sampling.V_PREDICTION
class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousV, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config)
model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
m.add_object_patch("model_sampling", model_sampling)
return io.NodeOutput(m)
class ModelSamplingDiscrete(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingDiscrete_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Combo.Input("sampling", options=["eps", "v_prediction", "lcm", "x0", "img_to_img"]),
io.Boolean.Input("zsnr", default=False),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, sampling, zsnr):
m = model.clone()
sampling_base = comfy.model_sampling.ModelSamplingDiscrete
if sampling == "eps":
sampling_type = comfy.model_sampling.EPS
elif sampling == "v_prediction":
sampling_type = comfy.model_sampling.V_PREDICTION
elif sampling == "lcm":
sampling_type = LCM
sampling_base = ModelSamplingDiscreteDistilled
elif sampling == "x0":
sampling_type = comfy.model_sampling.X0
elif sampling == "img_to_img":
sampling_type = comfy.model_sampling.IMG_TO_IMG
class ModelSamplingAdvanced(sampling_base, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config, zsnr=zsnr)
m.add_object_patch("model_sampling", model_sampling)
return io.NodeOutput(m)
class ModelSamplingFlux(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingFlux_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Float.Input("max_shift", default=1.15, min=0.0, max=100.0, step=0.01),
io.Float.Input("base_shift", default=0.5, min=0.0, max=100.0, step=0.01),
io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=8),
io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=8),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, max_shift, base_shift, width, height):
m = model.clone()
x1 = 256
x2 = 4096
mm = (max_shift - base_shift) / (x2 - x1)
b = base_shift - mm * x1
shift = (width * height / (8 * 8 * 2 * 2)) * mm + b
sampling_base = comfy.model_sampling.ModelSamplingFlux
sampling_type = comfy.model_sampling.CONST
class ModelSamplingAdvanced(sampling_base, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config)
model_sampling.set_parameters(shift=shift)
m.add_object_patch("model_sampling", model_sampling)
return io.NodeOutput(m)
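A worked check of the shift interpolation above (hypothetical values): with the defaults, a 1024x1024 image yields 1024*1024/(8*8*2*2) = 4096 latent tokens, which is exactly x2, so the computed shift lands on max_shift:

max_shift, base_shift = 1.15, 0.5
x1, x2 = 256, 4096
mm = (max_shift - base_shift) / (x2 - x1)
b = base_shift - mm * x1
tokens = 1024 * 1024 / (8 * 8 * 2 * 2)  # 4096.0
print(tokens * mm + b)  # 1.15 (up to float rounding)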
class ModelSamplingSD3(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingSD3_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Float.Input("shift", default=3.0, min=0.0, max=100.0, step=0.01),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, shift, multiplier: int | float = 1000):
m = model.clone()
sampling_base = comfy.model_sampling.ModelSamplingDiscreteFlow
sampling_type = comfy.model_sampling.CONST
class ModelSamplingAdvanced(sampling_base, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config)
model_sampling.set_parameters(shift=shift, multiplier=multiplier)
m.add_object_patch("model_sampling", model_sampling)
return io.NodeOutput(m)
class ModelSamplingAuraFlow(ModelSamplingSD3):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingAuraFlow_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Float.Input("shift", default=1.73, min=0.0, max=100.0, step=0.01),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, shift, multiplier: int | float = 1.0):
return super().execute(model, shift, multiplier)
class ModelSamplingStableCascade(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ModelSamplingStableCascade_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Float.Input("shift", default=2.0, min=0.0, max=100.0, step=0.01),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, shift):
m = model.clone()
sampling_base = comfy.model_sampling.StableCascadeSampling
sampling_type = comfy.model_sampling.EPS
class ModelSamplingAdvanced(sampling_base, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config)
model_sampling.set_parameters(shift)
m.add_object_patch("model_sampling", model_sampling)
return io.NodeOutput(m)
class RescaleCFG(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="RescaleCFG_V3",
category="advanced/model",
inputs=[
io.Model.Input("model"),
io.Float.Input("multiplier", default=0.7, min=0.0, max=1.0, step=0.01),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(cls, model, multiplier):
def rescale_cfg(args):
cond = args["cond"]
uncond = args["uncond"]
cond_scale = args["cond_scale"]
sigma = args["sigma"]
sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1))
x_orig = args["input"]
#rescale cfg has to be done on v-pred model output
x = x_orig / (sigma * sigma + 1.0)
cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
uncond = ((x - (x_orig - uncond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
#rescalecfg
x_cfg = uncond + cond_scale * (cond - uncond)
ro_pos = torch.std(cond, dim=(1,2,3), keepdim=True)
ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True)
x_rescaled = x_cfg * (ro_pos / ro_cfg)
x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg
return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5)
m = model.clone()
m.set_model_sampler_cfg_function(rescale_cfg)
return io.NodeOutput(m)
NODES_LIST = [
ModelSamplingAuraFlow,
ModelComputeDtype,
ModelSamplingContinuousEDM,
ModelSamplingContinuousV,
ModelSamplingDiscrete,
ModelSamplingFlux,
ModelSamplingSD3,
ModelSamplingStableCascade,
RescaleCFG,
]
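A numeric sketch (hypothetical tensors) of the std-rescaling step in RescaleCFG above: x_rescaled is x_cfg scaled so that its per-sample standard deviation matches that of the positive prediction:

import torch

torch.manual_seed(0)
cond = torch.randn(1, 4, 8, 8)
uncond = torch.randn(1, 4, 8, 8)
cond_scale = 7.5
x_cfg = uncond + cond_scale * (cond - uncond)
ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)
x_rescaled = x_cfg * (ro_pos / ro_cfg)
print(float(ro_pos), float(torch.std(x_rescaled, dim=(1, 2, 3))))  # equal stds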

View File

@@ -0,0 +1,68 @@
from __future__ import annotations
import comfy.utils
from comfy_api.v3 import io
class PatchModelAddDownscale(io.ComfyNode):
UPSCALE_METHODS = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]
@classmethod
def define_schema(cls):
return io.Schema(
node_id="PatchModelAddDownscale_V3",
display_name="PatchModelAddDownscale (Kohya Deep Shrink) _V3",
category="model_patches/unet",
inputs=[
io.Model.Input("model"),
io.Int.Input("block_number", default=3, min=1, max=32, step=1),
io.Float.Input("downscale_factor", default=2.0, min=0.1, max=9.0, step=0.001),
io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
io.Float.Input("end_percent", default=0.35, min=0.0, max=1.0, step=0.001),
io.Boolean.Input("downscale_after_skip", default=True),
io.Combo.Input("downscale_method", options=cls.UPSCALE_METHODS),
io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS),
],
outputs=[
io.Model.Output(),
],
)
@classmethod
def execute(
cls, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method
):
model_sampling = model.get_model_object("model_sampling")
sigma_start = model_sampling.percent_to_sigma(start_percent)
sigma_end = model_sampling.percent_to_sigma(end_percent)
def input_block_patch(h, transformer_options):
if transformer_options["block"][1] == block_number:
sigma = transformer_options["sigmas"][0].item()
if sigma <= sigma_start and sigma >= sigma_end:
h = comfy.utils.common_upscale(
h,
round(h.shape[-1] * (1.0 / downscale_factor)),
round(h.shape[-2] * (1.0 / downscale_factor)),
downscale_method,
"disabled",
)
return h
def output_block_patch(h, hsp, transformer_options):
if h.shape[2] != hsp.shape[2]:
h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled")
return h, hsp
m = model.clone()
if downscale_after_skip:
m.set_model_input_block_patch_after_skip(input_block_patch)
else:
m.set_model_input_block_patch(input_block_patch)
m.set_model_output_block_patch(output_block_patch)
return io.NodeOutput(m)
NODES_LIST = [
PatchModelAddDownscale,
]

View File

@@ -16,10 +16,10 @@ import comfy.model_management
from comfy_api.v3 import io
class ImageRGBToYUV(io.ComfyNodeV3):
class ImageRGBToYUV(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageRGBToYUV_V3",
category="image/batch",
inputs=[
@@ -38,10 +38,10 @@ class ImageRGBToYUV(io.ComfyNodeV3):
return io.NodeOutput(out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image))
class ImageYUVToRGB(io.ComfyNodeV3):
class ImageYUVToRGB(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="ImageYUVToRGB_V3",
category="image/batch",
inputs=[
@@ -60,10 +60,10 @@ class ImageYUVToRGB(io.ComfyNodeV3):
return io.NodeOutput(kornia.color.ycbcr_to_rgb(image.movedim(-1, 1)).movedim(1, -1))
class Morphology(io.ComfyNodeV3):
class Morphology(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="Morphology_V3",
display_name="ImageMorphology _V3",
category="image/postprocessing",

View File

@@ -26,10 +26,10 @@ NOISE_LEVELS = {
}
class OptimalStepsScheduler(io.ComfyNodeV3):
class OptimalStepsScheduler(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="OptimalStepsScheduler_V3",
category="sampling/custom_sampling/schedulers",
inputs=[

View File

@@ -10,10 +10,10 @@ from comfy_api.v3 import io
#My modified one here is more basic but has fewer chances of breaking with ComfyUI updates.
class PerturbedAttentionGuidance(io.ComfyNodeV3):
class PerturbedAttentionGuidance(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PerturbedAttentionGuidance_V3",
category="model_patches/unet",
inputs=[

View File

@@ -81,10 +81,10 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
return cfg_result
class PerpNegGuider(io.ComfyNodeV3):
class PerpNegGuider(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PerpNegGuider_V3",
category="_for_testing",
inputs=[

View File

@@ -0,0 +1,205 @@
from __future__ import annotations
import torch
import torch.nn as nn
import comfy.clip_model
import comfy.clip_vision
import comfy.model_management
import comfy.ops
import comfy.utils
import folder_paths
from comfy_api.v3 import io
# code for model from:
# https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0
VISION_CONFIG_DICT = {
"hidden_size": 1024,
"image_size": 224,
"intermediate_size": 4096,
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 24,
"patch_size": 14,
"projection_dim": 768,
"hidden_act": "quick_gelu",
"model_type": "clip_vision_model",
}
class MLP(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=comfy.ops):
super().__init__()
if use_residual:
assert in_dim == out_dim
self.layernorm = operations.LayerNorm(in_dim)
self.fc1 = operations.Linear(in_dim, hidden_dim)
self.fc2 = operations.Linear(hidden_dim, out_dim)
self.use_residual = use_residual
self.act_fn = nn.GELU()
def forward(self, x):
residual = x
x = self.layernorm(x)
x = self.fc1(x)
x = self.act_fn(x)
x = self.fc2(x)
if self.use_residual:
x = x + residual
return x
class FuseModule(nn.Module):
def __init__(self, embed_dim, operations):
super().__init__()
self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations)
self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations)
self.layer_norm = operations.LayerNorm(embed_dim)
def fuse_fn(self, prompt_embeds, id_embeds):
stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
stacked_id_embeds = self.mlp2(stacked_id_embeds)
stacked_id_embeds = self.layer_norm(stacked_id_embeds)
return stacked_id_embeds
def forward(
self,
prompt_embeds,
id_embeds,
class_tokens_mask,
) -> torch.Tensor:
# id_embeds shape: [b, max_num_inputs, 1, 2048]
id_embeds = id_embeds.to(prompt_embeds.dtype)
num_inputs = class_tokens_mask.sum().unsqueeze(0) # TODO: check for training case
batch_size, max_num_inputs = id_embeds.shape[:2]
# seq_length: 77
seq_length = prompt_embeds.shape[1]
# flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
flat_id_embeds = id_embeds.view(
-1, id_embeds.shape[-2], id_embeds.shape[-1]
)
# valid_id_mask [b*max_num_inputs]
valid_id_mask = (
torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
< num_inputs[:, None]
)
valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]
prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
class_tokens_mask = class_tokens_mask.view(-1)
valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
# slice out the image token embeddings
image_token_embeds = prompt_embeds[class_tokens_mask]
stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
return prompt_embeds.view(batch_size, seq_length, -1)
class PhotoMakerIDEncoder(comfy.clip_model.CLIPVisionModelProjection):
def __init__(self):
self.load_device = comfy.model_management.text_encoder_device()
offload_device = comfy.model_management.text_encoder_offload_device()
dtype = comfy.model_management.text_encoder_dtype(self.load_device)
super().__init__(VISION_CONFIG_DICT, dtype, offload_device, comfy.ops.manual_cast)
self.visual_projection_2 = comfy.ops.manual_cast.Linear(1024, 1280, bias=False)
self.fuse_module = FuseModule(2048, comfy.ops.manual_cast)
def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
b, num_inputs, c, h, w = id_pixel_values.shape
id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)
shared_id_embeds = self.vision_model(id_pixel_values)[2]
id_embeds = self.visual_projection(shared_id_embeds)
id_embeds_2 = self.visual_projection_2(shared_id_embeds)
id_embeds = id_embeds.view(b, num_inputs, 1, -1)
id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)
id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
return self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)
class PhotoMakerEncode(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="PhotoMakerEncode_V3",
category="_for_testing/photomaker",
inputs=[
io.Photomaker.Input("photomaker"),
io.Image.Input("image"),
io.Clip.Input("clip"),
io.String.Input("text", multiline=True, dynamic_prompts=True, default="photograph of photomaker"),
],
outputs=[
io.Conditioning.Output(),
],
is_experimental=True,
)
@classmethod
def execute(cls, photomaker, image, clip, text):
special_token = "photomaker"
pixel_values = comfy.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float()
try:
index = text.split(" ").index(special_token) + 1
except ValueError:
index = -1
tokens = clip.tokenize(text, return_word_ids=True)
out_tokens = {}
for k in tokens:
out_tokens[k] = []
for t in tokens[k]:
f = list(filter(lambda x: x[2] != index, t))
while len(f) < len(t):
f.append(t[-1])
out_tokens[k].append(f)
cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True)
if index > 0:
token_index = index - 1
num_id_images = 1
class_tokens_mask = [True if token_index <= i < token_index+num_id_images else False for i in range(77)]
out = photomaker(
id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device),
class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0),
)
else:
out = cond
return io.NodeOutput([[out, {"pooled_output": pooled}]])
class PhotoMakerLoader(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="PhotoMakerLoader_V3",
category="_for_testing/photomaker",
inputs=[
io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")),
],
outputs=[
io.Photomaker.Output(),
],
is_experimental=True,
)
@classmethod
def execute(cls, photomaker_model_name):
photomaker_model_path = folder_paths.get_full_path_or_raise("photomaker", photomaker_model_name)
photomaker_model = PhotoMakerIDEncoder()
data = comfy.utils.load_torch_file(photomaker_model_path, safe_load=True)
if "id_encoder" in data:
data = data["id_encoder"]
photomaker_model.load_state_dict(data)
return io.NodeOutput(photomaker_model)
NODES_LIST = [
PhotoMakerEncode,
PhotoMakerLoader,
]

View File

@@ -0,0 +1,33 @@
from __future__ import annotations
import nodes
from comfy_api.v3 import io
class CLIPTextEncodePixArtAlpha(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="CLIPTextEncodePixArtAlpha_V3",
category="advanced/conditioning",
description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
inputs=[
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.String.Input("text", multiline=True, dynamic_prompts=True),
io.Clip.Input("clip"),
],
outputs=[
io.Conditioning.Output(),
],
)
@classmethod
def execute(cls, width, height, text, clip):
tokens = clip.tokenize(text)
return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}))
NODES_LIST = [
CLIPTextEncodePixArtAlpha,
]

View File

@@ -0,0 +1,255 @@
from __future__ import annotations
import math
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
import comfy.model_management
import comfy.utils
import node_helpers
from comfy_api.v3 import io
def gaussian_kernel(kernel_size: int, sigma: float, device=None):
x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size, device=device), torch.linspace(-1, 1, kernel_size, device=device), indexing="ij")
d = torch.sqrt(x * x + y * y)
g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
return g / g.sum()
class Blend(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ImageBlend_V3",
category="image/postprocessing",
inputs=[
io.Image.Input("image1"),
io.Image.Input("image2"),
io.Float.Input("blend_factor", default=0.5, min=0.0, max=1.0, step=0.01),
io.Combo.Input("blend_mode", options=["normal", "multiply", "screen", "overlay", "soft_light", "difference"]),
],
outputs=[
io.Image.Output(),
],
)
@classmethod
def execute(cls, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
image1, image2 = node_helpers.image_alpha_fix(image1, image2)
image2 = image2.to(image1.device)
if image1.shape != image2.shape:
image2 = image2.permute(0, 3, 1, 2)
image2 = comfy.utils.common_upscale(
image2, image1.shape[2], image1.shape[1], upscale_method="bicubic", crop="center"
)
image2 = image2.permute(0, 2, 3, 1)
blended_image = cls.blend_mode(image1, image2, blend_mode)
blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
blended_image = torch.clamp(blended_image, 0, 1)
return io.NodeOutput(blended_image)
@classmethod
def blend_mode(cls, img1, img2, mode):
if mode == "normal":
return img2
elif mode == "multiply":
return img1 * img2
elif mode == "screen":
return 1 - (1 - img1) * (1 - img2)
elif mode == "overlay":
return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
elif mode == "soft_light":
return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (cls.g(img1) - img1))
elif mode == "difference":
return img1 - img2
raise ValueError(f"Unsupported blend mode: {mode}")
@classmethod
def g(cls, x):
return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))
class Blur(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ImageBlur_V3",
category="image/postprocessing",
inputs=[
io.Image.Input("image"),
io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
],
outputs=[
io.Image.Output(),
],
)
@classmethod
def execute(cls, image: torch.Tensor, blur_radius: int, sigma: float):
if blur_radius == 0:
return io.NodeOutput(image)
image = image.to(comfy.model_management.get_torch_device())
batch_size, height, width, channels = image.shape
kernel_size = blur_radius * 2 + 1
kernel = gaussian_kernel(kernel_size, sigma, device=image.device).repeat(channels, 1, 1).unsqueeze(1)
image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C)
padded_image = F.pad(image, (blur_radius,blur_radius,blur_radius,blur_radius), "reflect")
blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius]
blurred = blurred.permute(0, 2, 3, 1)
return io.NodeOutput(blurred.to(comfy.model_management.intermediate_device()))
class ImageScaleToTotalPixels(io.ComfyNode):
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
crop_methods = ["disabled", "center"]
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ImageScaleToTotalPixels_V3",
category="image/upscaling",
inputs=[
io.Image.Input("image"),
io.Combo.Input("upscale_method", options=cls.upscale_methods),
io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01),
],
outputs=[
io.Image.Output(),
],
)
@classmethod
def execute(cls, image, upscale_method, megapixels):
samples = image.movedim(-1,1)
total = int(megapixels * 1024 * 1024)
scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
width = round(samples.shape[3] * scale_by)
height = round(samples.shape[2] * scale_by)
s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
return io.NodeOutput(s.movedim(1,-1))
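A worked example of the megapixel arithmetic above (hypothetical numbers), scaling 1920x1080 down to 1.0 megapixels:

import math

width, height = 1920, 1080
total = int(1.0 * 1024 * 1024)
scale_by = math.sqrt(total / (width * height))
print(round(width * scale_by), round(height * scale_by))  # 1365 768 (~1.0 MP)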
class Quantize(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageQuantize_V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("colors", default=256, min=1, max=256, step=1),
                io.Combo.Input("dither", options=["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @staticmethod
    def bayer(im, pal_im, order):
        def normalized_bayer_matrix(n):
            if n == 0:
                return np.zeros((1, 1), "float32")
            q = 4 ** n
            m = q * normalized_bayer_matrix(n - 1)
            return np.bmat(((m - 1.5, m + 0.5), (m + 1.5, m - 0.5))) / q

        num_colors = len(pal_im.getpalette()) // 3
        spread = 2 * 256 / num_colors
        bayer_n = int(math.log2(order))
        bayer_matrix = torch.from_numpy(spread * normalized_bayer_matrix(bayer_n) + 0.5)

        result = torch.from_numpy(np.array(im).astype(np.float32))
        tw = math.ceil(result.shape[0] / bayer_matrix.shape[0])
        th = math.ceil(result.shape[1] / bayer_matrix.shape[1])
        tiled_matrix = bayer_matrix.tile(tw, th).unsqueeze(-1)
        result.add_(tiled_matrix[:result.shape[0], :result.shape[1]]).clamp_(0, 255)
        result = result.to(dtype=torch.uint8)

        im = Image.fromarray(result.cpu().numpy())
        return im.quantize(palette=pal_im, dither=Image.Dither.NONE)

    @classmethod
    def execute(cls, image: torch.Tensor, colors: int, dither: str):
        batch_size, height, width, _ = image.shape
        result = torch.zeros_like(image)

        for b in range(batch_size):
            im = Image.fromarray((image[b] * 255).to(torch.uint8).numpy(), mode='RGB')
            pal_im = im.quantize(colors=colors)  # Required as described in https://github.com/python-pillow/Pillow/issues/5836
            if dither == "none":
                quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
            elif dither == "floyd-steinberg":
                quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.FLOYDSTEINBERG)
            elif dither.startswith("bayer"):
                order = int(dither.split('-')[-1])
                quantized_image = cls.bayer(im, pal_im, order)
            quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
            result[b] = quantized_array

        return io.NodeOutput(result)
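
# The recursion in normalized_bayer_matrix() above builds the classic
# ordered-dither index matrix, rescaled to be zero-mean. Illustrative check
# for order 2 (bayer_n = 1):
#   >>> normalized_bayer_matrix(1)
#   matrix([[-0.375,  0.125],
#           [ 0.375, -0.125]])
# which equals the standard Bayer matrix [[0, 2], [3, 1]] / 4 - 0.375. Tiling
# this threshold pattern (scaled by `spread`) over the image before the
# palette lookup is what produces the ordered-dither effect.
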
class Sharpen(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageSharpen_V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.01),
                io.Float.Input("alpha", default=1.0, min=0.0, max=5.0, step=0.01),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image: torch.Tensor, sharpen_radius: int, sigma: float, alpha: float):
        if sharpen_radius == 0:
            return io.NodeOutput(image)
        batch_size, height, width, channels = image.shape
        image = image.to(comfy.model_management.get_torch_device())

        kernel_size = sharpen_radius * 2 + 1
        kernel = gaussian_kernel(kernel_size, sigma, device=image.device) * -(alpha * 10)
        center = kernel_size // 2
        kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0
        kernel = kernel.repeat(channels, 1, 1).unsqueeze(1)

        tensor_image = image.permute(0, 3, 1, 2)  # Torch wants (B, C, H, W); we use (B, H, W, C)
        tensor_image = F.pad(tensor_image, (sharpen_radius, sharpen_radius, sharpen_radius, sharpen_radius), "reflect")
        sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels)[:, :, sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]
        sharpened = sharpened.permute(0, 2, 3, 1)

        result = torch.clamp(sharpened, 0, 1)
        return io.NodeOutput(result.to(comfy.model_management.intermediate_device()))
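
# Why the center-tap adjustment above works (assuming gaussian_kernel sums to
# 1): before the adjustment the kernel is -10*alpha*G, whose sum is -10*alpha;
# adding (1 - sum) to the center tap turns it into delta + 10*alpha*(delta - G),
# where delta is the identity (impulse) kernel. The convolution therefore
# computes
#   image + 10*alpha * (image - blur(image))
# i.e. classic unsharp masking, with alpha scaling the high-frequency boost.
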
NODES_LIST = [
    Blend,
    Blur,
    ImageScaleToTotalPixels,
    Quantize,
    Sharpen,
]


@@ -5,14 +5,14 @@ import json
from comfy_api.v3 import io, ui
class PreviewAny(io.ComfyNodeV3):
class PreviewAny(io.ComfyNode):
"""Originally implement from https://github.com/rgthree/rgthree-comfy/blob/main/py/display_any.py
upstream requested in https://github.com/Kosinkadink/rfcs/blob/main/rfcs/0000-corenodes.md#preview-nodes"""
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PreviewAny_V3", # frontend expects "PreviewAny" to work
display_name="Preview Any _V3", # frontend ignores "display_name" for this node
description="Preview any type of data by converting it to a readable text format.",
@@ -42,6 +42,6 @@ class PreviewAny(io.ComfyNodeV3):
return io.NodeOutput(ui=ui.PreviewText(value))
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
PreviewAny,
]


@@ -5,10 +5,10 @@ import sys
from comfy_api.v3 import io
class String(io.ComfyNodeV3):
class String(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PrimitiveString_V3",
display_name="String _V3",
category="utils/primitive",
@@ -23,10 +23,10 @@ class String(io.ComfyNodeV3):
return io.NodeOutput(value)
class StringMultiline(io.ComfyNodeV3):
class StringMultiline(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PrimitiveStringMultiline_V3",
display_name="String (Multiline) _V3",
category="utils/primitive",
@@ -41,10 +41,10 @@ class StringMultiline(io.ComfyNodeV3):
return io.NodeOutput(value)
class Int(io.ComfyNodeV3):
class Int(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PrimitiveInt_V3",
display_name="Int _V3",
category="utils/primitive",
@@ -59,10 +59,10 @@ class Int(io.ComfyNodeV3):
return io.NodeOutput(value)
class Float(io.ComfyNodeV3):
class Float(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PrimitiveFloat_V3",
display_name="Float _V3",
category="utils/primitive",
@@ -77,10 +77,10 @@ class Float(io.ComfyNodeV3):
return io.NodeOutput(value)
class Boolean(io.ComfyNodeV3):
class Boolean(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="PrimitiveBoolean_V3",
display_name="Boolean _V3",
category="utils/primitive",
@@ -95,7 +95,7 @@ class Boolean(io.ComfyNodeV3):
return io.NodeOutput(value)
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
String,
StringMultiline,
Int,


@@ -5,10 +5,10 @@ import torch
from comfy_api.v3 import io
class ImageRebatch(io.ComfyNodeV3):
class ImageRebatch(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="RebatchImages_V3",
display_name="Rebatch Images _V3",
category="image/batch",
@@ -38,10 +38,10 @@ class ImageRebatch(io.ComfyNodeV3):
return io.NodeOutput(output_list)
class LatentRebatch(io.ComfyNodeV3):
class LatentRebatch(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="RebatchLatents_V3",
display_name="Rebatch Latents _V3",
category="latent/batch",


@@ -111,10 +111,10 @@ def gaussian_blur_2d(img, kernel_size, sigma):
return F.conv2d(img, kernel2d, groups=img.shape[-3])
class SelfAttentionGuidance(io.ComfyNodeV3):
class SelfAttentionGuidance(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SelfAttentionGuidance_V3",
display_name="Self-Attention Guidance _V3",
category="_for_testing",


@@ -10,10 +10,10 @@ from comfy_api.v3 import io
from comfy_extras.v3.nodes_slg import SkipLayerGuidanceDiT
class CLIPTextEncodeSD3(io.ComfyNodeV3):
class CLIPTextEncodeSD3(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="CLIPTextEncodeSD3_V3",
category="advanced/conditioning",
inputs=[
@@ -54,10 +54,10 @@ class CLIPTextEncodeSD3(io.ComfyNodeV3):
return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
class EmptySD3LatentImage(io.ComfyNodeV3):
class EmptySD3LatentImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="EmptySD3LatentImage_V3",
category="latent/sd3",
inputs=[
@@ -86,7 +86,7 @@ class SkipLayerGuidanceSD3(SkipLayerGuidanceDiT):
"""
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SkipLayerGuidanceSD3_V3",
category="advanced/guidance",
inputs=[
@@ -109,10 +109,10 @@ class SkipLayerGuidanceSD3(SkipLayerGuidanceDiT):
)
class TripleCLIPLoader(io.ComfyNodeV3):
class TripleCLIPLoader(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="TripleCLIPLoader_V3",
category="advanced/loaders",
description="[Recipes]\n\nsd3: clip-l, clip-g, t5",


@@ -6,10 +6,10 @@ import comfy.utils
from comfy_api.v3 import io
class SD_4XUpscale_Conditioning(io.ComfyNodeV3):
class SD_4XUpscale_Conditioning(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SD_4XUpscale_Conditioning_V3",
category="conditioning/upscale_diffusion",
inputs=[


@@ -7,7 +7,7 @@ import comfy.samplers
from comfy_api.v3 import io
class SkipLayerGuidanceDiT(io.ComfyNodeV3):
class SkipLayerGuidanceDiT(io.ComfyNode):
"""
Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers.
Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
@@ -16,7 +16,7 @@ class SkipLayerGuidanceDiT(io.ComfyNodeV3):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SkipLayerGuidanceDiT_V3",
category="advanced/guidance",
description="Generic version of SkipLayerGuidance node that can be used on every DiT model.",
@@ -92,12 +92,12 @@ class SkipLayerGuidanceDiT(io.ComfyNodeV3):
return io.NodeOutput(m)
class SkipLayerGuidanceDiTSimple(io.ComfyNodeV3):
class SkipLayerGuidanceDiTSimple(io.ComfyNode):
"""Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass."""
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SkipLayerGuidanceDiTSimple_V3",
category="advanced/guidance",
description="Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.",


@@ -23,10 +23,10 @@ import nodes
from comfy_api.v3 import io
class StableCascade_EmptyLatentImage(io.ComfyNodeV3):
class StableCascade_EmptyLatentImage(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="StableCascade_EmptyLatentImage_V3",
category="latent/stable_cascade",
inputs=[
@@ -48,10 +48,10 @@ class StableCascade_EmptyLatentImage(io.ComfyNodeV3):
return io.NodeOutput({"samples": c_latent}, {"samples": b_latent})
class StableCascade_StageC_VAEEncode(io.ComfyNodeV3):
class StableCascade_StageC_VAEEncode(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="StableCascade_StageC_VAEEncode_V3",
category="latent/stable_cascade",
inputs=[
@@ -79,10 +79,10 @@ class StableCascade_StageC_VAEEncode(io.ComfyNodeV3):
return io.NodeOutput({"samples": c_latent}, {"samples": b_latent})
class StableCascade_StageB_Conditioning(io.ComfyNodeV3):
class StableCascade_StageB_Conditioning(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="StableCascade_StageB_Conditioning_V3",
category="conditioning/stable_cascade",
inputs=[
@@ -105,10 +105,10 @@ class StableCascade_StageB_Conditioning(io.ComfyNodeV3):
return io.NodeOutput(c)
class StableCascade_SuperResolutionControlnet(io.ComfyNodeV3):
class StableCascade_SuperResolutionControlnet(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="StableCascade_SuperResolutionControlnet_V3",
category="_for_testing/stable_cascade",
is_experimental=True,
@@ -135,7 +135,7 @@ class StableCascade_SuperResolutionControlnet(io.ComfyNodeV3):
return io.NodeOutput(controlnet_input, {"samples": c_latent}, {"samples": b_latent})
NODES_LIST: list[type[io.ComfyNodeV3]] = [
NODES_LIST: list[type[io.ComfyNode]] = [
StableCascade_EmptyLatentImage,
StableCascade_StageB_Conditioning,
StableCascade_StageC_VAEEncode,


@@ -15,10 +15,10 @@ from comfy_api.util import VideoCodec, VideoComponents, VideoContainer
from comfy_api.v3 import io, ui
class CreateVideo(io.ComfyNodeV3):
class CreateVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="CreateVideo_V3",
display_name="Create Video _V3",
category="image/video",
@@ -44,10 +44,10 @@ class CreateVideo(io.ComfyNodeV3):
))
class GetVideoComponents(io.ComfyNodeV3):
class GetVideoComponents(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="GetVideoComponents_V3",
display_name="Get Video Components _V3",
category="image/video",
@@ -68,13 +68,13 @@ class GetVideoComponents(io.ComfyNodeV3):
return io.NodeOutput(components.images, components.audio, float(components.frame_rate))
class LoadVideo(io.ComfyNodeV3):
class LoadVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
input_dir = folder_paths.get_input_directory()
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
files = folder_paths.filter_files_content_types(files, ["video"])
return io.SchemaV3(
return io.Schema(
node_id="LoadVideo_V3",
display_name="Load Video _V3",
category="image/video",
@@ -105,10 +105,10 @@ class LoadVideo(io.ComfyNodeV3):
return True
class SaveVideo(io.ComfyNodeV3):
class SaveVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveVideo_V3",
display_name="Save Video _V3",
category="image/video",
@@ -152,10 +152,10 @@ class SaveVideo(io.ComfyNodeV3):
return io.NodeOutput(ui=ui.PreviewVideo([ui.SavedResult(file, subfolder, io.FolderType.output)]))
class SaveWEBM(io.ComfyNodeV3):
class SaveWEBM(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="SaveWEBM_V3",
category="image/video",
is_experimental=True,


@@ -11,10 +11,10 @@ import nodes
from comfy_api.v3 import io
class TrimVideoLatent(io.ComfyNodeV3):
class TrimVideoLatent(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="TrimVideoLatent_V3",
category="latent/video",
is_experimental=True,
@@ -36,10 +36,10 @@ class TrimVideoLatent(io.ComfyNodeV3):
return io.NodeOutput(samples_out)
class WanCameraImageToVideo(io.ComfyNodeV3):
class WanCameraImageToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanCameraImageToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -88,10 +88,10 @@ class WanCameraImageToVideo(io.ComfyNodeV3):
return io.NodeOutput(positive, negative, out_latent)
class WanFirstLastFrameToVideo(io.ComfyNodeV3):
class WanFirstLastFrameToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanFirstLastFrameToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -159,10 +159,10 @@ class WanFirstLastFrameToVideo(io.ComfyNodeV3):
return io.NodeOutput(positive, negative, out_latent)
class WanFunControlToVideo(io.ComfyNodeV3):
class WanFunControlToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanFunControlToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -213,10 +213,10 @@ class WanFunControlToVideo(io.ComfyNodeV3):
return io.NodeOutput(positive, negative, out_latent)
class WanFunInpaintToVideo(io.ComfyNodeV3):
class WanFunInpaintToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanFunInpaintToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -244,10 +244,10 @@ class WanFunInpaintToVideo(io.ComfyNodeV3):
return flfv.execute(positive, negative, vae, width, height, length, batch_size, start_image=start_image, end_image=end_image, clip_vision_start_image=clip_vision_output)
class WanImageToVideo(io.ComfyNodeV3):
class WanImageToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanImageToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -292,10 +292,10 @@ class WanImageToVideo(io.ComfyNodeV3):
return io.NodeOutput(positive, negative, out_latent)
class WanPhantomSubjectToVideo(io.ComfyNodeV3):
class WanPhantomSubjectToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanPhantomSubjectToVideo_V3",
category="conditioning/video_models",
inputs=[
@@ -336,10 +336,10 @@ class WanPhantomSubjectToVideo(io.ComfyNodeV3):
return io.NodeOutput(positive, cond2, negative, out_latent)
class WanVaceToVideo(io.ComfyNodeV3):
class WanVaceToVideo(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WanVaceToVideo_V3",
category="conditioning/video_models",
is_experimental=True,


@@ -10,10 +10,10 @@ import nodes
from comfy_api.v3 import io
class WebcamCapture(io.ComfyNodeV3):
class WebcamCapture(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.SchemaV3(
return io.Schema(
node_id="WebcamCapture_V3",
display_name="Webcam Capture _V3",
category="image",
@@ -89,4 +89,4 @@ class WebcamCapture(io.ComfyNodeV3):
return True
NODES_LIST: list[type[io.ComfyNodeV3]] = [WebcamCapture]
NODES_LIST: list[type[io.ComfyNode]] = [WebcamCapture]


@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.3.44"
__version__ = "0.3.45"


@@ -32,7 +32,7 @@ from comfy_execution.graph_utils import GraphBuilder, is_link
from comfy_execution.validation import validate_node_input
from comfy_execution.progress import get_progress_state, reset_progress_state, add_progress_handler, WebUIProgressHandler
from comfy_execution.utils import CurrentNodeContext
from comfy_api.internal import ComfyNodeInternal, first_real_override, is_class, make_locked_method_func
from comfy_api.internal import _ComfyNodeInternal, first_real_override, is_class, make_locked_method_func
from comfy_api.v3 import io
@@ -60,7 +60,7 @@ class IsChangedCache:
class_def = nodes.NODE_CLASS_MAPPINGS[class_type]
has_is_changed = False
is_changed_name = None
if issubclass(class_def, ComfyNodeInternal) and first_real_override(class_def, "fingerprint_inputs") is not None:
if issubclass(class_def, _ComfyNodeInternal) and first_real_override(class_def, "fingerprint_inputs") is not None:
has_is_changed = True
is_changed_name = "fingerprint_inputs"
elif hasattr(class_def, "IS_CHANGED"):
@@ -136,7 +136,7 @@ class CacheSet:
SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")
def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
is_v3 = issubclass(class_def, ComfyNodeInternal)
is_v3 = issubclass(class_def, _ComfyNodeInternal)
if is_v3:
valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True)
else:
@@ -245,7 +245,7 @@ async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, f
if pre_execute_cb is not None and index is not None:
pre_execute_cb(index)
# V3
if isinstance(obj, ComfyNodeInternal) or (is_class(obj) and issubclass(obj, ComfyNodeInternal)):
if isinstance(obj, _ComfyNodeInternal) or (is_class(obj) and issubclass(obj, _ComfyNodeInternal)):
# if is just a class, then assign no resources or state, just create clone
if is_class(obj):
type_obj = obj
@@ -476,7 +476,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
obj = class_def()
caches.objects.set(unique_id, obj)
if issubclass(class_def, ComfyNodeInternal):
if issubclass(class_def, _ComfyNodeInternal):
lazy_status_present = first_real_override(class_def, "check_lazy_status") is not None
else:
lazy_status_present = getattr(obj, "check_lazy_status", None) is not None
@@ -761,7 +761,7 @@ async def validate_inputs(prompt_id, prompt, item, validated):
validate_function_inputs = []
validate_has_kwargs = False
if issubclass(obj_class, ComfyNodeInternal):
if issubclass(obj_class, _ComfyNodeInternal):
validate_function_name = "validate_inputs"
validate_function = first_real_override(obj_class, validate_function_name)
else:


@@ -115,6 +115,15 @@ if os.name == "nt":
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
if __name__ == "__main__":
if args.default_device is not None:
default_dev = args.default_device
devices = list(range(32))
devices.remove(default_dev)
devices.insert(0, default_dev)
devices = ','.join(map(str, devices))
os.environ['CUDA_VISIBLE_DEVICES'] = str(devices)
os.environ['HIP_VISIBLE_DEVICES'] = str(devices)
if args.cuda_device is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
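
The new default_device handling above simply rotates the visible-device list so the chosen GPU comes first. A quick illustration of the string it builds (hypothetical run with --default-device 2):

    default_dev = 2
    devices = list(range(32))
    devices.remove(default_dev)
    devices.insert(0, default_dev)
    ','.join(map(str, devices))  # "2,0,1,3,4,5,...,31"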


@@ -2162,7 +2162,7 @@ def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes
# V3 node definition
elif getattr(module, "NODES_LIST", None) is not None:
for node_cls in module.NODES_LIST:
node_cls: io.ComfyNodeV3
node_cls: io.ComfyNode
schema = node_cls.GET_SCHEMA()
if schema.node_id not in ignore:
NODE_CLASS_MAPPINGS[schema.node_id] = node_cls
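
For context, the NODES_LIST loading path above means a V3 extension module only needs to export a list of io.ComfyNode subclasses; the loader reads each node's id from its schema. A minimal hypothetical module (names are illustrative, not taken from this diff):

    from comfy_api.v3 import io

    class MyNode(io.ComfyNode):
        @classmethod
        def define_schema(cls):
            return io.Schema(node_id="MyNode_V3", category="utils", inputs=[], outputs=[])

        @classmethod
        def execute(cls):
            return io.NodeOutput()

    NODES_LIST: list[type[io.ComfyNode]] = [MyNode]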
@@ -2314,18 +2314,26 @@ def init_builtin_extra_nodes():
"v3/nodes_controlnet.py",
"v3/nodes_cosmos.py",
"v3/nodes_differential_diffusion.py",
"v3/nodes_edit_model.py",
"v3/nodes_flux.py",
"v3/nodes_freelunch.py",
"v3/nodes_fresca.py",
"v3/nodes_gits.py",
"v3/nodes_hidream.py",
"v3/nodes_images.py",
"v3/nodes_latent.py",
"v3/nodes_lt.py",
"v3/nodes_mask.py",
"v3/nodes_mochi.py",
"v3/nodes_model_advanced.py",
"v3/nodes_model_downscale.py",
"v3/nodes_morphology.py",
"v3/nodes_optimalsteps.py",
"v3/nodes_pag.py",
"v3/nodes_perpneg.py",
"v3/nodes_photomaker.py",
"v3/nodes_pixart.py",
"v3/nodes_post_processing.py",
"v3/nodes_preview_any.py",
"v3/nodes_primitive.py",
"v3/nodes_rebatch.py",


@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.3.44"
version = "0.3.45"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.9"


@@ -1,5 +1,5 @@
comfyui-frontend-package==1.23.4
comfyui-workflow-templates==0.1.36
comfyui-workflow-templates==0.1.39
comfyui-embedded-docs==0.2.4
torch
torchsde


@@ -30,7 +30,7 @@ from comfy_api import feature_flags
import node_helpers
from comfyui_version import __version__
from app.frontend_management import FrontendManager
from comfy_api.internal import ComfyNodeInternal
from comfy_api.internal import _ComfyNodeInternal
from app.user_manager import UserManager
from app.model_manager import ModelFileManager
@@ -590,7 +590,7 @@ class PromptServer():
def node_info(node_class):
obj_class = nodes.NODE_CLASS_MAPPINGS[node_class]
if issubclass(obj_class, ComfyNodeInternal):
if issubclass(obj_class, _ComfyNodeInternal):
return obj_class.GET_NODE_INFO_V1()
info = {}
info['input'] = obj_class.INPUT_TYPES()