Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-08-04 07:52:46 +08:00)

Compare commits: master...v3-definit (229 commits)
@@ -111,7 +111,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git
 ## Release Process
 
-ComfyUI follows a weekly release cycle targeting Friday but this regularly changes because of model releases or large changes to the codebase. There are three interconnected repositories:
+ComfyUI follows a weekly release cycle every Friday, with three interconnected repositories:
 
 1. **[ComfyUI Core](https://github.com/comfyanonymous/ComfyUI)**
    - Releases a new stable version (e.g., v0.7.0)
@@ -130,21 +130,10 @@ class ModelFileManager:
 
             for file_name in filenames:
                 try:
-                    full_path = os.path.join(dirpath, file_name)
-                    relative_path = os.path.relpath(full_path, directory)
-
-                    # Get file metadata
-                    file_info = {
-                        "name": relative_path,
-                        "pathIndex": pathIndex,
-                        "modified": os.path.getmtime(full_path), # Add modification time
-                        "created": os.path.getctime(full_path), # Add creation time
-                        "size": os.path.getsize(full_path) # Add file size
-                    }
-                    result.append(file_info)
-
-                except Exception as e:
-                    logging.warning(f"Warning: Unable to access {file_name}. Error: {e}. Skipping this file.")
+                    relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
+                    result.append(relative_path)
+                except:
+                    logging.warning(f"Warning: Unable to access {file_name}. Skipping this file.")
                     continue
 
             for d in subdirs:
@@ -155,7 +144,7 @@ class ModelFileManager:
                 logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
                 continue
 
-        return result, dirs, time.perf_counter()
+        return [{"name": f, "pathIndex": pathIndex} for f in result], dirs, time.perf_counter()
 
     def get_model_previews(self, filepath: str) -> list[str | BytesIO]:
         dirname = os.path.dirname(filepath)
@@ -20,15 +20,13 @@ class FileInfo(TypedDict):
     path: str
     size: int
     modified: int
-    created: int
 
 
 def get_file_info(path: str, relative_to: str) -> FileInfo:
     return {
         "path": os.path.relpath(path, relative_to).replace(os.sep, '/'),
         "size": os.path.getsize(path),
-        "modified": os.path.getmtime(path),
-        "created": os.path.getctime(path)
+        "modified": os.path.getmtime(path)
     }
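Both hunks above derive file metadata from the same three `os.path` calls. As reviewer context, here is a minimal self-contained sketch of that pattern; the helper name and paths are illustrative, not part of the diff. Note that `os.path.getmtime`/`getctime` return float epoch seconds, so the `modified: int` annotation in `FileInfo` is only approximate.

```python
import os

def describe_file(full_path: str, root: str) -> dict:
    """Illustrative stand-in for the metadata dicts built in the hunks above."""
    return {
        # Forward slashes keep the key stable across OSes, as in get_file_info.
        "name": os.path.relpath(full_path, root).replace(os.sep, "/"),
        "size": os.path.getsize(full_path),       # bytes
        "modified": os.path.getmtime(full_path),  # float seconds since the epoch
        "created": os.path.getctime(full_path),   # creation time on Windows, metadata change time on Unix
    }

print(describe_file(__file__, os.path.dirname(__file__)))
```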
@@ -43,6 +43,7 @@ if TYPE_CHECKING:
 
 def broadcast_image_to(tensor, target_batch_size, batched_number):
     current_batch_size = tensor.shape[0]
+    #print(current_batch_size, target_batch_size)
     if current_batch_size == 1:
         return tensor
@@ -58,8 +58,7 @@ def is_odd(n: int) -> bool:
 
 
 def nonlinearity(x):
-    # x * sigmoid(x)
-    return torch.nn.functional.silu(x)
+    return x * torch.sigmoid(x)
 
 
 def Normalize(in_channels, num_groups=32):
@@ -36,7 +36,7 @@ def get_timestep_embedding(timesteps, embedding_dim):
 
 def nonlinearity(x):
     # swish
-    return torch.nn.functional.silu(x)
+    return x*torch.sigmoid(x)
 
 
 def Normalize(in_channels, num_groups=32):
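Both files make the same swap: the fused `torch.nn.functional.silu` call is replaced by the explicit `x * torch.sigmoid(x)`. These are the same function (SiLU, a.k.a. swish), which a quick check confirms; the tensor shape below is arbitrary.

```python
import torch

x = torch.randn(2, 4, 8)  # arbitrary test tensor
# SiLU(x) is defined as x * sigmoid(x); the fused kernel and the
# explicit product agree to floating-point precision.
assert torch.allclose(torch.nn.functional.silu(x), x * torch.sigmoid(x), atol=1e-6)
```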
@@ -769,7 +769,8 @@ class CameraWanModel(WanModel):
         # embeddings
         x = self.patch_embedding(x.float()).to(x.dtype)
         if self.control_adapter is not None and camera_conditions is not None:
-            x = x + self.control_adapter(camera_conditions).to(x.dtype)
+            x_camera = self.control_adapter(camera_conditions).to(x.dtype)
+            x = x + x_camera
         grid_sizes = x.shape[2:]
         x = x.flatten(2).transpose(1, 2)
@@ -24,17 +24,12 @@ class CausalConv3d(ops.Conv3d):
                          self.padding[1], 2 * self.padding[0], 0)
         self.padding = (0, 0, 0)
 
-    def forward(self, x, cache_x=None, cache_list=None, cache_idx=None):
-        if cache_list is not None:
-            cache_x = cache_list[cache_idx]
-            cache_list[cache_idx] = None
-
+    def forward(self, x, cache_x=None):
         padding = list(self._padding)
         if cache_x is not None and self._padding[4] > 0:
             cache_x = cache_x.to(x.device)
             x = torch.cat([cache_x, x], dim=2)
             padding[4] -= cache_x.shape[2]
-            del cache_x
         x = F.pad(x, padding)
 
         return super().forward(x)
@@ -171,7 +166,7 @@ class ResidualBlock(nn.Module):
             if in_dim != out_dim else nn.Identity()
 
     def forward(self, x, feat_cache=None, feat_idx=[0]):
-        old_x = x
+        h = self.shortcut(x)
         for layer in self.residual:
             if isinstance(layer, CausalConv3d) and feat_cache is not None:
                 idx = feat_idx[0]
@@ -183,12 +178,12 @@ class ResidualBlock(nn.Module):
                         cache_x.device), cache_x
                     ],
                     dim=2)
-                x = layer(x, cache_list=feat_cache, cache_idx=idx)
+                x = layer(x, feat_cache[idx])
                 feat_cache[idx] = cache_x
                 feat_idx[0] += 1
             else:
                 x = layer(x)
-        return x + self.shortcut(old_x)
+        return x + h
 
 
 class AttentionBlock(nn.Module):
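The removed `forward` keywords (`cache_list`, `cache_idx`) let the caller hand over the cache slot itself so the conv can null it (`cache_list[cache_idx] = None`) as soon as the cached frames are consumed, rather than keeping `cache_x` alive in the caller. The caching trick itself rests on `torch.nn.functional.pad` ordering for 5-D input, where index 4 of the padding list is the front of the time axis; a small sketch of that invariant, with arbitrary shapes:

```python
import torch
import torch.nn.functional as F

# (N, C, T, H, W) video tensor; F.pad's list is ordered
# (W_left, W_right, H_left, H_right, T_front, T_back).
x = torch.randn(1, 3, 4, 8, 8)
padding = [0, 0, 0, 0, 2, 0]          # two causal frames of zero padding in front

cache_x = torch.randn(1, 3, 2, 8, 8)  # two frames cached from the previous chunk
x = torch.cat([cache_x, x], dim=2)    # prepend the real cached frames...
padding[4] -= cache_x.shape[2]        # ...and shrink the zero padding to match

assert F.pad(x, padding).shape[2] == 6  # 2 cached + 4 new frames, no extra zeros
```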
@@ -151,7 +151,7 @@ class ResidualBlock(nn.Module):
                     ],
                     dim=2,
                 )
-                x = layer(x, cache_list=feat_cache, cache_idx=idx)
+                x = layer(x, feat_cache[idx])
                 feat_cache[idx] = cache_x
                 feat_idx[0] += 1
             else:
@@ -106,12 +106,10 @@ def model_sampling(model_config, model_type):
     return ModelSampling(model_config)
 
 
-def convert_tensor(extra, dtype, device):
+def convert_tensor(extra, dtype):
     if hasattr(extra, "dtype"):
         if extra.dtype != torch.int and extra.dtype != torch.long:
-            extra = extra.to(dtype=dtype, device=device)
-        else:
-            extra = extra.to(device=device)
+            extra = extra.to(dtype)
     return extra
 
 
@@ -171,21 +169,20 @@ class BaseModel(torch.nn.Module):
             dtype = self.manual_cast_dtype
 
         xc = xc.to(dtype)
-        device = xc.device
         t = self.model_sampling.timestep(t).float()
         if context is not None:
-            context = context.to(dtype=dtype, device=device)
+            context = context.to(dtype)
 
         extra_conds = {}
         for o in kwargs:
             extra = kwargs[o]
 
             if hasattr(extra, "dtype"):
-                extra = convert_tensor(extra, dtype, device)
+                extra = convert_tensor(extra, dtype)
             elif isinstance(extra, list):
                 ex = []
                 for ext in extra:
-                    ex.append(convert_tensor(ext, dtype, device))
+                    ex.append(convert_tensor(ext, dtype))
                 extra = ex
             extra_conds[o] = extra
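The thrust of the master side of this hunk is threading the target `device` into `convert_tensor`, so the dtype cast and the device move happen in one `Tensor.to` call (and integer tensors are moved without being cast) instead of a cast followed by a separate transfer. A minimal sketch of the difference; the dtype and device targets are placeholders:

```python
import torch

extra = torch.ones(4)                               # stand-in for an extra-cond tensor
dtype, device = torch.float16, torch.device("cpu")  # placeholder targets

two_step = extra.to(dtype).to(device)            # cast, then a separate move
one_step = extra.to(dtype=dtype, device=device)  # fused cast + move, as on master

assert two_step.dtype == one_step.dtype and two_step.device == one_step.device
```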
@@ -403,6 +403,54 @@ class PreviewMask(PreviewImage):
         super().__init__(preview, animated, cls, **kwargs)
 
 
+# class UILatent(_UIOutput):
+#     def __init__(self, values: list[SavedResult | dict], **kwargs):
+#         output_dir = folder_paths.get_temp_directory()
+#         type = "temp"
+#         prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
+#         compress_level = 1
+#         filename_prefix = "ComfyUI"
+
+#         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+
+#         # support save metadata for latent sharing
+#         prompt_info = ""
+#         if prompt is not None:
+#             prompt_info = json.dumps(prompt)
+
+#         metadata = None
+#         if not args.disable_metadata:
+#             metadata = {"prompt": prompt_info}
+#             if extra_pnginfo is not None:
+#                 for x in extra_pnginfo:
+#                     metadata[x] = json.dumps(extra_pnginfo[x])
+
+#         file = f"{filename}_{counter:05}_.latent"
+
+#         results: list[FileLocator] = []
+#         results.append({
+#             "filename": file,
+#             "subfolder": subfolder,
+#             "type": "output"
+#         })
+
+#         file = os.path.join(full_output_folder, file)
+
+#         output = {}
+#         output["latent_tensor"] = samples["samples"].contiguous()
+#         output["latent_format_version_0"] = torch.tensor([])
+
+#         comfy.utils.save_torch_file(output, file, metadata=metadata)
+
+#         self.values = values
+
+#     def as_dict(self):
+#         return {
+#             "latents": self.values,
+#         }
+
+
 class PreviewAudio(_UIOutput):
     def __init__(self, audio: dict, cls: Type[ComfyNode] = None, **kwargs):
         self.values = AudioSaveHelper.save_audio(
comfy_extras/nodes_v3_test.py (new file, 276 lines)
@@ -0,0 +1,276 @@
+import torch
+import time
+from comfy_api.latest import io, ui, _io
+from comfy_api.latest import ComfyExtension
+import logging # noqa
+import comfy.utils
+import asyncio
+from typing_extensions import override
+
+
+@io.comfytype(io_type="XYZ")
+class XYZ(io.ComfyTypeIO):
+    Type = tuple[int,str]
+
+
+class V3TestNode(io.ComfyNode):
+    # NOTE: this is here just to test that state is not leaking
+    def __init__(self):
+        super().__init__()
+        self.hahajkunless = ";)"
+
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="V3_01_TestNode1",
+            display_name="V3 Test Node",
+            category="v3 nodes",
+            description="This is a funky V3 node test.",
+            inputs=[
+                io.Image.Input("image", display_name="new_image"),
+                XYZ.Input("xyz", optional=True),
+                io.Custom("JKL").Input("jkl", optional=True),
+                io.Mask.Input("mask", display_name="mask haha", optional=True),
+                io.Int.Input("some_int", display_name="new_name", min=0, max=127, default=42,
+                             tooltip="My tooltip 😎", display_mode=io.NumberDisplay.slider),
+                io.Combo.Input("combo", options=["a", "b", "c"], tooltip="This is a combo input"),
+                io.MultiCombo.Input("combo2", options=["a","b","c"]),
+                io.MultiType.Input(io.Int.Input("int_multitype", display_name="haha"), types=[io.Float]),
+                io.MultiType.Input("multitype", types=[io.Mask, io.Float, io.Int], optional=True),
+            ],
+            outputs=[
+                io.Int.Output(),
+                io.Image.Output(display_name="img🖼️", tooltip="This is an image"),
+            ],
+            hidden=[
+                io.Hidden.prompt,
+                io.Hidden.auth_token_comfy_org,
+                io.Hidden.unique_id,
+            ],
+            is_output_node=True,
+        )
+
+    @classmethod
+    def validate_inputs(cls, image: io.Image.Type, some_int: int, combo: io.Combo.Type, combo2: io.MultiCombo.Type, xyz: XYZ.Type=None, mask: io.Mask.Type=None, **kwargs):
+        if some_int < 0:
+            raise Exception("some_int must be greater than 0")
+        if combo == "c":
+            raise Exception("combo must be a or b")
+        return True
+
+    @classmethod
+    def execute(cls, image: io.Image.Type, some_int: int, combo: io.Combo.Type, combo2: io.MultiCombo.Type, xyz: XYZ.Type=None, mask: io.Mask.Type=None, **kwargs):
+        if hasattr(cls, "hahajkunless"):
+            raise Exception("The 'cls' variable leaked instance state between runs!")
+        if hasattr(cls, "doohickey"):
+            raise Exception("The 'cls' variable leaked state on class properties between runs!")
+        try:
+            cls.doohickey = "LOLJK"
+        except AttributeError:
+            pass
+        return io.NodeOutput(some_int, image, ui=ui.PreviewImage(image, cls=cls))
+
+
+# class V3LoraLoader(io.ComfyNode):
+#     @classmethod
+#     def define_schema(cls):
+#         return io.Schema(
+#             node_id="V3_LoraLoader",
+#             display_name="V3 LoRA Loader",
+#             category="v3 nodes",
+#             description="LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.",
+#             inputs=[
+#                 io.Model.Input("model", tooltip="The diffusion model the LoRA will be applied to."),
+#                 io.Clip.Input("clip", tooltip="The CLIP model the LoRA will be applied to."),
+#                 io.Combo.Input(
+#                     "lora_name",
+#                     options=folder_paths.get_filename_list("loras"),
+#                     tooltip="The name of the LoRA."
+#                 ),
+#                 io.Float.Input(
+#                     "strength_model",
+#                     default=1.0,
+#                     min=-100.0,
+#                     max=100.0,
+#                     step=0.01,
+#                     tooltip="How strongly to modify the diffusion model. This value can be negative."
+#                 ),
+#                 io.Float.Input(
+#                     "strength_clip",
+#                     default=1.0,
+#                     min=-100.0,
+#                     max=100.0,
+#                     step=0.01,
+#                     tooltip="How strongly to modify the CLIP model. This value can be negative."
+#                 ),
+#             ],
+#             outputs=[
+#                 io.Model.Output(),
+#                 io.Clip.Output(),
+#             ],
+#         )
+
+#     @classmethod
+#     def execute(cls, model: io.Model.Type, clip: io.Clip.Type, lora_name: str, strength_model: float, strength_clip: float, **kwargs):
+#         if strength_model == 0 and strength_clip == 0:
+#             return io.NodeOutput(model, clip)
+
+#         lora = cls.resources.get(resources.TorchDictFolderFilename("loras", lora_name))
+
+#         model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
+#         return io.NodeOutput(model_lora, clip_lora)
+
+
+class NInputsTest(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="V3_NInputsTest",
+            display_name="V3 N Inputs Test",
+            inputs=[
+                _io.AutogrowDynamic.Input("nmock", template_input=io.Image.Input("image"), min=1, max=3),
+                _io.AutogrowDynamic.Input("nmock2", template_input=io.Int.Input("int"), optional=True, min=1, max=4),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
+
+    @classmethod
+    def validate_inputs(cls, nmock, nmock2):
+        return True
+
+    @classmethod
+    def fingerprint_inputs(cls, nmock, nmock2):
+        return time.time()
+
+    @classmethod
+    def check_lazy_status(cls, **kwargs) -> list[str]:
+        need = [name for name in kwargs if kwargs[name] is None]
+        return need
+
+    @classmethod
+    def execute(cls, nmock, nmock2):
+        first_image = nmock[0]
+        all_images = []
+        for img in nmock:
+            if img.shape != first_image.shape:
+                img = img.movedim(-1,1)
+                img = comfy.utils.common_upscale(img, first_image.shape[2], first_image.shape[1], "lanczos", "center")
+                img = img.movedim(1,-1)
+            all_images.append(img)
+        combined_image = torch.cat(all_images, dim=0)
+        return io.NodeOutput(combined_image)
+
+
+class V3TestSleep(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="V3_TestSleep",
+            display_name="V3 Test Sleep",
+            category="_for_testing",
+            description="Test async sleep functionality.",
+            inputs=[
+                io.AnyType.Input("value", display_name="Value"),
+                io.Float.Input("seconds", display_name="Seconds", default=1.0, min=0.0, max=9999.0, step=0.01, tooltip="The amount of seconds to sleep."),
+            ],
+            outputs=[
+                io.AnyType.Output(),
+            ],
+            hidden=[
+                io.Hidden.unique_id,
+            ],
+            is_experimental=True,
+        )
+
+    @classmethod
+    async def execute(cls, value: io.AnyType.Type, seconds: io.Float.Type, **kwargs):
+        logging.info(f"V3TestSleep: {cls.hidden.unique_id}")
+        pbar = comfy.utils.ProgressBar(seconds, node_id=cls.hidden.unique_id)
+        start = time.time()
+        expiration = start + seconds
+        now = start
+        while now < expiration:
+            now = time.time()
+            pbar.update_absolute(now - start)
+            await asyncio.sleep(0.02)
+        return io.NodeOutput(value)
+
+
+class V3DummyStart(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="V3_DummyStart",
+            display_name="V3 Dummy Start",
+            category="v3 nodes",
+            description="This is a dummy start node.",
+            inputs=[],
+            outputs=[
+                io.Custom("XYZ").Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls):
+        return io.NodeOutput(None)
+
+
+class V3DummyEnd(io.ComfyNode):
+    COOL_VALUE = 123
+
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="V3_DummyEnd",
+            display_name="V3 Dummy End",
+            category="v3 nodes",
+            description="This is a dummy end node.",
+            inputs=[
+                io.Custom("XYZ").Input("xyz"),
+            ],
+            outputs=[],
+            is_output_node=True,
+        )
+
+    @classmethod
+    def custom_action(cls):
+        return 456
+
+    @classmethod
+    def execute(cls, xyz: io.Custom("XYZ").Type):
+        logging.info(f"V3DummyEnd: {cls.COOL_VALUE}")
+        logging.info(f"V3DummyEnd: {cls.custom_action()}")
+        return
+
+
+class V3DummyEndInherit(V3DummyEnd):
+    @classmethod
+    def define_schema(cls):
+        schema = super().define_schema()
+        schema.node_id = "V3_DummyEndInherit"
+        schema.display_name = "V3 Dummy End Inherit"
+        return schema
+
+    @classmethod
+    def execute(cls, xyz: io.Custom("XYZ").Type):
+        logging.info(f"V3DummyEndInherit: {cls.COOL_VALUE}")
+        return super().execute(xyz)
+
+
+NODES_LIST: list[type[io.ComfyNode]] = [
+    V3TestNode,
+    # V3LoraLoader,
+    NInputsTest,
+    V3TestSleep,
+    V3DummyStart,
+    V3DummyEnd,
+    V3DummyEndInherit,
+]
+
+
+class v3TestExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return NODES_LIST
+
+
+async def comfy_entrypoint() -> v3TestExtension:
+    return v3TestExtension()
@@ -149,7 +149,6 @@ class WanFirstLastFrameToVideo:
         positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask})
         negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask})
 
-        clip_vision_output = None
         if clip_vision_start_image is not None:
             clip_vision_output = clip_vision_start_image
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.48"
+__version__ = "0.3.47"
@@ -7,7 +7,7 @@ import threading
 import time
 import traceback
 from enum import Enum
-from typing import List, Literal, NamedTuple, Optional, Union
+from typing import List, Literal, NamedTuple, Optional
 import asyncio
 
 import torch
@@ -965,7 +965,7 @@ def full_type_name(klass):
         return klass.__qualname__
     return module + '.' + klass.__qualname__
 
-async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[str], None]):
+async def validate_prompt(prompt_id, prompt):
     outputs = set()
     for x in prompt:
         if 'class_type' not in prompt[x]:
@@ -989,8 +989,7 @@ async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[
             return (False, error, [], {})
 
         if hasattr(class_, 'OUTPUT_NODE') and class_.OUTPUT_NODE is True:
-            if partial_execution_list is None or x in partial_execution_list:
-                outputs.add(x)
+            outputs.add(x)
 
     if len(outputs) == 0:
         error = {
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.48"
+version = "0.3.47"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
@@ -12,6 +12,8 @@ documentation = "https://docs.comfy.org/"
 
 [tool.ruff]
 lint.select = [
+    "E", # pycodestyle errors
+    "I", # isort
     "N805", # invalid-first-argument-name-for-method
     "S307", # suspicious-eval-usage
     "S102", # exec
@@ -21,4 +23,8 @@ lint.select = [
     # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
     "F",
 ]
+lint.ignore = ["E501"] # disable line-length checking
 exclude = ["*.ipynb", "**/generated/*.pyi"]
+
+[tool.ruff.lint.per-file-ignores]
+"!comfy_extras/v3/*" = ["E", "I"] # enable these rules only for V3 nodes
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.23.4
-comfyui-workflow-templates==0.1.47
+comfyui-workflow-templates==0.1.41
 comfyui-embedded-docs==0.2.4
 torch
 torchsde
@@ -684,12 +684,7 @@ class PromptServer():
         if "prompt" in json_data:
             prompt = json_data["prompt"]
             prompt_id = str(json_data.get("prompt_id", uuid.uuid4()))
 
-            partial_execution_targets = None
-            if "partial_execution_targets" in json_data:
-                partial_execution_targets = json_data["partial_execution_targets"]
-
-            valid = await execution.validate_prompt(prompt_id, prompt, partial_execution_targets)
+            valid = await execution.validate_prompt(prompt_id, prompt)
             extra_data = {}
             if "extra_data" in json_data:
                 extra_data = json_data["extra_data"]
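For context on what the removed lines did: on the master side, a POST to `/prompt` may carry a `partial_execution_targets` list of output-node ids, and `validate_prompt` then keeps only those outputs (plus their dependencies) for execution. A minimal client sketch mirroring the test helper further down; it assumes a local server on the default port 8188, and the node id "9" is hypothetical:

```python
import json
import urllib.request

workflow = {}  # an API-format workflow dict would go here
payload = {
    "prompt": workflow,
    "client_id": "example-client",
    # Hypothetical node id "9": only this output node (and whatever it
    # depends on) is executed; other output nodes are skipped.
    "partial_execution_targets": ["9"],
}
req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",
    data=json.dumps(payload).encode("utf-8"),
)
response = json.loads(urllib.request.urlopen(req).read())
print(response.get("prompt_id"))
```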
@@ -7,7 +7,7 @@ import subprocess
 
 from pytest import fixture
 from comfy_execution.graph_utils import GraphBuilder
-from tests.inference.test_execution import ComfyClient, run_warmup
+from tests.inference.test_execution import ComfyClient
 
 
 @pytest.mark.execution
@@ -24,7 +24,6 @@ class TestAsyncNodes:
             '--listen', args_pytest["listen"],
             '--port', str(args_pytest["port"]),
             '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml',
-            '--cpu',
         ]
         use_lru, lru_size = request.param
         if use_lru:
@@ -83,9 +82,6 @@ class TestAsyncNodes:
 
     def test_multiple_async_parallel_execution(self, client: ComfyClient, builder: GraphBuilder):
         """Test that multiple async nodes execute in parallel."""
-        # Warmup execution to ensure server is fully initialized
-        run_warmup(client)
-
         g = builder
         image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
 
@@ -152,9 +148,6 @@ class TestAsyncNodes:
 
     def test_async_lazy_evaluation(self, client: ComfyClient, builder: GraphBuilder):
         """Test async nodes with lazy evaluation."""
-        # Warmup execution to ensure server is fully initialized
-        run_warmup(client, prefix="warmup_lazy")
-
         g = builder
         input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
         input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
@@ -312,9 +305,6 @@ class TestAsyncNodes:
 
     def test_async_caching_behavior(self, client: ComfyClient, builder: GraphBuilder):
         """Test that async nodes are properly cached."""
-        # Warmup execution to ensure server is fully initialized
-        run_warmup(client, prefix="warmup_cache")
-
         g = builder
         image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
         sleep_node = g.node("TestSleep", value=image.out(0), seconds=0.2)
@@ -334,9 +324,6 @@ class TestAsyncNodes:
 
     def test_async_with_dynamic_prompts(self, client: ComfyClient, builder: GraphBuilder):
         """Test async nodes within dynamically generated prompts."""
-        # Warmup execution to ensure server is fully initialized
-        run_warmup(client, prefix="warmup_dynamic")
-
         g = builder
         image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
         image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
@@ -15,18 +15,10 @@ import urllib.parse
 import urllib.error
 from comfy_execution.graph_utils import GraphBuilder, Node
 
-def run_warmup(client, prefix="warmup"):
-    """Run a simple workflow to warm up the server."""
-    warmup_g = GraphBuilder(prefix=prefix)
-    warmup_image = warmup_g.node("StubImage", content="BLACK", height=32, width=32, batch_size=1)
-    warmup_g.node("PreviewImage", images=warmup_image.out(0))
-    client.run(warmup_g)
-
 class RunResult:
     def __init__(self, prompt_id: str):
         self.outputs: Dict[str,Dict] = {}
         self.runs: Dict[str,bool] = {}
-        self.cached: Dict[str,bool] = {}
         self.prompt_id: str = prompt_id
 
     def get_output(self, node: Node):
@@ -35,13 +27,6 @@ class RunResult:
     def did_run(self, node: Node):
         return self.runs.get(node.id, False)
 
-    def was_cached(self, node: Node):
-        return self.cached.get(node.id, False)
-
-    def was_executed(self, node: Node):
-        """Returns True if node was either run or cached"""
-        return self.did_run(node) or self.was_cached(node)
-
     def get_images(self, node: Node):
         output = self.get_output(node)
         if output is None:
@@ -66,10 +51,8 @@ class ComfyClient:
         ws.connect("ws://{}/ws?clientId={}".format(self.server_address, self.client_id))
         self.ws = ws
 
-    def queue_prompt(self, prompt, partial_execution_targets=None):
+    def queue_prompt(self, prompt):
         p = {"prompt": prompt, "client_id": self.client_id}
-        if partial_execution_targets is not None:
-            p["partial_execution_targets"] = partial_execution_targets
         data = json.dumps(p).encode('utf-8')
         req = urllib.request.Request("http://{}/prompt".format(self.server_address), data=data)
         return json.loads(urllib.request.urlopen(req).read())
@@ -87,13 +70,13 @@ class ComfyClient:
     def set_test_name(self, name):
         self.test_name = name
 
-    def run(self, graph, partial_execution_targets=None):
+    def run(self, graph):
         prompt = graph.finalize()
         for node in graph.nodes.values():
             if node.class_type == 'SaveImage':
                 node.inputs['filename_prefix'] = self.test_name
 
-        prompt_id = self.queue_prompt(prompt, partial_execution_targets)['prompt_id']
+        prompt_id = self.queue_prompt(prompt)['prompt_id']
         result = RunResult(prompt_id)
         while True:
             out = self.ws.recv()
@@ -109,10 +92,7 @@ class ComfyClient:
             elif message['type'] == 'execution_error':
                 raise Exception(message['data'])
             elif message['type'] == 'execution_cached':
-                if message['data']['prompt_id'] == prompt_id:
-                    cached_nodes = message['data'].get('nodes', [])
-                    for node_id in cached_nodes:
-                        result.cached[node_id] = True
+                pass # Probably want to store this off for testing
 
         history = self.get_history(prompt_id)[prompt_id]
         for node_id in history['outputs']:
@@ -150,7 +130,6 @@ class TestExecution:
             '--listen', args_pytest["listen"],
             '--port', str(args_pytest["port"]),
             '--extra-model-paths-config', 'tests/inference/extra_model_paths.yaml',
-            '--cpu',
         ]
         use_lru, lru_size = request.param
         if use_lru:
@@ -519,15 +498,12 @@ class TestExecution:
         assert not result.did_run(test_node), "The execution should have been cached"
 
     def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder):
-        # Warmup execution to ensure server is fully initialized
-        run_warmup(client)
-
         g = builder
         image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
 
         # Create sleep nodes for each duration
-        sleep_node1 = g.node("TestSleep", value=image.out(0), seconds=2.9)
-        sleep_node2 = g.node("TestSleep", value=image.out(0), seconds=3.1)
+        sleep_node1 = g.node("TestSleep", value=image.out(0), seconds=2.8)
+        sleep_node2 = g.node("TestSleep", value=image.out(0), seconds=2.9)
         sleep_node3 = g.node("TestSleep", value=image.out(0), seconds=3.0)
 
         # Add outputs to verify the execution
@@ -539,9 +515,10 @@ class TestExecution:
         result = client.run(g)
         elapsed_time = time.time() - start_time
 
-        # The test should take around 3.0 seconds (the longest sleep duration)
-        # plus some overhead, but definitely less than the sum of all sleeps (9.0s)
-        assert elapsed_time < 8.9, f"Parallel execution took {elapsed_time}s, expected less than 8.9s"
+        # The test should take around 0.4 seconds (the longest sleep duration)
+        # plus some overhead, but definitely less than the sum of all sleeps (0.9s)
+        # We'll allow for up to 0.8s total to account for overhead
+        assert elapsed_time < 4.0, f"Parallel execution took {elapsed_time}s, expected less than 0.8s"
 
         # Verify that all nodes executed
         assert result.did_run(sleep_node1), "Sleep node 1 should have run"
@@ -549,9 +526,6 @@ class TestExecution:
         assert result.did_run(sleep_node3), "Sleep node 3 should have run"
 
     def test_parallel_sleep_expansion(self, client: ComfyClient, builder: GraphBuilder):
-        # Warmup execution to ensure server is fully initialized
-        run_warmup(client)
-
         g = builder
         # Create input images with different values
         image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
@@ -563,9 +537,9 @@ class TestExecution:
                                 image1=image1.out(0),
                                 image2=image2.out(0),
                                 image3=image3.out(0),
-                                sleep1=4.8,
-                                sleep2=4.9,
-                                sleep3=5.0)
+                                sleep1=0.4,
+                                sleep2=0.5,
+                                sleep3=0.6)
         output = g.node("SaveImage", images=parallel_sleep.out(0))
 
         start_time = time.time()
@@ -574,7 +548,7 @@ class TestExecution:
 
         # Similar to the previous test, expect parallel execution of the sleep nodes
         # which should complete in less than the sum of all sleeps
-        assert elapsed_time < 10.0, f"Expansion execution took {elapsed_time}s, expected less than 5.5s"
+        assert elapsed_time < 0.8, f"Expansion execution took {elapsed_time}s, expected less than 0.8s"
 
         # Verify the parallel sleep node executed
         assert result.did_run(parallel_sleep), "ParallelSleep node should have run"
@@ -611,151 +585,3 @@ class TestExecution:
         assert len(images) == 2, "Should have 2 images"
         assert numpy.array(images[0]).min() == 0 and numpy.array(images[0]).max() == 0, "First image should be black"
         assert numpy.array(images[1]).min() == 0 and numpy.array(images[1]).max() == 0, "Second image should also be black"
-
-    # Output nodes included in the partial execution list are executed
-    def test_partial_execution_included_outputs(self, client: ComfyClient, builder: GraphBuilder):
-        g = builder
-        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
-        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
-
-        # Create two separate output nodes
-        output1 = g.node("SaveImage", images=input1.out(0))
-        output2 = g.node("SaveImage", images=input2.out(0))
-
-        # Run with partial execution targeting only output1
-        result = client.run(g, partial_execution_targets=[output1.id])
-
-        assert result.was_executed(input1), "Input1 should have been executed (run or cached)"
-        assert result.was_executed(output1), "Output1 should have been executed (run or cached)"
-        assert not result.did_run(input2), "Input2 should not have run"
-        assert not result.did_run(output2), "Output2 should not have run"
-
-        # Verify only output1 produced results
-        assert len(result.get_images(output1)) == 1, "Output1 should have produced an image"
-        assert len(result.get_images(output2)) == 0, "Output2 should not have produced an image"
-
-    # Output nodes NOT included in the partial execution list are NOT executed
-    def test_partial_execution_excluded_outputs(self, client: ComfyClient, builder: GraphBuilder):
-        g = builder
-        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
-        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
-        input3 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1)
-
-        # Create three output nodes
-        output1 = g.node("SaveImage", images=input1.out(0))
-        output2 = g.node("SaveImage", images=input2.out(0))
-        output3 = g.node("SaveImage", images=input3.out(0))
-
-        # Run with partial execution targeting only output1 and output3
-        result = client.run(g, partial_execution_targets=[output1.id, output3.id])
-
-        assert result.was_executed(input1), "Input1 should have been executed"
-        assert result.was_executed(input3), "Input3 should have been executed"
-        assert result.was_executed(output1), "Output1 should have been executed"
-        assert result.was_executed(output3), "Output3 should have been executed"
-        assert not result.did_run(input2), "Input2 should not have run"
-        assert not result.did_run(output2), "Output2 should not have run"
-
-    # Output nodes NOT in list ARE executed if necessary for nodes that are in the list
-    def test_partial_execution_dependencies(self, client: ComfyClient, builder: GraphBuilder):
-        g = builder
-        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
-
-        # Create a processing chain with an OUTPUT_NODE that has socket outputs
-        output_with_socket = g.node("TestOutputNodeWithSocketOutput", image=input1.out(0), value=2.0)
-
-        # Create another node that depends on the output_with_socket
-        dependent_node = g.node("TestLazyMixImages",
-                                image1=output_with_socket.out(0),
-                                image2=input1.out(0),
-                                mask=g.node("StubMask", value=0.5, height=512, width=512, batch_size=1).out(0))
-
-        # Create the final output
-        final_output = g.node("SaveImage", images=dependent_node.out(0))
-
-        # Run with partial execution targeting only the final output
-        result = client.run(g, partial_execution_targets=[final_output.id])
-
-        # All nodes should have been executed because they're dependencies
-        assert result.was_executed(input1), "Input1 should have been executed"
-        assert result.was_executed(output_with_socket), "Output with socket should have been executed (dependency)"
-        assert result.was_executed(dependent_node), "Dependent node should have been executed"
-        assert result.was_executed(final_output), "Final output should have been executed"
-
-    # Lazy execution works with partial execution
-    def test_partial_execution_with_lazy_nodes(self, client: ComfyClient, builder: GraphBuilder):
-        g = builder
-        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
-        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
-        input3 = g.node("StubImage", content="NOISE", height=512, width=512, batch_size=1)
-
-        # Create masks that will trigger different lazy execution paths
-        mask1 = g.node("StubMask", value=0.0, height=512, width=512, batch_size=1)  # Will only need image1
-        mask2 = g.node("StubMask", value=0.5, height=512, width=512, batch_size=1)  # Will need both images
-
-        # Create two lazy mix nodes
-        lazy_mix1 = g.node("TestLazyMixImages", image1=input1.out(0), image2=input2.out(0), mask=mask1.out(0))
-        lazy_mix2 = g.node("TestLazyMixImages", image1=input2.out(0), image2=input3.out(0), mask=mask2.out(0))
-
-        output1 = g.node("SaveImage", images=lazy_mix1.out(0))
-        output2 = g.node("SaveImage", images=lazy_mix2.out(0))
-
-        # Run with partial execution targeting only output1
-        result = client.run(g, partial_execution_targets=[output1.id])
-
-        # For output1 path - only input1 should run due to lazy evaluation (mask=0.0)
-        assert result.was_executed(input1), "Input1 should have been executed"
-        assert not result.did_run(input2), "Input2 should not have run (lazy evaluation)"
-        assert result.was_executed(mask1), "Mask1 should have been executed"
-        assert result.was_executed(lazy_mix1), "Lazy mix1 should have been executed"
-        assert result.was_executed(output1), "Output1 should have been executed"
-
-        # Nothing from output2 path should run
-        assert not result.did_run(input3), "Input3 should not have run"
-        assert not result.did_run(mask2), "Mask2 should not have run"
-        assert not result.did_run(lazy_mix2), "Lazy mix2 should not have run"
-        assert not result.did_run(output2), "Output2 should not have run"
-
-    # Multiple OUTPUT_NODEs with dependencies
-    def test_partial_execution_multiple_output_nodes(self, client: ComfyClient, builder: GraphBuilder):
-        g = builder
-        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
-        input2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
-
-        # Create a chain of OUTPUT_NODEs
-        output_node1 = g.node("TestOutputNodeWithSocketOutput", image=input1.out(0), value=1.5)
-        output_node2 = g.node("TestOutputNodeWithSocketOutput", image=output_node1.out(0), value=2.0)
-
-        # Create regular output nodes
-        save1 = g.node("SaveImage", images=output_node1.out(0))
-        save2 = g.node("SaveImage", images=output_node2.out(0))
-        save3 = g.node("SaveImage", images=input2.out(0))
-
-        # Run targeting only save2
-        result = client.run(g, partial_execution_targets=[save2.id])
-
-        # Should run: input1, output_node1, output_node2, save2
-        assert result.was_executed(input1), "Input1 should have been executed"
-        assert result.was_executed(output_node1), "Output node 1 should have been executed (dependency)"
-        assert result.was_executed(output_node2), "Output node 2 should have been executed (dependency)"
-        assert result.was_executed(save2), "Save2 should have been executed"
-
-        # Should NOT run: input2, save1, save3
-        assert not result.did_run(input2), "Input2 should not have run"
-        assert not result.did_run(save1), "Save1 should not have run"
-        assert not result.did_run(save3), "Save3 should not have run"
-
-    # Empty partial execution list (should execute nothing)
-    def test_partial_execution_empty_list(self, client: ComfyClient, builder: GraphBuilder):
-        g = builder
-        input1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
-        _output1 = g.node("SaveImage", images=input1.out(0))
-
-        # Run with empty partial execution list
-        try:
-            _result = client.run(g, partial_execution_targets=[])
-            # Should get an error because no outputs are selected
-            assert False, "Should have raised an error for empty partial execution list"
-        except urllib.error.HTTPError:
-            pass  # Expected behavior
@@ -463,25 +463,6 @@ class TestParallelSleep(ComfyNodeABC):
             "expand": g.finalize(),
         }
 
-class TestOutputNodeWithSocketOutput:
-    @classmethod
-    def INPUT_TYPES(cls):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
-            },
-        }
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "process"
-    CATEGORY = "_for_testing"
-    OUTPUT_NODE = True
-
-    def process(self, image, value):
-        # Apply value scaling and return both as output and socket
-        result = image * value
-        return (result,)
-
 TEST_NODE_CLASS_MAPPINGS = {
     "TestLazyMixImages": TestLazyMixImages,
     "TestVariadicAverage": TestVariadicAverage,
@@ -497,7 +478,6 @@ TEST_NODE_CLASS_MAPPINGS = {
     "TestSamplingInExpansion": TestSamplingInExpansion,
     "TestSleep": TestSleep,
     "TestParallelSleep": TestParallelSleep,
-    "TestOutputNodeWithSocketOutput": TestOutputNodeWithSocketOutput,
 }
 
 TEST_NODE_DISPLAY_NAME_MAPPINGS = {
@@ -515,5 +495,4 @@ TEST_NODE_DISPLAY_NAME_MAPPINGS = {
     "TestSamplingInExpansion": "Sampling In Expansion",
     "TestSleep": "Test Sleep",
     "TestParallelSleep": "Test Parallel Sleep",
-    "TestOutputNodeWithSocketOutput": "Test Output Node With Socket Output",
 }