mirror of https://github.com/comfyanonymous/ComfyUI.git synced 2025-08-02 15:04:50 +08:00

Compare commits


5 Commits

Author           SHA1        Message                                                        Date
comfyanonymous   a97f2f850a  ComfyUI version 0.3.30                                         2025-04-24 16:03:01 -04:00
comfyanonymous   5acb705857  Switch LTXVPreprocess to libx264 (#7776)                       2025-04-24 13:58:31 -04:00
thot experiment  5c80da31db  fix multiple image return from api nodes (#7772)               2025-04-24 03:29:05 -04:00
thot experiment  e2eed9eb9b  throw away alpha channel in clip vision preprocessor (#7769)   2025-04-23 21:28:36 -04:00
                             (saves users having to explicitly discard the channel)
filtered         11b68ebd22  [BugFix] Update frontend to 1.17.11 (#7766)                    2025-04-23 18:16:12 -04:00
6 changed files with 32 additions and 23 deletions

View File

@@ -18,6 +18,7 @@ class Output:
         setattr(self, key, item)
 
 def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
+    image = image[:, :, :, :3] if image.shape[3] > 3 else image
     mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
     std = torch.tensor(std, device=image.device, dtype=image.dtype)
     image = image.movedim(-1, 1)
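For context, a minimal sketch of what the added guard does, in plain PyTorch. The tensor below is a stand-in for a ComfyUI IMAGE batch in [batch, height, width, channels] layout; the shapes are illustrative only.

import torch

# Stand-in for a 4-channel RGBA IMAGE batch, e.g. decoded from a PNG with transparency.
rgba = torch.rand(1, 224, 224, 4)

# The new guard keeps only the first three channels when an alpha channel is present,
# so the CLIP vision normalization that follows always receives RGB input.
image = rgba[:, :, :, :3] if rgba.shape[3] > 3 else rgba
assert image.shape[-1] == 3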

View File

@@ -31,35 +31,43 @@ def downscale_input(image):
     s = s.movedim(1,-1)
     return s
 
-def validate_and_cast_response (response):
+def validate_and_cast_response(response):
     # validate raw JSON response
     data = response.data
     if not data or len(data) == 0:
         raise Exception("No images returned from API endpoint")
 
-    # Get base64 image data
-    image_url = data[0].url
-    b64_data = data[0].b64_json
-    if not image_url and not b64_data:
-        raise Exception("No image was generated in the response")
+    # Initialize list to store image tensors
+    image_tensors = []
 
-    if b64_data:
-        img_data = base64.b64decode(b64_data)
-        img = Image.open(io.BytesIO(img_data))
+    # Process each image in the data array
+    for image_data in data:
+        image_url = image_data.url
+        b64_data = image_data.b64_json
 
-    elif image_url:
-        img_response = requests.get(image_url)
-        if img_response.status_code != 200:
-            raise Exception("Failed to download the image")
-        img = Image.open(io.BytesIO(img_response.content))
+        if not image_url and not b64_data:
+            raise Exception("No image was generated in the response")
 
-    img = img.convert("RGBA")
+        if b64_data:
+            img_data = base64.b64decode(b64_data)
+            img = Image.open(io.BytesIO(img_data))
 
-    # Convert to numpy array, normalize to float32 between 0 and 1
-    img_array = np.array(img).astype(np.float32) / 255.0
+        elif image_url:
+            img_response = requests.get(image_url)
+            if img_response.status_code != 200:
+                raise Exception("Failed to download the image")
+            img = Image.open(io.BytesIO(img_response.content))
 
-    # Convert to torch tensor and add batch dimension
-    return torch.from_numpy(img_array)[None,]
+        img = img.convert("RGBA")
+
+        # Convert to numpy array, normalize to float32 between 0 and 1
+        img_array = np.array(img).astype(np.float32) / 255.0
+        img_tensor = torch.from_numpy(img_array)
+
+        # Add to list of tensors
+        image_tensors.append(img_tensor)
+
+    return torch.stack(image_tensors, dim=0)
 
 class OpenAIDalle2(ComfyNodeABC):
     """

View File

@@ -385,7 +385,7 @@ def encode_single_frame(output_file, image_array: np.ndarray, crf):
     container = av.open(output_file, "w", format="mp4")
     try:
         stream = container.add_stream(
-            "h264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
+            "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
         )
         stream.height = image_array.shape[0]
         stream.width = image_array.shape[1]
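The change swaps the codec name passed to PyAV from the generic "h264" to the explicit libx264 encoder. Below is a hedged, standalone sketch of encoding one RGB frame with the same crf/preset options; the helper name, default crf, output path, pixel format choice, and random frame are illustrative, and PyAV reformats the rgb24 frame to the stream's pixel format on encode.

import av
import numpy as np

def encode_one_frame(output_file, image_array, crf=23):
    # Open an mp4 container and add a libx264 video stream at 1 fps.
    container = av.open(output_file, "w", format="mp4")
    try:
        stream = container.add_stream(
            "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
        )
        stream.height = image_array.shape[0]
        stream.width = image_array.shape[1]
        stream.pix_fmt = "yuv420p"  # widely compatible output pixel format
        frame = av.VideoFrame.from_ndarray(image_array, format="rgb24")
        for packet in stream.encode(frame):
            container.mux(packet)
        for packet in stream.encode():  # flush buffered packets
            container.mux(packet)
    finally:
        container.close()

# Illustrative call with a random 256x256 RGB frame.
encode_one_frame("frame.mp4", np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))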

View File

@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.29"
+__version__ = "0.3.30"

View File

@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.29"
+version = "0.3.30"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"

View File

@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.17.10
+comfyui-frontend-package==1.17.11
 comfyui-workflow-templates==0.1.3
 torch
 torchsde