mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-08-03 07:26:31 +08:00
Compare commits
5 Commits
desktop-re
...
v0.3.30
| Author | SHA1 | Date |
|---|---|---|
|
a97f2f850a | ||
|
5acb705857 | ||
|
5c80da31db | ||
|
e2eed9eb9b | ||
|
11b68ebd22 |
@@ -18,6 +18,7 @@ class Output:
|
|||||||
setattr(self, key, item)
|
setattr(self, key, item)
|
||||||
|
|
||||||
def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
|
def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
|
||||||
|
image = image[:, :, :, :3] if image.shape[3] > 3 else image
|
||||||
mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
|
mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
|
||||||
std = torch.tensor(std, device=image.device, dtype=image.dtype)
|
std = torch.tensor(std, device=image.device, dtype=image.dtype)
|
||||||
image = image.movedim(-1, 1)
|
image = image.movedim(-1, 1)
|
||||||
|
@@ -31,35 +31,43 @@ def downscale_input(image):
|
|||||||
s = s.movedim(1,-1)
|
s = s.movedim(1,-1)
|
||||||
return s
|
return s
|
||||||
|
|
||||||
def validate_and_cast_response(response):
    """Validate an image-generation API response and cast it to an image tensor.

    Args:
        response: API response object exposing a ``.data`` sequence whose items
            carry ``.url`` and/or ``.b64_json`` image payloads.

    Returns:
        A ``torch.Tensor`` of shape ``(N, H, W, 4)`` — one RGBA float32 image
        per response item, values normalized to ``[0, 1]``, stacked on dim 0.
        NOTE(review): ``torch.stack`` assumes every returned image has the same
        spatial size — confirm the API guarantees this.

    Raises:
        Exception: if the response holds no images, an item carries neither a
            URL nor base64 data, or an image download fails.
    """
    items = response.data
    # Reject an empty or missing payload up front.
    if not items or len(items) == 0:
        raise Exception("No images returned from API endpoint")

    tensors = []
    for item in items:
        url = item.url
        payload = item.b64_json

        if not url and not payload:
            raise Exception("No image was generated in the response")

        if payload:
            # Inline base64 payload takes precedence over a download URL.
            raw = base64.b64decode(payload)
            pil_img = Image.open(io.BytesIO(raw))
        elif url:
            reply = requests.get(url)
            if reply.status_code != 200:
                raise Exception("Failed to download the image")
            pil_img = Image.open(io.BytesIO(reply.content))

        # RGBA, normalized to float32 in [0, 1], as a torch tensor.
        pixels = np.array(pil_img.convert("RGBA")).astype(np.float32) / 255.0
        tensors.append(torch.from_numpy(pixels))

    # Batch all per-item tensors along a new leading dimension.
    return torch.stack(tensors, dim=0)
class OpenAIDalle2(ComfyNodeABC):
|
class OpenAIDalle2(ComfyNodeABC):
|
||||||
"""
|
"""
|
||||||
|
@@ -385,7 +385,7 @@ def encode_single_frame(output_file, image_array: np.ndarray, crf):
|
|||||||
container = av.open(output_file, "w", format="mp4")
|
container = av.open(output_file, "w", format="mp4")
|
||||||
try:
|
try:
|
||||||
stream = container.add_stream(
|
stream = container.add_stream(
|
||||||
"h264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
|
"libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
|
||||||
)
|
)
|
||||||
stream.height = image_array.shape[0]
|
stream.height = image_array.shape[0]
|
||||||
stream.width = image_array.shape[1]
|
stream.width = image_array.shape[1]
|
||||||
|
@@ -1,3 +1,3 @@
|
|||||||
# This file is automatically generated by the build process when version is
|
# This file is automatically generated by the build process when version is
|
||||||
# updated in pyproject.toml.
|
# updated in pyproject.toml.
|
||||||
__version__ = "0.3.29"
|
__version__ = "0.3.30"
|
||||||
|
@@ -1,6 +1,6 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "ComfyUI"
|
name = "ComfyUI"
|
||||||
version = "0.3.29"
|
version = "0.3.30"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
license = { file = "LICENSE" }
|
license = { file = "LICENSE" }
|
||||||
requires-python = ">=3.9"
|
requires-python = ">=3.9"
|
||||||
|
@@ -1,4 +1,4 @@
|
|||||||
comfyui-frontend-package==1.17.10
|
comfyui-frontend-package==1.17.11
|
||||||
comfyui-workflow-templates==0.1.3
|
comfyui-workflow-templates==0.1.3
|
||||||
torch
|
torch
|
||||||
torchsde
|
torchsde
|
||||||
|
Reference in New Issue
Block a user