
Display progress and result URL directly on API nodes (#8102)

* [Luma] Print download URL of successful task result directly on nodes (#177)

[Veo] Print download URL of successful task result directly on nodes (#184)

[Recraft] Print download URL of successful task result directly on nodes (#183)

[Pixverse] Print download URL of successful task result directly on nodes (#182)

[Kling] Print download URL of successful task result directly on nodes (#181)

[MiniMax] Print progress text and download URL of successful task result directly on nodes (#179)

[Docs] Link to docs in `API_NODE` class property type annotation comment (#178)

[Ideogram] Print download URL of successful task result directly on nodes (#176)

Show output URL and progress text on Pika nodes (#168)

[BFL] Print download URL of successful task result directly on nodes (#175)

[OpenAI] Print download URL of successful task result directly on nodes (#174)

* fix ruff errors

* fix 3.10 syntax error
Author: Christian Byrne
Date: 2025-05-13 21:33:18 -07:00 (committed by GitHub)
Parent: bab836d88d
Commit: 98ff01e148
13 changed files with 474 additions and 92 deletions
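The pattern applied across the Kling nodes in the diff below is: the shared polling helper gains optional result_url_extractor, estimated_duration, and node_id parameters, and each node forwards its hidden unique_id so that progress text and the download URL of a finished task can be shown directly on that node. The following is a minimal standalone sketch of that idea, runnable on its own; poll_until_finished_sketch, display_text_on_node, and the fake response shape are illustrative assumptions, not ComfyUI's actual PollingOperation/ApiEndpoint API.

from __future__ import annotations

import time
from typing import Callable, Optional, TypeVar

R = TypeVar("R")


def display_text_on_node(text: str, node_id: Optional[str]) -> None:
    """Hypothetical stand-in for the UI hook that shows progress/result text on a node."""
    if node_id is not None:
        print(f"[node {node_id}] {text}")


def poll_until_finished_sketch(
    poll_fn: Callable[[], R],
    is_done: Callable[[R], bool],
    result_url_extractor: Optional[Callable[[R], Optional[str]]] = None,
    estimated_duration: Optional[int] = None,
    node_id: Optional[str] = None,
    interval: float = 0.1,
) -> R:
    """Poll until the task reaches a terminal state; if an extractor is given, show the result URL on the node."""
    start = time.monotonic()
    while True:
        response = poll_fn()
        if is_done(response):
            if result_url_extractor is not None and node_id is not None:
                url = result_url_extractor(response)
                if url:
                    display_text_on_node(f"Result URL: {url}", node_id)
            return response
        if estimated_duration is not None and node_id is not None:
            elapsed = int(time.monotonic() - start)
            display_text_on_node(f"~{elapsed}/{estimated_duration}s elapsed", node_id)
        time.sleep(interval)


if __name__ == "__main__":
    # Fake task that "succeeds" on the third poll, mimicking a Kling task-status flow.
    state = {"polls": 0}

    def fake_poll() -> dict:
        state["polls"] += 1
        status = "succeed" if state["polls"] >= 3 else "processing"
        return {"status": status, "url": "https://example.com/result.mp4"}

    poll_until_finished_sketch(
        fake_poll,
        is_done=lambda r: r["status"] == "succeed",
        result_url_extractor=lambda r: r.get("url"),
        estimated_duration=1,
        node_id="42",
    )

In the actual diff, the URL display is handled inside PollingOperation, to which poll_until_finished forwards the extractor, the estimated duration, and the node_id (see the first hunks below); the extractors are the get_video_url_from_response and get_images_urls_from_response helpers.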


@@ -6,6 +6,7 @@ For source of truth on the allowed permutations of request fields, please refere
from __future__ import annotations
from typing import Optional, TypeVar, Any
from collections.abc import Callable
import math
import logging
@@ -86,6 +87,15 @@ MAX_PROMPT_LENGTH_IMAGE_GEN = 500
MAX_NEGATIVE_PROMPT_LENGTH_IMAGE_GEN = 200
MAX_PROMPT_LENGTH_LIP_SYNC = 120
# TODO: adjust based on tests
AVERAGE_DURATION_T2V = 319
AVERAGE_DURATION_I2V = 164
AVERAGE_DURATION_LIP_SYNC = 120
AVERAGE_DURATION_VIRTUAL_TRY_ON = 19
AVERAGE_DURATION_IMAGE_GEN = 32
AVERAGE_DURATION_VIDEO_EFFECTS = 320
AVERAGE_DURATION_VIDEO_EXTEND = 320
R = TypeVar("R")
@@ -95,7 +105,13 @@ class KlingApiError(Exception):
pass
def poll_until_finished(auth_kwargs: dict[str,str], api_endpoint: ApiEndpoint[Any, R]) -> R:
def poll_until_finished(
auth_kwargs: dict[str, str],
api_endpoint: ApiEndpoint[Any, R],
result_url_extractor: Optional[Callable[[R], str]] = None,
estimated_duration: Optional[int] = None,
node_id: Optional[str] = None,
) -> R:
"""Polls the Kling API endpoint until the task reaches a terminal state, then returns the response."""
return PollingOperation(
poll_endpoint=api_endpoint,
@@ -109,6 +125,9 @@ def poll_until_finished(auth_kwargs: dict[str,str], api_endpoint: ApiEndpoint[An
else None
),
auth_kwargs=auth_kwargs,
result_url_extractor=result_url_extractor,
estimated_duration=estimated_duration,
node_id=node_id,
).execute()
@@ -227,7 +246,9 @@ def get_camera_control_input_config(
def get_video_from_response(response) -> KlingVideoResult:
"""Returns the first video object from the Kling video generation task result."""
"""Returns the first video object from the Kling video generation task result.
Will raise an error if the response is not valid.
"""
video = response.data.task_result.videos[0]
logging.info(
"Kling task %s succeeded. Video URL: %s", response.data.task_id, video.url
@@ -235,12 +256,37 @@ def get_video_from_response(response) -> KlingVideoResult:
return video
def get_video_url_from_response(response) -> Optional[str]:
"""Returns the first video url from the Kling video generation task result.
Will not raise an error if the response is not valid.
"""
if response and is_valid_video_response(response):
return str(get_video_from_response(response).url)
else:
return None
def get_images_from_response(response) -> list[KlingImageResult]:
"""Returns the list of image objects from the Kling image generation task result.
Will raise an error if the response is not valid.
"""
images = response.data.task_result.images
logging.info("Kling task %s succeeded. Images: %s", response.data.task_id, images)
return images
def get_images_urls_from_response(response) -> Optional[str]:
"""Returns the list of image urls from the Kling image generation task result.
Will not raise an error if the response is not valid. If there is only one image, returns the url as a string. If there are multiple images, returns a list of urls.
"""
if response and is_valid_image_response(response):
images = get_images_from_response(response)
image_urls = [str(image.url) for image in images]
return "\n".join(image_urls)
else:
return None
def video_result_to_node_output(
video: KlingVideoResult,
) -> tuple[VideoFromFile, str, str]:
@@ -312,6 +358,7 @@ class KlingCameraControls(KlingNodeBase):
RETURN_TYPES = ("CAMERA_CONTROL",)
RETURN_NAMES = ("camera_control",)
FUNCTION = "main"
API_NODE = False # This is just a helper node, it doesn't make an API call
@classmethod
def VALIDATE_INPUTS(
@@ -421,6 +468,7 @@ class KlingTextToVideoNode(KlingNodeBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -428,7 +476,9 @@ class KlingTextToVideoNode(KlingNodeBase):
RETURN_NAMES = ("VIDEO", "video_id", "duration")
DESCRIPTION = "Kling Text to Video Node"
def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingText2VideoResponse:
def get_response(
self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
) -> KlingText2VideoResponse:
return poll_until_finished(
auth_kwargs,
ApiEndpoint(
@@ -437,6 +487,9 @@ class KlingTextToVideoNode(KlingNodeBase):
request_model=EmptyRequest,
response_model=KlingText2VideoResponse,
),
result_url_extractor=get_video_url_from_response,
estimated_duration=AVERAGE_DURATION_T2V,
node_id=node_id,
)
def api_call(
@@ -449,6 +502,7 @@ class KlingTextToVideoNode(KlingNodeBase):
camera_control: Optional[KlingCameraControl] = None,
model_name: Optional[str] = None,
duration: Optional[str] = None,
unique_id: Optional[str] = None,
**kwargs,
) -> tuple[VideoFromFile, str, str]:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
@@ -478,7 +532,9 @@ class KlingTextToVideoNode(KlingNodeBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_video_result_response(final_response)
video = get_video_from_response(final_response)
@@ -528,6 +584,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -540,6 +597,7 @@ class KlingCameraControlT2VNode(KlingTextToVideoNode):
cfg_scale: float,
aspect_ratio: str,
camera_control: Optional[KlingCameraControl] = None,
unique_id: Optional[str] = None,
**kwargs,
):
return super().api_call(
@@ -613,6 +671,7 @@ class KlingImage2VideoNode(KlingNodeBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -620,7 +679,9 @@ class KlingImage2VideoNode(KlingNodeBase):
RETURN_NAMES = ("VIDEO", "video_id", "duration")
DESCRIPTION = "Kling Image to Video Node"
def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingImage2VideoResponse:
def get_response(
self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
) -> KlingImage2VideoResponse:
return poll_until_finished(
auth_kwargs,
ApiEndpoint(
@@ -629,6 +690,9 @@ class KlingImage2VideoNode(KlingNodeBase):
request_model=KlingImage2VideoRequest,
response_model=KlingImage2VideoResponse,
),
result_url_extractor=get_video_url_from_response,
estimated_duration=AVERAGE_DURATION_I2V,
node_id=node_id,
)
def api_call(
@@ -643,6 +707,7 @@ class KlingImage2VideoNode(KlingNodeBase):
duration: str,
camera_control: Optional[KlingCameraControl] = None,
end_frame: Optional[torch.Tensor] = None,
unique_id: Optional[str] = None,
**kwargs,
) -> tuple[VideoFromFile]:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V)
@@ -681,7 +746,9 @@ class KlingImage2VideoNode(KlingNodeBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_video_result_response(final_response)
video = get_video_from_response(final_response)
@@ -734,6 +801,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -747,6 +815,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode):
cfg_scale: float,
aspect_ratio: str,
camera_control: KlingCameraControl,
unique_id: Optional[str] = None,
**kwargs,
):
return super().api_call(
@@ -759,6 +828,7 @@ class KlingCameraControlI2VNode(KlingImage2VideoNode):
prompt=prompt,
negative_prompt=negative_prompt,
camera_control=camera_control,
unique_id=unique_id,
**kwargs,
)
@@ -830,6 +900,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -844,6 +915,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode):
cfg_scale: float,
aspect_ratio: str,
mode: str,
unique_id: Optional[str] = None,
**kwargs,
):
mode, duration, model_name = KlingStartEndFrameNode.get_mode_string_mapping()[
@@ -859,6 +931,7 @@ class KlingStartEndFrameNode(KlingImage2VideoNode):
aspect_ratio=aspect_ratio,
duration=duration,
end_frame=end_frame,
unique_id=unique_id,
**kwargs,
)
@@ -892,6 +965,7 @@ class KlingVideoExtendNode(KlingNodeBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -899,7 +973,9 @@ class KlingVideoExtendNode(KlingNodeBase):
RETURN_NAMES = ("VIDEO", "video_id", "duration")
DESCRIPTION = "Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes."
def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingVideoExtendResponse:
def get_response(
self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
) -> KlingVideoExtendResponse:
return poll_until_finished(
auth_kwargs,
ApiEndpoint(
@@ -908,6 +984,9 @@ class KlingVideoExtendNode(KlingNodeBase):
request_model=EmptyRequest,
response_model=KlingVideoExtendResponse,
),
result_url_extractor=get_video_url_from_response,
estimated_duration=AVERAGE_DURATION_VIDEO_EXTEND,
node_id=node_id,
)
def api_call(
@@ -916,6 +995,7 @@ class KlingVideoExtendNode(KlingNodeBase):
negative_prompt: str,
cfg_scale: float,
video_id: str,
unique_id: Optional[str] = None,
**kwargs,
) -> tuple[VideoFromFile, str, str]:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
@@ -939,7 +1019,9 @@ class KlingVideoExtendNode(KlingNodeBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_video_result_response(final_response)
video = get_video_from_response(final_response)
@@ -952,7 +1034,9 @@ class KlingVideoEffectsBase(KlingNodeBase):
RETURN_TYPES = ("VIDEO", "STRING", "STRING")
RETURN_NAMES = ("VIDEO", "video_id", "duration")
def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingVideoEffectsResponse:
def get_response(
self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
) -> KlingVideoEffectsResponse:
return poll_until_finished(
auth_kwargs,
ApiEndpoint(
@@ -961,6 +1045,9 @@ class KlingVideoEffectsBase(KlingNodeBase):
request_model=EmptyRequest,
response_model=KlingVideoEffectsResponse,
),
result_url_extractor=get_video_url_from_response,
estimated_duration=AVERAGE_DURATION_VIDEO_EFFECTS,
node_id=node_id,
)
def api_call(
@@ -972,6 +1059,7 @@ class KlingVideoEffectsBase(KlingNodeBase):
image_1: torch.Tensor,
image_2: Optional[torch.Tensor] = None,
mode: Optional[KlingVideoGenMode] = None,
unique_id: Optional[str] = None,
**kwargs,
):
if dual_character:
@@ -1009,7 +1097,9 @@ class KlingVideoEffectsBase(KlingNodeBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_video_result_response(final_response)
video = get_video_from_response(final_response)
@@ -1053,6 +1143,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -1068,6 +1159,7 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase):
model_name: KlingCharacterEffectModelName,
mode: KlingVideoGenMode,
duration: KlingVideoGenDuration,
unique_id: Optional[str] = None,
**kwargs,
):
video, _, duration = super().api_call(
@@ -1078,10 +1170,12 @@ class KlingDualCharacterVideoEffectNode(KlingVideoEffectsBase):
duration=duration,
image_1=image_left,
image_2=image_right,
unique_id=unique_id,
**kwargs,
)
return video, duration
class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase):
"""Kling Single Image Video Effect Node"""
@@ -1117,6 +1211,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -1128,6 +1223,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase):
effect_scene: KlingSingleImageEffectsScene,
model_name: KlingSingleImageEffectModelName,
duration: KlingVideoGenDuration,
unique_id: Optional[str] = None,
**kwargs,
):
return super().api_call(
@@ -1136,6 +1232,7 @@ class KlingSingleImageVideoEffectNode(KlingVideoEffectsBase):
model_name=model_name,
duration=duration,
image_1=image,
unique_id=unique_id,
**kwargs,
)
@@ -1154,7 +1251,9 @@ class KlingLipSyncBase(KlingNodeBase):
f"Text is too long. Maximum length is {MAX_PROMPT_LENGTH_LIP_SYNC} characters."
)
def get_response(self, task_id: str, auth_kwargs: dict[str,str]) -> KlingLipSyncResponse:
def get_response(
self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
) -> KlingLipSyncResponse:
"""Polls the Kling API endpoint until the task reaches a terminal state."""
return poll_until_finished(
auth_kwargs,
@@ -1164,6 +1263,9 @@ class KlingLipSyncBase(KlingNodeBase):
request_model=EmptyRequest,
response_model=KlingLipSyncResponse,
),
result_url_extractor=get_video_url_from_response,
estimated_duration=AVERAGE_DURATION_LIP_SYNC,
node_id=node_id,
)
def api_call(
@@ -1175,7 +1277,8 @@ class KlingLipSyncBase(KlingNodeBase):
text: Optional[str] = None,
voice_speed: Optional[float] = None,
voice_id: Optional[str] = None,
**kwargs
unique_id: Optional[str] = None,
**kwargs,
) -> tuple[VideoFromFile, str, str]:
if text:
self.validate_text(text)
@@ -1217,7 +1320,9 @@ class KlingLipSyncBase(KlingNodeBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_video_result_response(final_response)
video = get_video_from_response(final_response)
@@ -1243,6 +1348,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -1253,6 +1359,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase):
video: VideoInput,
audio: AudioInput,
voice_language: str,
unique_id: Optional[str] = None,
**kwargs,
):
return super().api_call(
@@ -1260,6 +1367,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase):
audio=audio,
voice_language=voice_language,
mode="audio2video",
unique_id=unique_id,
**kwargs,
)
@@ -1352,6 +1460,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
@@ -1363,6 +1472,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase):
text: str,
voice: str,
voice_speed: float,
unique_id: Optional[str] = None,
**kwargs,
):
voice_id, voice_language = KlingLipSyncTextToVideoNode.get_voice_config()[voice]
@@ -1373,6 +1483,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase):
voice_id=voice_id,
voice_speed=voice_speed,
mode="text2video",
unique_id=unique_id,
**kwargs,
)
@@ -1413,13 +1524,14 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human."
DESCRIPTION = "Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background."
def get_response(
self, task_id: str, auth_kwargs: dict[str,str] = None
self, task_id: str, auth_kwargs: dict[str, str], node_id: Optional[str] = None
) -> KlingVirtualTryOnResponse:
return poll_until_finished(
auth_kwargs,
@@ -1429,6 +1541,9 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase):
request_model=EmptyRequest,
response_model=KlingVirtualTryOnResponse,
),
result_url_extractor=get_images_urls_from_response,
estimated_duration=AVERAGE_DURATION_VIRTUAL_TRY_ON,
node_id=node_id,
)
def api_call(
@@ -1436,6 +1551,7 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase):
human_image: torch.Tensor,
cloth_image: torch.Tensor,
model_name: KlingVirtualTryOnModelName,
unique_id: Optional[str] = None,
**kwargs,
):
initial_operation = SynchronousOperation(
@@ -1457,7 +1573,9 @@ class KlingVirtualTryOnNode(KlingImageGenerationBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_image_result_response(final_response)
images = get_images_from_response(final_response)
@@ -1528,13 +1646,17 @@ class KlingImageGenerationNode(KlingImageGenerationBase):
"hidden": {
"auth_token": "AUTH_TOKEN_COMFY_ORG",
"comfy_api_key": "API_KEY_COMFY_ORG",
"unique_id": "UNIQUE_ID",
},
}
DESCRIPTION = "Kling Image Generation Node. Generate an image from a text prompt with an optional reference image."
def get_response(
self, task_id: str, auth_kwargs: Optional[dict[str,str]] = None
self,
task_id: str,
auth_kwargs: Optional[dict[str, str]],
node_id: Optional[str] = None,
) -> KlingImageGenerationsResponse:
return poll_until_finished(
auth_kwargs,
@@ -1544,6 +1666,9 @@ class KlingImageGenerationNode(KlingImageGenerationBase):
request_model=EmptyRequest,
response_model=KlingImageGenerationsResponse,
),
result_url_extractor=get_images_urls_from_response,
estimated_duration=AVERAGE_DURATION_IMAGE_GEN,
node_id=node_id,
)
def api_call(
@@ -1557,6 +1682,7 @@ class KlingImageGenerationNode(KlingImageGenerationBase):
n: int,
aspect_ratio: KlingImageGenAspectRatio,
image: Optional[torch.Tensor] = None,
unique_id: Optional[str] = None,
**kwargs,
):
self.validate_prompt(prompt, negative_prompt)
@@ -1589,7 +1715,9 @@ class KlingImageGenerationNode(KlingImageGenerationBase):
validate_task_creation_response(task_creation_response)
task_id = task_creation_response.data.task_id
final_response = self.get_response(task_id, auth_kwargs=kwargs)
final_response = self.get_response(
task_id, auth_kwargs=kwargs, node_id=unique_id
)
validate_image_result_response(final_response)
images = get_images_from_response(final_response)