
Make it possible to load tokenizer data from checkpoints.

comfyanonymous
2024-07-24 16:43:53 -04:00
parent ce80e69fb8
commit 10c919f4c7
8 changed files with 26 additions and 31 deletions

comfy/sd.py
@@ -60,7 +60,7 @@ def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
 class CLIP:
-    def __init__(self, target=None, embedding_directory=None, no_init=False):
+    def __init__(self, target=None, embedding_directory=None, no_init=False, tokenizer_data={}):
         if no_init:
             return
         params = target.params.copy()
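
A note on the new signature: tokenizer_data={} is a mutable default argument, which is harmless here only as long as the dict is never mutated inside __init__. A more defensive spelling (a sketch, not what this commit does) would be:

    def __init__(self, target=None, embedding_directory=None, no_init=False, tokenizer_data=None):
        # None as the default avoids Python's shared-mutable-default pitfall;
        # fall back to a fresh empty dict when no checkpoint data is supplied.
        if tokenizer_data is None:
            tokenizer_data = {}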
@@ -79,7 +79,7 @@ class CLIP:
             if not model_management.supports_cast(load_device, dt):
                 load_device = offload_device
-        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
+        self.tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
         self.patcher = comfy.model_patcher.ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
         self.layer_idx = None
         logging.debug("CLIP model load device: {}, offload device: {}".format(load_device, offload_device))
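
For this keyword to thread through, whatever class target.tokenizer points at must accept it. A minimal sketch of a compatible tokenizer follows; the class name and the state-dict key are illustrative assumptions, not ComfyUI's actual API:

    class ExampleTokenizer:
        def __init__(self, embedding_directory=None, tokenizer_data={}):
            # tokenizer_data is the checkpoint's processed CLIP state dict, so a
            # tokenizer can pick up auxiliary tensors shipped inside the checkpoint.
            self.embedding_directory = embedding_directory
            self.extra_vocab = tokenizer_data.get("extra_vocab")  # hypothetical key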
@@ -520,7 +520,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         if clip_target is not None:
             clip_sd = model_config.process_clip_state_dict(sd)
             if len(clip_sd) > 0:
-                clip = CLIP(clip_target, embedding_directory=embedding_directory)
+                clip = CLIP(clip_target, embedding_directory=embedding_directory, tokenizer_data=clip_sd)
                 m, u = clip.load_sd(clip_sd, full_model=True)
                 if len(m) > 0:
                     m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m))
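
End to end, load_checkpoint_guess_config now hands the processed CLIP state dict (clip_sd) to the tokenizer at construction time, before load_sd loads the weights, so callers never pass tokenizer_data themselves. A usage sketch, with placeholder paths:

    import comfy.sd

    # The tokenizer receives the checkpoint's CLIP tensors internally via clip_sd.
    model, clip, vae, clipvision = comfy.sd.load_checkpoint_guess_config(
        "checkpoints/model.safetensors",  # placeholder path
        output_vae=True,
        output_clip=True,
        embedding_directory="embeddings",  # placeholder
    )
    tokens = clip.tokenize("a photo of a cat")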