mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-08-02 23:14:49 +08:00
Try to keep text encoders loaded and patched to increase speed.
load_model_gpu() is now used with the text encoder models instead of just the unet.
This commit is contained in:
@@ -112,11 +112,9 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
        tokens = torch.LongTensor(tokens).to(device)

        if backup_embeds.weight.dtype != torch.float32:
            print("autocast clip")
            precision_scope = torch.autocast
        else:
            precision_scope = contextlib.nullcontext
            print("no autocast clip")

        with precision_scope(model_management.get_autocast_device(device)):
            outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
Reference in New Issue
Block a user