From 3760d74005a6954f54657dc59d9e57fd4c44b3fd Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Sun, 8 Feb 2026 07:34:52 +0800
Subject: [PATCH 1/2] chore: update embedded docs to v0.4.1 (#12346)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 41cc9174b1c4..5e34a2a494aa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 comfyui-frontend-package==1.38.13
 comfyui-workflow-templates==0.8.31
-comfyui-embedded-docs==0.4.0
+comfyui-embedded-docs==0.4.1
 torch
 torchsde
 torchvision

From f350a842611f4d75da7104c2d2965f45989089b9 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 7 Feb 2026 16:16:28 -0800
Subject: [PATCH 2/2] Disable prompt weights for ltxv2. (#12354)

---
 comfy/text_encoders/lt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/text_encoders/lt.py b/comfy/text_encoders/lt.py
index 26573fb12059..3f87dfd6a065 100644
--- a/comfy/text_encoders/lt.py
+++ b/comfy/text_encoders/lt.py
@@ -25,7 +25,7 @@ def ltxv_te(*args, **kwargs):
 class Gemma3_12BTokenizer(sd1_clip.SDTokenizer):
     def __init__(self, embedding_directory=None, tokenizer_data={}):
         tokenizer = tokenizer_data.get("spiece_model", None)
-        super().__init__(tokenizer, pad_with_end=False, embedding_size=3840, embedding_key='gemma3_12b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False}, tokenizer_data=tokenizer_data)
+        super().__init__(tokenizer, pad_with_end=False, embedding_size=3840, embedding_key='gemma3_12b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, disable_weights=True, tokenizer_args={"add_bos": True, "add_eos": False}, tokenizer_data=tokenizer_data)

     def state_dict(self):
         return {"spiece_model": self.tokenizer.serialize_model()}