Hey all, here is a simple diff that adds Mochi support to the CLIP loader. It makes running Mochi with the full model and the full T5 text encoder very viable on dual 3090/4090 setups:
diff --git a/__init__.py b/__init__.py
index 8a3ee41..6bc939f 100644
--- a/__init__.py
+++ b/__init__.py
@@ -203,7 +203,7 @@ class CLIPLoaderMultiGPU:
"required": {
"clip_name": (folder_paths.get_filename_list("clip"),),
"type": (
- ["stable_diffusion", "stable_cascade", "sd3", "stable_audio"],
+ ["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi"],
),
"device": ([f"cuda:{i}" for i in range(torch.cuda.device_count())],),
}
@@ -223,6 +223,8 @@ class CLIPLoaderMultiGPU:
clip_type = comfy.sd.CLIPType.SD3
elif type == "stable_audio":
clip_type = comfy.sd.CLIPType.STABLE_AUDIO
+ elif type == "mochi":
+ clip_type = comfy.sd.CLIPType.MOCHI
else:
clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION