fofr/wan2.1-with-lora:1bebc32a
Input
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/wan2.1-with-lora using Replicate's API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"fofr/wan2.1-with-lora:1bebc32a3640c7ce70c520728365c9f939bac5805b6c76d8f6f9ffbcaf77a564",
{
input: {
image: "https://replicate.delivery/pbxt/Mfw3oD2JVarh7yhoZvMZc58dl9ORuNlSD6wCSM9oaTyQu3pZ/th6sw28afnrma0cmq5hbtrd4h8%20%281%29.png",
model: "14b",
frames: 81,
prompt: "In the video, a miniature pikachu is presented. The pikachu is held in a person's hands. The person then presses on the pikachu, causing a sq41sh squish effect. The person keeps pressing down on the pikachu, further showing the sq41sh squish effect.",
lora_url: "https://huggingface.co/Remade-AI/Squish/resolve/main/squish_18.safetensors",
fast_mode: "Balanced",
resolution: "480p",
aspect_ratio: "16:9",
sample_shift: 8,
sample_steps: 30,
negative_prompt: "",
lora_strength_clip: 1,
sample_guide_scale: 5,
lora_strength_model: 1
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk (assumes: import fs from "node:fs/promises"):
await fs.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/wan2.1-with-lora using Replicate's API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"fofr/wan2.1-with-lora:1bebc32a3640c7ce70c520728365c9f939bac5805b6c76d8f6f9ffbcaf77a564",
input={
"image": "https://replicate.delivery/pbxt/Mfw3oD2JVarh7yhoZvMZc58dl9ORuNlSD6wCSM9oaTyQu3pZ/th6sw28afnrma0cmq5hbtrd4h8%20%281%29.png",
"model": "14b",
"frames": 81,
"prompt": "In the video, a miniature pikachu is presented. The pikachu is held in a person's hands. The person then presses on the pikachu, causing a sq41sh squish effect. The person keeps pressing down on the pikachu, further showing the sq41sh squish effect.",
"lora_url": "https://huggingface.co/Remade-AI/Squish/resolve/main/squish_18.safetensors",
"fast_mode": "Balanced",
"resolution": "480p",
"aspect_ratio": "16:9",
"sample_shift": 8,
"sample_steps": 30,
"negative_prompt": "",
"lora_strength_clip": 1,
"sample_guide_scale": 5,
"lora_strength_model": 1
}
)
print(output)
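The example above just prints the raw output. As a minimal sketch of saving the result (an assumption, not part of the official example: it relies on replicate>=1.0, where run() returns FileOutput objects with a .url attribute and a .read() method; older clients return plain URL strings you would download yourself):

# Access the hosted URL of the generated video
print(output[0].url)
# Write the video to disk (FileOutput.read() returns the raw bytes)
with open("output.mp4", "wb") as f:
    f.write(output[0].read())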
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/wan2.1-with-lora using Replicate's API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "fofr/wan2.1-with-lora:1bebc32a3640c7ce70c520728365c9f939bac5805b6c76d8f6f9ffbcaf77a564",
"input": {
"image": "https://replicate.delivery/pbxt/Mfw3oD2JVarh7yhoZvMZc58dl9ORuNlSD6wCSM9oaTyQu3pZ/th6sw28afnrma0cmq5hbtrd4h8%20%281%29.png",
"model": "14b",
"frames": 81,
"prompt": "In the video, a miniature pikachu is presented. The pikachu is held in a person\'s hands. The person then presses on the pikachu, causing a sq41sh squish effect. The person keeps pressing down on the pikachu, further showing the sq41sh squish effect.",
"lora_url": "https://huggingface.co/Remade-AI/Squish/resolve/main/squish_18.safetensors",
"fast_mode": "Balanced",
"resolution": "480p",
"aspect_ratio": "16:9",
"sample_shift": 8,
"sample_steps": 30,
"negative_prompt": "",
"lora_strength_clip": 1,
"sample_guide_scale": 5,
"lora_strength_model": 1
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate's HTTP API reference docs.
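Because the request above sends the Prefer: wait header, the response usually contains the finished prediction. For longer runs, you can poll the prediction's "get" URL from the "urls" field of the response until it reaches a terminal status. A minimal sketch in Python (assuming the third-party requests library is installed; the ID here is taken from the example prediction shown below):

import os
import time

import requests

headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}
# "get" URL from the "urls" field of the create response
url = "https://api.replicate.com/v1/predictions/wazymds0a9rmc0cnmherdhan8m"

# Poll until the prediction settles; non-terminal statuses
# are "starting" and "processing".
while True:
    prediction = requests.get(url, headers=headers).json()
    if prediction["status"] in ("succeeded", "failed", "canceled"):
        break
    time.sleep(2)

print(prediction["status"], prediction.get("output"))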
Output
{
"completed_at": "2025-03-17T13:52:09.478085Z",
"created_at": "2025-03-17T13:48:41.426000Z",
"data_removed": false,
"error": null,
"id": "wazymds0a9rmc0cnmherdhan8m",
"input": {
"image": "https://replicate.delivery/pbxt/Mfw3oD2JVarh7yhoZvMZc58dl9ORuNlSD6wCSM9oaTyQu3pZ/th6sw28afnrma0cmq5hbtrd4h8%20%281%29.png",
"model": "14b",
"frames": 81,
"prompt": "In the video, a miniature pikachu is presented. The pikachu is held in a person's hands. The person then presses on the pikachu, causing a sq41sh squish effect. The person keeps pressing down on the pikachu, further showing the sq41sh squish effect.",
"lora_url": "https://huggingface.co/Remade-AI/Squish/resolve/main/squish_18.safetensors",
"fast_mode": "Balanced",
"resolution": "480p",
"aspect_ratio": "16:9",
"sample_shift": 8,
"sample_steps": 30,
"negative_prompt": "",
"lora_strength_clip": 1,
"sample_guide_scale": 5,
"lora_strength_model": 1
},
"logs": "Random seed set to: 4196143057\n{'3': {'inputs': {'seed': 4196143057, 'steps': 30, 'cfg': 5.0, 'sampler_name': 'uni_pc', 'scheduler': 'simple', 'denoise': 1, 'model': ['53', 0], 'positive': ['58', 0], 'negative': ['58', 1], 'latent_image': ['58', 2]}, 'class_type': 'KSampler', '_meta': {'title': 'KSampler'}}, '6': {'inputs': {'text': \"In the video, a miniature pikachu is presented. The pikachu is held in a person's hands. The person then presses on the pikachu, causing a sq41sh squish effect. The person keeps pressing down on the pikachu, further showing the sq41sh squish effect.\", 'clip': ['49', 1]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Positive Prompt)'}}, '7': {'inputs': {'text': 'nsfw, ', 'clip': ['38', 0]}, 'class_type': 'CLIPTextEncode', '_meta': {'title': 'CLIP Text Encode (Negative Prompt)'}}, '8': {'inputs': {'samples': ['3', 0], 'vae': ['39', 0]}, 'class_type': 'VAEDecode', '_meta': {'title': 'VAE Decode'}}, '37': {'inputs': {'unet_name': 'wan2.1_i2v_480p_14B_bf16.safetensors', 'weight_dtype': 'default'}, 'class_type': 'UNETLoader', '_meta': {'title': 'Load Diffusion Model'}}, '38': {'inputs': {'clip_name': 'umt5_xxl_fp16.safetensors', 'type': 'wan', 'device': 'default'}, 'class_type': 'CLIPLoader', '_meta': {'title': 'Load CLIP'}}, '39': {'inputs': {'vae_name': 'wan_2.1_vae.safetensors'}, 'class_type': 'VAELoader', '_meta': {'title': 'Load VAE'}}, '48': {'inputs': {'shift': 8.0, 'model': ['49', 0]}, 'class_type': 'ModelSamplingSD3', '_meta': {'title': 'ModelSamplingSD3'}}, '49': {'inputs': {'lora_name': 'https://huggingface.co/Remade-AI/Squish/resolve/main/squish_18.safetensors', 'strength_model': 1.0, 'strength_clip': 1.0, 'model': ['54', 0], 'clip': ['38', 0]}, 'class_type': 'LoraLoader', '_meta': {'title': 'Load LoRA'}}, '50': {'inputs': {'frame_rate': 16, 'loop_count': 0, 'filename_prefix': 'R8_Wan', 'format': 'video/h264-mp4', 'pix_fmt': 'yuv420p', 'crf': 19, 'save_metadata': False, 'trim_to_audio': False, 'pingpong': False, 'save_output': True, 'images': ['8', 0]}, 'class_type': 'VHS_VideoCombine', '_meta': {'title': 'Video Combine π₯π
₯π
π
’'}}, '53': {'inputs': {'weight': 0.2, 'model': ['48', 0], 'latent': ['58', 2]}, 'class_type': 'WanVideoEnhanceAVideoKJ', '_meta': {'title': 'WanVideo Enhance A Video (native)'}}, '54': {'inputs': {'rel_l1_thresh': 0.19, 'start_percent': 0.1, 'end_percent': 1, 'cache_device': 'offload_device', 'coefficients': 'i2v_480', 'model': ['37', 0]}, 'class_type': 'WanVideoTeaCacheKJ', '_meta': {'title': 'WanVideo Tea Cache (native)'}}, '55': {'inputs': {'image': 'image.png'}, 'class_type': 'LoadImage', '_meta': {'title': 'Load Image'}}, '56': {'inputs': {'target_size': 644, 'multiple_of': 28, 'image': ['55', 0]}, 'class_type': 'Width and height for scaling image to ideal resolution πͺ΄', '_meta': {'title': 'Width and height for scaling image to ideal resolution πͺ΄'}}, '57': {'inputs': {'width': ['56', 0], 'height': ['56', 1], 'interpolation': 'lanczos', 'method': 'stretch', 'condition': 'always', 'multiple_of': 0, 'image': ['55', 0]}, 'class_type': 'ImageResize+', '_meta': {'title': 'π§ Image Resize'}}, '58': {'inputs': {'width': ['56', 0], 'height': ['56', 1], 'length': 81, 'batch_size': 1, 'positive': ['6', 0], 'negative': ['7', 0], 'vae': ['39', 0], 'clip_vision_output': ['59', 0], 'start_image': ['57', 0]}, 'class_type': 'WanImageToVideo', '_meta': {'title': 'WanImageToVideo'}}, '59': {'inputs': {'crop': 'none', 'clip_vision': ['60', 0], 'image': ['57', 0]}, 'class_type': 'CLIPVisionEncode', '_meta': {'title': 'CLIP Vision Encode'}}, '60': {'inputs': {'clip_name': 'clip_vision_h.safetensors'}, 'class_type': 'CLIPVisionLoader', '_meta': {'title': 'Load CLIP Vision'}}}\nChecking inputs\nβ
/tmp/inputs/image.png\n====================================\nChecking weights\nConverting LoraLoader node 49 to LoraLoaderFromURL\nβ
wan_2.1_vae.safetensors exists in ComfyUI/models/vae\nβ
umt5_xxl_fp16.safetensors exists in ComfyUI/models/text_encoders\nβ
clip_vision_h.safetensors exists in ComfyUI/models/clip_vision\nβ³ Downloading wan2.1_i2v_480p_14B_bf16.safetensors to ComfyUI/models/diffusion_models\nβ
wan2.1_i2v_480p_14B_bf16.safetensors downloaded to ComfyUI/models/diffusion_models in 16.97s, size: 31270.88MB\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 39, title: Load VAE, class type: VAELoader\n[ComfyUI] Using pytorch attention in VAE\n[ComfyUI] Using pytorch attention in VAE\n[ComfyUI] VAE load device: cuda:0, offload device: cpu, dtype: torch.bfloat16\nExecuting node 55, title: Load Image, class type: LoadImage\nExecuting node 56, title: Width and height for scaling image to ideal resolution πͺ΄, class type: Width and height for scaling image to ideal resolution πͺ΄\nExecuting node 57, title: π§ Image Resize, class type: ImageResize+\nExecuting node 60, title: Load CLIP Vision, class type: CLIPVisionLoader\n[ComfyUI] Requested to load CLIPVisionModelProjection\nExecuting node 59, title: CLIP Vision Encode, class type: CLIPVisionEncode\n[ComfyUI] loaded completely 141327.4875 1208.09814453125 True\nExecuting node 38, title: Load CLIP, class type: CLIPLoader\n[ComfyUI] CLIP/text encoder model load device: cuda:0, offload device: cpu, current: cpu, dtype: torch.float16\n[ComfyUI] Requested to load WanTEModel\nExecuting node 7, title: CLIP Text Encode (Negative Prompt), class type: CLIPTextEncode\n[ComfyUI] loaded completely 139855.3869140625 10835.4765625 True\nExecuting node 37, title: Load Diffusion Model, class type: UNETLoader\n[ComfyUI] model weight dtype torch.float16, manual cast: None\n[ComfyUI] model_type FLOW\nExecuting node 54, title: WanVideo Tea Cache (native), class type: WanVideoTeaCacheKJ\nExecuting node 49, title: Load LoRA, class type: LoraLoaderFromURL\n[ComfyUI] Requested to load WanTEModel\nExecuting node 6, title: CLIP Text Encode (Positive Prompt), class type: CLIPTextEncode\n[ComfyUI] loaded completely 139853.3869140625 10835.4765625 True\nExecuting node 58, title: WanImageToVideo, class type: WanImageToVideo\n[ComfyUI] Requested to load WanVAE\n[ComfyUI] loaded completely 125293.1650352478 242.02829551696777 True\nExecuting node 48, title: ModelSamplingSD3, class type: ModelSamplingSD3\nExecuting node 53, title: WanVideo Enhance A Video (native), class type: WanVideoEnhanceAVideoKJ\nExecuting node 3, title: KSampler, class type: KSampler\n[ComfyUI] Requested to load WAN21\n[ComfyUI] loaded completely 122924.54113397522 31269.802368164062 True\n[ComfyUI] Creating huggingface_cache directory within comfy\n[ComfyUI]\n[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]\n[ComfyUI] 3%|β | 1/30 [00:07<03:46, 7.80s/it]\n[ComfyUI] 7%|β | 2/30 [00:17<04:07, 8.82s/it]\n[ComfyUI] 10%|β | 3/30 [00:26<04:07, 9.17s/it]\n[ComfyUI] TeaCache: Initialized\n[ComfyUI]\n[ComfyUI] 13%|ββ | 4/30 [00:38<04:25, 10.23s/it]\n[ComfyUI] 20%|ββ | 6/30 [00:48<02:58, 7.44s/it]\n[ComfyUI] 23%|βββ | 7/30 [00:58<03:06, 8.12s/it]\n[ComfyUI] 30%|βββ | 9/30 [01:08<02:21, 6.74s/it]\n[ComfyUI] 37%|ββββ | 11/30 [01:18<01:55, 6.06s/it]\n[ComfyUI] 43%|βββββ | 13/30 [01:28<01:36, 5.68s/it]\n[ComfyUI] 50%|βββββ | 15/30 [01:38<01:21, 5.44s/it]\n[ComfyUI] 57%|ββββββ | 17/30 [01:48<01:08, 5.30s/it]\n[ComfyUI] 63%|βββββββ | 19/30 [01:58<00:57, 5.20s/it]\n[ComfyUI] 70%|βββββββ | 21/30 [02:08<00:46, 5.13s/it]\n[ComfyUI] 77%|ββββββββ | 23/30 [02:08<00:25, 3.57s/it]\n[ComfyUI] 80%|ββββββββ | 24/30 [02:18<00:28, 4.71s/it]\n[ComfyUI] 87%|βββββββββ | 26/30 [02:18<00:12, 3.13s/it]\n[ComfyUI] 90%|βββββββββ | 27/30 [02:28<00:13, 4.45s/it]\n[ComfyUI] 97%|ββββββββββ| 29/30 [02:38<00:04, 4.64s/it]\n[ComfyUI] 100%|ββββββββββ| 30/30 [02:48<00:00, 5.71s/it]\nExecuting node 8, title: VAE 
Decode, class type: VAEDecode\nExecuting node 50, title: Video Combine π₯π
₯π
π
’, class type: VHS_VideoCombine\n[ComfyUI] 100%|ββββββββββ| 30/30 [02:48<00:00, 5.61s/it]\n[ComfyUI] Prompt executed in 190.75 seconds\noutputs: {'50': {'gifs': [{'filename': 'R8_Wan_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 16.0, 'workflow': 'R8_Wan_00001.png', 'fullpath': '/tmp/outputs/R8_Wan_00001.mp4'}]}}\n====================================\nR8_Wan_00001.png\nR8_Wan_00001.mp4",
"metrics": {
"predict_time": 208.044792774,
"total_time": 208.052085
},
"output": [
"https://replicate.delivery/xezq/OssFM33If0yBG6ODzomeaSi0jplJgF9B8VARAvSJLPGJsdZUA/R8_Wan_00001.mp4"
],
"started_at": "2025-03-17T13:48:41.433292Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-4ehygtgm3yhgebi4vxzz2kbkxviabjgsxv2i4vb3sxn5o5g6qkza",
"get": "https://api.replicate.com/v1/predictions/wazymds0a9rmc0cnmherdhan8m",
"cancel": "https://api.replicate.com/v1/predictions/wazymds0a9rmc0cnmherdhan8m/cancel",
"web": "https://replicate.com/p/wazymds0a9rmc0cnmherdhan8m"
},
"version": "1bebc32a3640c7ce70c520728365c9f939bac5805b6c76d8f6f9ffbcaf77a564"
}
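For reference, a prediction like the one above can also be re-fetched by ID with the Python client; a short sketch (the attribute names mirror the JSON fields above, and the ID is taken from the example):

import replicate

prediction = replicate.predictions.get("wazymds0a9rmc0cnmherdhan8m")
print(prediction.status)      # "succeeded"
print(prediction.output)      # list containing the delivered .mp4 URL
print(prediction.logs[:200])  # start of the ComfyUI run log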