jbilcke/hunyan-video-lora-rickroll: Never Gonna AI-Clone You Up

Readme
This model doesn't have a readme.
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import { writeFile } from "node:fs/promises";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run jbilcke/hunyan-video-lora-rickroll using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "jbilcke/hunyan-video-lora-rickroll:cb308143bbe759532a715b7ea14274504182453b3a42a603ad278e48bccb9897",
  {
    input: {
      crf: 19,
      steps: 50,
      width: 640,
      height: 360,
      prompt: "In the style of RCKRL. Music video, medium-shot of a sunlit young american woman with red pompadour hair, sunglasses, pastel blue shirt and jeans, he is singing and dancing inside the NYC metro subway, camera is moving horizontally, back and forth.",
      lora_url: "",
      scheduler: "DPMSolverMultistepScheduler",
      flow_shift: 9,
      frame_rate: 24,
      num_frames: 85,
      enhance_end: 1,
      enhance_start: 0,
      force_offload: true,
      lora_strength: 1,
      enhance_double: true,
      enhance_single: true,
      enhance_weight: 0.3,
      guidance_scale: 6,
      denoise_strength: 1
    }
  }
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"

// To write the video file to disk:
await writeFile("output.mp4", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run jbilcke/hunyan-video-lora-rickroll using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "jbilcke/hunyan-video-lora-rickroll:cb308143bbe759532a715b7ea14274504182453b3a42a603ad278e48bccb9897",
    input={
        "crf": 19,
        "steps": 50,
        "width": 640,
        "height": 360,
        "prompt": "In the style of RCKRL. Music video, medium-shot of a sunlit young american woman with red pompadour hair, sunglasses, pastel blue shirt and jeans, he is singing and dancing inside the NYC metro subway, camera is moving horizontally, back and forth.",
        "lora_url": "",
        "scheduler": "DPMSolverMultistepScheduler",
        "flow_shift": 9,
        "frame_rate": 24,
        "num_frames": 85,
        "enhance_end": 1,
        "enhance_start": 0,
        "force_offload": True,
        "lora_strength": 1,
        "enhance_double": True,
        "enhance_single": True,
        "enhance_weight": 0.3,
        "guidance_scale": 6,
        "denoise_strength": 1
    }
)
print(output)
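The Python example above only prints the result. As a minimal sketch for saving the generated video locally (assuming output is either the MP4 URL string shown in the example response below, or a file-like object as returned by recent versions of the Python client; the output.mp4 filename is just an illustration), you could do something like:

import urllib.request

if isinstance(output, str):
    # Older client versions return the output URL as a plain string; download it.
    urllib.request.urlretrieve(output, "output.mp4")
else:
    # Newer client versions return a file-like FileOutput; read() yields the raw bytes.
    with open("output.mp4", "wb") as f:
        f.write(output.read())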
To learn more, take a look at the guide on getting started with Python.
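The model's schema mentioned above can also be inspected programmatically rather than in the web UI. A small sketch with the Python client (the openapi_schema attribute and the components/schemas layout reflect Replicate's current API and may change):

import replicate

# Fetch the model and read the OpenAPI schema attached to its latest version.
model = replicate.models.get("jbilcke/hunyan-video-lora-rickroll")
schema = model.latest_version.openapi_schema

# List the input parameters, their types, and default values.
input_props = schema["components"]["schemas"]["Input"]["properties"]
for name, spec in input_props.items():
    print(name, spec.get("type"), "default:", spec.get("default"))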
You can also call the model directly over Replicate's HTTP API with cURL. Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run jbilcke/hunyan-video-lora-rickroll using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "jbilcke/hunyan-video-lora-rickroll:cb308143bbe759532a715b7ea14274504182453b3a42a603ad278e48bccb9897",
    "input": {
      "crf": 19,
      "steps": 50,
      "width": 640,
      "height": 360,
      "prompt": "In the style of RCKRL. Music video, medium-shot of a sunlit young american woman with red pompadour hair, sunglasses, pastel blue shirt and jeans, he is singing and dancing inside the NYC metro subway, camera is moving horizontally, back and forth.",
      "lora_url": "",
      "scheduler": "DPMSolverMultistepScheduler",
      "flow_shift": 9,
      "frame_rate": 24,
      "num_frames": 85,
      "enhance_end": 1,
      "enhance_start": 0,
      "force_offload": true,
      "lora_strength": 1,
      "enhance_double": true,
      "enhance_single": true,
      "enhance_weight": 0.3,
      "guidance_scale": 6,
      "denoise_strength": 1
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Example prediction (JSON):

{
  "completed_at": "2025-01-08T13:57:03.478316Z",
  "created_at": "2025-01-08T13:53:02.251000Z",
  "data_removed": false,
  "error": null,
  "id": "b155c6db5drm80cm8rrbwfj47w",
  "input": {
    "crf": 19,
    "steps": 50,
    "width": 640,
    "height": 360,
    "prompt": "In the style of RCKRL. Music video, medium-shot of a sunlit young american woman with red pompadour hair, sunglasses, pastel blue shirt and jeans, he is singing and dancing inside the NYC metro subway, camera is moving horizontally, back and forth.",
    "lora_url": "",
    "flow_shift": 9,
    "frame_rate": 24,
    "num_frames": 85,
    "force_offload": true,
    "lora_strength": 1,
    "guidance_scale": 6,
    "denoise_strength": 1
  },
"logs": "Random seed set to: 2329906906\nChecking inputs\n====================================\nChecking weights\n✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae\n✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader\n[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\nExecuting node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\n[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\n[ComfyUI]\n[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]\n[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:02, 1.46it/s]\n[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.64it/s]\n[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:01<00:00, 1.70it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.52it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.10it/s]\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\nExecuting node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode\n[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 57\n[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 59\nExecuting node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect\nExecuting node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader\n[ComfyUI] model_type FLOW\n[ComfyUI] Using accelerate to load and assign model weights to device...\n[ComfyUI] Loading LoRA: lora_comfyui with strength: 1.0\n[ComfyUI] Requested to load HyVideoModel\n[ComfyUI] Loading 1 new model\n[ComfyUI] loaded completely 0.0 12555.953247070312 True\n[ComfyUI] Input (height, width, video_length) = (360, 640, 85)\nExecuting node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler\n[ComfyUI] Sampling 85 frames in 22 latents at 640x368 with 50 inference steps\n[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])\n[ComfyUI]\n[ComfyUI] 0%| | 0/50 [00:00<?, ?it/s]\n[ComfyUI] 2%|▏ | 1/50 [00:02<02:09, 2.63s/it]\n[ComfyUI] 4%|▍ | 2/50 [00:06<02:29, 3.12s/it]\n[ComfyUI] 6%|▌ | 3/50 [00:09<02:34, 3.28s/it]\n[ComfyUI] 8%|▊ | 4/50 [00:13<02:33, 3.34s/it]\n[ComfyUI] 10%|█ | 5/50 [00:16<02:32, 3.39s/it]\n[ComfyUI] 12%|█▏ | 6/50 [00:19<02:30, 3.41s/it]\n[ComfyUI] 14%|█▍ | 7/50 [00:23<02:27, 3.43s/it]\n[ComfyUI] 16%|█▌ | 8/50 [00:26<02:24, 3.44s/it]\n[ComfyUI] 18%|█▊ | 9/50 [00:30<02:21, 3.45s/it]\n[ComfyUI] 20%|██ | 10/50 [00:33<02:18, 3.45s/it]\n[ComfyUI] 22%|██▏ | 11/50 [00:37<02:14, 3.46s/it]\n[ComfyUI] 24%|██▍ | 12/50 [00:40<02:11, 3.46s/it]\n[ComfyUI] 26%|██▌ | 13/50 [00:44<02:08, 3.46s/it]\n[ComfyUI] 28%|██▊ | 14/50 [00:47<02:04, 3.46s/it]\n[ComfyUI] 30%|███ | 15/50 [00:51<02:01, 3.46s/it]\n[ComfyUI] 32%|███▏ | 16/50 [00:54<01:57, 3.46s/it]\n[ComfyUI] 34%|███▍ | 17/50 [00:58<01:54, 3.46s/it]\n[ComfyUI] 
36%|███▌ | 18/50 [01:01<01:50, 3.46s/it]\n[ComfyUI] 38%|███▊ | 19/50 [01:04<01:47, 3.47s/it]\n[ComfyUI] 40%|████ | 20/50 [01:08<01:43, 3.47s/it]\n[ComfyUI] 42%|████▏ | 21/50 [01:11<01:40, 3.47s/it]\n[ComfyUI] 44%|████▍ | 22/50 [01:15<01:37, 3.47s/it]\n[ComfyUI] 46%|████▌ | 23/50 [01:18<01:33, 3.47s/it]\n[ComfyUI] 48%|████▊ | 24/50 [01:22<01:30, 3.47s/it]\n[ComfyUI] 50%|█████ | 25/50 [01:25<01:26, 3.47s/it]\n[ComfyUI] 52%|█████▏ | 26/50 [01:29<01:23, 3.47s/it]\n[ComfyUI] 54%|█████▍ | 27/50 [01:32<01:19, 3.47s/it]\n[ComfyUI] 56%|█████▌ | 28/50 [01:36<01:16, 3.47s/it]\n[ComfyUI] 58%|█████▊ | 29/50 [01:39<01:12, 3.47s/it]\n[ComfyUI] 60%|██████ | 30/50 [01:43<01:09, 3.47s/it]\n[ComfyUI] 62%|██████▏ | 31/50 [01:46<01:05, 3.47s/it]\n[ComfyUI] 64%|██████▍ | 32/50 [01:50<01:02, 3.47s/it]\n[ComfyUI] 66%|██████▌ | 33/50 [01:53<00:58, 3.47s/it]\n[ComfyUI] 68%|██████▊ | 34/50 [01:56<00:55, 3.47s/it]\n[ComfyUI] 70%|███████ | 35/50 [02:00<00:52, 3.47s/it]\n[ComfyUI] 72%|███████▏ | 36/50 [02:03<00:48, 3.47s/it]\n[ComfyUI] 74%|███████▍ | 37/50 [02:07<00:45, 3.47s/it]\n[ComfyUI] 76%|███████▌ | 38/50 [02:10<00:41, 3.47s/it]\n[ComfyUI] 78%|███████▊ | 39/50 [02:14<00:38, 3.47s/it]\n[ComfyUI] 80%|████████ | 40/50 [02:17<00:34, 3.47s/it]\n[ComfyUI] 82%|████████▏ | 41/50 [02:21<00:31, 3.47s/it]\n[ComfyUI] 84%|████████▍ | 42/50 [02:24<00:27, 3.47s/it]\n[ComfyUI] 86%|████████▌ | 43/50 [02:28<00:24, 3.47s/it]\n[ComfyUI] 88%|████████▊ | 44/50 [02:31<00:20, 3.47s/it]\n[ComfyUI] 90%|█████████ | 45/50 [02:35<00:17, 3.47s/it]\n[ComfyUI] 92%|█████████▏| 46/50 [02:38<00:13, 3.47s/it]\n[ComfyUI] 94%|█████████▍| 47/50 [02:42<00:10, 3.47s/it]\n[ComfyUI] 96%|█████████▌| 48/50 [02:45<00:06, 3.47s/it]\n[ComfyUI] 98%|█████████▊| 49/50 [02:49<00:03, 3.47s/it]\n[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.47s/it]\n[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.45s/it]\n[ComfyUI] Allocated memory: memory=12.759 GB\n[ComfyUI] Max allocated memory: max_memory=16.810 GB\n[ComfyUI] Max reserved memory: max_reserved=18.688 GB\nExecuting node 5, title: HunyuanVideo Decode, class type: HyVideoDecode\n[ComfyUI]\n[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]\n[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:01<00:01, 1.50s/it]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.26s/it]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.29s/it]\n[ComfyUI]\n[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]\n[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 28.41it/s]\n[ComfyUI]\n[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]\n[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:00<00:00, 1.18it/s]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.41it/s]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.37it/s]\n[ComfyUI]\n[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]\nExecuting node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\n[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 47.88it/s]\n[ComfyUI] Prompt executed in 208.31 seconds\noutputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 24.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}\n====================================\nHunyuanVideo_00001.png\nHunyuanVideo_00001.mp4",
"metrics": {
"predict_time": 214.262533471,
"total_time": 241.227316
},
"output": "https://replicate.delivery/xezq/XRse3ArjG23ex0HemLcabKwMbAiogjTU0jg4mFCeraO8iNMQB/HunyuanVideo_00001.mp4",
"started_at": "2025-01-08T13:53:29.215782Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-45gopx45xmitmwgsngbwscpt53yh6ziqmifflmvd57aqhl6gsqja",
"get": "https://api.replicate.com/v1/predictions/b155c6db5drm80cm8rrbwfj47w",
"cancel": "https://api.replicate.com/v1/predictions/b155c6db5drm80cm8rrbwfj47w/cancel"
},
"version": "cb308143bbe759532a715b7ea14274504182453b3a42a603ad278e48bccb9897"
}
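Instead of blocking with the Prefer: wait header, a client can poll the urls.get endpoint shown in the response above until the prediction reaches a terminal status (succeeded, failed, or canceled). A minimal polling sketch in Python using only the standard library (the prediction ID is the one from this example; the 5-second interval is an arbitrary choice):

import json
import os
import time
import urllib.request

def get_prediction(prediction_id):
    # GET /v1/predictions/{id} returns the current state of a prediction.
    req = urllib.request.Request(
        "https://api.replicate.com/v1/predictions/" + prediction_id,
        headers={"Authorization": "Bearer " + os.environ["REPLICATE_API_TOKEN"]},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

prediction = get_prediction("b155c6db5drm80cm8rrbwfj47w")
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(5)
    prediction = get_prediction(prediction["id"])

print(prediction["status"], prediction.get("output"))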
Logs:

Random seed set to: 2329906906
Checking inputs
====================================
Checking weights
✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae
✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models
====================================
Running workflow
[ComfyUI] got prompt
Executing node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader
[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
Executing node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
[ComfyUI]
[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]
[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:02, 1.46it/s]
[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.64it/s]
[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:01<00:00, 1.70it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.52it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.10it/s]
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
Executing node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode
[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 57
[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 59
Executing node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect
Executing node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader
[ComfyUI] model_type FLOW
[ComfyUI] Using accelerate to load and assign model weights to device...
[ComfyUI] Loading LoRA: lora_comfyui with strength: 1.0
[ComfyUI] Requested to load HyVideoModel
[ComfyUI] Loading 1 new model
[ComfyUI] loaded completely 0.0 12555.953247070312 True
[ComfyUI] Input (height, width, video_length) = (360, 640, 85)
Executing node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler
[ComfyUI] Sampling 85 frames in 22 latents at 640x368 with 50 inference steps
[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])
[ComfyUI]
[ComfyUI] 0%| | 0/50 [00:00<?, ?it/s]
[ComfyUI] 2%|▏ | 1/50 [00:02<02:09, 2.63s/it]
[ComfyUI] 4%|▍ | 2/50 [00:06<02:29, 3.12s/it]
[ComfyUI] 6%|▌ | 3/50 [00:09<02:34, 3.28s/it]
[ComfyUI] 8%|▊ | 4/50 [00:13<02:33, 3.34s/it]
[ComfyUI] 10%|█ | 5/50 [00:16<02:32, 3.39s/it]
[ComfyUI] 12%|█▏ | 6/50 [00:19<02:30, 3.41s/it]
[ComfyUI] 14%|█▍ | 7/50 [00:23<02:27, 3.43s/it]
[ComfyUI] 16%|█▌ | 8/50 [00:26<02:24, 3.44s/it]
[ComfyUI] 18%|█▊ | 9/50 [00:30<02:21, 3.45s/it]
[ComfyUI] 20%|██ | 10/50 [00:33<02:18, 3.45s/it]
[ComfyUI] 22%|██▏ | 11/50 [00:37<02:14, 3.46s/it]
[ComfyUI] 24%|██▍ | 12/50 [00:40<02:11, 3.46s/it]
[ComfyUI] 26%|██▌ | 13/50 [00:44<02:08, 3.46s/it]
[ComfyUI] 28%|██▊ | 14/50 [00:47<02:04, 3.46s/it]
[ComfyUI] 30%|███ | 15/50 [00:51<02:01, 3.46s/it]
[ComfyUI] 32%|███▏ | 16/50 [00:54<01:57, 3.46s/it]
[ComfyUI] 34%|███▍ | 17/50 [00:58<01:54, 3.46s/it]
[ComfyUI] 36%|███▌ | 18/50 [01:01<01:50, 3.46s/it]
[ComfyUI] 38%|███▊ | 19/50 [01:04<01:47, 3.47s/it]
[ComfyUI] 40%|████ | 20/50 [01:08<01:43, 3.47s/it]
[ComfyUI] 42%|████▏ | 21/50 [01:11<01:40, 3.47s/it]
[ComfyUI] 44%|████▍ | 22/50 [01:15<01:37, 3.47s/it]
[ComfyUI] 46%|████▌ | 23/50 [01:18<01:33, 3.47s/it]
[ComfyUI] 48%|████▊ | 24/50 [01:22<01:30, 3.47s/it]
[ComfyUI] 50%|█████ | 25/50 [01:25<01:26, 3.47s/it]
[ComfyUI] 52%|█████▏ | 26/50 [01:29<01:23, 3.47s/it]
[ComfyUI] 54%|█████▍ | 27/50 [01:32<01:19, 3.47s/it]
[ComfyUI] 56%|█████▌ | 28/50 [01:36<01:16, 3.47s/it]
[ComfyUI] 58%|█████▊ | 29/50 [01:39<01:12, 3.47s/it]
[ComfyUI] 60%|██████ | 30/50 [01:43<01:09, 3.47s/it]
[ComfyUI] 62%|██████▏ | 31/50 [01:46<01:05, 3.47s/it]
[ComfyUI] 64%|██████▍ | 32/50 [01:50<01:02, 3.47s/it]
[ComfyUI] 66%|██████▌ | 33/50 [01:53<00:58, 3.47s/it]
[ComfyUI] 68%|██████▊ | 34/50 [01:56<00:55, 3.47s/it]
[ComfyUI] 70%|███████ | 35/50 [02:00<00:52, 3.47s/it]
[ComfyUI] 72%|███████▏ | 36/50 [02:03<00:48, 3.47s/it]
[ComfyUI] 74%|███████▍ | 37/50 [02:07<00:45, 3.47s/it]
[ComfyUI] 76%|███████▌ | 38/50 [02:10<00:41, 3.47s/it]
[ComfyUI] 78%|███████▊ | 39/50 [02:14<00:38, 3.47s/it]
[ComfyUI] 80%|████████ | 40/50 [02:17<00:34, 3.47s/it]
[ComfyUI] 82%|████████▏ | 41/50 [02:21<00:31, 3.47s/it]
[ComfyUI] 84%|████████▍ | 42/50 [02:24<00:27, 3.47s/it]
[ComfyUI] 86%|████████▌ | 43/50 [02:28<00:24, 3.47s/it]
[ComfyUI] 88%|████████▊ | 44/50 [02:31<00:20, 3.47s/it]
[ComfyUI] 90%|█████████ | 45/50 [02:35<00:17, 3.47s/it]
[ComfyUI] 92%|█████████▏| 46/50 [02:38<00:13, 3.47s/it]
[ComfyUI] 94%|█████████▍| 47/50 [02:42<00:10, 3.47s/it]
[ComfyUI] 96%|█████████▌| 48/50 [02:45<00:06, 3.47s/it]
[ComfyUI] 98%|█████████▊| 49/50 [02:49<00:03, 3.47s/it]
[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.47s/it]
[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.45s/it]
[ComfyUI] Allocated memory: memory=12.759 GB
[ComfyUI] Max allocated memory: max_memory=16.810 GB
[ComfyUI] Max reserved memory: max_reserved=18.688 GB
Executing node 5, title: HunyuanVideo Decode, class type: HyVideoDecode
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:01<00:01, 1.50s/it]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.26s/it]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.29s/it]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 28.41it/s]
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:00<00:00, 1.18it/s]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.41it/s]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.37it/s]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]
Executing node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 47.88it/s]
[ComfyUI] Prompt executed in 208.31 seconds
outputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 24.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}
====================================
HunyuanVideo_00001.png
HunyuanVideo_00001.mp4
This model runs on Nvidia H100 GPU hardware. We don't yet have enough runs of this model to provide performance information.
You'll get a fast response if the model is warm and already running, and a slower response if it's cold and starting up.