You're looking at a specific version of this model. Jump to the model overview.
lucataco /hunyuan-heygen-woman-2:923b4f49
Input
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
// Create a Replicate API client. Authentication is taken from the
// REPLICATE_API_TOKEN environment variable set above.
import Replicate from "replicate";

const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN });
Run lucataco/hunyuan-heygen-woman-2 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Model version pinned by hash so the example stays reproducible.
const MODEL_VERSION =
  "lucataco/hunyuan-heygen-woman-2:923b4f49c7a0a882abb89494ac38fd902ef60640f742eb478bae94f225439ab4";

// Inputs for a single prediction run (values shown are the example defaults
// from the model page).
const input = {
  crf: 19,
  steps: 30,
  width: 960,
  height: 544,
  prompt: "HGW2 woman sitting on a beige couch in a well-decorated room. She is wearing a light-colored, long-sleeved turtleneck top and has long, straight brown hair. The couch is adorned with several throw pillows, each with a black and white geometric pattern. The background includes a wooden chair with a yellow cushion, a wooden side table, and a large mirror with a wooden frame. The room has a warm and cozy atmosphere, with soft lighting and a comfortable ambiance. The woman appears to be speaking or presenting something, as she is looking directly at the camera with a neutral expression",
  lora_url: "",
  scheduler: "DPMSolverMultistepScheduler",
  flow_shift: 9,
  frame_rate: 20,
  num_frames: 49,
  enhance_end: 1,
  enhance_start: 0,
  force_offload: true,
  lora_strength: 0.9,
  enhance_double: true,
  enhance_single: true,
  enhance_weight: 0.3,
  guidance_scale: 6,
  denoise_strength: 1,
};

// Run the model and log the result (the example output below is a video URL).
const output = await replicate.run(MODEL_VERSION, { input });
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run lucataco/hunyuan-heygen-woman-2 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Inputs for a single prediction run (values shown are the example defaults
# from the model page).
model_input = {
    "crf": 19,
    "steps": 30,
    "width": 960,
    "height": 544,
    "prompt": "HGW2 woman sitting on a beige couch in a well-decorated room. She is wearing a light-colored, long-sleeved turtleneck top and has long, straight brown hair. The couch is adorned with several throw pillows, each with a black and white geometric pattern. The background includes a wooden chair with a yellow cushion, a wooden side table, and a large mirror with a wooden frame. The room has a warm and cozy atmosphere, with soft lighting and a comfortable ambiance. The woman appears to be speaking or presenting something, as she is looking directly at the camera with a neutral expression",
    "lora_url": "",
    "scheduler": "DPMSolverMultistepScheduler",
    "flow_shift": 9,
    "frame_rate": 20,
    "num_frames": 49,
    "enhance_end": 1,
    "enhance_start": 0,
    "force_offload": True,
    "lora_strength": 0.9,
    "enhance_double": True,
    "enhance_single": True,
    "enhance_weight": 0.3,
    "guidance_scale": 6,
    "denoise_strength": 1,
}

# Run the pinned model version and print the result
# (the example output below is a video URL).
output = replicate.run(
    "lucataco/hunyuan-heygen-woman-2:923b4f49c7a0a882abb89494ac38fd902ef60640f742eb478bae94f225439ab4",
    input=model_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run lucataco/hunyuan-heygen-woman-2 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API.
# - "Prefer: wait" asks the API to hold the request open until the
#   prediction finishes, instead of returning immediately for polling.
# - $'...' is bash ANSI-C quoting, used here so the multi-line JSON body
#   is passed to -d as a single argument.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "923b4f49c7a0a882abb89494ac38fd902ef60640f742eb478bae94f225439ab4",
"input": {
"crf": 19,
"steps": 30,
"width": 960,
"height": 544,
"prompt": "HGW2 woman sitting on a beige couch in a well-decorated room. She is wearing a light-colored, long-sleeved turtleneck top and has long, straight brown hair. The couch is adorned with several throw pillows, each with a black and white geometric pattern. The background includes a wooden chair with a yellow cushion, a wooden side table, and a large mirror with a wooden frame. The room has a warm and cozy atmosphere, with soft lighting and a comfortable ambiance. The woman appears to be speaking or presenting something, as she is looking directly at the camera with a neutral expression",
"lora_url": "",
"scheduler": "DPMSolverMultistepScheduler",
"flow_shift": 9,
"frame_rate": 20,
"num_frames": 49,
"enhance_end": 1,
"enhance_start": 0,
"force_offload": true,
"lora_strength": 0.9,
"enhance_double": true,
"enhance_single": true,
"enhance_weight": 0.3,
"guidance_scale": 6,
"denoise_strength": 1
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Pull and run lucataco/hunyuan-heygen-woman-2 using Cog (this will download the full model and run it in your local environment):
# Run the model locally with Cog. The image is pinned by its sha256 digest,
# so this downloads the exact same version as the hosted examples above.
# Each -i flag supplies one named model input.
cog predict r8.im/lucataco/hunyuan-heygen-woman-2@sha256:923b4f49c7a0a882abb89494ac38fd902ef60640f742eb478bae94f225439ab4 \
-i 'crf=19' \
-i 'steps=30' \
-i 'width=960' \
-i 'height=544' \
-i 'prompt="HGW2 woman sitting on a beige couch in a well-decorated room. She is wearing a light-colored, long-sleeved turtleneck top and has long, straight brown hair. The couch is adorned with several throw pillows, each with a black and white geometric pattern. The background includes a wooden chair with a yellow cushion, a wooden side table, and a large mirror with a wooden frame. The room has a warm and cozy atmosphere, with soft lighting and a comfortable ambiance. The woman appears to be speaking or presenting something, as she is looking directly at the camera with a neutral expression"' \
-i 'lora_url=""' \
-i 'scheduler="DPMSolverMultistepScheduler"' \
-i 'flow_shift=9' \
-i 'frame_rate=20' \
-i 'num_frames=49' \
-i 'enhance_end=1' \
-i 'enhance_start=0' \
-i 'force_offload=true' \
-i 'lora_strength=0.9' \
-i 'enhance_double=true' \
-i 'enhance_single=true' \
-i 'enhance_weight=0.3' \
-i 'guidance_scale=6' \
-i 'denoise_strength=1'
To learn more, take a look at the Cog documentation.
Pull and run lucataco/hunyuan-heygen-woman-2 using Docker (this will download the full model and run it in your local environment):
# Start the model's HTTP server locally on port 5000. --gpus=all exposes the
# host GPUs to the container; the image is pinned by sha256 digest.
docker run -d -p 5000:5000 --gpus=all r8.im/lucataco/hunyuan-heygen-woman-2@sha256:923b4f49c7a0a882abb89494ac38fd902ef60640f742eb478bae94f225439ab4

# Send a prediction request to the local server.
# NOTE: the original one-liner contained literal "\ " sequences left over from
# collapsed line continuations, which the shell would parse as escaped-space
# arguments and break the command; restored here as a proper multi-line command.
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "crf": 19,
      "steps": 30,
      "width": 960,
      "height": 544,
      "prompt": "HGW2 woman sitting on a beige couch in a well-decorated room. She is wearing a light-colored, long-sleeved turtleneck top and has long, straight brown hair. The couch is adorned with several throw pillows, each with a black and white geometric pattern. The background includes a wooden chair with a yellow cushion, a wooden side table, and a large mirror with a wooden frame. The room has a warm and cozy atmosphere, with soft lighting and a comfortable ambiance. The woman appears to be speaking or presenting something, as she is looking directly at the camera with a neutral expression",
      "lora_url": "",
      "scheduler": "DPMSolverMultistepScheduler",
      "flow_shift": 9,
      "frame_rate": 20,
      "num_frames": 49,
      "enhance_end": 1,
      "enhance_start": 0,
      "force_offload": true,
      "lora_strength": 0.9,
      "enhance_double": true,
      "enhance_single": true,
      "enhance_weight": 0.3,
      "guidance_scale": 6,
      "denoise_strength": 1
    }
  }' \
  http://localhost:5000/predictions
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
Output
{
"completed_at": "2025-01-13T18:03:26.723907Z",
"created_at": "2025-01-13T17:59:27.532000Z",
"data_removed": false,
"error": null,
"id": "v2yv7zgj5hrmc0cmc39a8ckf2r",
"input": {
"crf": 19,
"steps": 30,
"width": 960,
"height": 544,
"prompt": "HGW2 woman sitting on a beige couch in a well-decorated room. She is wearing a light-colored, long-sleeved turtleneck top and has long, straight brown hair. The couch is adorned with several throw pillows, each with a black and white geometric pattern. The background includes a wooden chair with a yellow cushion, a wooden side table, and a large mirror with a wooden frame. The room has a warm and cozy atmosphere, with soft lighting and a comfortable ambiance. The woman appears to be speaking or presenting something, as she is looking directly at the camera with a neutral expression",
"lora_url": "",
"flow_shift": 9,
"frame_rate": 20,
"num_frames": 49,
"force_offload": true,
"lora_strength": 0.9,
"guidance_scale": 6,
"denoise_strength": 1
},
"logs": "Random seed set to: 3841941661\nChecking inputs\n====================================\nChecking weights\n✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models\n✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader\n[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\nExecuting node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\n[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\n[ComfyUI]\n[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]\n[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:02, 1.43it/s]\n[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.23it/s]\n[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:02<00:00, 1.13it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:02<00:00, 1.57it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:02<00:00, 1.42it/s]\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\nExecuting node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode\n[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 123\n[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 77\nExecuting node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect\nExecuting node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader\n[ComfyUI] model_type FLOW\n[ComfyUI] Using accelerate to load 
and assign model weights to device...\n[ComfyUI] Loading LoRA: lora_comfyui with strength: 0.9\n[ComfyUI] Requested to load HyVideoModel\n[ComfyUI] Loading 1 new model\n[ComfyUI] loaded completely 0.0 12555.953247070312 True\n[ComfyUI] Input (height, width, video_length) = (544, 960, 49)\nExecuting node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler\n[ComfyUI] Sampling 49 frames in 13 latents at 960x544 with 30 inference steps\n[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])[ComfyUI]\n[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]\n[ComfyUI] 3%|▎ | 1/30 [00:04<01:58, 4.09s/it]\n[ComfyUI] 7%|▋ | 2/30 [00:09<02:17, 4.92s/it]\n[ComfyUI] 10%|█ | 3/30 [00:15<02:19, 5.18s/it]\n[ComfyUI] 13%|█▎ | 4/30 [00:20<02:17, 5.31s/it]\n[ComfyUI] 17%|█▋ | 5/30 [00:26<02:14, 5.38s/it]\n[ComfyUI] 20%|██ | 6/30 [00:31<02:10, 5.42s/it]\n[ComfyUI] 23%|██▎ | 7/30 [00:37<02:05, 5.45s/it]\n[ComfyUI] 27%|██▋ | 8/30 [00:42<02:00, 5.46s/it]\n[ComfyUI] 30%|███ | 9/30 [00:48<01:55, 5.48s/it]\n[ComfyUI] 33%|███▎ | 10/30 [00:53<01:49, 5.48s/it]\n[ComfyUI] 37%|███▋ | 11/30 [00:59<01:44, 5.49s/it]\n[ComfyUI] 40%|████ | 12/30 [01:04<01:38, 5.49s/it]\n[ComfyUI] 43%|████▎ | 13/30 [01:10<01:33, 5.50s/it]\n[ComfyUI] 47%|████▋ | 14/30 [01:15<01:27, 5.50s/it]\n[ComfyUI] 50%|█████ | 15/30 [01:21<01:22, 5.50s/it]\n[ComfyUI] 53%|█████▎ | 16/30 [01:26<01:17, 5.50s/it]\n[ComfyUI] 57%|█████▋ | 17/30 [01:32<01:11, 5.51s/it]\n[ComfyUI] 60%|██████ | 18/30 [01:37<01:06, 5.50s/it]\n[ComfyUI] 63%|██████▎ | 19/30 [01:43<01:00, 5.50s/it]\n[ComfyUI] 67%|██████▋ | 20/30 [01:48<00:55, 5.51s/it]\n[ComfyUI] 70%|███████ | 21/30 [01:54<00:49, 5.50s/it]\n[ComfyUI] 73%|███████▎ | 22/30 [01:59<00:44, 5.50s/it]\n[ComfyUI] 77%|███████▋ | 23/30 [02:05<00:38, 5.51s/it]\n[ComfyUI] 80%|████████ | 24/30 [02:10<00:33, 5.51s/it]\n[ComfyUI] 83%|████████▎ | 25/30 [02:16<00:27, 
5.51s/it]\n[ComfyUI] 87%|████████▋ | 26/30 [02:21<00:22, 5.50s/it]\n[ComfyUI] 90%|█████████ | 27/30 [02:27<00:16, 5.50s/it]\n[ComfyUI] 93%|█████████▎| 28/30 [02:32<00:11, 5.50s/it]\n[ComfyUI] 97%|█████████▋| 29/30 [02:38<00:05, 5.50s/it]\n[ComfyUI] 100%|██████████| 30/30 [02:43<00:00, 5.50s/it]\n[ComfyUI] 100%|██████████| 30/30 [02:43<00:00, 5.46s/it]\n[ComfyUI] Allocated memory: memory=12.760 GB\n[ComfyUI] Max allocated memory: max_memory=18.839 GB\n[ComfyUI] Max reserved memory: max_reserved=20.719 GB\nExecuting node 5, title: HunyuanVideo Decode, class type: HyVideoDecode\n[ComfyUI]\n[ComfyUI] Decoding rows: 0%| | 0/3 [00:00<?, ?it/s]\n[ComfyUI] Decoding rows: 33%|███▎ | 1/3 [00:01<00:03, 1.51s/it]\n[ComfyUI] Decoding rows: 67%|██████▋ | 2/3 [00:03<00:01, 1.62s/it]\n[ComfyUI] Decoding rows: 100%|██████████| 3/3 [00:04<00:00, 1.40s/it]\n[ComfyUI] Decoding rows: 100%|██████████| 3/3 [00:04<00:00, 1.45s/it]\n[ComfyUI]\n[ComfyUI] Blending tiles: 0%| | 0/3 [00:00<?, ?it/s]\n[ComfyUI] Blending tiles: 33%|███▎ | 1/3 [00:00<00:00, 7.82it/s]\nExecuting node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\n[ComfyUI] Blending tiles: 100%|██████████| 3/3 [00:00<00:00, 18.19it/s]\n[ComfyUI] Prompt executed in 201.71 seconds\noutputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 20.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}\n====================================\nHunyuanVideo_00001.png\nHunyuanVideo_00001.mp4",
"metrics": {
"predict_time": 208.047554521,
"total_time": 239.191907
},
"output": "https://replicate.delivery/xezq/zTp9KsCdeFyf0EsWVRwCtE1BLaQQDmuvBvP0sunEJBDudwEUA/HunyuanVideo_00001.mp4",
"started_at": "2025-01-13T17:59:58.676352Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-7cue56omya2vzamcrwojmpueok7ergmkfw3byomekeqclu46cdqq",
"get": "https://api.replicate.com/v1/predictions/v2yv7zgj5hrmc0cmc39a8ckf2r",
"cancel": "https://api.replicate.com/v1/predictions/v2yv7zgj5hrmc0cmc39a8ckf2r/cancel"
},
"version": "923b4f49c7a0a882abb89494ac38fd902ef60640f742eb478bae94f225439ab4"
}
Random seed set to: 3841941661
Checking inputs
====================================
Checking weights
✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models
✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae
====================================
Running workflow
[ComfyUI] got prompt
Executing node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader
[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
Executing node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
[ComfyUI]
[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]
[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:02, 1.43it/s]
[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.23it/s]
[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:02<00:00, 1.13it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:02<00:00, 1.57it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:02<00:00, 1.42it/s]
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
Executing node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode
[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 123
[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 77
Executing node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect
Executing node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader
[ComfyUI] model_type FLOW
[ComfyUI] Using accelerate to load and assign model weights to device...
[ComfyUI] Loading LoRA: lora_comfyui with strength: 0.9
[ComfyUI] Requested to load HyVideoModel
[ComfyUI] Loading 1 new model
[ComfyUI] loaded completely 0.0 12555.953247070312 True
[ComfyUI] Input (height, width, video_length) = (544, 960, 49)
Executing node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler
[ComfyUI] Sampling 49 frames in 13 latents at 960x544 with 30 inference steps
[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])[ComfyUI]
[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]
[ComfyUI] 3%|▎ | 1/30 [00:04<01:58, 4.09s/it]
[ComfyUI] 7%|▋ | 2/30 [00:09<02:17, 4.92s/it]
[ComfyUI] 10%|█ | 3/30 [00:15<02:19, 5.18s/it]
[ComfyUI] 13%|█▎ | 4/30 [00:20<02:17, 5.31s/it]
[ComfyUI] 17%|█▋ | 5/30 [00:26<02:14, 5.38s/it]
[ComfyUI] 20%|██ | 6/30 [00:31<02:10, 5.42s/it]
[ComfyUI] 23%|██▎ | 7/30 [00:37<02:05, 5.45s/it]
[ComfyUI] 27%|██▋ | 8/30 [00:42<02:00, 5.46s/it]
[ComfyUI] 30%|███ | 9/30 [00:48<01:55, 5.48s/it]
[ComfyUI] 33%|███▎ | 10/30 [00:53<01:49, 5.48s/it]
[ComfyUI] 37%|███▋ | 11/30 [00:59<01:44, 5.49s/it]
[ComfyUI] 40%|████ | 12/30 [01:04<01:38, 5.49s/it]
[ComfyUI] 43%|████▎ | 13/30 [01:10<01:33, 5.50s/it]
[ComfyUI] 47%|████▋ | 14/30 [01:15<01:27, 5.50s/it]
[ComfyUI] 50%|█████ | 15/30 [01:21<01:22, 5.50s/it]
[ComfyUI] 53%|█████▎ | 16/30 [01:26<01:17, 5.50s/it]
[ComfyUI] 57%|█████▋ | 17/30 [01:32<01:11, 5.51s/it]
[ComfyUI] 60%|██████ | 18/30 [01:37<01:06, 5.50s/it]
[ComfyUI] 63%|██████▎ | 19/30 [01:43<01:00, 5.50s/it]
[ComfyUI] 67%|██████▋ | 20/30 [01:48<00:55, 5.51s/it]
[ComfyUI] 70%|███████ | 21/30 [01:54<00:49, 5.50s/it]
[ComfyUI] 73%|███████▎ | 22/30 [01:59<00:44, 5.50s/it]
[ComfyUI] 77%|███████▋ | 23/30 [02:05<00:38, 5.51s/it]
[ComfyUI] 80%|████████ | 24/30 [02:10<00:33, 5.51s/it]
[ComfyUI] 83%|████████▎ | 25/30 [02:16<00:27, 5.51s/it]
[ComfyUI] 87%|████████▋ | 26/30 [02:21<00:22, 5.50s/it]
[ComfyUI] 90%|█████████ | 27/30 [02:27<00:16, 5.50s/it]
[ComfyUI] 93%|█████████▎| 28/30 [02:32<00:11, 5.50s/it]
[ComfyUI] 97%|█████████▋| 29/30 [02:38<00:05, 5.50s/it]
[ComfyUI] 100%|██████████| 30/30 [02:43<00:00, 5.50s/it]
[ComfyUI] 100%|██████████| 30/30 [02:43<00:00, 5.46s/it]
[ComfyUI] Allocated memory: memory=12.760 GB
[ComfyUI] Max allocated memory: max_memory=18.839 GB
[ComfyUI] Max reserved memory: max_reserved=20.719 GB
Executing node 5, title: HunyuanVideo Decode, class type: HyVideoDecode
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/3 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 33%|███▎ | 1/3 [00:01<00:03, 1.51s/it]
[ComfyUI] Decoding rows: 67%|██████▋ | 2/3 [00:03<00:01, 1.62s/it]
[ComfyUI] Decoding rows: 100%|██████████| 3/3 [00:04<00:00, 1.40s/it]
[ComfyUI] Decoding rows: 100%|██████████| 3/3 [00:04<00:00, 1.45s/it]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/3 [00:00<?, ?it/s]
[ComfyUI] Blending tiles: 33%|███▎ | 1/3 [00:00<00:00, 7.82it/s]
Executing node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
[ComfyUI] Blending tiles: 100%|██████████| 3/3 [00:00<00:00, 18.19it/s]
[ComfyUI] Prompt executed in 201.71 seconds
outputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 20.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}
====================================
HunyuanVideo_00001.png
HunyuanVideo_00001.mp4