Readme
This model doesn't have a readme.
A Hunyuan fine-tuned on the Homer disappearing into a bush meme
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
// Import the Replicate client library.
import Replicate from "replicate";
// Create a client authenticated with the REPLICATE_API_TOKEN
// environment variable set above.
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/hunyuan-homer-meme using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Input parameters for the fofr/hunyuan-homer-meme video model.
// Defaults shown here produce an 85-frame, 640x360 clip at 24 fps.
const input = {
  crf: 19,
  steps: 50,
  width: 640,
  height: 360,
  prompt: "A Simpsons animation showing HOMER slowly backing away into green bushes until he is out of sight",
  lora_url: "",
  scheduler: "DPMSolverMultistepScheduler",
  flow_shift: 9,
  frame_rate: 24,
  num_frames: 85,
  enhance_end: 1,
  enhance_start: 0,
  force_offload: true,
  lora_strength: 1,
  enhance_double: true,
  enhance_single: true,
  enhance_weight: 0.3,
  guidance_scale: 6,
  denoise_strength: 1,
};

// Run the pinned model version and wait for the generated video URL.
const output = await replicate.run(
  "fofr/hunyuan-homer-meme:f0eab4bab4afa1d9d1484a21118bb7539babfac0a119dc41e5b1764a1ec1357b",
  { input }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/hunyuan-homer-meme using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Input parameters for the fofr/hunyuan-homer-meme video model.
# Defaults shown here produce an 85-frame, 640x360 clip at 24 fps.
model_input = {
    "crf": 19,
    "steps": 50,
    "width": 640,
    "height": 360,
    "prompt": "A Simpsons animation showing HOMER slowly backing away into green bushes until he is out of sight",
    "lora_url": "",
    "scheduler": "DPMSolverMultistepScheduler",
    "flow_shift": 9,
    "frame_rate": 24,
    "num_frames": 85,
    "enhance_end": 1,
    "enhance_start": 0,
    "force_offload": True,
    "lora_strength": 1,
    "enhance_double": True,
    "enhance_single": True,
    "enhance_weight": 0.3,
    "guidance_scale": 6,
    "denoise_strength": 1,
}

# Run the pinned model version and print the generated video URL.
output = replicate.run(
    "fofr/hunyuan-homer-meme:f0eab4bab4afa1d9d1484a21118bb7539babfac0a119dc41e5b1764a1ec1357b",
    input=model_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/hunyuan-homer-meme using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API.
# The "Prefer: wait" header holds the connection open until the
# prediction finishes, so the response contains the final output.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "f0eab4bab4afa1d9d1484a21118bb7539babfac0a119dc41e5b1764a1ec1357b",
"input": {
"crf": 19,
"steps": 50,
"width": 640,
"height": 360,
"prompt": "A Simpsons animation showing HOMER slowly backing away into green bushes until he is out of sight",
"lora_url": "",
"scheduler": "DPMSolverMultistepScheduler",
"flow_shift": 9,
"frame_rate": 24,
"num_frames": 85,
"enhance_end": 1,
"enhance_start": 0,
"force_offload": true,
"lora_strength": 1,
"enhance_double": true,
"enhance_single": true,
"enhance_weight": 0.3,
"guidance_scale": 6,
"denoise_strength": 1
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
# Download the model image and run a prediction locally with Cog.
# Each -i flag supplies one input; string values are quoted inside the
# single-quoted argument, numeric/boolean values are passed bare.
cog predict r8.im/fofr/hunyuan-homer-meme@sha256:f0eab4bab4afa1d9d1484a21118bb7539babfac0a119dc41e5b1764a1ec1357b \
-i 'crf=19' \
-i 'steps=50' \
-i 'width=640' \
-i 'height=360' \
-i 'prompt="A Simpsons animation showing HOMER slowly backing away into green bushes until he is out of sight"' \
-i 'lora_url=""' \
-i 'scheduler="DPMSolverMultistepScheduler"' \
-i 'flow_shift=9' \
-i 'frame_rate=24' \
-i 'num_frames=85' \
-i 'enhance_end=1' \
-i 'enhance_start=0' \
-i 'force_offload=true' \
-i 'lora_strength=1' \
-i 'enhance_double=true' \
-i 'enhance_single=true' \
-i 'enhance_weight=0.3' \
-i 'guidance_scale=6' \
-i 'denoise_strength=1'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
# Start the model's container in the background, exposing its HTTP
# prediction API on port 5000. Requires an NVIDIA GPU (--gpus=all).
docker run -d -p 5000:5000 --gpus=all r8.im/fofr/hunyuan-homer-meme@sha256:f0eab4bab4afa1d9d1484a21118bb7539babfac0a119dc41e5b1764a1ec1357b
# Send a prediction request to the locally running container.
# NOTE: the original was flattened onto one line while keeping the
# literal `\` continuation characters, which makes the command invalid
# (`\ ` escapes the following space instead of continuing the line).
# Reformatted here as a proper multi-line command.
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "crf": 19,
      "steps": 50,
      "width": 640,
      "height": 360,
      "prompt": "A Simpsons animation showing HOMER slowly backing away into green bushes until he is out of sight",
      "lora_url": "",
      "scheduler": "DPMSolverMultistepScheduler",
      "flow_shift": 9,
      "frame_rate": 24,
      "num_frames": 85,
      "enhance_end": 1,
      "enhance_start": 0,
      "force_offload": true,
      "lora_strength": 1,
      "enhance_double": true,
      "enhance_single": true,
      "enhance_weight": 0.3,
      "guidance_scale": 6,
      "denoise_strength": 1
    }
  }' \
  http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2025-01-08T08:00:27.105310Z",
"created_at": "2025-01-08T07:56:16.869000Z",
"data_removed": false,
"error": null,
"id": "00pqh8rccnrmc0cm8knb0c69aw",
"input": {
"crf": 19,
"steps": 50,
"width": 640,
"height": 360,
"prompt": "A Simpsons animation showing HOMER slowly backing away into green bushes until he is out of sight",
"lora_url": "",
"flow_shift": 9,
"frame_rate": 24,
"num_frames": 85,
"force_offload": true,
"lora_strength": 1,
"guidance_scale": 6,
"denoise_strength": 1
},
"logs": "Random seed set to: 3492392950\nChecking inputs\n====================================\nChecking weights\n✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models\n✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader\nExecuting node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder\n[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\n[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\n[ComfyUI]\n[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]\n[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:01, 1.71it/s]\n[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.74it/s]\n[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:01<00:00, 1.78it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.60it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.21it/s]\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\nExecuting node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode\n[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 20\n[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 19\nExecuting node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect\nExecuting node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader\n[ComfyUI] model_type FLOW\n[ComfyUI] Using accelerate to load 
and assign model weights to device...\n[ComfyUI] Loading LoRA: lora_comfyui with strength: 1.0\n[ComfyUI] Requested to load HyVideoModel\n[ComfyUI] Loading 1 new model\n[ComfyUI] loaded completely 0.0 12555.953247070312 True\n[ComfyUI] Input (height, width, video_length) = (360, 640, 85)\nExecuting node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler\n[ComfyUI] Sampling 85 frames in 22 latents at 640x368 with 50 inference steps\n[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])\n[ComfyUI]\n[ComfyUI] 0%| | 0/50 [00:00<?, ?it/s]\n[ComfyUI] 2%|▏ | 1/50 [00:02<02:09, 2.64s/it]\n[ComfyUI] 4%|▍ | 2/50 [00:06<02:30, 3.13s/it]\n[ComfyUI] 6%|▌ | 3/50 [00:09<02:34, 3.29s/it]\n[ComfyUI] 8%|▊ | 4/50 [00:13<02:34, 3.36s/it]\n[ComfyUI] 10%|█ | 5/50 [00:16<02:33, 3.41s/it]\n[ComfyUI] 12%|█▏ | 6/50 [00:20<02:30, 3.43s/it]\n[ComfyUI] 14%|█▍ | 7/50 [00:23<02:28, 3.45s/it]\n[ComfyUI] 16%|█▌ | 8/50 [00:26<02:25, 3.45s/it]\n[ComfyUI] 18%|█▊ | 9/50 [00:30<02:21, 3.46s/it]\n[ComfyUI] 20%|██ | 10/50 [00:33<02:18, 3.46s/it]\n[ComfyUI] 22%|██▏ | 11/50 [00:37<02:14, 3.46s/it]\n[ComfyUI] 24%|██▍ | 12/50 [00:40<02:11, 3.46s/it]\n[ComfyUI] 26%|██▌ | 13/50 [00:44<02:08, 3.46s/it]\n[ComfyUI] 28%|██▊ | 14/50 [00:47<02:04, 3.46s/it]\n[ComfyUI] 30%|███ | 15/50 [00:51<02:01, 3.46s/it]\n[ComfyUI] 32%|███▏ | 16/50 [00:54<01:57, 3.46s/it]\n[ComfyUI] 34%|███▍ | 17/50 [00:58<01:54, 3.46s/it]\n[ComfyUI] 36%|███▌ | 18/50 [01:01<01:50, 3.46s/it]\n[ComfyUI] 38%|███▊ | 19/50 [01:05<01:47, 3.46s/it]\n[ComfyUI] 40%|████ | 20/50 [01:08<01:43, 3.46s/it]\n[ComfyUI] 42%|████▏ | 21/50 [01:12<01:40, 3.46s/it]\n[ComfyUI] 44%|████▍ | 22/50 [01:15<01:37, 3.46s/it]\n[ComfyUI] 46%|████▌ | 23/50 [01:18<01:33, 3.46s/it]\n[ComfyUI] 48%|████▊ | 24/50 [01:22<01:30, 3.46s/it]\n[ComfyUI] 50%|█████ | 25/50 [01:25<01:26, 3.47s/it]\n[ComfyUI] 52%|█████▏ | 26/50 
[01:29<01:23, 3.46s/it]\n[ComfyUI] 54%|█████▍ | 27/50 [01:32<01:19, 3.46s/it]\n[ComfyUI] 56%|█████▌ | 28/50 [01:36<01:16, 3.46s/it]\n[ComfyUI] 58%|█████▊ | 29/50 [01:39<01:12, 3.47s/it]\n[ComfyUI] 60%|██████ | 30/50 [01:43<01:09, 3.46s/it]\n[ComfyUI] 62%|██████▏ | 31/50 [01:46<01:05, 3.46s/it]\n[ComfyUI] 64%|██████▍ | 32/50 [01:50<01:02, 3.46s/it]\n[ComfyUI] 66%|██████▌ | 33/50 [01:53<00:58, 3.47s/it]\n[ComfyUI] 68%|██████▊ | 34/50 [01:57<00:55, 3.46s/it]\n[ComfyUI] 70%|███████ | 35/50 [02:00<00:51, 3.47s/it]\n[ComfyUI] 72%|███████▏ | 36/50 [02:03<00:48, 3.47s/it]\n[ComfyUI] 74%|███████▍ | 37/50 [02:07<00:45, 3.47s/it]\n[ComfyUI] 76%|███████▌ | 38/50 [02:10<00:41, 3.47s/it]\n[ComfyUI] 78%|███████▊ | 39/50 [02:14<00:38, 3.47s/it]\n[ComfyUI] 80%|████████ | 40/50 [02:17<00:34, 3.46s/it]\n[ComfyUI] 82%|████████▏ | 41/50 [02:21<00:31, 3.46s/it]\n[ComfyUI] 84%|████████▍ | 42/50 [02:24<00:27, 3.46s/it]\n[ComfyUI] 86%|████████▌ | 43/50 [02:28<00:24, 3.46s/it]\n[ComfyUI] 88%|████████▊ | 44/50 [02:31<00:20, 3.46s/it]\n[ComfyUI] 90%|█████████ | 45/50 [02:35<00:17, 3.47s/it]\n[ComfyUI] 92%|█████████▏| 46/50 [02:38<00:13, 3.46s/it]\n[ComfyUI] 94%|█████████▍| 47/50 [02:42<00:10, 3.47s/it]\n[ComfyUI] 96%|█████████▌| 48/50 [02:45<00:06, 3.46s/it]\n[ComfyUI] 98%|█████████▊| 49/50 [02:49<00:03, 3.47s/it]\n[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.47s/it]\n[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.45s/it]\n[ComfyUI] Allocated memory: memory=12.759 GB\n[ComfyUI] Max allocated memory: max_memory=16.810 GB\n[ComfyUI] Max reserved memory: max_reserved=18.688 GB\nExecuting node 5, title: HunyuanVideo Decode, class type: HyVideoDecode\n[ComfyUI]\n[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]\n[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:01<00:01, 1.51s/it]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.26s/it]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.30s/it]\n[ComfyUI]\n[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]\n[ComfyUI] 
Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 28.38it/s]\n[ComfyUI]\n[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]\n[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:00<00:00, 1.18it/s]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.40it/s]\n[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.36it/s]\n[ComfyUI]\n[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]\nExecuting node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\n[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 47.58it/s]\n[ComfyUI] Prompt executed in 207.30 seconds\noutputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 24.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}\n====================================\nHunyuanVideo_00001.png\nHunyuanVideo_00001.mp4",
"metrics": {
"predict_time": 222.619138525,
"total_time": 250.23631
},
"output": "https://replicate.delivery/xezq/PfsvC1StEjQeLkzTsmXsdxKhMMj2enSe6T5rLl32Dgnup4LQB/HunyuanVideo_00001.mp4",
"started_at": "2025-01-08T07:56:44.486171Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-wsmmv52c2gkjz7mwgcw62keeudnelp4iuyozlxot4p6irsujwczq",
"get": "https://api.replicate.com/v1/predictions/00pqh8rccnrmc0cm8knb0c69aw",
"cancel": "https://api.replicate.com/v1/predictions/00pqh8rccnrmc0cm8knb0c69aw/cancel"
},
"version": "f0eab4bab4afa1d9d1484a21118bb7539babfac0a119dc41e5b1764a1ec1357b"
}
Random seed set to: 3492392950
Checking inputs
====================================
Checking weights
✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models
✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae
====================================
Running workflow
[ComfyUI] got prompt
Executing node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader
Executing node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder
[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
[ComfyUI]
[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]
[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:01, 1.71it/s]
[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.74it/s]
[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:01<00:00, 1.78it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.60it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.21it/s]
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
Executing node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode
[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 20
[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 19
Executing node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect
Executing node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader
[ComfyUI] model_type FLOW
[ComfyUI] Using accelerate to load and assign model weights to device...
[ComfyUI] Loading LoRA: lora_comfyui with strength: 1.0
[ComfyUI] Requested to load HyVideoModel
[ComfyUI] Loading 1 new model
[ComfyUI] loaded completely 0.0 12555.953247070312 True
[ComfyUI] Input (height, width, video_length) = (360, 640, 85)
Executing node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler
[ComfyUI] Sampling 85 frames in 22 latents at 640x368 with 50 inference steps
[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])
[ComfyUI]
[ComfyUI] 0%| | 0/50 [00:00<?, ?it/s]
[ComfyUI] 2%|▏ | 1/50 [00:02<02:09, 2.64s/it]
[ComfyUI] 4%|▍ | 2/50 [00:06<02:30, 3.13s/it]
[ComfyUI] 6%|▌ | 3/50 [00:09<02:34, 3.29s/it]
[ComfyUI] 8%|▊ | 4/50 [00:13<02:34, 3.36s/it]
[ComfyUI] 10%|█ | 5/50 [00:16<02:33, 3.41s/it]
[ComfyUI] 12%|█▏ | 6/50 [00:20<02:30, 3.43s/it]
[ComfyUI] 14%|█▍ | 7/50 [00:23<02:28, 3.45s/it]
[ComfyUI] 16%|█▌ | 8/50 [00:26<02:25, 3.45s/it]
[ComfyUI] 18%|█▊ | 9/50 [00:30<02:21, 3.46s/it]
[ComfyUI] 20%|██ | 10/50 [00:33<02:18, 3.46s/it]
[ComfyUI] 22%|██▏ | 11/50 [00:37<02:14, 3.46s/it]
[ComfyUI] 24%|██▍ | 12/50 [00:40<02:11, 3.46s/it]
[ComfyUI] 26%|██▌ | 13/50 [00:44<02:08, 3.46s/it]
[ComfyUI] 28%|██▊ | 14/50 [00:47<02:04, 3.46s/it]
[ComfyUI] 30%|███ | 15/50 [00:51<02:01, 3.46s/it]
[ComfyUI] 32%|███▏ | 16/50 [00:54<01:57, 3.46s/it]
[ComfyUI] 34%|███▍ | 17/50 [00:58<01:54, 3.46s/it]
[ComfyUI] 36%|███▌ | 18/50 [01:01<01:50, 3.46s/it]
[ComfyUI] 38%|███▊ | 19/50 [01:05<01:47, 3.46s/it]
[ComfyUI] 40%|████ | 20/50 [01:08<01:43, 3.46s/it]
[ComfyUI] 42%|████▏ | 21/50 [01:12<01:40, 3.46s/it]
[ComfyUI] 44%|████▍ | 22/50 [01:15<01:37, 3.46s/it]
[ComfyUI] 46%|████▌ | 23/50 [01:18<01:33, 3.46s/it]
[ComfyUI] 48%|████▊ | 24/50 [01:22<01:30, 3.46s/it]
[ComfyUI] 50%|█████ | 25/50 [01:25<01:26, 3.47s/it]
[ComfyUI] 52%|█████▏ | 26/50 [01:29<01:23, 3.46s/it]
[ComfyUI] 54%|█████▍ | 27/50 [01:32<01:19, 3.46s/it]
[ComfyUI] 56%|█████▌ | 28/50 [01:36<01:16, 3.46s/it]
[ComfyUI] 58%|█████▊ | 29/50 [01:39<01:12, 3.47s/it]
[ComfyUI] 60%|██████ | 30/50 [01:43<01:09, 3.46s/it]
[ComfyUI] 62%|██████▏ | 31/50 [01:46<01:05, 3.46s/it]
[ComfyUI] 64%|██████▍ | 32/50 [01:50<01:02, 3.46s/it]
[ComfyUI] 66%|██████▌ | 33/50 [01:53<00:58, 3.47s/it]
[ComfyUI] 68%|██████▊ | 34/50 [01:57<00:55, 3.46s/it]
[ComfyUI] 70%|███████ | 35/50 [02:00<00:51, 3.47s/it]
[ComfyUI] 72%|███████▏ | 36/50 [02:03<00:48, 3.47s/it]
[ComfyUI] 74%|███████▍ | 37/50 [02:07<00:45, 3.47s/it]
[ComfyUI] 76%|███████▌ | 38/50 [02:10<00:41, 3.47s/it]
[ComfyUI] 78%|███████▊ | 39/50 [02:14<00:38, 3.47s/it]
[ComfyUI] 80%|████████ | 40/50 [02:17<00:34, 3.46s/it]
[ComfyUI] 82%|████████▏ | 41/50 [02:21<00:31, 3.46s/it]
[ComfyUI] 84%|████████▍ | 42/50 [02:24<00:27, 3.46s/it]
[ComfyUI] 86%|████████▌ | 43/50 [02:28<00:24, 3.46s/it]
[ComfyUI] 88%|████████▊ | 44/50 [02:31<00:20, 3.46s/it]
[ComfyUI] 90%|█████████ | 45/50 [02:35<00:17, 3.47s/it]
[ComfyUI] 92%|█████████▏| 46/50 [02:38<00:13, 3.46s/it]
[ComfyUI] 94%|█████████▍| 47/50 [02:42<00:10, 3.47s/it]
[ComfyUI] 96%|█████████▌| 48/50 [02:45<00:06, 3.46s/it]
[ComfyUI] 98%|█████████▊| 49/50 [02:49<00:03, 3.47s/it]
[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.47s/it]
[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.45s/it]
[ComfyUI] Allocated memory: memory=12.759 GB
[ComfyUI] Max allocated memory: max_memory=16.810 GB
[ComfyUI] Max reserved memory: max_reserved=18.688 GB
Executing node 5, title: HunyuanVideo Decode, class type: HyVideoDecode
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:01<00:01, 1.51s/it]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.26s/it]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.30s/it]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 28.38it/s]
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:00<00:00, 1.18it/s]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.40it/s]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.36it/s]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]
Executing node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 47.58it/s]
[ComfyUI] Prompt executed in 207.30 seconds
outputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 24.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}
====================================
HunyuanVideo_00001.png
HunyuanVideo_00001.mp4
This model runs on Nvidia H100 GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model doesn't have a readme.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
Random seed set to: 3492392950
Checking inputs
====================================
Checking weights
✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models
✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae
====================================
Running workflow
[ComfyUI] got prompt
Executing node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader
Executing node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder
[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14
[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
[ComfyUI]
[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]
[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:01, 1.71it/s]
[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.74it/s]
[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:01<00:00, 1.78it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.60it/s]
[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.21it/s]
[ComfyUI] Text encoder to dtype: torch.float16
[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer
Executing node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode
[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 20
[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 19
Executing node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect
Executing node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader
[ComfyUI] model_type FLOW
[ComfyUI] Using accelerate to load and assign model weights to device...
[ComfyUI] Loading LoRA: lora_comfyui with strength: 1.0
[ComfyUI] Requested to load HyVideoModel
[ComfyUI] Loading 1 new model
[ComfyUI] loaded completely 0.0 12555.953247070312 True
[ComfyUI] Input (height, width, video_length) = (360, 640, 85)
Executing node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler
[ComfyUI] Sampling 85 frames in 22 latents at 640x368 with 50 inference steps
[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['num_train_timesteps', 'n_tokens'])])
[ComfyUI]
[ComfyUI] 0%| | 0/50 [00:00<?, ?it/s]
[ComfyUI] 2%|▏ | 1/50 [00:02<02:09, 2.64s/it]
[ComfyUI] 4%|▍ | 2/50 [00:06<02:30, 3.13s/it]
[ComfyUI] 6%|▌ | 3/50 [00:09<02:34, 3.29s/it]
[ComfyUI] 8%|▊ | 4/50 [00:13<02:34, 3.36s/it]
[ComfyUI] 10%|█ | 5/50 [00:16<02:33, 3.41s/it]
[ComfyUI] 12%|█▏ | 6/50 [00:20<02:30, 3.43s/it]
[ComfyUI] 14%|█▍ | 7/50 [00:23<02:28, 3.45s/it]
[ComfyUI] 16%|█▌ | 8/50 [00:26<02:25, 3.45s/it]
[ComfyUI] 18%|█▊ | 9/50 [00:30<02:21, 3.46s/it]
[ComfyUI] 20%|██ | 10/50 [00:33<02:18, 3.46s/it]
[ComfyUI] 22%|██▏ | 11/50 [00:37<02:14, 3.46s/it]
[ComfyUI] 24%|██▍ | 12/50 [00:40<02:11, 3.46s/it]
[ComfyUI] 26%|██▌ | 13/50 [00:44<02:08, 3.46s/it]
[ComfyUI] 28%|██▊ | 14/50 [00:47<02:04, 3.46s/it]
[ComfyUI] 30%|███ | 15/50 [00:51<02:01, 3.46s/it]
[ComfyUI] 32%|███▏ | 16/50 [00:54<01:57, 3.46s/it]
[ComfyUI] 34%|███▍ | 17/50 [00:58<01:54, 3.46s/it]
[ComfyUI] 36%|███▌ | 18/50 [01:01<01:50, 3.46s/it]
[ComfyUI] 38%|███▊ | 19/50 [01:05<01:47, 3.46s/it]
[ComfyUI] 40%|████ | 20/50 [01:08<01:43, 3.46s/it]
[ComfyUI] 42%|████▏ | 21/50 [01:12<01:40, 3.46s/it]
[ComfyUI] 44%|████▍ | 22/50 [01:15<01:37, 3.46s/it]
[ComfyUI] 46%|████▌ | 23/50 [01:18<01:33, 3.46s/it]
[ComfyUI] 48%|████▊ | 24/50 [01:22<01:30, 3.46s/it]
[ComfyUI] 50%|█████ | 25/50 [01:25<01:26, 3.47s/it]
[ComfyUI] 52%|█████▏ | 26/50 [01:29<01:23, 3.46s/it]
[ComfyUI] 54%|█████▍ | 27/50 [01:32<01:19, 3.46s/it]
[ComfyUI] 56%|█████▌ | 28/50 [01:36<01:16, 3.46s/it]
[ComfyUI] 58%|█████▊ | 29/50 [01:39<01:12, 3.47s/it]
[ComfyUI] 60%|██████ | 30/50 [01:43<01:09, 3.46s/it]
[ComfyUI] 62%|██████▏ | 31/50 [01:46<01:05, 3.46s/it]
[ComfyUI] 64%|██████▍ | 32/50 [01:50<01:02, 3.46s/it]
[ComfyUI] 66%|██████▌ | 33/50 [01:53<00:58, 3.47s/it]
[ComfyUI] 68%|██████▊ | 34/50 [01:57<00:55, 3.46s/it]
[ComfyUI] 70%|███████ | 35/50 [02:00<00:51, 3.47s/it]
[ComfyUI] 72%|███████▏ | 36/50 [02:03<00:48, 3.47s/it]
[ComfyUI] 74%|███████▍ | 37/50 [02:07<00:45, 3.47s/it]
[ComfyUI] 76%|███████▌ | 38/50 [02:10<00:41, 3.47s/it]
[ComfyUI] 78%|███████▊ | 39/50 [02:14<00:38, 3.47s/it]
[ComfyUI] 80%|████████ | 40/50 [02:17<00:34, 3.46s/it]
[ComfyUI] 82%|████████▏ | 41/50 [02:21<00:31, 3.46s/it]
[ComfyUI] 84%|████████▍ | 42/50 [02:24<00:27, 3.46s/it]
[ComfyUI] 86%|████████▌ | 43/50 [02:28<00:24, 3.46s/it]
[ComfyUI] 88%|████████▊ | 44/50 [02:31<00:20, 3.46s/it]
[ComfyUI] 90%|█████████ | 45/50 [02:35<00:17, 3.47s/it]
[ComfyUI] 92%|█████████▏| 46/50 [02:38<00:13, 3.46s/it]
[ComfyUI] 94%|█████████▍| 47/50 [02:42<00:10, 3.47s/it]
[ComfyUI] 96%|█████████▌| 48/50 [02:45<00:06, 3.46s/it]
[ComfyUI] 98%|█████████▊| 49/50 [02:49<00:03, 3.47s/it]
[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.47s/it]
[ComfyUI] 100%|██████████| 50/50 [02:52<00:00, 3.45s/it]
[ComfyUI] Allocated memory: memory=12.759 GB
[ComfyUI] Max allocated memory: max_memory=16.810 GB
[ComfyUI] Max reserved memory: max_reserved=18.688 GB
Executing node 5, title: HunyuanVideo Decode, class type: HyVideoDecode
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:01<00:01, 1.51s/it]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.26s/it]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:02<00:00, 1.30s/it]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 28.38it/s]
[ComfyUI]
[ComfyUI] Decoding rows: 0%| | 0/2 [00:00<?, ?it/s]
[ComfyUI] Decoding rows: 50%|█████ | 1/2 [00:00<00:00, 1.18it/s]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.40it/s]
[ComfyUI] Decoding rows: 100%|██████████| 2/2 [00:01<00:00, 1.36it/s]
[ComfyUI]
[ComfyUI] Blending tiles: 0%| | 0/2 [00:00<?, ?it/s]
Executing node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
[ComfyUI] Blending tiles: 100%|██████████| 2/2 [00:00<00:00, 47.58it/s]
[ComfyUI] Prompt executed in 207.30 seconds
outputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 24.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}
====================================
HunyuanVideo_00001.png
HunyuanVideo_00001.mp4