{
"crf": 19,
"denoise_strength": 1,
"enhance_double": true,
"enhance_end": 1,
"enhance_single": true,
"enhance_start": 0,
"enhance_weight": 0.3,
"flow_shift": 9,
"force_offload": true,
"frame_rate": 15,
"guidance_scale": 6,
"height": 512,
"lora_strength": 1,
"lora_url": "lucataco/hunyuan-musubi-rose-6",
"num_frames": 33,
"prompt": "In the style of RSNG. A woman with blonde hair stands on a balcony at night, framed against a backdrop of city lights. She wears a white crop top and a dark jacket, exuding a confident presence as she gazes directly at the camera",
"scheduler": "DPMSolverMultistepScheduler",
"steps": 30,
"width": 512
}
With these defaults the model generates 33 frames at 15 fps, i.e. a 512×512 clip of roughly two seconds.
Install Replicate’s Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_Pa5**********************************
This is your API token. Keep it to yourself.
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run zsxkib/hunyuan-video-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "zsxkib/hunyuan-video-lora:04279caf015c30a635cabc4077b5bd82c5c706262eb61797a48db139444bcca9",
  {
    input: {
      crf: 19,
      denoise_strength: 1,
      enhance_double: true,
      enhance_end: 1,
      enhance_single: true,
      enhance_start: 0,
      enhance_weight: 0.3,
      flow_shift: 9,
      force_offload: true,
      frame_rate: 15,
      guidance_scale: 6,
      height: 512,
      lora_strength: 1,
      lora_url: "lucataco/hunyuan-musubi-rose-6",
      num_frames: 33,
      prompt: "In the style of RSNG. A woman with blonde hair stands on a balcony at night, framed against a backdrop of city lights. She wears a white crop top and a dark jacket, exuding a confident presence as she gazes directly at the camera",
      scheduler: "DPMSolverMultistepScheduler",
      steps: 30,
      width: 512
    }
  }
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"
// To write the file to disk:
await fs.promises.writeFile("my-video.mp4", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_Pa5**********************************
This is your API token. Keep it to yourself.
import replicate
Run zsxkib/hunyuan-video-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "zsxkib/hunyuan-video-lora:04279caf015c30a635cabc4077b5bd82c5c706262eb61797a48db139444bcca9",
    input={
        "crf": 19,
        "denoise_strength": 1,
        "enhance_double": True,
        "enhance_end": 1,
        "enhance_single": True,
        "enhance_start": 0,
        "enhance_weight": 0.3,
        "flow_shift": 9,
        "force_offload": True,
        "frame_rate": 15,
        "guidance_scale": 6,
        "height": 512,
        "lora_strength": 1,
        "lora_url": "lucataco/hunyuan-musubi-rose-6",
        "num_frames": 33,
        "prompt": "In the style of RSNG. A woman with blonde hair stands on a balcony at night, framed against a backdrop of city lights. She wears a white crop top and a dark jacket, exuding a confident presence as she gazes directly at the camera",
        "scheduler": "DPMSolverMultistepScheduler",
        "steps": 30,
        "width": 512
    }
)
# To access the file URL:
print(output.url)
#=> "http://example.com"
# To write the file to disk:
with open("my-video.mp4", "wb") as file:
    file.write(output.read())
To learn more, take a look at the guide on getting started with Python.
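replicate.run blocks until the prediction finishes and wraps the result as a file object. If you would rather not block, the Python client also exposes the predictions API directly. The following is a minimal sketch rather than text from the model page: it assumes replicate.predictions.create takes the bare version hash, trims the input dictionary for brevity (reuse the full one from the example above), and picks an arbitrary output filename.
import urllib.request

import replicate

# Kick off the prediction without blocking. Unlike replicate.run, this takes
# the bare version hash; the input dict is trimmed here for brevity.
prediction = replicate.predictions.create(
    version="04279caf015c30a635cabc4077b5bd82c5c706262eb61797a48db139444bcca9",
    input={
        "prompt": "In the style of RSNG. A woman with blonde hair stands on a balcony at night",
        "lora_url": "lucataco/hunyuan-musubi-rose-6",
        "width": 512,
        "height": 512,
        "num_frames": 33,
        "frame_rate": 15,
    },
)

# Block here (or poll prediction.reload() yourself) until a terminal state.
prediction.wait()

if prediction.status == "succeeded":
    # The predictions API returns the raw output, which for this model is a
    # URL to the rendered MP4, so download it with the standard library.
    urllib.request.urlretrieve(prediction.output, "hunyuan_video.mp4")
This flow hands back the same prediction object shown in the JSON example at the bottom of this page, so fields like status, output, and urls are available on it directly.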
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_Pa5**********************************
This is your API token. Keep it to yourself.
Run zsxkib/hunyuan-video-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "zsxkib/hunyuan-video-lora:04279caf015c30a635cabc4077b5bd82c5c706262eb61797a48db139444bcca9",
    "input": {
      "crf": 19,
      "denoise_strength": 1,
      "enhance_double": true,
      "enhance_end": 1,
      "enhance_single": true,
      "enhance_start": 0,
      "enhance_weight": 0.3,
      "flow_shift": 9,
      "force_offload": true,
      "frame_rate": 15,
      "guidance_scale": 6,
      "height": 512,
      "lora_strength": 1,
      "lora_url": "lucataco/hunyuan-musubi-rose-6",
      "num_frames": 33,
      "prompt": "In the style of RSNG. A woman with blonde hair stands on a balcony at night, framed against a backdrop of city lights. She wears a white crop top and a dark jacket, exuding a confident presence as she gazes directly at the camera",
      "scheduler": "DPMSolverMultistepScheduler",
      "steps": 30,
      "width": 512
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
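The Prefer: wait header asks the API to hold the request open until the prediction completes (or the wait times out); drop it and the POST returns straight away with a prediction in the starting state, which you then poll via its urls.get endpoint (visible in the example response below). As an illustration only, here is a rough polling sketch against the same HTTP endpoints using nothing but Python's standard library; the trimmed input and the five-second poll interval are arbitrary choices, not part of Replicate's documentation.
import json
import os
import time
import urllib.request

API_URL = "https://api.replicate.com/v1/predictions"
HEADERS = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

# Create the prediction. Without "Prefer: wait" this returns immediately
# with a prediction whose status is "starting".
body = json.dumps({
    "version": "zsxkib/hunyuan-video-lora:04279caf015c30a635cabc4077b5bd82c5c706262eb61797a48db139444bcca9",
    "input": {
        # Trimmed for brevity; pass the full input object from the curl example above.
        "prompt": "In the style of RSNG. A woman with blonde hair stands on a balcony at night",
        "lora_url": "lucataco/hunyuan-musubi-rose-6",
    },
}).encode()
with urllib.request.urlopen(urllib.request.Request(API_URL, data=body, headers=HEADERS)) as resp:
    prediction = json.load(resp)

# Poll the prediction's own "get" URL until it reaches a terminal state.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(5)
    req = urllib.request.Request(prediction["urls"]["get"], headers=HEADERS)
    with urllib.request.urlopen(req) as resp:
        prediction = json.load(resp)

print(prediction["status"], prediction.get("output"))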
{
"id": "qmrfs2n521rm80cmeq98yps03m",
"model": "zsxkib/hunyuan-video-lora",
"version": "04279caf015c30a635cabc4077b5bd82c5c706262eb61797a48db139444bcca9",
"input": {
"crf": 19,
"denoise_strength": 1,
"enhance_double": true,
"enhance_end": 1,
"enhance_single": true,
"enhance_start": 0,
"enhance_weight": 0.3,
"flow_shift": 9,
"force_offload": true,
"frame_rate": 15,
"guidance_scale": 6,
"height": 512,
"lora_strength": 1,
"lora_url": "lucataco/hunyuan-musubi-rose-6",
"num_frames": 33,
"prompt": "In the style of RSNG. A woman with blonde hair stands on a balcony at night, framed against a backdrop of city lights. She wears a white crop top and a dark jacket, exuding a confident presence as she gazes directly at the camera",
"scheduler": "DPMSolverMultistepScheduler",
"steps": 30,
"width": 512
},
"logs": "Random seed set to: 2755017607\nChecking inputs\n====================================\nChecking weights\n✅ hunyuan_video_vae_bf16.safetensors exists in ComfyUI/models/vae\n✅ hunyuan_video_720_fp8_e4m3fn.safetensors exists in ComfyUI/models/diffusion_models\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 7, title: HunyuanVideo VAE Loader, class type: HyVideoVAELoader\nExecuting node 42, title: HunyuanVideo Enhance A Video, class type: HyVideoEnhanceAVideo\nExecuting node 16, title: (Down)Load HunyuanVideo TextEncoder, class type: DownloadAndLoadHyVideoTextEncoder\n[ComfyUI] Loading text encoder model (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (clipL) from: /src/ComfyUI/models/clip/clip-vit-large-patch14\n[ComfyUI] Loading text encoder model (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\n[ComfyUI]\n[ComfyUI] Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]\n[ComfyUI] Loading checkpoint shards: 25%|██▌ | 1/4 [00:00<00:01, 1.78it/s]\n[ComfyUI] Loading checkpoint shards: 50%|█████ | 2/4 [00:01<00:01, 1.75it/s]\n[ComfyUI] Loading checkpoint shards: 75%|███████▌ | 3/4 [00:01<00:00, 1.76it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.58it/s]\n[ComfyUI] Loading checkpoint shards: 100%|██████████| 4/4 [00:01<00:00, 2.21it/s]\n[ComfyUI] Text encoder to dtype: torch.float16\n[ComfyUI] Loading tokenizer (llm) from: /src/ComfyUI/models/LLM/llava-llama-3-8b-text-encoder-tokenizer\nExecuting node 30, title: HunyuanVideo TextEncode, class type: HyVideoTextEncode\n[ComfyUI] llm prompt attention_mask shape: torch.Size([1, 161]), masked tokens: 52\n[ComfyUI] clipL prompt attention_mask shape: torch.Size([1, 77]), masked tokens: 54\nExecuting node 41, title: HunyuanVideo Lora Select, class type: HyVideoLoraSelect\nExecuting node 1, title: HunyuanVideo Model Loader, class type: HyVideoModelLoader\n[ComfyUI] model_type FLOW\n[ComfyUI] The config attributes {'use_flow_sigmas': True, 'prediction_type': 'flow_prediction'} were passed to FlowMatchDiscreteScheduler, but are not expected and will be ignored. Please verify your scheduler_config.json configuration file.\n[ComfyUI] Using accelerate to load and assign model weights to device...\n[ComfyUI] Loading LoRA: lora with strength: 1.0\n[ComfyUI] Requested to load HyVideoModel\n[ComfyUI] loaded completely 9.5367431640625e+25 12555.953247070312 True\n[ComfyUI] Input (height, width, video_length) = (512, 512, 33)\nExecuting node 3, title: HunyuanVideo Sampler, class type: HyVideoSampler\n[ComfyUI] The config attributes {'reverse': True, 'solver': 'euler'} were passed to DPMSolverMultistepScheduler, but are not expected and will be ignored. 
Please verify your scheduler_config.json configuration file.\n[ComfyUI] Sampling 33 frames in 9 latents at 512x512 with 30 inference steps\n[ComfyUI] Scheduler config: FrozenDict([('num_train_timesteps', 1000), ('flow_shift', 9.0), ('reverse', True), ('solver', 'euler'), ('n_tokens', None), ('_use_default_values', ['n_tokens', 'num_train_timesteps'])])[ComfyUI]\n[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]\n[ComfyUI] 3%|▎ | 1/30 [00:01<00:33, 1.16s/it]\n[ComfyUI] 7%|▋ | 2/30 [00:01<00:26, 1.04it/s]\n[ComfyUI] 10%|█ | 3/30 [00:03<00:26, 1.00it/s]\n[ComfyUI] 13%|█▎ | 4/30 [00:04<00:26, 1.01s/it]\n[ComfyUI] 17%|█▋ | 5/30 [00:05<00:25, 1.02s/it]\n[ComfyUI] 20%|██ | 6/30 [00:06<00:24, 1.03s/it]\n[ComfyUI] 23%|██▎ | 7/30 [00:07<00:23, 1.03s/it]\n[ComfyUI] 27%|██▋ | 8/30 [00:08<00:22, 1.03s/it]\n[ComfyUI] 30%|███ | 9/30 [00:09<00:21, 1.03s/it]\n[ComfyUI] 33%|███▎ | 10/30 [00:10<00:20, 1.04s/it]\n[ComfyUI] 37%|███▋ | 11/30 [00:11<00:19, 1.04s/it]\n[ComfyUI] 40%|████ | 12/30 [00:12<00:18, 1.04s/it]\n[ComfyUI] 43%|████▎ | 13/30 [00:13<00:18, 1.08s/it]\n[ComfyUI] 47%|████▋ | 14/30 [00:14<00:17, 1.06s/it]\n[ComfyUI] 50%|█████ | 15/30 [00:15<00:15, 1.06s/it]\n[ComfyUI] 53%|█████▎ | 16/30 [00:16<00:14, 1.05s/it]\n[ComfyUI] 57%|█████▋ | 17/30 [00:17<00:13, 1.05s/it]\n[ComfyUI] 60%|██████ | 18/30 [00:18<00:12, 1.05s/it]\n[ComfyUI] 63%|██████▎ | 19/30 [00:19<00:11, 1.04s/it]\n[ComfyUI] 67%|██████▋ | 20/30 [00:20<00:10, 1.04s/it]\n[ComfyUI] 70%|███████ | 21/30 [00:21<00:09, 1.04s/it]\n[ComfyUI] 73%|███████▎ | 22/30 [00:22<00:08, 1.04s/it]\n[ComfyUI] 77%|███████▋ | 23/30 [00:23<00:07, 1.04s/it]\n[ComfyUI] 80%|████████ | 24/30 [00:24<00:06, 1.04s/it]\n[ComfyUI] 83%|████████▎ | 25/30 [00:26<00:05, 1.04s/it]\n[ComfyUI] 87%|████████▋ | 26/30 [00:27<00:04, 1.04s/it]\n[ComfyUI] 90%|█████████ | 27/30 [00:28<00:03, 1.04s/it]\n[ComfyUI] 93%|█████████▎| 28/30 [00:29<00:02, 1.04s/it]\n[ComfyUI] 97%|█████████▋| 29/30 [00:30<00:01, 1.04s/it]\n[ComfyUI] 100%|██████████| 30/30 [00:31<00:00, 1.04s/it]\n[ComfyUI] 100%|██████████| 30/30 [00:31<00:00, 1.04s/it]\n[ComfyUI] Allocated memory: memory=12.757 GB\n[ComfyUI] Max allocated memory: max_memory=14.346 GB\n[ComfyUI] Max reserved memory: max_reserved=14.812 GB\nExecuting node 5, title: HunyuanVideo Decode, class type: HyVideoDecode\n[ComfyUI]\n[ComfyUI] Decoding rows: 0%| | 0/3 [00:00<?, ?it/s]\n[ComfyUI] Decoding rows: 33%|███▎ | 1/3 [00:00<00:01, 1.80it/s]\n[ComfyUI] Decoding rows: 67%|██████▋ | 2/3 [00:01<00:00, 1.74it/s]\n[ComfyUI] Decoding rows: 100%|██████████| 3/3 [00:01<00:00, 2.11it/s]\n[ComfyUI] Decoding rows: 100%|██████████| 3/3 [00:01<00:00, 2.00it/s]\n[ComfyUI]\n[ComfyUI] Blending tiles: 0%| | 0/3 [00:00<?, ?it/s]\nExecuting node 34, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\n[ComfyUI] Blending tiles: 100%|██████████| 3/3 [00:00<00:00, 51.92it/s]\n[ComfyUI] Prompt executed in 62.51 seconds\noutputs: {'34': {'gifs': [{'filename': 'HunyuanVideo_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 15.0, 'workflow': 'HunyuanVideo_00001.png', 'fullpath': '/tmp/outputs/HunyuanVideo_00001.mp4'}]}}\n====================================\nHunyuanVideo_00001.png\nHunyuanVideo_00001.mp4",
"output": "https://replicate.delivery/xezq/JB66or6rpw4XCBK9OVDIkW4OSZH599iEnC47btdZckaYnhBF/HunyuanVideo_00001.mp4",
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2025-01-17T19:52:06.672Z",
"started_at": "2025-01-17T19:52:35.502561Z",
"completed_at": "2025-01-17T19:54:09.871276Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/qmrfs2n521rm80cmeq98yps03m/cancel",
"get": "https://api.replicate.com/v1/predictions/qmrfs2n521rm80cmeq98yps03m",
"stream": "https://stream.replicate.com/v1/files/bcwr-knewvnm2tsidbgoiemo56kapb76me4k2sfzwjca7bito4cf4zmla",
"web": "https://replicate.com/p/qmrfs2n521rm80cmeq98yps03m"
},
"metrics": {
"predict_time": 94.368714911,
"total_time": 123.199276
}
}
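In this example response, output is a direct URL to the rendered MP4 and urls.get is the endpoint to poll while status is still starting or processing. The metrics also show where the time went: started_at − created_at ≈ 28.8 s of queueing and setup, completed_at − started_at ≈ 94.4 s of inference (matching predict_time of 94.37 s), and the two together account for total_time of about 123.2 s.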