Example input (JSON):
{
"aspect_ratio": "16:9",
"frames": 81,
"lora_strength_clip": 1,
"lora_strength_model": 1,
"lora_url": "https://huggingface.co/motimalu/wan-flat-color-v2/resolve/main/wan_flat_color_v2.safetensors",
"model": "14b",
"prompt": "flat color 2d animation of a portrait of woman with white hair and green eyes, dynamic scene",
"sample_guide_scale": 5,
"sample_shift": 8,
"sample_steps": 30
}

Install Replicate's Node.js client library:

npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_U3P**********************************

This is your API token. Keep it to yourself.

Import and set up the client:
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run wan-video/wan2.1-with-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"wan-video/wan2.1-with-lora:0615656dcf90c621b2fe05bddc3c87db422dda0aad7b0a681fa641d8749e9fd3",
{
input: {
aspect_ratio: "16:9",
frames: 81,
lora_strength_clip: 1,
lora_strength_model: 1,
lora_url: "https://huggingface.co/motimalu/wan-flat-color-v2/resolve/main/wan_flat_color_v2.safetensors",
model: "14b",
prompt: "flat color 2d animation of a portrait of woman with white hair and green eyes, dynamic scene",
sample_guide_scale: 5,
sample_shift: 8,
sample_steps: 30
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the video file to disk (output[0] is a file stream):
await fs.promises.writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate

Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_U3P**********************************

This is your API token. Keep it to yourself.

Import the client:
import replicate
Run wan-video/wan2.1-with-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"wan-video/wan2.1-with-lora:0615656dcf90c621b2fe05bddc3c87db422dda0aad7b0a681fa641d8749e9fd3",
input={
"aspect_ratio": "16:9",
"frames": 81,
"lora_strength_clip": 1,
"lora_strength_model": 1,
"lora_url": "https://huggingface.co/motimalu/wan-flat-color-v2/resolve/main/wan_flat_color_v2.safetensors",
"model": "14b",
"prompt": "flat color 2d animation of a portrait of woman with white hair and green eyes, dynamic scene",
"sample_guide_scale": 5,
"sample_shift": 8,
"sample_steps": 30
}
)
# To access the file URL:
print(output[0].url)
#=> "http://example.com"
# To write the video file to disk:
with open("output.mp4", "wb") as file:
    file.write(output[0].read())
To learn more, take a look at the guide on getting started with Python.
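replicate.run blocks until the model finishes. The example prediction shown later on this page reports a predict time of about 6.5 minutes on the 14b model, so you may prefer to create the prediction and poll it instead. The following is a minimal sketch of that pattern, assuming the replicate Python client's predictions API; the version hash is the one from the model identifier above, and the 10-second polling interval is arbitrary:

import time

import replicate

# Create the prediction without waiting for the video to render.
prediction = replicate.predictions.create(
    version="0615656dcf90c621b2fe05bddc3c87db422dda0aad7b0a681fa641d8749e9fd3",
    input={
        "aspect_ratio": "16:9",
        "frames": 81,
        "lora_strength_clip": 1,
        "lora_strength_model": 1,
        "lora_url": "https://huggingface.co/motimalu/wan-flat-color-v2/resolve/main/wan_flat_color_v2.safetensors",
        "model": "14b",
        "prompt": "flat color 2d animation of a portrait of woman with white hair and green eyes, dynamic scene",
        "sample_guide_scale": 5,
        "sample_shift": 8,
        "sample_steps": 30,
    },
)

# Poll until the prediction reaches a terminal state.
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(10)
    prediction.reload()

if prediction.status == "succeeded":
    print(prediction.output)  # list of delivery URLs for the rendered video
else:
    print(prediction.error)

Recent versions of the client also expose a prediction.wait() helper that performs this polling loop for you.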
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_U3P**********************************

This is your API token. Keep it to yourself.
Run wan-video/wan2.1-with-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "wan-video/wan2.1-with-lora:0615656dcf90c621b2fe05bddc3c87db422dda0aad7b0a681fa641d8749e9fd3",
"input": {
"aspect_ratio": "16:9",
"frames": 81,
"lora_strength_clip": 1,
"lora_strength_model": 1,
"lora_url": "https://huggingface.co/motimalu/wan-flat-color-v2/resolve/main/wan_flat_color_v2.safetensors",
"model": "14b",
"prompt": "flat color 2d animation of a portrait of woman with white hair and green eyes, dynamic scene",
"sample_guide_scale": 5,
"sample_shift": 8,
"sample_steps": 30
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.

A successful request returns a prediction object like the one below.
{
"id": "swdwvq9tdxrme0cng2281bmcwc",
"model": "wan-video/wan2.1-with-lora",
"version": "0615656dcf90c621b2fe05bddc3c87db422dda0aad7b0a681fa641d8749e9fd3",
"input": {
"aspect_ratio": "16:9",
"frames": 81,
"lora_strength_clip": 1,
"lora_strength_model": 1,
"lora_url": "https://huggingface.co/motimalu/wan-flat-color-v2/resolve/main/wan_flat_color_v2.safetensors",
"model": "14b",
"prompt": "flat color 2d animation of a portrait of woman with white hair and green eyes, dynamic scene",
"sample_guide_scale": 5,
"sample_shift": 8,
"sample_steps": 30
},
"logs": "Random seed set to: 2381169083\nChecking inputs\n====================================\nChecking weights\nConverting LoraLoader node 49 to LoraLoaderFromURL\n✅ umt5_xxl_fp16.safetensors exists in ComfyUI/models/text_encoders\n✅ wan_2.1_vae.safetensors exists in ComfyUI/models/vae\n✅ wan2.1_t2v_14B_bf16.safetensors exists in ComfyUI/models/diffusion_models\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 37, title: Load Diffusion Model, class type: UNETLoader\n[ComfyUI] model weight dtype torch.bfloat16, manual cast: None\n[ComfyUI] model_type FLOW\nExecuting node 49, title: Load LoRA, class type: LoraLoaderFromURL\n[ComfyUI] Requested to load WanTEModel\nExecuting node 6, title: CLIP Text Encode (Positive Prompt), class type: CLIPTextEncode\n[ComfyUI] loaded completely 140527.45920448302 10835.4765625 True\nExecuting node 48, title: ModelSamplingSD3, class type: ModelSamplingSD3\nExecuting node 3, title: KSampler, class type: KSampler\n[ComfyUI] Requested to load WAN21\n[ComfyUI] loaded completely 123801.93451991271 27251.406372070312 True\n[ComfyUI]\n[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]\n[ComfyUI] 3%|▎ | 1/30 [00:06<03:00, 6.21s/it]\n[ComfyUI] 7%|▋ | 2/30 [00:15<03:37, 7.76s/it]\n[ComfyUI] 10%|█ | 3/30 [00:23<03:42, 8.26s/it]\n[ComfyUI] 13%|█▎ | 4/30 [00:32<03:40, 8.50s/it]\n[ComfyUI] 17%|█▋ | 5/30 [00:41<03:36, 8.64s/it]\n[ComfyUI] 20%|██ | 6/30 [00:50<03:29, 8.73s/it]\n[ComfyUI] 23%|██▎ | 7/30 [00:59<03:22, 8.78s/it]\n[ComfyUI] 27%|██▋ | 8/30 [01:08<03:14, 8.82s/it]\n[ComfyUI] 30%|███ | 9/30 [01:17<03:05, 8.85s/it]\n[ComfyUI] 33%|███▎ | 10/30 [01:26<02:57, 8.86s/it]\n[ComfyUI] 37%|███▋ | 11/30 [01:35<02:48, 8.88s/it]\n[ComfyUI] 40%|████ | 12/30 [01:43<02:40, 8.89s/it]\n[ComfyUI] 43%|████▎ | 13/30 [01:52<02:31, 8.90s/it]\n[ComfyUI] 47%|████▋ | 14/30 [02:01<02:22, 8.90s/it]\n[ComfyUI] 50%|█████ | 15/30 [02:10<02:13, 8.90s/it]\n[ComfyUI] 53%|█████▎ | 16/30 [02:19<02:04, 8.90s/it]\n[ComfyUI] 57%|█████▋ | 17/30 [02:28<01:55, 8.91s/it]\n[ComfyUI] 60%|██████ | 18/30 [02:37<01:46, 8.90s/it]\n[ComfyUI] 63%|██████▎ | 19/30 [02:46<01:37, 8.91s/it]\n[ComfyUI] 67%|██████▋ | 20/30 [02:55<01:29, 8.91s/it]\n[ComfyUI] 70%|███████ | 21/30 [03:04<01:20, 8.91s/it]\n[ComfyUI] 73%|███████▎ | 22/30 [03:13<01:11, 8.91s/it]\n[ComfyUI] 77%|███████▋ | 23/30 [03:21<01:02, 8.91s/it]\n[ComfyUI] 80%|████████ | 24/30 [03:30<00:53, 8.91s/it]\n[ComfyUI] 83%|████████▎ | 25/30 [03:39<00:44, 8.91s/it]\n[ComfyUI] 87%|████████▋ | 26/30 [03:48<00:35, 8.91s/it]\n[ComfyUI] 90%|█████████ | 27/30 [03:57<00:26, 8.91s/it]\n[ComfyUI] 93%|█████████▎| 28/30 [04:06<00:17, 8.91s/it]\n[ComfyUI] 97%|█████████▋| 29/30 [04:15<00:08, 8.91s/it]\n[ComfyUI] 100%|██████████| 30/30 [04:27<00:00, 9.71s/it]\nExecuting node 8, title: VAE Decode, class type: VAEDecode\nExecuting node 50, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\n[ComfyUI] 100%|██████████| 30/30 [04:27<00:00, 8.90s/it]\n[ComfyUI] Prompt executed in 392.13 seconds\noutputs: {'50': {'gifs': [{'filename': 'R8_Wan_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 16.0, 'workflow': 'R8_Wan_00001.png', 'fullpath': '/tmp/outputs/R8_Wan_00001.mp4'}]}}\n====================================\nR8_Wan_00001.png\nR8_Wan_00001.mp4",
"output": [
"https://replicate.delivery/xezq/EVxdGGJo5m6hDxI2Saz3pOeLwUOEegyijKf2ToWHaBQX1VuoA/R8_Wan_00001.mp4"
],
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2025-03-10T14:45:04.239Z",
"started_at": "2025-03-10T14:45:55.387705Z",
"completed_at": "2025-03-10T14:52:27.712452Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/swdwvq9tdxrme0cng2281bmcwc/cancel",
"get": "https://api.replicate.com/v1/predictions/swdwvq9tdxrme0cng2281bmcwc",
"stream": "https://stream.replicate.com/v1/files/bcwr-ljynnsvgwvxa3r5b53cd3ydxoajxlphdh3vfyefzpmfalhxancoq",
"web": "https://replicate.com/p/swdwvq9tdxrme0cng2281bmcwc"
},
"metrics": {
"predict_time": 392.32474699,
"total_time": 443.473452
}
}
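The output field of the prediction is a direct URL to the rendered mp4 on replicate.delivery. Output files are not hosted indefinitely, so download anything you want to keep. A minimal Python sketch using only the standard library (the local filename is arbitrary):

import urllib.request

# Delivery URL copied from the "output" field of the prediction above.
video_url = "https://replicate.delivery/xezq/EVxdGGJo5m6hDxI2Saz3pOeLwUOEegyijKf2ToWHaBQX1VuoA/R8_Wan_00001.mp4"

# Save the video locally; the name mirrors the filename in the URL.
urllib.request.urlretrieve(video_url, "R8_Wan_00001.mp4")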