Failed to load versions. Head to the versions page to see all versions for this model.
You're looking at a specific version of this model. Jump to the model overview.
fofr/video-morpher:355c6bba
This version has been disabled because it consistently fails to complete setup.
Input
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import fs from "node:fs";
// Create a Replicate API client, authenticating with the token taken from
// the REPLICATE_API_TOKEN environment variable set above.
const auth = process.env.REPLICATE_API_TOKEN;
const replicate = new Replicate({ auth });
Run fofr/video-morpher using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Run fofr/video-morpher and wait for it to finish. For this model the
// output is an array of video files (preview, upscaled, upscaled-by-model,
// interpolated) — see the example prediction output below.
const output = await replicate.run(
  "fofr/video-morpher:355c6bbaf8bc2deeafd3e2384a50af51bc2091a8be96dc082f1ef02c74640baf",
  {
    input: {
      mode: "upscaled-and-interpolated",
      prompt: "",
      checkpoint: "anime",
      style_image: "https://replicate.delivery/pbxt/Knt8RPT8KLlsXLreP04hXAULTlrL29TH9W8NNUV3eKDfGkug/replicate-prediction-6da3fldbhkwkmaeba4bhzif72m.png",
      aspect_ratio: "4:3",
      style_strength: 0.25,
      negative_prompt: "",
      subject_image_1: "https://replicate.delivery/pbxt/Knt8R9B6sVKljclLEsUe1tUz5gELYq3WQ9mebHcdEnpaKEvY/fofr_a_middle_aged_man_with_thick_glasses_16f412ff-db00-4e06-acdf-828885df6c58.webp",
      subject_image_2: "https://replicate.delivery/pbxt/Knt8QyjO5NMhX7S9wCjJ6ZKCxYnL2cBSKyeU9oJlpABFIjHb/fofr_a_blonde_woman_studio_headshot_cf000993-a68c-499f-9619-026601661d97.png",
      subject_image_3: "https://replicate.delivery/pbxt/Knt8REy4ySLJqPNNsw6RPAnxfdmSEXyWWjFsfqsMwpIX11tK/marble-statue-antinous-height-180-cm-9513049.jpg.webp",
      subject_image_4: "https://replicate.delivery/pbxt/Knt8RgLdi4GZ7AOdQ7INvzI4SMKWlR1eJQjfIxK8cZIzifIA/ComfyUI_02710_.png"
    }
  }
);

// Each element of `output` is a file object; `.url()` returns its hosted URL.
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk: the original example called callback-style
// `fs.writeFile` with no callback (a TypeError in current Node) and saved a
// video with a .png name. Use the promise API and an .mp4 extension instead;
// `fs.promises.writeFile` accepts the streamed file object directly.
await fs.promises.writeFile("output_0.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/video-morpher using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Gather the model inputs up front so the run() call itself stays short.
morpher_input = {
    "mode": "upscaled-and-interpolated",
    "prompt": "",
    "checkpoint": "anime",
    "style_image": "https://replicate.delivery/pbxt/Knt8RPT8KLlsXLreP04hXAULTlrL29TH9W8NNUV3eKDfGkug/replicate-prediction-6da3fldbhkwkmaeba4bhzif72m.png",
    "aspect_ratio": "4:3",
    "style_strength": 0.25,
    "negative_prompt": "",
    "subject_image_1": "https://replicate.delivery/pbxt/Knt8R9B6sVKljclLEsUe1tUz5gELYq3WQ9mebHcdEnpaKEvY/fofr_a_middle_aged_man_with_thick_glasses_16f412ff-db00-4e06-acdf-828885df6c58.webp",
    "subject_image_2": "https://replicate.delivery/pbxt/Knt8QyjO5NMhX7S9wCjJ6ZKCxYnL2cBSKyeU9oJlpABFIjHb/fofr_a_blonde_woman_studio_headshot_cf000993-a68c-499f-9619-026601661d97.png",
    "subject_image_3": "https://replicate.delivery/pbxt/Knt8REy4ySLJqPNNsw6RPAnxfdmSEXyWWjFsfqsMwpIX11tK/marble-statue-antinous-height-180-cm-9513049.jpg.webp",
    "subject_image_4": "https://replicate.delivery/pbxt/Knt8RgLdi4GZ7AOdQ7INvzI4SMKWlR1eJQjfIxK8cZIzifIA/ComfyUI_02710_.png",
}

# Run fofr/video-morpher and block until the prediction completes; the
# result is the model's output (a list of video file URLs).
output = replicate.run(
    "fofr/video-morpher:355c6bbaf8bc2deeafd3e2384a50af51bc2091a8be96dc082f1ef02c74640baf",
    input=morpher_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/video-morpher using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API. The "Prefer: wait" header asks
# the server to hold the request open until the prediction finishes, so the
# response body contains the completed prediction rather than a pending one.
# $'…' quoting lets the JSON body span multiple lines in one argument.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "fofr/video-morpher:355c6bbaf8bc2deeafd3e2384a50af51bc2091a8be96dc082f1ef02c74640baf",
"input": {
"mode": "upscaled-and-interpolated",
"prompt": "",
"checkpoint": "anime",
"style_image": "https://replicate.delivery/pbxt/Knt8RPT8KLlsXLreP04hXAULTlrL29TH9W8NNUV3eKDfGkug/replicate-prediction-6da3fldbhkwkmaeba4bhzif72m.png",
"aspect_ratio": "4:3",
"style_strength": 0.25,
"negative_prompt": "",
"subject_image_1": "https://replicate.delivery/pbxt/Knt8R9B6sVKljclLEsUe1tUz5gELYq3WQ9mebHcdEnpaKEvY/fofr_a_middle_aged_man_with_thick_glasses_16f412ff-db00-4e06-acdf-828885df6c58.webp",
"subject_image_2": "https://replicate.delivery/pbxt/Knt8QyjO5NMhX7S9wCjJ6ZKCxYnL2cBSKyeU9oJlpABFIjHb/fofr_a_blonde_woman_studio_headshot_cf000993-a68c-499f-9619-026601661d97.png",
"subject_image_3": "https://replicate.delivery/pbxt/Knt8REy4ySLJqPNNsw6RPAnxfdmSEXyWWjFsfqsMwpIX11tK/marble-statue-antinous-height-180-cm-9513049.jpg.webp",
"subject_image_4": "https://replicate.delivery/pbxt/Knt8RgLdi4GZ7AOdQ7INvzI4SMKWlR1eJQjfIxK8cZIzifIA/ComfyUI_02710_.png"
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
Output
{
"completed_at": "2024-04-24T15:28:58.190754Z",
"created_at": "2024-04-24T15:24:10.944000Z",
"data_removed": false,
"error": null,
"id": "zq1ppbr981rgg0cf22trg7ex1r",
"input": {
"mode": "upscaled-and-interpolated",
"prompt": "",
"checkpoint": "anime",
"style_image": "https://replicate.delivery/pbxt/Knt8RPT8KLlsXLreP04hXAULTlrL29TH9W8NNUV3eKDfGkug/replicate-prediction-6da3fldbhkwkmaeba4bhzif72m.png",
"aspect_ratio": "4:3",
"style_strength": 0.25,
"negative_prompt": "",
"subject_image_1": "https://replicate.delivery/pbxt/Knt8R9B6sVKljclLEsUe1tUz5gELYq3WQ9mebHcdEnpaKEvY/fofr_a_middle_aged_man_with_thick_glasses_16f412ff-db00-4e06-acdf-828885df6c58.webp",
"subject_image_2": "https://replicate.delivery/pbxt/Knt8QyjO5NMhX7S9wCjJ6ZKCxYnL2cBSKyeU9oJlpABFIjHb/fofr_a_blonde_woman_studio_headshot_cf000993-a68c-499f-9619-026601661d97.png",
"subject_image_3": "https://replicate.delivery/pbxt/Knt8REy4ySLJqPNNsw6RPAnxfdmSEXyWWjFsfqsMwpIX11tK/marble-statue-antinous-height-180-cm-9513049.jpg.webp",
"subject_image_4": "https://replicate.delivery/pbxt/Knt8RgLdi4GZ7AOdQ7INvzI4SMKWlR1eJQjfIxK8cZIzifIA/ComfyUI_02710_.png"
},
"logs": "Random seed set to: 665779694\nChecking inputs\n✅ /tmp/inputs/2.png\n✅ /tmp/inputs/1.png\n✅ /tmp/inputs/3.png\n✅ /tmp/inputs/4.png\n✅ /tmp/inputs/circles.mp4\n✅ /tmp/inputs/style.png\n====================================\nChecking weights\nIncluding weights for IPAdapter preset: PLUS (high strength)\n⏳ Downloading ip-adapter-plus_sdxl_vit-h.safetensors to ComfyUI/models/ipadapter\n⌛️ Downloaded ip-adapter-plus_sdxl_vit-h.safetensors in 0.60s, size: 808.26MB\n✅ ip-adapter-plus_sdxl_vit-h.safetensors\n⏳ Downloading RealESRGAN_x4.pth to ComfyUI/models/upscale_models\n⌛️ Downloaded RealESRGAN_x4.pth in 0.15s, size: 63.94MB\n✅ RealESRGAN_x4.pth\n⏳ Downloading vae-ft-mse-840000-ema-pruned.safetensors to ComfyUI/models/vae\n⌛️ Downloaded vae-ft-mse-840000-ema-pruned.safetensors in 0.35s, size: 319.14MB\n✅ vae-ft-mse-840000-ema-pruned.safetensors\n⏳ Downloading CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors to ComfyUI/models/clip_vision\n⌛️ Downloaded CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors in 1.94s, size: 2411.24MB\n✅ CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors\n⏳ Downloading ip-adapter-plus_sd15.safetensors to ComfyUI/models/ipadapter\n⌛️ Downloaded ip-adapter-plus_sd15.safetensors in 0.18s, size: 93.63MB\n✅ ip-adapter-plus_sd15.safetensors\n⏳ Downloading film_net_fp32.pt to ComfyUI/custom_nodes/ComfyUI-Frame-Interpolation/ckpts/film\n⌛️ Downloaded film_net_fp32.pt in 0.25s, size: 131.53MB\n✅ film_net_fp32.pt\n⏳ Downloading AnimateLCM_sd15_t2v_lora.safetensors to ComfyUI/models/loras\n⌛️ Downloaded AnimateLCM_sd15_t2v_lora.safetensors in 0.23s, size: 128.39MB\n✅ AnimateLCM_sd15_t2v_lora.safetensors\n⏳ Downloading toonyou_beta6.safetensors to ComfyUI/models/checkpoints\n⌛️ Downloaded toonyou_beta6.safetensors in 1.36s, size: 2193.39MB\n✅ toonyou_beta6.safetensors\n⏳ Downloading control_v1p_sd15_qrcode_monster.safetensors to ComfyUI/models/controlnet\n⌛️ Downloaded control_v1p_sd15_qrcode_monster.safetensors in 0.55s, size: 689.12MB\n✅ 
control_v1p_sd15_qrcode_monster.safetensors\n⏳ Downloading AnimateLCM_sd15_t2v.ckpt to ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/models\n⌛️ Downloaded AnimateLCM_sd15_t2v.ckpt in 1.16s, size: 1729.05MB\n✅ AnimateLCM_sd15_t2v.ckpt\n====================================\nRunning workflow\ngot prompt\nExecuting node 564, title: Load Checkpoint, class type: CheckpointLoaderSimple\nmodel_type EPS\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nclip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight']\nloaded straight to GPU\nRequested to load BaseModel\nLoading 1 new model\nExecuting node 563, title: LoraLoaderModelOnly, class type: LoraLoaderModelOnly\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Loading motion module AnimateLCM_sd15_t2v.ckpt via Gen2\nExecuting node 87, title: Load AnimateDiff Model 🎭🅐🅓②, class type: ADE_LoadAnimateDiffModel\nExecuting node 256, title: Motion Scale 🎭🅐🅓, class type: ADE_MultivalDynamic\nExecuting node 79, title: Apply AnimateDiff Model 🎭🅐🅓②, class type: ADE_ApplyAnimateDiffModelSimple\nExecuting node 156, title: Context Options◆Looped Uniform 🎭🅐🅓, class type: ADE_LoopedUniformContextOptions\nExecuting node 77, title: Use Evolved Sampling 🎭🅐🅓②, class type: ADE_UseEvolvedSampling\nExecuting node 573, title: IPAdapter Unified Loader, class type: IPAdapterUnifiedLoader\n\u001b[33mINFO: Clip Vision model loaded from /src/ComfyUI/models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors\u001b[0m\n\u001b[33mINFO: IPAdapter model loaded from /src/ComfyUI/models/ipadapter/ip-adapter-plus_sd15.safetensors\u001b[0m\nExecuting node 142, title: Load Image, class type: LoadImage\nExecuting node 701, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nRequested to load CLIPVisionModelProjection\nLoading 1 new model\nExecuting node 545, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 135, title: Load Image, class type: LoadImage\nExecuting node 707, title: 
CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nExecuting node 548, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 680, title: Load Image, class type: LoadImage\nExecuting node 710, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nExecuting node 681, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\n\u001b[33mINFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. If the main focus of the picture is not in the middle the result might not be what you are expecting.\u001b[0m\nExecuting node 683, title: Load Image, class type: LoadImage\nExecuting node 713, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nExecuting node 682, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 752, title: Load Image, class type: LoadImage\nExecuting node 751, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\n\u001b[33mINFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. 
If the main focus of the picture is not in the middle the result might not be what you are expecting.\u001b[0m\nRequested to load SD1ClipModel\nLoading 1 new model\nExecuting node 565, title: Positive, class type: CLIPTextEncode\nExecuting node 566, title: Negative, class type: CLIPTextEncode\nExecuting node 134, title: Empty Latent Image, class type: EmptyLatentImage\nExecuting node 80, title: KSampler, class type: KSampler\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Sliding context window activated - latents passed in (96) greater than context_length 16.\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.\nRequested to load BaseModel\nRequested to load AnimateDiffModel\nLoading 2 new models\n 0%| | 0/8 [00:00<?, ?it/s]\n 12%|█▎ | 1/8 [00:02<00:14, 2.09s/it]\n 25%|██▌ | 2/8 [00:04<00:11, 1.99s/it]\n 38%|███▊ | 3/8 [00:05<00:09, 1.96s/it]\n 50%|█████ | 4/8 [00:07<00:07, 1.95s/it]\n 62%|██████▎ | 5/8 [00:09<00:05, 1.94s/it]\n 75%|███████▌ | 6/8 [00:11<00:03, 1.94s/it]\n 88%|████████▊ | 7/8 [00:13<00:01, 1.93s/it]\n100%|██████████| 8/8 [00:15<00:00, 1.93s/it]\n100%|██████████| 8/8 [00:15<00:00, 1.95s/it]\nExecuting node 85, title: Load VAE, class type: VAELoader\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nRequested to load AutoencoderKL\nLoading 1 new model\nExecuting node 84, title: VAE Decode, class type: VAEDecode\nExecuting node 53, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\nExecuting node 203, title: Upscale Image By, class type: ImageScaleBy\nExecuting node 204, title: VAE Encode, class type: VAEEncode\nExecuting node 198, title: KSampler, class type: KSampler\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Sliding context window activated - latents passed in (96) greater than context_length 16.\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.\n 0%| | 0/8 [00:00<?, ?it/s]\n 12%|█▎ | 1/8 [00:03<00:27, 3.88s/it]\n 25%|██▌ | 2/8 
[00:07<00:23, 3.88s/it]\n 38%|███▊ | 3/8 [00:11<00:19, 3.89s/it]\n 50%|█████ | 4/8 [00:15<00:15, 3.89s/it]\n 62%|██████▎ | 5/8 [00:19<00:11, 3.95s/it]\n 75%|███████▌ | 6/8 [00:23<00:07, 3.94s/it]\n 88%|████████▊ | 7/8 [00:27<00:03, 3.93s/it]\n100%|██████████| 8/8 [00:31<00:00, 3.93s/it]\n100%|██████████| 8/8 [00:31<00:00, 3.92s/it]\nExecuting node 201, title: VAE Decode, class type: VAEDecode\nExecuting node 205, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\nExecuting node 270, title: Load Upscale Model, class type: UpscaleModelLoader\nExecuting node 271, title: Upscale Image (using Model), class type: ImageUpscaleWithModel\nExecuting node 279, title: Upscale Image, class type: ImageScale\nExecuting node 272, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\nExecuting node 770, title: FILM VFI, class type: FILM VFI\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Final clearing cache...\nComfy-VFI: Done cache clearing\nExecuting node 219, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\nPrompt executed in 250.36 seconds\noutputs: {'53': {'gifs': [{'filename': 'preview_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '205': {'gifs': [{'filename': 'upscaled_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '272': {'gifs': [{'filename': 'upscaled_model_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '219': {'gifs': [{'filename': 
'interpolated_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}}\n====================================\npreview_00001.mp4\nupscaled_00001.mp4\nupscaled_model_00001.mp4\ninterpolated_00001.mp4",
"metrics": {
"predict_time": 265.163699,
"total_time": 287.246754
},
"output": [
"https://replicate.delivery/pbxt/K9dkJNIWZC72LRQeSGRxexJgSSkUy8Ib7vt2s0goVjp3cttSA/preview_00001.mp4",
"https://replicate.delivery/pbxt/ff9G37f5SNWF6o7a56G5hrS1YQLwcBYJRi123cjvw3cw5ablA/upscaled_00001.mp4",
"https://replicate.delivery/pbxt/RqySCgbh21KvGVkmpCvImkFNJALWAAKPqZOeOPuiELucu2WJA/upscaled_model_00001.mp4",
"https://replicate.delivery/pbxt/5VWDfWRTrF0nMivSriscwK3SSdZl1oy7amOshxofepdy5ablA/interpolated_00001.mp4"
],
"started_at": "2024-04-24T15:24:33.027055Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/zq1ppbr981rgg0cf22trg7ex1r",
"cancel": "https://api.replicate.com/v1/predictions/zq1ppbr981rgg0cf22trg7ex1r/cancel"
},
"version": "355c6bbaf8bc2deeafd3e2384a50af51bc2091a8be96dc082f1ef02c74640baf"
}
Random seed set to: 665779694
Checking inputs
✅ /tmp/inputs/2.png
✅ /tmp/inputs/1.png
✅ /tmp/inputs/3.png
✅ /tmp/inputs/4.png
✅ /tmp/inputs/circles.mp4
✅ /tmp/inputs/style.png
====================================
Checking weights
Including weights for IPAdapter preset: PLUS (high strength)
⏳ Downloading ip-adapter-plus_sdxl_vit-h.safetensors to ComfyUI/models/ipadapter
⌛️ Downloaded ip-adapter-plus_sdxl_vit-h.safetensors in 0.60s, size: 808.26MB
✅ ip-adapter-plus_sdxl_vit-h.safetensors
⏳ Downloading RealESRGAN_x4.pth to ComfyUI/models/upscale_models
⌛️ Downloaded RealESRGAN_x4.pth in 0.15s, size: 63.94MB
✅ RealESRGAN_x4.pth
⏳ Downloading vae-ft-mse-840000-ema-pruned.safetensors to ComfyUI/models/vae
⌛️ Downloaded vae-ft-mse-840000-ema-pruned.safetensors in 0.35s, size: 319.14MB
✅ vae-ft-mse-840000-ema-pruned.safetensors
⏳ Downloading CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors to ComfyUI/models/clip_vision
⌛️ Downloaded CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors in 1.94s, size: 2411.24MB
✅ CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors
⏳ Downloading ip-adapter-plus_sd15.safetensors to ComfyUI/models/ipadapter
⌛️ Downloaded ip-adapter-plus_sd15.safetensors in 0.18s, size: 93.63MB
✅ ip-adapter-plus_sd15.safetensors
⏳ Downloading film_net_fp32.pt to ComfyUI/custom_nodes/ComfyUI-Frame-Interpolation/ckpts/film
⌛️ Downloaded film_net_fp32.pt in 0.25s, size: 131.53MB
✅ film_net_fp32.pt
⏳ Downloading AnimateLCM_sd15_t2v_lora.safetensors to ComfyUI/models/loras
⌛️ Downloaded AnimateLCM_sd15_t2v_lora.safetensors in 0.23s, size: 128.39MB
✅ AnimateLCM_sd15_t2v_lora.safetensors
⏳ Downloading toonyou_beta6.safetensors to ComfyUI/models/checkpoints
⌛️ Downloaded toonyou_beta6.safetensors in 1.36s, size: 2193.39MB
✅ toonyou_beta6.safetensors
⏳ Downloading control_v1p_sd15_qrcode_monster.safetensors to ComfyUI/models/controlnet
⌛️ Downloaded control_v1p_sd15_qrcode_monster.safetensors in 0.55s, size: 689.12MB
✅ control_v1p_sd15_qrcode_monster.safetensors
⏳ Downloading AnimateLCM_sd15_t2v.ckpt to ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/models
⌛️ Downloaded AnimateLCM_sd15_t2v.ckpt in 1.16s, size: 1729.05MB
✅ AnimateLCM_sd15_t2v.ckpt
====================================
Running workflow
got prompt
Executing node 564, title: Load Checkpoint, class type: CheckpointLoaderSimple
model_type EPS
Using pytorch attention in VAE
Using pytorch attention in VAE
clip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight']
loaded straight to GPU
Requested to load BaseModel
Loading 1 new model
Executing node 563, title: LoraLoaderModelOnly, class type: LoraLoaderModelOnly
[AnimateDiffEvo] - INFO - Loading motion module AnimateLCM_sd15_t2v.ckpt via Gen2
Executing node 87, title: Load AnimateDiff Model 🎭🅐🅓②, class type: ADE_LoadAnimateDiffModel
Executing node 256, title: Motion Scale 🎭🅐🅓, class type: ADE_MultivalDynamic
Executing node 79, title: Apply AnimateDiff Model 🎭🅐🅓②, class type: ADE_ApplyAnimateDiffModelSimple
Executing node 156, title: Context Options◆Looped Uniform 🎭🅐🅓, class type: ADE_LoopedUniformContextOptions
Executing node 77, title: Use Evolved Sampling 🎭🅐🅓②, class type: ADE_UseEvolvedSampling
Executing node 573, title: IPAdapter Unified Loader, class type: IPAdapterUnifiedLoader
INFO: Clip Vision model loaded from /src/ComfyUI/models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors
INFO: IPAdapter model loaded from /src/ComfyUI/models/ipadapter/ip-adapter-plus_sd15.safetensors
Executing node 142, title: Load Image, class type: LoadImage
Executing node 701, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Requested to load CLIPVisionModelProjection
Loading 1 new model
Executing node 545, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 135, title: Load Image, class type: LoadImage
Executing node 707, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Executing node 548, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 680, title: Load Image, class type: LoadImage
Executing node 710, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Executing node 681, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
INFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. If the main focus of the picture is not in the middle the result might not be what you are expecting.
Executing node 683, title: Load Image, class type: LoadImage
Executing node 713, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Executing node 682, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 752, title: Load Image, class type: LoadImage
Executing node 751, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
INFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. If the main focus of the picture is not in the middle the result might not be what you are expecting.
Requested to load SD1ClipModel
Loading 1 new model
Executing node 565, title: Positive, class type: CLIPTextEncode
Executing node 566, title: Negative, class type: CLIPTextEncode
Executing node 134, title: Empty Latent Image, class type: EmptyLatentImage
Executing node 80, title: KSampler, class type: KSampler
[AnimateDiffEvo] - INFO - Sliding context window activated - latents passed in (96) greater than context_length 16.
[AnimateDiffEvo] - INFO - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.
Requested to load BaseModel
Requested to load AnimateDiffModel
Loading 2 new models
0%| | 0/8 [00:00<?, ?it/s]
12%|█▎ | 1/8 [00:02<00:14, 2.09s/it]
25%|██▌ | 2/8 [00:04<00:11, 1.99s/it]
38%|███▊ | 3/8 [00:05<00:09, 1.96s/it]
50%|█████ | 4/8 [00:07<00:07, 1.95s/it]
62%|██████▎ | 5/8 [00:09<00:05, 1.94s/it]
75%|███████▌ | 6/8 [00:11<00:03, 1.94s/it]
88%|████████▊ | 7/8 [00:13<00:01, 1.93s/it]
100%|██████████| 8/8 [00:15<00:00, 1.93s/it]
100%|██████████| 8/8 [00:15<00:00, 1.95s/it]
Executing node 85, title: Load VAE, class type: VAELoader
Using pytorch attention in VAE
Using pytorch attention in VAE
Requested to load AutoencoderKL
Loading 1 new model
Executing node 84, title: VAE Decode, class type: VAEDecode
Executing node 53, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Executing node 203, title: Upscale Image By, class type: ImageScaleBy
Executing node 204, title: VAE Encode, class type: VAEEncode
Executing node 198, title: KSampler, class type: KSampler
[AnimateDiffEvo] - INFO - Sliding context window activated - latents passed in (96) greater than context_length 16.
[AnimateDiffEvo] - INFO - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.
0%| | 0/8 [00:00<?, ?it/s]
12%|█▎ | 1/8 [00:03<00:27, 3.88s/it]
25%|██▌ | 2/8 [00:07<00:23, 3.88s/it]
38%|███▊ | 3/8 [00:11<00:19, 3.89s/it]
50%|█████ | 4/8 [00:15<00:15, 3.89s/it]
62%|██████▎ | 5/8 [00:19<00:11, 3.95s/it]
75%|███████▌ | 6/8 [00:23<00:07, 3.94s/it]
88%|████████▊ | 7/8 [00:27<00:03, 3.93s/it]
100%|██████████| 8/8 [00:31<00:00, 3.93s/it]
100%|██████████| 8/8 [00:31<00:00, 3.92s/it]
Executing node 201, title: VAE Decode, class type: VAEDecode
Executing node 205, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Executing node 270, title: Load Upscale Model, class type: UpscaleModelLoader
Executing node 271, title: Upscale Image (using Model), class type: ImageUpscaleWithModel
Executing node 279, title: Upscale Image, class type: ImageScale
Executing node 272, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Executing node 770, title: FILM VFI, class type: FILM VFI
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Final clearing cache...
Comfy-VFI: Done cache clearing
Executing node 219, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Prompt executed in 250.36 seconds
outputs: {'53': {'gifs': [{'filename': 'preview_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '205': {'gifs': [{'filename': 'upscaled_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '272': {'gifs': [{'filename': 'upscaled_model_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '219': {'gifs': [{'filename': 'interpolated_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}}
====================================
preview_00001.mp4
upscaled_00001.mp4
upscaled_model_00001.mp4
interpolated_00001.mp4