Failed to load versions. Head to the versions page to see all versions for this model.
You're looking at a specific version of this model. Jump to the model overview.
fofr /video-morpher:e70e9750
Input
Run this model in Node.js with one line of code:
npm install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/video-morpher using Replicate's API. Check out the model's schema for an overview of inputs and outputs.
// Start the prediction and wait for it to finish. `output` is an array of
// file outputs — one per generated video (preview, upscaled, upscaled-by-model,
// interpolated).
const output = await replicate.run(
  "fofr/video-morpher:e70e975067d2b5dbe9e2d9022833d27230a1bdeb3f4af6fe6bb49a548a3039a7",
  {
    input: {
      mode: "upscaled-and-interpolated",
      prompt: "bright, vibrant, high contrast",
      checkpoint: "3D",
      style_image: "https://replicate.delivery/pbxt/KnxLVfe6BRRu2zHc3gT99mnwaemKfR4JzaZWxMCLsZYSTKzp/2024-03-05--06-47-29-u-q1-fofr_tropical_purple_beksinski_aaad09f0-d194-4e40-b312-51054fc4ebbf.png",
      aspect_ratio: "3:4",
      style_strength: 0.5,
      use_controlnet: true,
      negative_prompt: "dark, gloomy",
      subject_image_1: "https://replicate.delivery/pbxt/KnxLX9qnA82YKCkScCJZf5VCX6hy5RuprGwLEAVjp3vu6Oh1/1.webp",
      subject_image_2: "https://replicate.delivery/pbxt/KnxLWqPg086DnRTUvxDM7gCBYi3W3coIbn3Q8jXnTJsfvZzt/2.webp",
      subject_image_3: "https://replicate.delivery/pbxt/KnxLWU6aHjc6kVW2QOF0xC4oen8zuPnlsK2o24GtOV2bJEn8/4.webp",
      subject_image_4: "https://replicate.delivery/pbxt/KnxLVg54ySFfg8s78YyfzK0Qgse8YdTbSmEZKpnkFkmSFlsf/4.webp"
    }
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk.
// FIX: the original called fs.writeFile(path, data) without a callback, which
// throws ERR_INVALID_CALLBACK in Node.js. The promise-based API awaits cleanly
// and also accepts stream-like file outputs. The model produces MP4 video, so
// use an .mp4 extension instead of the misleading "my-image.png".
await fs.promises.writeFile("output_0.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fofr/video-morpher using Replicate's API. Check out the model's schema for an overview of inputs and outputs.
# Inputs for the video morph: four subject images to morph between, a style
# image, and generation settings. Keys and values match the model's schema.
model_input = {
    "mode": "upscaled-and-interpolated",
    "prompt": "bright, vibrant, high contrast",
    "checkpoint": "3D",
    "style_image": "https://replicate.delivery/pbxt/KnxLVfe6BRRu2zHc3gT99mnwaemKfR4JzaZWxMCLsZYSTKzp/2024-03-05--06-47-29-u-q1-fofr_tropical_purple_beksinski_aaad09f0-d194-4e40-b312-51054fc4ebbf.png",
    "aspect_ratio": "3:4",
    "style_strength": 0.5,
    "use_controlnet": True,
    "negative_prompt": "dark, gloomy",
    "subject_image_1": "https://replicate.delivery/pbxt/KnxLX9qnA82YKCkScCJZf5VCX6hy5RuprGwLEAVjp3vu6Oh1/1.webp",
    "subject_image_2": "https://replicate.delivery/pbxt/KnxLWqPg086DnRTUvxDM7gCBYi3W3coIbn3Q8jXnTJsfvZzt/2.webp",
    "subject_image_3": "https://replicate.delivery/pbxt/KnxLWU6aHjc6kVW2QOF0xC4oen8zuPnlsK2o24GtOV2bJEn8/4.webp",
    "subject_image_4": "https://replicate.delivery/pbxt/KnxLVg54ySFfg8s78YyfzK0Qgse8YdTbSmEZKpnkFkmSFlsf/4.webp",
}

# Run the model and block until it finishes; the result is the model's output
# (a list of video URLs).
output = replicate.run(
    "fofr/video-morpher:e70e975067d2b5dbe9e2d9022833d27230a1bdeb3f4af6fe6bb49a548a3039a7",
    input=model_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fofr/video-morpher using Replicate's API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API.
# - "Authorization: Bearer $REPLICATE_API_TOKEN" authenticates the request.
# - "Prefer: wait" asks the server to hold the connection open until the
#   prediction finishes, so the response contains the completed prediction.
# - The $'...' quoting is ANSI-C style; the JSON body carries the model
#   version and the same inputs shown in the client examples above.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "fofr/video-morpher:e70e975067d2b5dbe9e2d9022833d27230a1bdeb3f4af6fe6bb49a548a3039a7",
"input": {
"mode": "upscaled-and-interpolated",
"prompt": "bright, vibrant, high contrast",
"checkpoint": "3D",
"style_image": "https://replicate.delivery/pbxt/KnxLVfe6BRRu2zHc3gT99mnwaemKfR4JzaZWxMCLsZYSTKzp/2024-03-05--06-47-29-u-q1-fofr_tropical_purple_beksinski_aaad09f0-d194-4e40-b312-51054fc4ebbf.png",
"aspect_ratio": "3:4",
"style_strength": 0.5,
"use_controlnet": true,
"negative_prompt": "dark, gloomy",
"subject_image_1": "https://replicate.delivery/pbxt/KnxLX9qnA82YKCkScCJZf5VCX6hy5RuprGwLEAVjp3vu6Oh1/1.webp",
"subject_image_2": "https://replicate.delivery/pbxt/KnxLWqPg086DnRTUvxDM7gCBYi3W3coIbn3Q8jXnTJsfvZzt/2.webp",
"subject_image_3": "https://replicate.delivery/pbxt/KnxLWU6aHjc6kVW2QOF0xC4oen8zuPnlsK2o24GtOV2bJEn8/4.webp",
"subject_image_4": "https://replicate.delivery/pbxt/KnxLVg54ySFfg8s78YyfzK0Qgse8YdTbSmEZKpnkFkmSFlsf/4.webp"
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate's HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
Output
{
"completed_at": "2024-04-24T20:05:10.327727Z",
"created_at": "2024-04-24T19:59:43.271000Z",
"data_removed": false,
"error": null,
"id": "9esbb1jcmxrgg0cf26rsptyyf8",
"input": {
"mode": "upscaled-and-interpolated",
"prompt": "bright, vibrant, high contrast",
"checkpoint": "3D",
"style_image": "https://replicate.delivery/pbxt/KnxLVfe6BRRu2zHc3gT99mnwaemKfR4JzaZWxMCLsZYSTKzp/2024-03-05--06-47-29-u-q1-fofr_tropical_purple_beksinski_aaad09f0-d194-4e40-b312-51054fc4ebbf.png",
"aspect_ratio": "3:4",
"style_strength": 0.5,
"use_controlnet": true,
"negative_prompt": "dark, gloomy",
"subject_image_1": "https://replicate.delivery/pbxt/KnxLX9qnA82YKCkScCJZf5VCX6hy5RuprGwLEAVjp3vu6Oh1/1.webp",
"subject_image_2": "https://replicate.delivery/pbxt/KnxLWqPg086DnRTUvxDM7gCBYi3W3coIbn3Q8jXnTJsfvZzt/2.webp",
"subject_image_3": "https://replicate.delivery/pbxt/KnxLWU6aHjc6kVW2QOF0xC4oen8zuPnlsK2o24GtOV2bJEn8/4.webp",
"subject_image_4": "https://replicate.delivery/pbxt/KnxLVg54ySFfg8s78YyfzK0Qgse8YdTbSmEZKpnkFkmSFlsf/4.webp"
},
"logs": "Random seed set to: 2661073672\nChecking inputs\nβ
/tmp/inputs/2.png\nβ
/tmp/inputs/1.png\nβ
/tmp/inputs/3.png\nβ
/tmp/inputs/4.png\nβ
/tmp/inputs/circles.mp4\nβ
/tmp/inputs/style.png\n====================================\nChecking weights\nIncluding weights for IPAdapter preset: PLUS (high strength)\nβ³ Downloading vae-ft-mse-840000-ema-pruned.safetensors to ComfyUI/models/vae\nβοΈ Downloaded vae-ft-mse-840000-ema-pruned.safetensors in 0.41s, size: 319.14MB\nβ
vae-ft-mse-840000-ema-pruned.safetensors\nβ³ Downloading RealESRGAN_x4.pth to ComfyUI/models/upscale_models\nβοΈ Downloaded RealESRGAN_x4.pth in 0.18s, size: 63.94MB\nβ
RealESRGAN_x4.pth\nβ³ Downloading AnimateLCM_sd15_t2v.ckpt to ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/models\nβοΈ Downloaded AnimateLCM_sd15_t2v.ckpt in 1.20s, size: 1729.05MB\nβ
AnimateLCM_sd15_t2v.ckpt\nβ³ Downloading AnimateLCM_sd15_t2v_lora.safetensors to ComfyUI/models/loras\nβοΈ Downloaded AnimateLCM_sd15_t2v_lora.safetensors in 0.92s, size: 128.39MB\nβ
AnimateLCM_sd15_t2v_lora.safetensors\nβ³ Downloading ip-adapter-plus_sd15.safetensors to ComfyUI/models/ipadapter\nβοΈ Downloaded ip-adapter-plus_sd15.safetensors in 0.25s, size: 93.63MB\nβ
ip-adapter-plus_sd15.safetensors\nβ³ Downloading rcnzCartoon3d_v20.safetensors to ComfyUI/models/checkpoints\nβοΈ Downloaded rcnzCartoon3d_v20.safetensors in 2.04s, size: 2033.83MB\nβ
rcnzCartoon3d_v20.safetensors\nβ³ Downloading ip-adapter-plus_sdxl_vit-h.safetensors to ComfyUI/models/ipadapter\nβοΈ Downloaded ip-adapter-plus_sdxl_vit-h.safetensors in 0.67s, size: 808.26MB\nβ
ip-adapter-plus_sdxl_vit-h.safetensors\nβ³ Downloading CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors to ComfyUI/models/clip_vision\nβοΈ Downloaded CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors in 11.24s, size: 2411.24MB\nβ
CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors\nβ³ Downloading film_net_fp32.pt to ComfyUI/custom_nodes/ComfyUI-Frame-Interpolation/ckpts/film\nβοΈ Downloaded film_net_fp32.pt in 1.16s, size: 131.53MB\nβ
film_net_fp32.pt\nβ³ Downloading control_v1p_sd15_qrcode_monster.safetensors to ComfyUI/models/controlnet\nβοΈ Downloaded control_v1p_sd15_qrcode_monster.safetensors in 0.69s, size: 689.12MB\nβ
control_v1p_sd15_qrcode_monster.safetensors\n====================================\nRunning workflow\ngot prompt\nExecuting node 564, title: Load Checkpoint, class type: CheckpointLoaderSimple\nmodel_type EPS\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nclip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight']\nloaded straight to GPU\nRequested to load BaseModel\nLoading 1 new model\nExecuting node 563, title: LoraLoaderModelOnly, class type: LoraLoaderModelOnly\nExecuting node 87, title: Load AnimateDiff Model ππ
π
β‘, class type: ADE_LoadAnimateDiffModel[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Loading motion module AnimateLCM_sd15_t2v.ckpt via Gen2\nExecuting node 256, title: Motion Scale ππ
π
, class type: ADE_MultivalDynamic\nExecuting node 79, title: Apply AnimateDiff Model ππ
π
β‘, class type: ADE_ApplyAnimateDiffModelSimple\nExecuting node 156, title: Context OptionsβLooped Uniform ππ
π
, class type: ADE_LoopedUniformContextOptions\nExecuting node 77, title: Use Evolved Sampling ππ
π
β‘, class type: ADE_UseEvolvedSampling\nExecuting node 573, title: IPAdapter Unified Loader, class type: IPAdapterUnifiedLoader\n\u001b[33mINFO: Clip Vision model loaded from /src/ComfyUI/models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors\u001b[0m\n\u001b[33mINFO: IPAdapter model loaded from /src/ComfyUI/models/ipadapter/ip-adapter-plus_sd15.safetensors\u001b[0m\nExecuting node 142, title: Load Image, class type: LoadImage\nExecuting node 701, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nRequested to load CLIPVisionModelProjection\nLoading 1 new model\nExecuting node 545, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 135, title: Load Image, class type: LoadImage\nExecuting node 707, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nExecuting node 548, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 680, title: Load Image, class type: LoadImage\nExecuting node 710, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nExecuting node 681, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 683, title: Load Image, class type: LoadImage\nExecuting node 713, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced\nExecuting node 682, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\nExecuting node 752, title: Load Image, class type: LoadImage\nExecuting node 751, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch\n\u001b[33mINFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. If the main focus of the picture is not in the middle the result might not be what you are expecting.\u001b[0m\nExecuting node 565, title: Positive, class type: CLIPTextEncode\nRequested to load SD1ClipModel\nLoading 1 new model\nExecuting node 566, title: Negative, class type: CLIPTextEncode\nExecuting node 127, title: Load Advanced ControlNet Model ππ
π
π
, class type: ControlNetLoaderAdvanced\nExecuting node 134, title: Empty Latent Image, class type: EmptyLatentImage\nExecuting node 569, title: π§ Batch Count, class type: BatchCount+\nExecuting node 746, title: Load Video (Upload) π₯π
₯π
π
’, class type: VHS_LoadVideo\nExecuting node 461, title: π§ Simple Math, class type: SimpleMath+\nExecuting node 454, title: RepeatImageBatch, class type: RepeatImageBatch\nExecuting node 458, title: Split Image Batch π₯π
₯π
π
’, class type: VHS_SplitImages\nExecuting node 125, title: Apply Advanced ControlNet ππ
π
π
, class type: ACN_AdvancedControlNetApply\nExecuting node 80, title: KSampler, class type: KSampler\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Sliding context window activated - latents passed in (96) greater than context_length 16.\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.\nRequested to load AnimateDiffModel\nRequested to load BaseModel\nRequested to load ControlNet\nLoading 3 new models\n 0%| | 0/11 [00:00<?, ?it/s]\n 9%|β | 1/11 [00:03<00:30, 3.10s/it]\n 18%|ββ | 2/11 [00:05<00:22, 2.50s/it]\n 27%|βββ | 3/11 [00:07<00:19, 2.43s/it]\n 36%|ββββ | 4/11 [00:09<00:16, 2.40s/it]\n 45%|βββββ | 5/11 [00:11<00:13, 2.22s/it]\n 55%|ββββββ | 6/11 [00:13<00:10, 2.11s/it]\n 64%|βββββββ | 7/11 [00:15<00:08, 2.04s/it]\n 73%|ββββββββ | 8/11 [00:17<00:05, 1.99s/it]\n 82%|βββββββββ | 9/11 [00:19<00:03, 1.96s/it]\n 91%|βββββββββ | 10/11 [00:21<00:01, 1.94s/it]\n100%|ββββββββββ| 11/11 [00:23<00:00, 1.93s/it]\n100%|ββββββββββ| 11/11 [00:23<00:00, 2.11s/it]\nExecuting node 85, title: Load VAE, class type: VAELoader\nUsing pytorch attention in VAE\nUsing pytorch attention in VAE\nRequested to load AutoencoderKL\nLoading 1 new model\nExecuting node 84, title: VAE Decode, class type: VAEDecode\nExecuting node 53, title: Video Combine π₯π
₯π
π
’, class type: VHS_VideoCombine\nExecuting node 203, title: Upscale Image By, class type: ImageScaleBy\nExecuting node 204, title: VAE Encode, class type: VAEEncode\nExecuting node 198, title: KSampler, class type: KSampler\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Sliding context window activated - latents passed in (96) greater than context_length 16.\n[AnimateDiffEvo] - \u001b[0;32mINFO\u001b[0m - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.\n 0%| | 0/11 [00:00<?, ?it/s]\n 9%|β | 1/11 [00:03<00:38, 3.86s/it]\n 18%|ββ | 2/11 [00:07<00:34, 3.85s/it]\n 27%|βββ | 3/11 [00:11<00:30, 3.85s/it]\n 36%|ββββ | 4/11 [00:15<00:27, 3.86s/it]\n 45%|βββββ | 5/11 [00:19<00:23, 3.86s/it]\n 55%|ββββββ | 6/11 [00:23<00:19, 3.86s/it]\n 64%|βββββββ | 7/11 [00:27<00:15, 3.86s/it]\n 73%|ββββββββ | 8/11 [00:30<00:11, 3.86s/it]\n 82%|βββββββββ | 9/11 [00:34<00:07, 3.87s/it]\n 91%|βββββββββ | 10/11 [00:38<00:03, 3.87s/it]\n100%|ββββββββββ| 11/11 [00:42<00:00, 3.87s/it]\n100%|ββββββββββ| 11/11 [00:42<00:00, 3.86s/it]\nExecuting node 201, title: VAE Decode, class type: VAEDecode\nExecuting node 205, title: Video Combine π₯π
₯π
π
’, class type: VHS_VideoCombine\nExecuting node 270, title: Load Upscale Model, class type: UpscaleModelLoader\nExecuting node 271, title: Upscale Image (using Model), class type: ImageUpscaleWithModel\nExecuting node 279, title: Upscale Image, class type: ImageScale\nExecuting node 272, title: Video Combine π₯π
₯π
π
’, class type: VHS_VideoCombine\nExecuting node 770, title: FILM VFI, class type: FILM VFI\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Clearing cache...\nComfy-VFI: Done cache clearing\nComfy-VFI: Final clearing cache...\nComfy-VFI: Done cache clearing\nExecuting node 219, title: Video Combine π₯π
₯π
π
’, class type: VHS_VideoCombine\nPrompt executed in 270.81 seconds\noutputs: {'53': {'gifs': [{'filename': 'preview_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '205': {'gifs': [{'filename': 'upscaled_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '272': {'gifs': [{'filename': 'upscaled_model_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '219': {'gifs': [{'filename': 'interpolated_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}}\n====================================\npreview_00001.mp4\nupscaled_00001.mp4\nupscaled_model_00001.mp4\ninterpolated_00001.mp4",
"metrics": {
"predict_time": 301.418943,
"total_time": 327.056727
},
"output": [
"https://replicate.delivery/pbxt/GPTWM5fboOShBKlFRQtMapKDoUBohf9qFxrySPt7AF4zfiblA/preview_00001.mp4",
"https://replicate.delivery/pbxt/GZV1nIJXsIJLMpIg1w6XHWxSfN3qm36kEkTIa2A1F6L6v4WJA/upscaled_00001.mp4",
"https://replicate.delivery/pbxt/N2u5EY0jm4ovC5GAztQaUrvL4KiyJTKzr1g5Ia2nKQe6v4WJA/upscaled_model_00001.mp4",
"https://replicate.delivery/pbxt/w3xiWkOFJEb7KRmQq5itovpqr1FGVZ033Nj6YBe1jiz6v4WJA/interpolated_00001.mp4"
],
"started_at": "2024-04-24T20:00:08.908784Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/9esbb1jcmxrgg0cf26rsptyyf8",
"cancel": "https://api.replicate.com/v1/predictions/9esbb1jcmxrgg0cf26rsptyyf8/cancel"
},
"version": "e70e975067d2b5dbe9e2d9022833d27230a1bdeb3f4af6fe6bb49a548a3039a7"
}
Random seed set to: 2661073672
Checking inputs
✅ /tmp/inputs/2.png
✅ /tmp/inputs/1.png
✅ /tmp/inputs/3.png
✅ /tmp/inputs/4.png
✅ /tmp/inputs/circles.mp4
✅ /tmp/inputs/style.png
====================================
Checking weights
Including weights for IPAdapter preset: PLUS (high strength)
⏳ Downloading vae-ft-mse-840000-ema-pruned.safetensors to ComfyUI/models/vae
✅️ Downloaded vae-ft-mse-840000-ema-pruned.safetensors in 0.41s, size: 319.14MB
✅ vae-ft-mse-840000-ema-pruned.safetensors
⏳ Downloading RealESRGAN_x4.pth to ComfyUI/models/upscale_models
✅️ Downloaded RealESRGAN_x4.pth in 0.18s, size: 63.94MB
✅ RealESRGAN_x4.pth
⏳ Downloading AnimateLCM_sd15_t2v.ckpt to ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/models
✅️ Downloaded AnimateLCM_sd15_t2v.ckpt in 1.20s, size: 1729.05MB
✅ AnimateLCM_sd15_t2v.ckpt
⏳ Downloading AnimateLCM_sd15_t2v_lora.safetensors to ComfyUI/models/loras
✅️ Downloaded AnimateLCM_sd15_t2v_lora.safetensors in 0.92s, size: 128.39MB
✅ AnimateLCM_sd15_t2v_lora.safetensors
⏳ Downloading ip-adapter-plus_sd15.safetensors to ComfyUI/models/ipadapter
✅️ Downloaded ip-adapter-plus_sd15.safetensors in 0.25s, size: 93.63MB
✅ ip-adapter-plus_sd15.safetensors
⏳ Downloading rcnzCartoon3d_v20.safetensors to ComfyUI/models/checkpoints
✅️ Downloaded rcnzCartoon3d_v20.safetensors in 2.04s, size: 2033.83MB
✅ rcnzCartoon3d_v20.safetensors
⏳ Downloading ip-adapter-plus_sdxl_vit-h.safetensors to ComfyUI/models/ipadapter
✅️ Downloaded ip-adapter-plus_sdxl_vit-h.safetensors in 0.67s, size: 808.26MB
✅ ip-adapter-plus_sdxl_vit-h.safetensors
⏳ Downloading CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors to ComfyUI/models/clip_vision
✅️ Downloaded CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors in 11.24s, size: 2411.24MB
✅ CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors
⏳ Downloading film_net_fp32.pt to ComfyUI/custom_nodes/ComfyUI-Frame-Interpolation/ckpts/film
✅️ Downloaded film_net_fp32.pt in 1.16s, size: 131.53MB
✅ film_net_fp32.pt
⏳ Downloading control_v1p_sd15_qrcode_monster.safetensors to ComfyUI/models/controlnet
✅️ Downloaded control_v1p_sd15_qrcode_monster.safetensors in 0.69s, size: 689.12MB
✅ control_v1p_sd15_qrcode_monster.safetensors
====================================
Running workflow
got prompt
Executing node 564, title: Load Checkpoint, class type: CheckpointLoaderSimple
model_type EPS
Using pytorch attention in VAE
Using pytorch attention in VAE
clip missing: ['clip_l.logit_scale', 'clip_l.transformer.text_projection.weight']
loaded straight to GPU
Requested to load BaseModel
Loading 1 new model
Executing node 563, title: LoraLoaderModelOnly, class type: LoraLoaderModelOnly
Executing node 87, title: Load AnimateDiff Model 🎭🅐🅓②, class type: ADE_LoadAnimateDiffModel
[AnimateDiffEvo] - INFO - Loading motion module AnimateLCM_sd15_t2v.ckpt via Gen2
Executing node 256, title: Motion Scale 🎭🅐🅓, class type: ADE_MultivalDynamic
Executing node 79, title: Apply AnimateDiff Model 🎭🅐🅓②, class type: ADE_ApplyAnimateDiffModelSimple
Executing node 156, title: Context Options◆Looped Uniform 🎭🅐🅓, class type: ADE_LoopedUniformContextOptions
Executing node 77, title: Use Evolved Sampling 🎭🅐🅓②, class type: ADE_UseEvolvedSampling
Executing node 573, title: IPAdapter Unified Loader, class type: IPAdapterUnifiedLoader
INFO: Clip Vision model loaded from /src/ComfyUI/models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors
INFO: IPAdapter model loaded from /src/ComfyUI/models/ipadapter/ip-adapter-plus_sd15.safetensors
Executing node 142, title: Load Image, class type: LoadImage
Executing node 701, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Requested to load CLIPVisionModelProjection
Loading 1 new model
Executing node 545, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 135, title: Load Image, class type: LoadImage
Executing node 707, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Executing node 548, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 680, title: Load Image, class type: LoadImage
Executing node 710, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Executing node 681, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 683, title: Load Image, class type: LoadImage
Executing node 713, title: CreateFadeMaskAdvanced, class type: CreateFadeMaskAdvanced
Executing node 682, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
Executing node 752, title: Load Image, class type: LoadImage
Executing node 751, title: IPAdapter Batch (Adv.), class type: IPAdapterBatch
INFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. If the main focus of the picture is not in the middle the result might not be what you are expecting.
Executing node 565, title: Positive, class type: CLIPTextEncode
Requested to load SD1ClipModel
Loading 1 new model
Executing node 566, title: Negative, class type: CLIPTextEncode
Executing node 127, title: Load Advanced ControlNet Model 🛂🅐🅒🅝, class type: ControlNetLoaderAdvanced
Executing node 134, title: Empty Latent Image, class type: EmptyLatentImage
Executing node 569, title: 🔧 Batch Count, class type: BatchCount+
Executing node 746, title: Load Video (Upload) 🎥🅥🅗🅢, class type: VHS_LoadVideo
Executing node 461, title: 🔧 Simple Math, class type: SimpleMath+
Executing node 454, title: RepeatImageBatch, class type: RepeatImageBatch
Executing node 458, title: Split Image Batch 🎥🅥🅗🅢, class type: VHS_SplitImages
Executing node 125, title: Apply Advanced ControlNet 🛂🅐🅒🅝, class type: ACN_AdvancedControlNetApply
Executing node 80, title: KSampler, class type: KSampler
[AnimateDiffEvo] - INFO - Sliding context window activated - latents passed in (96) greater than context_length 16.
[AnimateDiffEvo] - INFO - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.
Requested to load AnimateDiffModel
Requested to load BaseModel
Requested to load ControlNet
Loading 3 new models
0%| | 0/11 [00:00<?, ?it/s]
9%|β | 1/11 [00:03<00:30, 3.10s/it]
18%|ββ | 2/11 [00:05<00:22, 2.50s/it]
27%|βββ | 3/11 [00:07<00:19, 2.43s/it]
36%|ββββ | 4/11 [00:09<00:16, 2.40s/it]
45%|βββββ | 5/11 [00:11<00:13, 2.22s/it]
55%|ββββββ | 6/11 [00:13<00:10, 2.11s/it]
64%|βββββββ | 7/11 [00:15<00:08, 2.04s/it]
73%|ββββββββ | 8/11 [00:17<00:05, 1.99s/it]
82%|βββββββββ | 9/11 [00:19<00:03, 1.96s/it]
91%|βββββββββ | 10/11 [00:21<00:01, 1.94s/it]
100%|ββββββββββ| 11/11 [00:23<00:00, 1.93s/it]
100%|ββββββββββ| 11/11 [00:23<00:00, 2.11s/it]
Executing node 85, title: Load VAE, class type: VAELoader
Using pytorch attention in VAE
Using pytorch attention in VAE
Requested to load AutoencoderKL
Loading 1 new model
Executing node 84, title: VAE Decode, class type: VAEDecode
Executing node 53, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Executing node 203, title: Upscale Image By, class type: ImageScaleBy
Executing node 204, title: VAE Encode, class type: VAEEncode
Executing node 198, title: KSampler, class type: KSampler
[AnimateDiffEvo] - INFO - Sliding context window activated - latents passed in (96) greater than context_length 16.
[AnimateDiffEvo] - INFO - Using motion module AnimateLCM_sd15_t2v.ckpt:v2.
0%| | 0/11 [00:00<?, ?it/s]
9%|β | 1/11 [00:03<00:38, 3.86s/it]
18%|ββ | 2/11 [00:07<00:34, 3.85s/it]
27%|βββ | 3/11 [00:11<00:30, 3.85s/it]
36%|ββββ | 4/11 [00:15<00:27, 3.86s/it]
45%|βββββ | 5/11 [00:19<00:23, 3.86s/it]
55%|ββββββ | 6/11 [00:23<00:19, 3.86s/it]
64%|βββββββ | 7/11 [00:27<00:15, 3.86s/it]
73%|ββββββββ | 8/11 [00:30<00:11, 3.86s/it]
82%|βββββββββ | 9/11 [00:34<00:07, 3.87s/it]
91%|βββββββββ | 10/11 [00:38<00:03, 3.87s/it]
100%|ββββββββββ| 11/11 [00:42<00:00, 3.87s/it]
100%|ββββββββββ| 11/11 [00:42<00:00, 3.86s/it]
Executing node 201, title: VAE Decode, class type: VAEDecode
Executing node 205, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Executing node 270, title: Load Upscale Model, class type: UpscaleModelLoader
Executing node 271, title: Upscale Image (using Model), class type: ImageUpscaleWithModel
Executing node 279, title: Upscale Image, class type: ImageScale
Executing node 272, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Executing node 770, title: FILM VFI, class type: FILM VFI
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Clearing cache...
Comfy-VFI: Done cache clearing
Comfy-VFI: Final clearing cache...
Comfy-VFI: Done cache clearing
Executing node 219, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
Prompt executed in 270.81 seconds
outputs: {'53': {'gifs': [{'filename': 'preview_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '205': {'gifs': [{'filename': 'upscaled_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '272': {'gifs': [{'filename': 'upscaled_model_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}, '219': {'gifs': [{'filename': 'interpolated_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4'}]}}
====================================
preview_00001.mp4
upscaled_00001.mp4
upscaled_model_00001.mp4
interpolated_00001.mp4