zsxkib/wan-squish-1000steps:eedd0c09
Input
Run this model in Node.js. First, install Replicate's Node.js client library:
npm install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run zsxkib/wan-squish-1000steps using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"zsxkib/wan-squish-1000steps:eedd0c093a39ec030ceb9a31ddcc6bd705515088fe2d6da867214ac1adba07e5",
{
input: {
image: "https://replicate.delivery/pbxt/MgdQczBaRzH9j7EPPCr8LR56ctlvkmdXIf1GBxGPeErPGjHA/Screenshot%202025-03-19%20at%2011.24.14.png",
frames: 81,
prompt: "SQUISH-IT Cute golden retriever puppy sitting in grass with flowers. Human hands enter the frame and gently begin to squish and mold the puppy like soft dough. The puppy's fluffy fur and form gradually transform into a malleable clay-like substance as the hands shape it. The final shot shows the reshaped puppy-dough creation sitting on the grass surrounded by flowers.",
fast_mode: "Balanced",
resolution: "480p",
aspect_ratio: "16:9",
sample_shift: 8,
sample_steps: 30,
negative_prompt: "",
lora_strength_clip: 1,
sample_guide_scale: 5,
lora_strength_model: 0.8
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the video to disk:
import { writeFile } from "node:fs/promises";
await writeFile("output.mp4", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Run this model in Python. First, install Replicate's Python client library:
pip install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run zsxkib/wan-squish-1000steps using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"zsxkib/wan-squish-1000steps:eedd0c093a39ec030ceb9a31ddcc6bd705515088fe2d6da867214ac1adba07e5",
input={
"image": "https://replicate.delivery/pbxt/MgdQczBaRzH9j7EPPCr8LR56ctlvkmdXIf1GBxGPeErPGjHA/Screenshot%202025-03-19%20at%2011.24.14.png",
"frames": 81,
"prompt": "SQUISH-IT Cute golden retriever puppy sitting in grass with flowers. Human hands enter the frame and gently begin to squish and mold the puppy like soft dough. The puppy's fluffy fur and form gradually transform into a malleable clay-like substance as the hands shape it. The final shot shows the reshaped puppy-dough creation sitting on the grass surrounded by flowers.",
"fast_mode": "Balanced",
"resolution": "480p",
"aspect_ratio": "16:9",
"sample_shift": 8,
"sample_steps": 30,
"negative_prompt": "",
"lora_strength_clip": 1,
"sample_guide_scale": 5,
"lora_strength_model": 0.8
}
)
print(output)
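The output is a list containing the generated video. As a minimal sketch for saving it locally (assuming a recent version of the Python client, where file outputs come back as FileOutput objects; older versions return plain URLs instead):
# Continues from the replicate.run() call above.
# FileOutput.read() returns the file bytes; this model produces an MP4 video.
with open("output.mp4", "wb") as f:
    f.write(output[0].read())

# If your client version returns plain URLs instead of FileOutput objects:
# import urllib.request
# urllib.request.urlretrieve(output[0], "output.mp4")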
To learn more, take a look at the guide on getting started with Python.
Run this model with Replicate's HTTP API. Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run zsxkib/wan-squish-1000steps using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "zsxkib/wan-squish-1000steps:eedd0c093a39ec030ceb9a31ddcc6bd705515088fe2d6da867214ac1adba07e5",
"input": {
"image": "https://replicate.delivery/pbxt/MgdQczBaRzH9j7EPPCr8LR56ctlvkmdXIf1GBxGPeErPGjHA/Screenshot%202025-03-19%20at%2011.24.14.png",
"frames": 81,
"prompt": "SQUISH-IT Cute golden retriever puppy sitting in grass with flowers. Human hands enter the frame and gently begin to squish and mold the puppy like soft dough. The puppy\'s fluffy fur and form gradually transform into a malleable clay-like substance as the hands shape it. The final shot shows the reshaped puppy-dough creation sitting on the grass surrounded by flowers.",
"fast_mode": "Balanced",
"resolution": "480p",
"aspect_ratio": "16:9",
"sample_shift": 8,
"sample_steps": 30,
"negative_prompt": "",
"lora_strength_clip": 1,
"sample_guide_scale": 5,
"lora_strength_model": 0.8
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
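The Prefer: wait header asks Replicate to hold the connection open until the prediction finishes, but this model took over three minutes in the example run below, so the request may return while the prediction is still "starting" or "processing". In that case, poll the prediction's urls.get endpoint until status is "succeeded" (the prediction ID here is a placeholder):
curl -s \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  https://api.replicate.com/v1/predictions/<prediction-id>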
You can also run this model on your own machine with Cog. Install Cog with Homebrew:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/zsxkib/wan-squish-1000steps@sha256:eedd0c093a39ec030ceb9a31ddcc6bd705515088fe2d6da867214ac1adba07e5 \
-i 'image="https://replicate.delivery/pbxt/MgdQczBaRzH9j7EPPCr8LR56ctlvkmdXIf1GBxGPeErPGjHA/Screenshot%202025-03-19%20at%2011.24.14.png"' \
-i 'frames=81' \
-i $'prompt="SQUISH-IT Cute golden retriever puppy sitting in grass with flowers. Human hands enter the frame and gently begin to squish and mold the puppy like soft dough. The puppy\'s fluffy fur and form gradually transform into a malleable clay-like substance as the hands shape it. The final shot shows the reshaped puppy-dough creation sitting on the grass surrounded by flowers."' \
-i 'fast_mode="Balanced"' \
-i 'resolution="480p"' \
-i 'aspect_ratio="16:9"' \
-i 'sample_shift=8' \
-i 'sample_steps=30' \
-i 'negative_prompt=""' \
-i 'lora_strength_clip=1' \
-i 'sample_guide_scale=5' \
-i 'lora_strength_model=0.8'
To learn more, take a look at the Cog documentation.
Alternatively, run the model as a local HTTP server with Docker:
docker run -d -p 5000:5000 --gpus=all r8.im/zsxkib/wan-squish-1000steps@sha256:eedd0c093a39ec030ceb9a31ddcc6bd705515088fe2d6da867214ac1adba07e5
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "image": "https://replicate.delivery/pbxt/MgdQczBaRzH9j7EPPCr8LR56ctlvkmdXIf1GBxGPeErPGjHA/Screenshot%202025-03-19%20at%2011.24.14.png",
      "frames": 81,
      "prompt": "SQUISH-IT Cute golden retriever puppy sitting in grass with flowers. Human hands enter the frame and gently begin to squish and mold the puppy like soft dough. The puppy\'s fluffy fur and form gradually transform into a malleable clay-like substance as the hands shape it. The final shot shows the reshaped puppy-dough creation sitting on the grass surrounded by flowers.",
      "fast_mode": "Balanced",
      "resolution": "480p",
      "aspect_ratio": "16:9",
      "sample_shift": 8,
      "sample_steps": 30,
      "negative_prompt": "",
      "lora_strength_clip": 1,
      "sample_guide_scale": 5,
      "lora_strength_model": 0.8
    }
  }' \
  http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
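The local server responds with a JSON prediction object that includes an output field. By default Cog returns file outputs as base64-encoded data URIs, so one way to save the video from a response stored in response.json (a sketch assuming jq and GNU coreutils base64 are available, and that output[0] is a data URI) is:
# Strip the data-URI prefix from the first output entry and decode it to an MP4 file
jq -r '.output[0]' response.json | sed 's/^data:[^,]*,//' | base64 --decode > output.mp4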
Output
{
"completed_at": "2025-03-19T13:11:44.883891Z",
"created_at": "2025-03-19T13:06:25.568000Z",
"data_removed": false,
"error": null,
"id": "3d7rtz96m1rme0cnnt1vney5yg",
"input": {
"image": "https://replicate.delivery/pbxt/MgdQczBaRzH9j7EPPCr8LR56ctlvkmdXIf1GBxGPeErPGjHA/Screenshot%202025-03-19%20at%2011.24.14.png",
"frames": 81,
"prompt": "SQUISH-IT Cute golden retriever puppy sitting in grass with flowers. Human hands enter the frame and gently begin to squish and mold the puppy like soft dough. The puppy's fluffy fur and form gradually transform into a malleable clay-like substance as the hands shape it. The final shot shows the reshaped puppy-dough creation sitting on the grass surrounded by flowers.",
"fast_mode": "Balanced",
"resolution": "480p",
"aspect_ratio": "16:9",
"sample_shift": 8,
"sample_steps": 30,
"negative_prompt": "",
"lora_strength_clip": 1,
"sample_guide_scale": 5,
"lora_strength_model": 0.8
},
"logs": "Random seed set to: 1598448377\n2025-03-19T13:07:58Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp0j3_5xxe/weights url=https://replicate.delivery/xezq/9DjUprMiHXZkFRQQqy3VbZBxAzV963P08m0RZZwkw1Dt7eMKA/trained_model.tar\n2025-03-19T13:08:02Z | INFO | [ Complete ] dest=/tmp/tmp0j3_5xxe/weights size=\"359 MB\" total_elapsed=3.376s url=https://replicate.delivery/xezq/9DjUprMiHXZkFRQQqy3VbZBxAzV963P08m0RZZwkw1Dt7eMKA/trained_model.tar\nChecking inputs\n✅ /tmp/inputs/image.png\n====================================\nChecking weights\n✅ umt5_xxl_fp16.safetensors exists in ComfyUI/models/text_encoders\n⏳ Downloading wan2.1_i2v_480p_14B_bf16.safetensors to ComfyUI/models/diffusion_models\n✅ wan2.1_i2v_480p_14B_bf16.safetensors downloaded to ComfyUI/models/diffusion_models in 18.11s, size: 31270.88MB\n✅ wan_2.1_vae.safetensors exists in ComfyUI/models/vae\n✅ clip_vision_h.safetensors exists in ComfyUI/models/clip_vision\n✅ 14b_02c3ebfa71932e569775343580ab386c.safetensors exists in loras directory\n====================================\nRunning workflow\n[ComfyUI] got prompt\nExecuting node 39, title: Load VAE, class type: VAELoader\n[ComfyUI] Using pytorch attention in VAE\n[ComfyUI] Using pytorch attention in VAE\n[ComfyUI] VAE load device: cuda:0, offload device: cpu, dtype: torch.bfloat16\nExecuting node 55, title: Load Image, class type: LoadImage\nExecuting node 56, title: Width and height for scaling image to ideal resolution 🪴, class type: Width and height for scaling image to ideal resolution 🪴\nExecuting node 57, title: 🔧 Image Resize, class type: ImageResize+\nExecuting node 60, title: Load CLIP Vision, class type: CLIPVisionLoader\n[ComfyUI] Requested to load CLIPVisionModelProjection\nExecuting node 59, title: CLIP Vision Encode, class type: CLIPVisionEncode\n[ComfyUI] loaded completely 141327.4875 1208.09814453125 True\nExecuting node 38, title: Load CLIP, class type: CLIPLoader\n[ComfyUI] CLIP/text encoder model load device: cuda:0, offload device: cpu, current: cpu, dtype: torch.float16\n[ComfyUI] Requested to load WanTEModel\nExecuting node 7, title: CLIP Text Encode (Negative Prompt), class type: CLIPTextEncode\n[ComfyUI] loaded completely 139855.3869140625 10835.4765625 True\nExecuting node 37, title: Load Diffusion Model, class type: UNETLoader\n[ComfyUI] model weight dtype torch.float16, manual cast: None\n[ComfyUI] model_type FLOW\nExecuting node 54, title: WanVideo Tea Cache (native), class type: WanVideoTeaCacheKJ\nExecuting node 49, title: Load LoRA, class type: LoraLoader\n[ComfyUI] Requested to load WanTEModel\nExecuting node 6, title: CLIP Text Encode (Positive Prompt), class type: CLIPTextEncode\n[ComfyUI] loaded completely 139853.3869140625 10835.4765625 True\nExecuting node 58, title: WanImageToVideo, class type: WanImageToVideo\n[ComfyUI] Requested to load WanVAE\n[ComfyUI] loaded completely 125099.8056602478 242.02829551696777 True\nExecuting node 48, title: ModelSamplingSD3, class type: ModelSamplingSD3\nExecuting node 53, title: WanVideo Enhance A Video (native), class type: WanVideoEnhanceAVideoKJ\nExecuting node 3, title: KSampler, class type: KSampler\n[ComfyUI] Requested to load WAN21\n[ComfyUI] loaded completely 122640.68833397522 31269.802368164062 True\n[ComfyUI]\n[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]\n[ComfyUI] 3%|▎ | 1/30 [00:08<04:03, 8.41s/it]\n[ComfyUI] 7%|▋ | 2/30 [00:18<04:26, 9.53s/it]\n[ComfyUI] 10%|█ | 3/30 [00:29<04:27, 9.90s/it]\n[ComfyUI] TeaCache: Initialized\n[ComfyUI]\n[ComfyUI] 13%|█▎ | 4/30 [00:41<04:46, 11.04s/it]\n[ComfyUI] 
20%|██ | 6/30 [00:52<03:12, 8.04s/it]\n[ComfyUI] 23%|██▎ | 7/30 [01:03<03:21, 8.77s/it]\n[ComfyUI] 30%|███ | 9/30 [01:14<02:32, 7.28s/it]\n[ComfyUI] 37%|███▋ | 11/30 [01:24<02:04, 6.55s/it]\n[ComfyUI] 43%|████▎ | 13/30 [01:35<01:44, 6.14s/it]\n[ComfyUI] 50%|█████ | 15/30 [01:46<01:28, 5.88s/it]\n[ComfyUI] 57%|█████▋ | 17/30 [01:57<01:14, 5.71s/it]\n[ComfyUI] 63%|██████▎ | 19/30 [02:07<01:01, 5.61s/it]\n[ComfyUI] 70%|███████ | 21/30 [02:18<00:49, 5.54s/it]\n[ComfyUI] 77%|███████▋ | 23/30 [02:18<00:26, 3.85s/it]\n[ComfyUI] 80%|████████ | 24/30 [02:29<00:30, 5.09s/it]\n[ComfyUI] 87%|████████▋ | 26/30 [02:29<00:13, 3.38s/it]\n[ComfyUI] 87%|████████▋ | 26/30 [02:40<00:13, 3.38s/it]\n[ComfyUI] 90%|█████████ | 27/30 [02:40<00:14, 4.81s/it]\n[ComfyUI] 97%|█████████▋| 29/30 [02:51<00:05, 5.01s/it]\n[ComfyUI] 100%|██████████| 30/30 [03:01<00:00, 6.16s/it]\nExecuting node 8, title: VAE Decode, class type: VAEDecode\nExecuting node 50, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine\n[ComfyUI] 100%|██████████| 30/30 [03:01<00:00, 6.06s/it]\n[ComfyUI] Prompt executed in 204.27 seconds\noutputs: {'50': {'gifs': [{'filename': 'R8_Wan_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 16.0, 'workflow': 'R8_Wan_00001.png', 'fullpath': '/tmp/outputs/R8_Wan_00001.mp4'}]}}\n====================================\nR8_Wan_00001.png\nR8_Wan_00001.mp4",
"metrics": {
"predict_time": 226.152341109,
"total_time": 319.315891
},
"output": [
"https://replicate.delivery/xezq/Cz4ndSSET2LRIRXXL9hmfaTkrGzlt41sIdfILB5IZzAQSHaUA/R8_Wan_00001.mp4"
],
"started_at": "2025-03-19T13:07:58.731550Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-snvqmrppncyylifkejayys63wvb3h3vcnptiqqre4gqwgwgfgswa",
"get": "https://api.replicate.com/v1/predictions/3d7rtz96m1rme0cnnt1vney5yg",
"cancel": "https://api.replicate.com/v1/predictions/3d7rtz96m1rme0cnnt1vney5yg/cancel"
},
"version": "eedd0c093a39ec030ceb9a31ddcc6bd705515088fe2d6da867214ac1adba07e5"
}
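The output array contains the URL of the finished MP4, which you can download directly (delivery URLs may eventually expire, so save the file if you want to keep it):
curl -o squish.mp4 "https://replicate.delivery/xezq/Cz4ndSSET2LRIRXXL9hmfaTkrGzlt41sIdfILB5IZzAQSHaUA/R8_Wan_00001.mp4"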
Logs
Random seed set to: 1598448377
2025-03-19T13:07:58Z | INFO | [ Initiating ] chunk_size=150M dest=/tmp/tmp0j3_5xxe/weights url=https://replicate.delivery/xezq/9DjUprMiHXZkFRQQqy3VbZBxAzV963P08m0RZZwkw1Dt7eMKA/trained_model.tar
2025-03-19T13:08:02Z | INFO | [ Complete ] dest=/tmp/tmp0j3_5xxe/weights size="359 MB" total_elapsed=3.376s url=https://replicate.delivery/xezq/9DjUprMiHXZkFRQQqy3VbZBxAzV963P08m0RZZwkw1Dt7eMKA/trained_model.tar
Checking inputs
✅ /tmp/inputs/image.png
====================================
Checking weights
✅ umt5_xxl_fp16.safetensors exists in ComfyUI/models/text_encoders
⏳ Downloading wan2.1_i2v_480p_14B_bf16.safetensors to ComfyUI/models/diffusion_models
✅ wan2.1_i2v_480p_14B_bf16.safetensors downloaded to ComfyUI/models/diffusion_models in 18.11s, size: 31270.88MB
✅ wan_2.1_vae.safetensors exists in ComfyUI/models/vae
✅ clip_vision_h.safetensors exists in ComfyUI/models/clip_vision
✅ 14b_02c3ebfa71932e569775343580ab386c.safetensors exists in loras directory
====================================
Running workflow
[ComfyUI] got prompt
Executing node 39, title: Load VAE, class type: VAELoader
[ComfyUI] Using pytorch attention in VAE
[ComfyUI] Using pytorch attention in VAE
[ComfyUI] VAE load device: cuda:0, offload device: cpu, dtype: torch.bfloat16
Executing node 55, title: Load Image, class type: LoadImage
Executing node 56, title: Width and height for scaling image to ideal resolution 🪴, class type: Width and height for scaling image to ideal resolution 🪴
Executing node 57, title: 🔧 Image Resize, class type: ImageResize+
Executing node 60, title: Load CLIP Vision, class type: CLIPVisionLoader
[ComfyUI] Requested to load CLIPVisionModelProjection
Executing node 59, title: CLIP Vision Encode, class type: CLIPVisionEncode
[ComfyUI] loaded completely 141327.4875 1208.09814453125 True
Executing node 38, title: Load CLIP, class type: CLIPLoader
[ComfyUI] CLIP/text encoder model load device: cuda:0, offload device: cpu, current: cpu, dtype: torch.float16
[ComfyUI] Requested to load WanTEModel
Executing node 7, title: CLIP Text Encode (Negative Prompt), class type: CLIPTextEncode
[ComfyUI] loaded completely 139855.3869140625 10835.4765625 True
Executing node 37, title: Load Diffusion Model, class type: UNETLoader
[ComfyUI] model weight dtype torch.float16, manual cast: None
[ComfyUI] model_type FLOW
Executing node 54, title: WanVideo Tea Cache (native), class type: WanVideoTeaCacheKJ
Executing node 49, title: Load LoRA, class type: LoraLoader
[ComfyUI] Requested to load WanTEModel
Executing node 6, title: CLIP Text Encode (Positive Prompt), class type: CLIPTextEncode
[ComfyUI] loaded completely 139853.3869140625 10835.4765625 True
Executing node 58, title: WanImageToVideo, class type: WanImageToVideo
[ComfyUI] Requested to load WanVAE
[ComfyUI] loaded completely 125099.8056602478 242.02829551696777 True
Executing node 48, title: ModelSamplingSD3, class type: ModelSamplingSD3
Executing node 53, title: WanVideo Enhance A Video (native), class type: WanVideoEnhanceAVideoKJ
Executing node 3, title: KSampler, class type: KSampler
[ComfyUI] Requested to load WAN21
[ComfyUI] loaded completely 122640.68833397522 31269.802368164062 True
[ComfyUI]
[ComfyUI] 0%| | 0/30 [00:00<?, ?it/s]
[ComfyUI] 3%|▎ | 1/30 [00:08<04:03, 8.41s/it]
[ComfyUI] 7%|▋ | 2/30 [00:18<04:26, 9.53s/it]
[ComfyUI] 10%|█ | 3/30 [00:29<04:27, 9.90s/it]
[ComfyUI] TeaCache: Initialized
[ComfyUI]
[ComfyUI] 13%|█▎ | 4/30 [00:41<04:46, 11.04s/it]
[ComfyUI] 20%|██ | 6/30 [00:52<03:12, 8.04s/it]
[ComfyUI] 23%|██▎ | 7/30 [01:03<03:21, 8.77s/it]
[ComfyUI] 30%|███ | 9/30 [01:14<02:32, 7.28s/it]
[ComfyUI] 37%|███▋ | 11/30 [01:24<02:04, 6.55s/it]
[ComfyUI] 43%|████▎ | 13/30 [01:35<01:44, 6.14s/it]
[ComfyUI] 50%|█████ | 15/30 [01:46<01:28, 5.88s/it]
[ComfyUI] 57%|█████▋ | 17/30 [01:57<01:14, 5.71s/it]
[ComfyUI] 63%|██████▎ | 19/30 [02:07<01:01, 5.61s/it]
[ComfyUI] 70%|███████ | 21/30 [02:18<00:49, 5.54s/it]
[ComfyUI] 77%|███████▋ | 23/30 [02:18<00:26, 3.85s/it]
[ComfyUI] 80%|████████ | 24/30 [02:29<00:30, 5.09s/it]
[ComfyUI] 87%|████████▋ | 26/30 [02:29<00:13, 3.38s/it]
[ComfyUI] 87%|████████▋ | 26/30 [02:40<00:13, 3.38s/it]
[ComfyUI] 90%|█████████ | 27/30 [02:40<00:14, 4.81s/it]
[ComfyUI] 97%|█████████▋| 29/30 [02:51<00:05, 5.01s/it]
[ComfyUI] 100%|██████████| 30/30 [03:01<00:00, 6.16s/it]
Executing node 8, title: VAE Decode, class type: VAEDecode
Executing node 50, title: Video Combine 🎥🅥🅗🅢, class type: VHS_VideoCombine
[ComfyUI] 100%|██████████| 30/30 [03:01<00:00, 6.06s/it]
[ComfyUI] Prompt executed in 204.27 seconds
outputs: {'50': {'gifs': [{'filename': 'R8_Wan_00001.mp4', 'subfolder': '', 'type': 'output', 'format': 'video/h264-mp4', 'frame_rate': 16.0, 'workflow': 'R8_Wan_00001.png', 'fullpath': '/tmp/outputs/R8_Wan_00001.mp4'}]}}
====================================
R8_Wan_00001.png
R8_Wan_00001.mp4