Example input:

{
  "color_fix_type": "adain",
  "image": "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
  "sr_scale": 4,
  "steps": 45,
  "t_max": 0.6667,
  "t_min": 0.3333,
  "tile_diffusion": false,
  "tile_diffusion_size": 512,
  "tile_diffusion_stride": 256,
  "tile_vae": false,
  "vae_decoder_tile_size": 224,
  "vae_encoder_tile_size": 1024
}

Install Replicate's Node.js client library:

npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_Rz3**********************************
This is your API token. Keep it to yourself.
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run csslc/ccsr using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "csslc/ccsr:b66be6a6d779e57163b0489329f50ea4b56bcbcc7b3d48ef39bae6b812a7f947",
  {
    input: {
      color_fix_type: "adain",
      image: "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
      sr_scale: 4,
      steps: 45,
      t_max: 0.6667,
      t_min: 0.3333,
      tile_diffusion: false,
      tile_diffusion_size: 512,
      tile_diffusion_stride: 256,
      tile_vae: false,
      vae_decoder_tile_size: 224,
      vae_encoder_tile_size: 1024
    }
  }
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"

// To write the file to disk (fs.promises.writeFile accepts the output stream):
await fs.promises.writeFile("my-image.png", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate

Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_Rz3**********************************
This is your API token. Keep it to yourself.
import replicate
Run csslc/ccsr using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "csslc/ccsr:b66be6a6d779e57163b0489329f50ea4b56bcbcc7b3d48ef39bae6b812a7f947",
    input={
        "color_fix_type": "adain",
        "image": "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
        "sr_scale": 4,
        "steps": 45,
        "t_max": 0.6667,
        "t_min": 0.3333,
        "tile_diffusion": False,
        "tile_diffusion_size": 512,
        "tile_diffusion_stride": 256,
        "tile_vae": False,
        "vae_decoder_tile_size": 224,
        "vae_encoder_tile_size": 1024
    }
)

# To access the file URL:
print(output.url())
#=> "http://example.com"

# To write the file to disk:
with open("my-image.png", "wb") as file:
    file.write(output.read())
To learn more, take a look at the guide on getting started with Python.
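replicate.run waits for the prediction to finish before returning. If you would rather start a prediction and check on it later, the Python client also exposes the predictions API directly. The following is a minimal sketch of that pattern using the same model version as above; the polling loop, two-second interval, and the reduced set of inputs are illustrative choices, not part of the official example.

import time

import replicate

# Start the prediction without blocking; only a few of the inputs from the
# example above are passed here for brevity.
prediction = replicate.predictions.create(
    version="b66be6a6d779e57163b0489329f50ea4b56bcbcc7b3d48ef39bae6b812a7f947",
    input={
        "image": "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
        "sr_scale": 4,
        "steps": 45,
    },
)

# Poll until the prediction reaches a terminal state.
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(2)          # illustrative polling interval
    prediction.reload()    # refresh status, logs, and output from the API

if prediction.status == "succeeded":
    print(prediction.output)  # URL of the upscaled image
else:
    print(prediction.error)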
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_Rz3**********************************
This is your API token. Keep it to yourself.
Run csslc/ccsr using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "csslc/ccsr:b66be6a6d779e57163b0489329f50ea4b56bcbcc7b3d48ef39bae6b812a7f947",
    "input": {
      "color_fix_type": "adain",
      "image": "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
      "sr_scale": 4,
      "steps": 45,
      "t_max": 0.6667,
      "t_min": 0.3333,
      "tile_diffusion": false,
      "tile_diffusion_size": 512,
      "tile_diffusion_stride": 256,
      "tile_vae": false,
      "vae_decoder_tile_size": 224,
      "vae_encoder_tile_size": 1024
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
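The same request can be made from any HTTP client, not just curl. Below is a minimal sketch using Python's requests library (an assumption; it is not part of Replicate's docs). With the Prefer: wait header the API holds the connection open until the prediction finishes or the wait times out; if the returned prediction is still running, you can poll the URL in its urls.get field.

import os
import time

import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
    "Prefer": "wait",
}

body = {
    "version": "csslc/ccsr:b66be6a6d779e57163b0489329f50ea4b56bcbcc7b3d48ef39bae6b812a7f947",
    "input": {
        "image": "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
        "sr_scale": 4,
        "steps": 45,
    },
}

# Create the prediction; with "Prefer: wait" this usually returns the finished result.
prediction = requests.post(
    "https://api.replicate.com/v1/predictions", headers=headers, json=body
).json()

# If it is still running, poll the "get" URL until it reaches a terminal state.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)  # illustrative polling interval
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["output"])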
Example prediction response:

{
"id": "ku43wdtbeicfhxbrc3odcehdx4",
"model": "csslc/ccsr",
"version": "b66be6a6d779e57163b0489329f50ea4b56bcbcc7b3d48ef39bae6b812a7f947",
"input": {
"color_fix_type": "adain",
"image": "https://replicate.delivery/pbxt/KEsL8qx725guGDgsIH3NX0G8TElQ9Fkp9JuzM0GWwCWXUi3u/19.jpg",
"sr_scale": 4,
"steps": 45,
"t_max": 0.6667,
"t_min": 0.3333,
"tile_diffusion": false,
"tile_diffusion_size": 512,
"tile_diffusion_stride": 256,
"tile_vae": false,
"vae_decoder_tile_size": 224,
"vae_encoder_tile_size": 1024
},
"logs": "ControlLDM: Running in eps-prediction mode\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. 
Query dim is 320, context_dim is 1024 and using 5 heads.\nDiffusionWrapper has 865.91 M params.\nmaking attention of type 'vanilla-xformers' with 512 in_channels\nbuilding MemoryEfficientAttnBlock with 512 in_channels...\nWorking with z of shape (1, 4, 32, 32) = 4096 dimensions.\nmaking attention of type 'vanilla-xformers' with 512 in_channels\nbuilding MemoryEfficientAttnBlock with 512 in_channels...\nopen_clip_pytorch_model.bin: 0%| | 0.00/3.94G [00:00<?, ?B/s]\nopen_clip_pytorch_model.bin: 0%| | 10.5M/3.94G [00:00<02:40, 24.4MB/s]\nopen_clip_pytorch_model.bin: 1%| | 41.9M/3.94G [00:00<00:44, 87.9MB/s]\nopen_clip_pytorch_model.bin: 2%|▏ | 73.4M/3.94G [00:00<00:28, 136MB/s] \nopen_clip_pytorch_model.bin: 3%|▎ | 105M/3.94G [00:00<00:22, 167MB/s] \nopen_clip_pytorch_model.bin: 3%|▎ | 136M/3.94G [00:00<00:19, 190MB/s]\nopen_clip_pytorch_model.bin: 4%|▍ | 168M/3.94G [00:01<00:18, 207MB/s]\nopen_clip_pytorch_model.bin: 5%|▌ | 199M/3.94G [00:01<00:16, 221MB/s]\nopen_clip_pytorch_model.bin: 6%|▌ | 231M/3.94G [00:01<00:16, 224MB/s]\nopen_clip_pytorch_model.bin: 7%|▋ | 262M/3.94G [00:01<00:15, 232MB/s]\nopen_clip_pytorch_model.bin: 8%|▊ | 304M/3.94G [00:01<00:14, 251MB/s]\nopen_clip_pytorch_model.bin: 9%|▊ | 336M/3.94G [00:01<00:14, 249MB/s]\nopen_clip_pytorch_model.bin: 9%|▉ | 367M/3.94G [00:01<00:15, 237MB/s]\nopen_clip_pytorch_model.bin: 10%|█ | 398M/3.94G [00:02<00:14, 239MB/s]\nopen_clip_pytorch_model.bin: 11%|█ | 430M/3.94G [00:02<00:14, 244MB/s]\nopen_clip_pytorch_model.bin: 12%|█▏ | 461M/3.94G [00:02<00:14, 240MB/s]\nopen_clip_pytorch_model.bin: 12%|█▏ | 493M/3.94G [00:02<00:14, 244MB/s]\nopen_clip_pytorch_model.bin: 13%|█▎ | 524M/3.94G [00:02<00:13, 247MB/s]\nopen_clip_pytorch_model.bin: 14%|█▍ | 556M/3.94G [00:02<00:13, 246MB/s]\nopen_clip_pytorch_model.bin: 15%|█▍ | 587M/3.94G [00:02<00:13, 252MB/s]\nopen_clip_pytorch_model.bin: 16%|█▌ | 619M/3.94G [00:02<00:13, 249MB/s]\nopen_clip_pytorch_model.bin: 16%|█▋ | 650M/3.94G [00:03<00:13, 249MB/s]\nopen_clip_pytorch_model.bin: 17%|█▋ | 682M/3.94G [00:03<00:13, 247MB/s]\nopen_clip_pytorch_model.bin: 18%|█▊ | 713M/3.94G [00:03<00:13, 248MB/s]\nopen_clip_pytorch_model.bin: 19%|█▉ | 744M/3.94G [00:03<00:12, 247MB/s]\nopen_clip_pytorch_model.bin: 20%|█▉ | 776M/3.94G [00:03<00:12, 244MB/s]\nopen_clip_pytorch_model.bin: 20%|██ | 807M/3.94G [00:03<00:12, 245MB/s]\nopen_clip_pytorch_model.bin: 21%|██▏ | 839M/3.94G [00:03<00:12, 241MB/s]\nopen_clip_pytorch_model.bin: 22%|██▏ | 870M/3.94G [00:03<00:12, 243MB/s]\nopen_clip_pytorch_model.bin: 23%|██▎ | 902M/3.94G [00:04<00:12, 247MB/s]\nopen_clip_pytorch_model.bin: 24%|██▎ | 933M/3.94G [00:04<00:12, 248MB/s]\nopen_clip_pytorch_model.bin: 24%|██▍ | 965M/3.94G [00:04<00:12, 247MB/s]\nopen_clip_pytorch_model.bin: 25%|██▌ | 996M/3.94G [00:04<00:11, 251MB/s]\nopen_clip_pytorch_model.bin: 26%|██▌ | 1.03G/3.94G [00:04<00:11, 251MB/s]\nopen_clip_pytorch_model.bin: 27%|██▋ | 1.06G/3.94G [00:04<00:11, 251MB/s]\nopen_clip_pytorch_model.bin: 28%|██▊ | 1.09G/3.94G [00:04<00:11, 242MB/s]\nopen_clip_pytorch_model.bin: 28%|██▊ | 1.12G/3.94G [00:04<00:11, 238MB/s]\nopen_clip_pytorch_model.bin: 29%|██▉ | 1.15G/3.94G [00:05<00:12, 226MB/s]\nopen_clip_pytorch_model.bin: 30%|███ | 1.18G/3.94G [00:05<00:11, 233MB/s]\nopen_clip_pytorch_model.bin: 31%|███ | 1.22G/3.94G [00:05<00:13, 210MB/s]\nopen_clip_pytorch_model.bin: 32%|███▏ | 1.25G/3.94G [00:05<00:12, 213MB/s]\nopen_clip_pytorch_model.bin: 32%|███▏ | 1.28G/3.94G [00:05<00:11, 223MB/s]\nopen_clip_pytorch_model.bin: 33%|███▎ | 1.31G/3.94G [00:05<00:13, 
197MB/s]\nopen_clip_pytorch_model.bin: 34%|███▍ | 1.33G/3.94G [00:05<00:13, 198MB/s]\nopen_clip_pytorch_model.bin: 35%|███▍ | 1.36G/3.94G [00:06<00:12, 209MB/s]\nopen_clip_pytorch_model.bin: 35%|███▌ | 1.39G/3.94G [00:06<00:11, 216MB/s]\nopen_clip_pytorch_model.bin: 36%|███▌ | 1.43G/3.94G [00:06<00:13, 193MB/s]\nopen_clip_pytorch_model.bin: 37%|███▋ | 1.46G/3.94G [00:06<00:11, 208MB/s]\nopen_clip_pytorch_model.bin: 38%|███▊ | 1.49G/3.94G [00:06<00:10, 228MB/s]\nopen_clip_pytorch_model.bin: 39%|███▊ | 1.52G/3.94G [00:06<00:11, 217MB/s]\nopen_clip_pytorch_model.bin: 39%|███▉ | 1.55G/3.94G [00:07<00:13, 174MB/s]\nopen_clip_pytorch_model.bin: 40%|████ | 1.58G/3.94G [00:07<00:12, 186MB/s]\nopen_clip_pytorch_model.bin: 41%|████ | 1.61G/3.94G [00:07<00:11, 196MB/s]\nopen_clip_pytorch_model.bin: 42%|████▏ | 1.65G/3.94G [00:07<00:11, 207MB/s]\nopen_clip_pytorch_model.bin: 43%|████▎ | 1.68G/3.94G [00:07<00:10, 218MB/s]\nopen_clip_pytorch_model.bin: 43%|████▎ | 1.71G/3.94G [00:07<00:09, 227MB/s]\nopen_clip_pytorch_model.bin: 44%|████▍ | 1.74G/3.94G [00:07<00:09, 229MB/s]\nopen_clip_pytorch_model.bin: 45%|████▍ | 1.77G/3.94G [00:08<00:09, 239MB/s]\nopen_clip_pytorch_model.bin: 46%|████▌ | 1.80G/3.94G [00:08<00:08, 245MB/s]\nopen_clip_pytorch_model.bin: 47%|████▋ | 1.84G/3.94G [00:08<00:08, 241MB/s]\nopen_clip_pytorch_model.bin: 47%|████▋ | 1.87G/3.94G [00:08<00:08, 242MB/s]\nopen_clip_pytorch_model.bin: 48%|████▊ | 1.90G/3.94G [00:08<00:08, 241MB/s]\nopen_clip_pytorch_model.bin: 49%|████▉ | 1.93G/3.94G [00:08<00:08, 245MB/s]\nopen_clip_pytorch_model.bin: 50%|████▉ | 1.96G/3.94G [00:08<00:08, 242MB/s]\nopen_clip_pytorch_model.bin: 51%|█████ | 1.99G/3.94G [00:08<00:08, 241MB/s]\nopen_clip_pytorch_model.bin: 51%|█████▏ | 2.02G/3.94G [00:09<00:08, 239MB/s]\nopen_clip_pytorch_model.bin: 52%|█████▏ | 2.06G/3.94G [00:09<00:07, 242MB/s]\nopen_clip_pytorch_model.bin: 53%|█████▎ | 2.09G/3.94G [00:09<00:07, 242MB/s]\nopen_clip_pytorch_model.bin: 54%|█████▎ | 2.12G/3.94G [00:09<00:07, 235MB/s]\nopen_clip_pytorch_model.bin: 54%|█████▍ | 2.15G/3.94G [00:09<00:07, 240MB/s]\nopen_clip_pytorch_model.bin: 55%|█████▌ | 2.18G/3.94G [00:09<00:07, 245MB/s]\nopen_clip_pytorch_model.bin: 56%|█████▌ | 2.21G/3.94G [00:09<00:07, 245MB/s]\nopen_clip_pytorch_model.bin: 57%|█████▋ | 2.24G/3.94G [00:09<00:06, 247MB/s]\nopen_clip_pytorch_model.bin: 58%|█████▊ | 2.28G/3.94G [00:10<00:06, 247MB/s]\nopen_clip_pytorch_model.bin: 58%|█████▊ | 2.31G/3.94G [00:10<00:06, 248MB/s]\nopen_clip_pytorch_model.bin: 59%|█████▉ | 2.34G/3.94G [00:10<00:06, 251MB/s]\nopen_clip_pytorch_model.bin: 60%|██████ | 2.37G/3.94G [00:10<00:06, 248MB/s]\nopen_clip_pytorch_model.bin: 61%|██████ | 2.40G/3.94G [00:10<00:05, 261MB/s]\nopen_clip_pytorch_model.bin: 62%|██████▏ | 2.43G/3.94G [00:10<00:06, 242MB/s]\nopen_clip_pytorch_model.bin: 62%|██████▏ | 2.46G/3.94G [00:10<00:06, 246MB/s]\nopen_clip_pytorch_model.bin: 63%|██████▎ | 2.50G/3.94G [00:10<00:05, 250MB/s]\nopen_clip_pytorch_model.bin: 64%|██████▍ | 2.53G/3.94G [00:11<00:05, 245MB/s]\nopen_clip_pytorch_model.bin: 65%|██████▍ | 2.56G/3.94G [00:11<00:09, 151MB/s]\nopen_clip_pytorch_model.bin: 66%|██████▌ | 2.59G/3.94G [00:11<00:07, 170MB/s]\nopen_clip_pytorch_model.bin: 66%|██████▋ | 2.62G/3.94G [00:11<00:07, 184MB/s]\nopen_clip_pytorch_model.bin: 67%|██████▋ | 2.65G/3.94G [00:11<00:07, 182MB/s]\nopen_clip_pytorch_model.bin: 68%|██████▊ | 2.68G/3.94G [00:12<00:06, 192MB/s]\nopen_clip_pytorch_model.bin: 69%|██████▉ | 2.72G/3.94G [00:12<00:05, 206MB/s]\nopen_clip_pytorch_model.bin: 70%|██████▉ | 2.75G/3.94G 
[00:12<00:06, 191MB/s]\nopen_clip_pytorch_model.bin: 70%|███████ | 2.77G/3.94G [00:12<00:06, 192MB/s]\nopen_clip_pytorch_model.bin: 71%|███████ | 2.79G/3.94G [00:12<00:05, 195MB/s]\nopen_clip_pytorch_model.bin: 72%|███████▏ | 2.82G/3.94G [00:12<00:05, 210MB/s]\nopen_clip_pytorch_model.bin: 72%|███████▏ | 2.85G/3.94G [00:12<00:05, 216MB/s]\nopen_clip_pytorch_model.bin: 73%|███████▎ | 2.88G/3.94G [00:13<00:04, 227MB/s]\nopen_clip_pytorch_model.bin: 74%|███████▍ | 2.92G/3.94G [00:13<00:04, 237MB/s]\nopen_clip_pytorch_model.bin: 75%|███████▍ | 2.95G/3.94G [00:13<00:04, 246MB/s]\nopen_clip_pytorch_model.bin: 75%|███████▌ | 2.98G/3.94G [00:13<00:04, 229MB/s]\nopen_clip_pytorch_model.bin: 76%|███████▋ | 3.01G/3.94G [00:13<00:04, 232MB/s]\nopen_clip_pytorch_model.bin: 77%|███████▋ | 3.04G/3.94G [00:13<00:03, 236MB/s]\nopen_clip_pytorch_model.bin: 78%|███████▊ | 3.07G/3.94G [00:13<00:03, 238MB/s]\nopen_clip_pytorch_model.bin: 79%|███████▊ | 3.10G/3.94G [00:13<00:03, 226MB/s]\nopen_clip_pytorch_model.bin: 79%|███████▉ | 3.14G/3.94G [00:14<00:03, 228MB/s]\nopen_clip_pytorch_model.bin: 80%|████████ | 3.17G/3.94G [00:14<00:03, 236MB/s]\nopen_clip_pytorch_model.bin: 81%|████████ | 3.20G/3.94G [00:14<00:03, 237MB/s]\nopen_clip_pytorch_model.bin: 82%|████████▏ | 3.23G/3.94G [00:14<00:03, 236MB/s]\nopen_clip_pytorch_model.bin: 83%|████████▎ | 3.26G/3.94G [00:14<00:02, 240MB/s]\nopen_clip_pytorch_model.bin: 83%|████████▎ | 3.29G/3.94G [00:14<00:02, 236MB/s]\nopen_clip_pytorch_model.bin: 84%|████████▍ | 3.32G/3.94G [00:14<00:02, 235MB/s]\nopen_clip_pytorch_model.bin: 85%|████████▌ | 3.36G/3.94G [00:15<00:02, 238MB/s]\nopen_clip_pytorch_model.bin: 86%|████████▌ | 3.39G/3.94G [00:15<00:02, 236MB/s]\nopen_clip_pytorch_model.bin: 87%|████████▋ | 3.42G/3.94G [00:15<00:02, 241MB/s]\nopen_clip_pytorch_model.bin: 87%|████████▋ | 3.45G/3.94G [00:15<00:02, 215MB/s]\nopen_clip_pytorch_model.bin: 88%|████████▊ | 3.48G/3.94G [00:15<00:02, 186MB/s]\nopen_clip_pytorch_model.bin: 89%|████████▉ | 3.51G/3.94G [00:15<00:02, 192MB/s]\nopen_clip_pytorch_model.bin: 90%|████████▉ | 3.54G/3.94G [00:15<00:01, 205MB/s]\nopen_clip_pytorch_model.bin: 91%|█████████ | 3.58G/3.94G [00:16<00:01, 221MB/s]\nopen_clip_pytorch_model.bin: 91%|█████████▏| 3.61G/3.94G [00:16<00:01, 222MB/s]\nopen_clip_pytorch_model.bin: 92%|█████████▏| 3.64G/3.94G [00:16<00:01, 229MB/s]\nopen_clip_pytorch_model.bin: 93%|█████████▎| 3.67G/3.94G [00:16<00:01, 223MB/s]\nopen_clip_pytorch_model.bin: 94%|█████████▍| 3.70G/3.94G [00:16<00:01, 230MB/s]\nopen_clip_pytorch_model.bin: 95%|█████████▍| 3.73G/3.94G [00:16<00:00, 227MB/s]\nopen_clip_pytorch_model.bin: 95%|█████████▌| 3.76G/3.94G [00:16<00:00, 220MB/s]\nopen_clip_pytorch_model.bin: 96%|█████████▌| 3.80G/3.94G [00:17<00:00, 199MB/s]\nopen_clip_pytorch_model.bin: 97%|█████████▋| 3.82G/3.94G [00:17<00:00, 157MB/s]\nopen_clip_pytorch_model.bin: 97%|█████████▋| 3.84G/3.94G [00:17<00:00, 147MB/s]\nopen_clip_pytorch_model.bin: 98%|█████████▊| 3.87G/3.94G [00:17<00:00, 167MB/s]\nopen_clip_pytorch_model.bin: 99%|█████████▊| 3.89G/3.94G [00:17<00:00, 164MB/s]\nopen_clip_pytorch_model.bin: 99%|█████████▉| 3.91G/3.94G [00:17<00:00, 151MB/s]\nopen_clip_pytorch_model.bin: 100%|█████████▉| 3.93G/3.94G [00:18<00:00, 162MB/s]\nopen_clip_pytorch_model.bin: 100%|██████████| 3.94G/3.94G [00:18<00:00, 218MB/s]\n/root/.pyenv/versions/3.11.7/lib/python3.11/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' 
instead.\nwarnings.warn(\n/root/.pyenv/versions/3.11.7/lib/python3.11/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=VGG16_Weights.IMAGENET1K_V1`. You can also use `weights=VGG16_Weights.DEFAULT` to get the most up-to-date weights.\nwarnings.warn(msg)\nloaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. 
Query dim is 1280, context_dim is 1024 and using 20 heads.\nUsing seed: 60190\nGlobal seed set to 60190\ntimesteps used in spaced sampler:\n[0, 23, 45, 68, 91, 114, 136, 159, 182, 204, 227, 250, 272, 295, 318, 341, 363, 386, 409, 431, 454, 477, 499, 522, 545, 568, 590, 613, 636, 658, 681, 704, 727, 749, 772, 795, 817, 840, 863, 885, 908, 931, 954, 976, 999]\nSpaced Sampler: 0%| | 0/45 [00:00<?, ?it/s]/src/model/q_sampler.py:467: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\ntao_index = torch.tensor(torch.round(index * t_max), dtype=torch.int64)\nSpaced Sampler: 0%| | 0/45 [00:00<?, ?it/s]\u001b[A\nSpaced Sampler: 0%| | 0/45 [00:00<?, ?it/s]\nSpaced Sampler: 2%|▏ | 1/45 [00:00<00:08, 5.21it/s]\u001b[A\nSpaced Sampler: 4%|▍ | 2/45 [00:00<00:08, 5.23it/s]\u001b[A\nSpaced Sampler: 7%|▋ | 3/45 [00:00<00:08, 5.23it/s]\u001b[A\nSpaced Sampler: 9%|▉ | 4/45 [00:00<00:07, 5.23it/s]\u001b[A\nSpaced Sampler: 11%|█ | 5/45 [00:00<00:07, 5.23it/s]\u001b[A\nSpaced Sampler: 13%|█▎ | 6/45 [00:01<00:07, 5.22it/s]\u001b[A\nSpaced Sampler: 16%|█▌ | 7/45 [00:01<00:07, 5.22it/s]\u001b[A\nSpaced Sampler: 18%|█▊ | 8/45 [00:01<00:07, 5.22it/s]\u001b[A\nSpaced Sampler: 20%|██ | 9/45 [00:01<00:06, 5.21it/s]\u001b[A\nSpaced Sampler: 22%|██▏ | 10/45 [00:01<00:06, 5.22it/s]\u001b[A\nSpaced Sampler: 24%|██▍ | 11/45 [00:02<00:06, 5.22it/s]\u001b[A\nSpaced Sampler: 27%|██▋ | 12/45 [00:02<00:06, 5.22it/s]\u001b[A\nSpaced Sampler: 29%|██▉ | 13/45 [00:02<00:06, 5.22it/s]\u001b[A\nSpaced Sampler: 31%|███ | 14/45 [00:02<00:05, 5.22it/s]\u001b[A\nSpaced Sampler: 33%|███▎ | 15/45 [00:02<00:05, 5.22it/s]\u001b[A\nSpaced Sampler: 33%|███▎ | 15/45 [00:02<00:05, 5.22it/s]",
"output": "https://replicate.delivery/pbxt/uuPXTeVX8h30Wq4Bp16TJcCbI0Vf5M5sUO2nVucR4mjruMNSA/out.png",
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2024-01-17T00:02:49.77581Z",
"started_at": "2024-01-17T00:04:14.550557Z",
"completed_at": "2024-01-17T00:05:00.216889Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/ku43wdtbeicfhxbrc3odcehdx4/cancel",
"get": "https://api.replicate.com/v1/predictions/ku43wdtbeicfhxbrc3odcehdx4"
},
"metrics": {
"predict_time": 45.666332,
"total_time": 130.441079
}
}
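The timestamps and metrics in the response show where the time went: predict_time covers only the model run, while total_time also includes the period spent queued and booting before started_at. A small illustrative sketch, assuming the JSON above has been parsed into a dict named prediction (for example with json.loads):

from datetime import datetime

# `prediction` is assumed to be the parsed JSON response shown above.

def parse_ts(ts: str) -> datetime:
    # The timestamps above are UTC with a trailing "Z" and fractional seconds.
    return datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%fZ")

queued = parse_ts(prediction["started_at"]) - parse_ts(prediction["created_at"])
running = parse_ts(prediction["completed_at"]) - parse_ts(prediction["started_at"])

print(f"waited {queued.total_seconds():.1f}s before starting")  # queue + setup
print(f"ran for {running.total_seconds():.1f}s")                # close to metrics.predict_time
print("output image:", prediction["output"])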
