Example input:
{
  "background_upsampler": "DiffBIR",
  "background_upsampler_tile": 400,
  "background_upsampler_tile_stride": 400,
  "color_fix_type": "wavelet",
  "disable_preprocess_model": false,
  "face_detection_model": "retinaface_resnet50",
  "guidance_repeat": 5,
  "guidance_scale": 0,
  "guidance_space": "latent",
  "guidance_time_start": 1001,
  "guidance_time_stop": -1,
  "has_aligned": false,
  "input": "https://replicate.delivery/pbxt/JgdLVwRXXl4oaGqmF4Wdl7vOapnTlay32dE7B3UNgxSwylvQ/Audrey_Hepburn.jpg",
  "only_center_face": false,
  "reload_restoration_model": false,
  "repeat_times": 1,
  "restoration_model_type": "general_scenes",
  "seed": 231,
  "steps": 50,
  "super_resolution_factor": 2,
  "tile_size": 512,
  "tile_stride": 256,
  "tiled": false,
  "upscaling_model_type": "faces",
  "use_guidance": false
}

Install Replicate's Node.js client library:

npm install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_4Ud**********************************
This is your API token. Keep it to yourself.
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run zsxkib/diffbir using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "zsxkib/diffbir:51ed1464d8bbbaca811153b051d3b09ab42f0bdeb85804ae26ba323d7a66a4ac",
  {
    input: {
      background_upsampler: "DiffBIR",
      background_upsampler_tile: 400,
      background_upsampler_tile_stride: 400,
      color_fix_type: "wavelet",
      disable_preprocess_model: false,
      face_detection_model: "retinaface_resnet50",
      guidance_repeat: 5,
      guidance_scale: 0,
      guidance_space: "latent",
      guidance_time_start: 1001,
      guidance_time_stop: -1,
      has_aligned: false,
      input: "https://replicate.delivery/pbxt/JgdLVwRXXl4oaGqmF4Wdl7vOapnTlay32dE7B3UNgxSwylvQ/Audrey_Hepburn.jpg",
      only_center_face: false,
      reload_restoration_model: false,
      repeat_times: 1,
      restoration_model_type: "general_scenes",
      seed: 231,
      steps: 50,
      super_resolution_factor: 2,
      tile_size: 512,
      tile_stride: 256,
      tiled: false,
      upscaling_model_type: "faces",
      use_guidance: false
    }
  }
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
await fs.promises.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_4Ud**********************************
This is your API token. Keep it to yourself.
import replicate
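The client reads your token from the REPLICATE_API_TOKEN environment variable. If you prefer to pass the token explicitly, a minimal sketch (assuming the Python client's Client class; not part of the model page) looks like this:

import os
import replicate

# Sketch: construct a client with an explicit token instead of relying on the
# module-level default, which reads REPLICATE_API_TOKEN from the environment.
client = replicate.Client(api_token=os.environ["REPLICATE_API_TOKEN"])
# client.run(...) accepts the same model identifier and input as replicate.run(...)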
Run zsxkib/diffbir using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "zsxkib/diffbir:51ed1464d8bbbaca811153b051d3b09ab42f0bdeb85804ae26ba323d7a66a4ac",
    input={
        "background_upsampler": "DiffBIR",
        "background_upsampler_tile": 400,
        "background_upsampler_tile_stride": 400,
        "color_fix_type": "wavelet",
        "disable_preprocess_model": False,
        "face_detection_model": "retinaface_resnet50",
        "guidance_repeat": 5,
        "guidance_scale": 0,
        "guidance_space": "latent",
        "guidance_time_start": 1001,
        "guidance_time_stop": -1,
        "has_aligned": False,
        "input": "https://replicate.delivery/pbxt/JgdLVwRXXl4oaGqmF4Wdl7vOapnTlay32dE7B3UNgxSwylvQ/Audrey_Hepburn.jpg",
        "only_center_face": False,
        "reload_restoration_model": False,
        "repeat_times": 1,
        "restoration_model_type": "general_scenes",
        "seed": 231,
        "steps": 50,
        "super_resolution_factor": 2,
        "tile_size": 512,
        "tile_stride": 256,
        "tiled": False,
        "upscaling_model_type": "faces",
        "use_guidance": False
    }
)
# To access the file URL:
print(output[0].url())
#=> "http://example.com"
# To write the file to disk:
with open("my-image.png", "wb") as file:
    file.write(output[0].read())
To learn more, take a look at the guide on getting started with Python.
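The run returns a list of files (the example prediction below contains three). A minimal sketch that writes each of them to disk, assuming the same output object as above and illustrative output-<n>.png filenames:

# Sketch: save every file returned by replicate.run, not just the first one.
# Filenames are illustrative; output items expose read(), as shown above.
for i, item in enumerate(output):
    with open(f"output-{i}.png", "wb") as f:
        f.write(item.read())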
Set the REPLICATE_API_TOKEN environment variable:

export REPLICATE_API_TOKEN=r8_4Ud**********************************
This is your API token. Keep it to yourself.
Run zsxkib/diffbir using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "zsxkib/diffbir:51ed1464d8bbbaca811153b051d3b09ab42f0bdeb85804ae26ba323d7a66a4ac",
    "input": {
      "background_upsampler": "DiffBIR",
      "background_upsampler_tile": 400,
      "background_upsampler_tile_stride": 400,
      "color_fix_type": "wavelet",
      "disable_preprocess_model": false,
      "face_detection_model": "retinaface_resnet50",
      "guidance_repeat": 5,
      "guidance_scale": 0,
      "guidance_space": "latent",
      "guidance_time_start": 1001,
      "guidance_time_stop": -1,
      "has_aligned": false,
      "input": "https://replicate.delivery/pbxt/JgdLVwRXXl4oaGqmF4Wdl7vOapnTlay32dE7B3UNgxSwylvQ/Audrey_Hepburn.jpg",
      "only_center_face": false,
      "reload_restoration_model": false,
      "repeat_times": 1,
      "restoration_model_type": "general_scenes",
      "seed": 231,
      "steps": 50,
      "super_resolution_factor": 2,
      "tile_size": 512,
      "tile_stride": 256,
      "tiled": false,
      "upscaling_model_type": "faces",
      "use_guidance": false
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Example output:
{
"id": "bmqkjzlb4dtxi3plebcqg6sffu",
"model": "zsxkib/diffbir",
"version": "51ed1464d8bbbaca811153b051d3b09ab42f0bdeb85804ae26ba323d7a66a4ac",
"input": {
"background_upsampler": "DiffBIR",
"background_upsampler_tile": 400,
"background_upsampler_tile_stride": 400,
"color_fix_type": "wavelet",
"disable_preprocess_model": false,
"face_detection_model": "retinaface_resnet50",
"guidance_repeat": 5,
"guidance_scale": 0,
"guidance_space": "latent",
"guidance_time_start": 1001,
"guidance_time_stop": -1,
"has_aligned": false,
"input": "https://replicate.delivery/pbxt/JgdLVwRXXl4oaGqmF4Wdl7vOapnTlay32dE7B3UNgxSwylvQ/Audrey_Hepburn.jpg",
"only_center_face": false,
"reload_restoration_model": false,
"repeat_times": 1,
"restoration_model_type": "general_scenes",
"seed": 231,
"steps": 50,
"super_resolution_factor": 2,
"tile_size": 512,
"tile_stride": 256,
"tiled": false,
"upscaling_model_type": "faces",
"use_guidance": false
},
"logs": "ckptckptckpt weights/face_full_v1.ckpt\nSwitching from mode 'FULL' to 'FACE'...\nBuilding and loading 'FACE' mode model...\nControlLDM: Running in eps-prediction mode\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. 
Query dim is 320, context_dim is 1024 and using 5 heads.\nDiffusionWrapper has 865.91 M params.\nmaking attention of type 'vanilla-xformers' with 512 in_channels\nbuilding MemoryEfficientAttnBlock with 512 in_channels...\nWorking with z of shape (1, 4, 32, 32) = 4096 dimensions.\nmaking attention of type 'vanilla-xformers' with 512 in_channels\nbuilding MemoryEfficientAttnBlock with 512 in_channels...\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off]\nLoading model from: /root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/lpips/weights/v0.1/alex.pth\nreload swinir model from weights/general_swinir_v1.ckpt\nENABLE XFORMERS!\nModel successfully switched to 'FACE' mode.\n{'bg_tile': 400,\n'bg_tile_stride': 400,\n'bg_upsampler': 'DiffBIR',\n'ckpt': 'weights/face_full_v1.ckpt',\n'color_fix_type': 'wavelet',\n'config': 'configs/model/cldm.yaml',\n'detection_model': 'retinaface_resnet50',\n'device': 'cuda',\n'disable_preprocess_model': False,\n'g_repeat': 5,\n'g_scale': 0.0,\n'g_space': 'latent',\n'g_t_start': 1001,\n'g_t_stop': -1,\n'has_aligned': False,\n'image_size': 512,\n'input': '/tmp/tmpwg3l1z7wAudrey_Hepburn.jpg',\n'only_center_face': False,\n'output': '.',\n'reload_swinir': False,\n'repeat_times': 1,\n'seed': 231,\n'show_lq': False,\n'skip_if_exist': False,\n'sr_scale': 2,\n'steps': 50,\n'swinir_ckpt': 'weights/general_swinir_v1.ckpt',\n'tile_size': 512,\n'tile_stride': 256,\n'tiled': False,\n'use_guidance': False}\nGlobal seed set to 231\n/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. 
The current behavior is equivalent to passing `weights=None`.\nwarnings.warn(msg)\nDownloading: \"https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth\" to /root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/facexlib/weights/detection_Resnet50_Final.pth\n 0%| | 0.00/104M [00:00<?, ?B/s]\n 37%|███▋ | 38.6M/104M [00:00<00:00, 405MB/s]\n 76%|███████▋ | 79.8M/104M [00:00<00:00, 421MB/s]\n100%|██████████| 104M/104M [00:00<00:00, 423MB/s]\nDownloading: \"https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth\" to /root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/facexlib/weights/parsing_parsenet.pth\n 0%| | 0.00/81.4M [00:00<?, ?B/s]\n 37%|███▋ | 30.4M/81.4M [00:00<00:00, 319MB/s]\n 87%|████████▋ | 70.5M/81.4M [00:00<00:00, 378MB/s]\n100%|██████████| 81.4M/81.4M [00:00<00:00, 378MB/s]\nControlLDM: Running in eps-prediction mode\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. 
Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nDiffusionWrapper has 865.91 M params.\nmaking attention of type 'vanilla-xformers' with 512 in_channels\nbuilding MemoryEfficientAttnBlock with 512 in_channels...\nWorking with z of shape (1, 4, 32, 32) = 4096 dimensions.\nmaking attention of type 'vanilla-xformers' with 512 in_channels\nbuilding MemoryEfficientAttnBlock with 512 in_channels...\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is None and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 320, context_dim is 1024 and using 5 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is None and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 640, context_dim is 1024 and using 10 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up MemoryEfficientCrossAttention. Query dim is 1280, context_dim is None and using 20 heads.\nSetting up MemoryEfficientCrossAttention. 
Query dim is 1280, context_dim is 1024 and using 20 heads.\nSetting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off]\nLoading model from: /root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/lpips/weights/v0.1/alex.pth\nreload swinir model from weights/general_swinir_v1.ckpt\ntimesteps used in spaced sampler:\n[0, 20, 41, 61, 82, 102, 122, 143, 163, 183, 204, 224, 245, 265, 285, 306, 326, 347, 367, 387, 408, 428, 449, 469, 489, 510, 530, 550, 571, 591, 612, 632, 652, 673, 693, 714, 734, 754, 775, 795, 816, 836, 856, 877, 897, 917, 938, 958, 979, 999]\nSpaced Sampler: 0%| | 0/50 [00:00<?, ?it/s]\nSpaced Sampler: 2%|▏ | 1/50 [00:00<00:10, 4.82it/s]\nSpaced Sampler: 6%|▌ | 3/50 [00:00<00:05, 8.76it/s]\nSpaced Sampler: 10%|█ | 5/50 [00:00<00:04, 10.31it/s]\nSpaced Sampler: 14%|█▍ | 7/50 [00:00<00:03, 11.08it/s]\nSpaced Sampler: 18%|█▊ | 9/50 [00:00<00:03, 11.51it/s]\nSpaced Sampler: 22%|██▏ | 11/50 [00:01<00:03, 11.78it/s]\nSpaced Sampler: 26%|██▌ | 13/50 [00:01<00:03, 11.95it/s]\nSpaced Sampler: 30%|███ | 15/50 [00:01<00:02, 12.06it/s]\nSpaced Sampler: 34%|███▍ | 17/50 [00:01<00:02, 12.11it/s]\nSpaced Sampler: 38%|███▊ | 19/50 [00:01<00:02, 12.16it/s]\nSpaced Sampler: 42%|████▏ | 21/50 [00:01<00:02, 12.20it/s]\nSpaced Sampler: 46%|████▌ | 23/50 [00:01<00:02, 12.23it/s]\nSpaced Sampler: 50%|█████ | 25/50 [00:02<00:02, 12.25it/s]\nSpaced Sampler: 54%|█████▍ | 27/50 [00:02<00:01, 12.27it/s]\nSpaced Sampler: 58%|█████▊ | 29/50 [00:02<00:01, 12.26it/s]\nSpaced Sampler: 62%|██████▏ | 31/50 [00:02<00:01, 12.25it/s]\nSpaced Sampler: 66%|██████▌ | 33/50 [00:02<00:01, 12.25it/s]\nSpaced Sampler: 70%|███████ | 35/50 [00:02<00:01, 12.26it/s]\nSpaced Sampler: 74%|███████▍ | 37/50 [00:03<00:01, 12.27it/s]\nSpaced Sampler: 78%|███████▊ | 39/50 [00:03<00:00, 12.27it/s]\nSpaced Sampler: 82%|████████▏ | 41/50 [00:03<00:00, 12.26it/s]\nSpaced Sampler: 86%|████████▌ | 43/50 [00:03<00:00, 12.24it/s]\nSpaced Sampler: 90%|█████████ | 45/50 [00:03<00:00, 12.24it/s]\nSpaced Sampler: 94%|█████████▍| 47/50 [00:03<00:00, 12.24it/s]\nSpaced Sampler: 98%|█████████▊| 49/50 [00:04<00:00, 12.25it/s]\nSpaced Sampler: 100%|██████████| 50/50 [00:04<00:00, 11.91it/s]\nupsampling the background image using DiffBIR...\ntimesteps used in spaced sampler:\n[0, 20, 41, 61, 82, 102, 122, 143, 163, 183, 204, 224, 245, 265, 285, 306, 326, 347, 367, 387, 408, 428, 449, 469, 489, 510, 530, 550, 571, 591, 612, 632, 652, 673, 693, 714, 734, 754, 775, 795, 816, 836, 856, 877, 897, 917, 938, 958, 979, 999]\nSpaced Sampler: 0%| | 0/50 [00:00<?, ?it/s]\nSpaced Sampler: 2%|▏ | 1/50 [00:00<00:44, 1.11it/s]\nSpaced Sampler: 4%|▍ | 2/50 [00:01<00:28, 1.67it/s]\nSpaced Sampler: 6%|▌ | 3/50 [00:01<00:23, 1.98it/s]\nSpaced Sampler: 8%|▊ | 4/50 [00:02<00:21, 2.18it/s]\nSpaced Sampler: 10%|█ | 5/50 [00:02<00:19, 2.30it/s]\nSpaced Sampler: 12%|█▏ | 6/50 [00:02<00:18, 2.38it/s]\nSpaced Sampler: 14%|█▍ | 7/50 [00:03<00:17, 2.44it/s]\nSpaced Sampler: 16%|█▌ | 8/50 [00:03<00:16, 2.48it/s]\nSpaced Sampler: 18%|█▊ | 9/50 [00:04<00:16, 2.51it/s]\nSpaced Sampler: 20%|██ | 10/50 [00:04<00:15, 2.53it/s]\nSpaced Sampler: 22%|██▏ | 11/50 [00:04<00:15, 2.54it/s]\nSpaced Sampler: 24%|██▍ | 12/50 [00:05<00:14, 2.55it/s]\nSpaced Sampler: 26%|██▌ | 13/50 [00:05<00:14, 2.55it/s]\nSpaced Sampler: 28%|██▊ | 14/50 [00:05<00:14, 2.56it/s]\nSpaced Sampler: 30%|███ | 15/50 [00:06<00:13, 2.56it/s]\nSpaced Sampler: 32%|███▏ | 16/50 [00:06<00:13, 2.56it/s]\nSpaced Sampler: 34%|███▍ | 17/50 [00:07<00:12, 2.56it/s]\nSpaced Sampler: 36%|███▌ | 18/50 [00:07<00:12, 
2.56it/s]\nSpaced Sampler: 38%|███▊ | 19/50 [00:07<00:12, 2.56it/s]\nSpaced Sampler: 40%|████ | 20/50 [00:08<00:11, 2.56it/s]\nSpaced Sampler: 42%|████▏ | 21/50 [00:08<00:11, 2.56it/s]\nSpaced Sampler: 44%|████▍ | 22/50 [00:09<00:10, 2.56it/s]\nSpaced Sampler: 46%|████▌ | 23/50 [00:09<00:10, 2.56it/s]\nSpaced Sampler: 48%|████▊ | 24/50 [00:09<00:10, 2.56it/s]\nSpaced Sampler: 50%|█████ | 25/50 [00:10<00:09, 2.56it/s]\nSpaced Sampler: 52%|█████▏ | 26/50 [00:10<00:09, 2.56it/s]\nSpaced Sampler: 54%|█████▍ | 27/50 [00:11<00:08, 2.56it/s]\nSpaced Sampler: 56%|█████▌ | 28/50 [00:11<00:08, 2.56it/s]\nSpaced Sampler: 58%|█████▊ | 29/50 [00:11<00:08, 2.56it/s]\nSpaced Sampler: 60%|██████ | 30/50 [00:12<00:07, 2.56it/s]\nSpaced Sampler: 62%|██████▏ | 31/50 [00:12<00:07, 2.56it/s]\nSpaced Sampler: 64%|██████▍ | 32/50 [00:12<00:07, 2.56it/s]\nSpaced Sampler: 66%|██████▌ | 33/50 [00:13<00:06, 2.56it/s]\nSpaced Sampler: 68%|██████▊ | 34/50 [00:13<00:06, 2.56it/s]\nSpaced Sampler: 70%|███████ | 35/50 [00:14<00:05, 2.56it/s]\nSpaced Sampler: 72%|███████▏ | 36/50 [00:14<00:05, 2.55it/s]\nSpaced Sampler: 74%|███████▍ | 37/50 [00:14<00:05, 2.56it/s]\nSpaced Sampler: 76%|███████▌ | 38/50 [00:15<00:04, 2.55it/s]\nSpaced Sampler: 78%|███████▊ | 39/50 [00:15<00:04, 2.55it/s]\nSpaced Sampler: 80%|████████ | 40/50 [00:16<00:03, 2.55it/s]\nSpaced Sampler: 82%|████████▏ | 41/50 [00:16<00:03, 2.55it/s]\nSpaced Sampler: 84%|████████▍ | 42/50 [00:16<00:03, 2.55it/s]\nSpaced Sampler: 86%|████████▌ | 43/50 [00:17<00:02, 2.55it/s]\nSpaced Sampler: 88%|████████▊ | 44/50 [00:17<00:02, 2.55it/s]\nSpaced Sampler: 90%|█████████ | 45/50 [00:18<00:01, 2.55it/s]\nSpaced Sampler: 92%|█████████▏| 46/50 [00:18<00:01, 2.55it/s]\nSpaced Sampler: 94%|█████████▍| 47/50 [00:18<00:01, 2.55it/s]\nSpaced Sampler: 96%|█████████▌| 48/50 [00:19<00:00, 2.55it/s]\nSpaced Sampler: 98%|█████████▊| 49/50 [00:19<00:00, 2.55it/s]\nSpaced Sampler: 100%|██████████| 50/50 [00:20<00:00, 2.55it/s]\nSpaced Sampler: 100%|██████████| 50/50 [00:20<00:00, 2.49it/s]\nFace image tmpwg3l1z7wAudrey_Hepburn saved to ./..",
"output": [
"https://replicate.delivery/pbxt/3hkSakaS9qpMPxMMmfjYdr8ZLRRKiUkGwdNIlS0r5bcM7s2IA/tmpwg3l1z7wAudrey_Hepburn_00.png",
"https://replicate.delivery/pbxt/boZvG5okpewhK6FY1YUeO2sehoy1FJoXW9IxBPifL42iZn1GB/tmpwg3l1z7wAudrey_Hepburn_00.png",
"https://replicate.delivery/pbxt/NlSQp8BS4WLxL13eERn20OJzbMYfKpDx4usqAkywlgZY2ZtRA/tmpwg3l1z7wAudrey_Hepburn.png"
],
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2023-10-12T12:49:17.922019Z",
"started_at": "2023-10-12T12:49:19.920227Z",
"completed_at": "2023-10-12T12:50:32.973606Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/bmqkjzlb4dtxi3plebcqg6sffu/cancel",
"get": "https://api.replicate.com/v1/predictions/bmqkjzlb4dtxi3plebcqg6sffu"
},
"metrics": {
"predict_time": 73.053379,
"total_time": 75.051587
}
}
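A prediction like the example above can be fetched again later by its id. A minimal sketch with the Python client, assuming replicate.predictions.get and attribute names that mirror the JSON fields shown:

import replicate

# Sketch: re-fetch the prediction above by its id and inspect the fields
# from the example response (status, output, metrics).
prediction = replicate.predictions.get("bmqkjzlb4dtxi3plebcqg6sffu")
print(prediction.status)   # "succeeded"
print(prediction.output)   # list of result file URLs, as in "output" above
print(prediction.metrics)  # e.g. {"predict_time": ..., "total_time": ...}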