Readme
This model doesn't have a readme.
pixray text2image (future branch)
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run pixray/text2image-future using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"pixray/text2image-future:42615782823e77be5ef6d36270ae021c7b1883a189a9277ef699190eb24fd93a",
{
input: {
drawer: "pixel",
prompts: "evil medical device from a post apocalyptic veterinary clinic #pixelart",
settings: "# random number seed can be a word or number\nseed: reference\n# higher quality than default\nquality: better\n# smooth out the result a bit\ncustom_loss: smoothness:0.5\n# enable transparency in image\ntransparent: true\n# how much to encourage transparency (can also be negative) \ntransparent_weight: 0.1\n\n"
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run pixray/text2image-future using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"pixray/text2image-future:42615782823e77be5ef6d36270ae021c7b1883a189a9277ef699190eb24fd93a",
input={
"drawer": "pixel",
"prompts": "evil medical device from a post apocalyptic veterinary clinic #pixelart",
"settings": "# random number seed can be a word or number\nseed: reference\n# higher quality than default\nquality: better\n# smooth out the result a bit\ncustom_loss: smoothness:0.5\n# enable transparency in image\ntransparent: true\n# how much to encourage transparency (can also be negative) \ntransparent_weight: 0.1\n\n"
}
)
# The pixray/text2image-future model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
# https://replicate.com/pixray/text2image-future/api#output-schema
print(item)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run pixray/text2image-future using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "42615782823e77be5ef6d36270ae021c7b1883a189a9277ef699190eb24fd93a",
"input": {
"drawer": "pixel",
"prompts": "evil medical device from a post apocalyptic veterinary clinic #pixelart",
"settings": "# random number seed can be a word or number\\nseed: reference\\n# higher quality than default\\nquality: better\\n# smooth out the result a bit\\ncustom_loss: smoothness:0.5\\n# enable transparency in image\\ntransparent: true\\n# how much to encourage transparency (can also be negative) \\ntransparent_weight: 0.1\\n\\n"
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Pull and run pixray/text2image-future using Cog (this will download the full model and run it in your local environment):
cog predict r8.im/pixray/text2image-future@sha256:42615782823e77be5ef6d36270ae021c7b1883a189a9277ef699190eb24fd93a \
-i 'drawer="pixel"' \
-i 'prompts="evil medical device from a post apocalyptic veterinary clinic #pixelart"' \
-i $'settings="# random number seed can be a word or number\\nseed: reference\\n# higher quality than default\\nquality: better\\n# smooth out the result a bit\\ncustom_loss: smoothness:0.5\\n# enable transparency in image\\ntransparent: true\\n# how much to encourage transparency (can also be negative) \\ntransparent_weight: 0.1\\n\\n"'
To learn more, take a look at the Cog documentation.
Pull and run pixray/text2image-future using Docker (this will download the full model and run it in your local environment):
docker run -d -p 5000:5000 --gpus=all r8.im/pixray/text2image-future@sha256:42615782823e77be5ef6d36270ae021c7b1883a189a9277ef699190eb24fd93a
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "drawer": "pixel",
      "prompts": "evil medical device from a post apocalyptic veterinary clinic #pixelart",
      "settings": "# random number seed can be a word or number\\nseed: reference\\n# higher quality than default\\nquality: better\\n# smooth out the result a bit\\ncustom_loss: smoothness:0.5\\n# enable transparency in image\\ntransparent: true\\n# how much to encourage transparency (can also be negative) \\ntransparent_weight: 0.1\\n\\n"
    }
  }' \
  http://localhost:5000/predictions
Add a payment method to run this model.
Each run costs approximately $0.14. Alternatively, try out our featured models for free.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2022-03-10T01:25:39.824286Z",
"created_at": "2022-03-10T01:16:41.185854Z",
"data_removed": false,
"error": null,
"id": "4porzbc32fbv7ezhe7u7tprg5i",
"input": {
"drawer": "pixel",
"prompts": "evil medical device from a post apocalyptic veterinary clinic #pixelart",
"settings": "# random number seed can be a word or number\nseed: reference\n# higher quality than default\nquality: better\n# smooth out the result a bit\ncustom_loss: smoothness:0.5\n# enable transparency in image\ntransparent: true\n# how much to encourage transparency (can also be negative) \ntransparent_weight: 0.1\n\n"
},
"logs": "---> BasePixrayPredictor Predict\nUsing seed: 3903845079\nRunning pixeldrawer with 80x45 grid\n\n 0%| | 0.00/244M [00:00<?, ?iB/s]\n 0%|▏ | 1.19M/244M [00:00<00:21, 11.9MiB/s]\n 5%|█▉ | 12.6M/244M [00:00<00:14, 16.3MiB/s]\n 14%|█████▏ | 33.6M/244M [00:00<00:09, 22.6MiB/s]\n 21%|███████▉ | 50.6M/244M [00:00<00:06, 30.6MiB/s]\n 27%|██████████▍ | 67.0M/244M [00:00<00:04, 40.6MiB/s]\n 34%|████████████▊ | 82.4M/244M [00:00<00:03, 52.4MiB/s]\n 44%|████████████████▉ | 106M/244M [00:00<00:02, 68.7MiB/s]\n 53%|████████████████████▌ | 128M/244M [00:00<00:01, 87.0MiB/s]\n 61%|████████████████████████▌ | 149M/244M [00:00<00:00, 106MiB/s]\n 71%|████████████████████████████▍ | 173M/244M [00:01<00:00, 129MiB/s]\n 79%|███████████████████████████████▊ | 194M/244M [00:01<00:00, 146MiB/s]\n 88%|███████████████████████████████████▏ | 214M/244M [00:01<00:00, 161MiB/s]\n 96%|██████████████████████████████████████▍ | 235M/244M [00:01<00:00, 171MiB/s]\n100%|████████████████████████████████████████| 244M/244M [00:01<00:00, 187MiB/s]\nLoaded CLIP RN50: 224x224 and 102.01M params\nLoaded CLIP ViT-B/32: 224x224 and 151.28M params\nLoaded CLIP ViT-B/16: 224x224 and 149.62M params\nUsing device: cuda:0\n\nOptimising using: Adam\nUsing text prompts: ['evil medical device from a post apocalyptic veterinary clinic #pixelart']\nusing custom losses: smoothness:0.5\n0it [00:00, ?it/s]\niter: 0, loss: 3.23, losses: 1.01, 0.0859, 0.928, 0.0617, 0.904, 0.0643, 0.1, 0.0725 (-0=>3.227)\n\n\n0it [00:00, ?it/s]/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. 
See the documentation of nn.Upsample for details.\n warnings.warn(\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.88, losses: 0.916, 0.0816, 0.799, 0.061, 0.798, 0.0613, 0.0905, 0.0707 (-0=>2.878)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.73, losses: 0.874, 0.0823, 0.753, 0.0629, 0.74, 0.0628, 0.0843, 0.0704 (-0=>2.73)\n\n\n0it [00:01, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.62, losses: 0.848, 0.0824, 0.724, 0.0635, 0.685, 0.0644, 0.0787, 0.0711 (-0=>2.617)\n\n\n0it [00:01, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.53, losses: 0.818, 0.0824, 0.695, 0.0662, 0.66, 0.0663, 0.0731, 0.0716 (-3=>2.533)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.53, losses: 0.815, 0.0837, 0.7, 0.0643, 0.66, 0.0655, 0.0681, 0.0704 (-2=>2.493)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.46, losses: 0.786, 0.083, 0.676, 0.0673, 0.648, 0.0676, 0.0642, 0.0714 (-1=>2.428)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.46, losses: 0.781, 0.0844, 0.682, 0.0658, 0.648, 0.0672, 0.0608, 0.0695 (-1=>2.412)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.46, losses: 0.788, 0.0835, 0.682, 0.0654, 0.645, 0.0668, 0.0579, 0.0716 (-2=>2.376)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.41, losses: 0.771, 0.0833, 0.665, 0.067, 0.631, 0.0677, 0.0552, 0.0682 (-3=>2.364)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.35, losses: 0.746, 0.0847, 0.65, 0.067, 0.619, 0.0692, 0.053, 0.0624 (-0=>2.352)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.35, losses: 0.748, 0.0844, 0.651, 0.067, 0.616, 0.0691, 0.051, 0.0596 (-1=>2.331)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.36, losses: 0.756, 
0.0844, 0.651, 0.0662, 0.618, 0.069, 0.0491, 0.0617 (-11=>2.331)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.36, losses: 0.756, 0.0839, 0.654, 0.0666, 0.622, 0.0688, 0.0472, 0.061 (-6=>2.31)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.36, losses: 0.753, 0.0854, 0.652, 0.0665, 0.623, 0.0677, 0.0458, 0.0638 (-6=>2.305)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.33, losses: 0.741, 0.0847, 0.646, 0.0665, 0.614, 0.0693, 0.0447, 0.0621 (-5=>2.297)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.31, losses: 0.734, 0.0842, 0.641, 0.0675, 0.609, 0.0699, 0.0437, 0.061 (-15=>2.297)\n\n\n0it [00:00, ?it/s]\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.3, losses: 0.736, 0.0851, 0.633, 0.0673, 0.606, 0.0703, 0.0428, 0.0597 (-3=>2.289)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.31, losses: 0.741, 0.0849, 0.637, 0.0668, 0.61, 0.0689, 0.0418, 0.0622 (-5=>2.28)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.32, losses: 0.745, 0.0844, 0.642, 0.0659, 0.613, 0.0683, 0.0409, 0.0629 (-15=>2.28)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.26, losses: 0.716, 0.0857, 0.624, 0.0677, 0.599, 0.0703, 0.0401, 0.0618 (-0=>2.265)\n\n\n0it [00:01, ?it/s]\n0it [00:17, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.33, losses: 0.746, 0.085, 0.646, 0.0659, 0.616, 0.0683, 0.0392, 0.0641 (-1=>2.255)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.27, losses: 0.724, 0.0844, 0.628, 0.0674, 0.601, 0.0704, 0.0386, 0.0578 (-5=>2.252)\n\n\nDropping learning rate\n0it [00:01, ?it/s]\n0it [00:17, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.3, losses: 0.739, 0.085, 0.634, 0.0671, 0.606, 0.0693, 0.0384, 0.0602 (-1=>2.264)\n\n\n0it [00:01, ?it/s]\n0it [00:16, 
?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.26, losses: 0.716, 0.0849, 0.627, 0.0678, 0.599, 0.07, 0.0383, 0.0565 (-5=>2.25)\n\n\n0it [00:01, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.25, losses: 0.718, 0.0857, 0.619, 0.0679, 0.592, 0.0703, 0.0382, 0.056 (-0=>2.247)\n\n\n0it [00:01, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.27, losses: 0.719, 0.0846, 0.628, 0.067, 0.6, 0.0704, 0.0382, 0.0594 (-5=>2.244)\n\n\n0it [00:01, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.27, losses: 0.724, 0.0859, 0.63, 0.0669, 0.598, 0.0703, 0.0381, 0.0555 (-1=>2.241)\n\n\n0it [00:01, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.26, losses: 0.72, 0.0853, 0.625, 0.0674, 0.597, 0.0703, 0.0381, 0.0581 (-11=>2.241)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.25, losses: 0.716, 0.0855, 0.621, 0.0681, 0.593, 0.0701, 0.0381, 0.0574 (-21=>2.241)\n\n\n0it [00:00, ?it/s]\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-31=>2.241)\n\n\n0it [00:00, ?it/s]\n0it [00:00, ?it/s]",
"metrics": {
"predict_time": 538.420743,
"total_time": 538.638432
},
"output": [
"https://replicate.delivery/mgxm/ba9efa8b-0536-484d-bb61-c22d801e7ee1/tempfile.png",
"https://replicate.delivery/mgxm/808b18a0-2632-47b3-b74e-72d62602d03a/tempfile.png",
"https://replicate.delivery/mgxm/615eb647-7688-4636-aecd-f39119ddc241/tempfile.png",
"https://replicate.delivery/mgxm/0be4c412-fbf5-43bc-a3f7-c15d0e334b09/tempfile.png",
"https://replicate.delivery/mgxm/bfdd74ed-3bf4-4696-bfbc-59bd2273cdd4/tempfile.png",
"https://replicate.delivery/mgxm/6797d76f-5e30-48cc-898c-454a2759d59b/tempfile.png",
"https://replicate.delivery/mgxm/e0c4f446-5ec0-4ca8-bd3e-a6b87c607097/tempfile.png",
"https://replicate.delivery/mgxm/9a4db59e-ed71-4d90-aee2-057ad3e813e6/tempfile.png",
"https://replicate.delivery/mgxm/173ba724-6b33-4f1d-ae90-a5749d8d8b10/tempfile.png",
"https://replicate.delivery/mgxm/8e4c4aa2-1b9a-472a-8c8d-6ef98b0ea8ba/tempfile.png",
"https://replicate.delivery/mgxm/025527a9-ca98-4731-857e-996512e4bc38/tempfile.png",
"https://replicate.delivery/mgxm/23012112-765f-4961-9a72-25f6ef716fd2/tempfile.png",
"https://replicate.delivery/mgxm/2a8dc4f5-ae14-4f96-9bec-893067d31c2c/tempfile.png",
"https://replicate.delivery/mgxm/343ed30a-ad54-410a-9f42-e7b144c4d194/tempfile.png",
"https://replicate.delivery/mgxm/2d6f7f08-1738-4280-aa4b-b1c4a4d821a9/tempfile.png",
"https://replicate.delivery/mgxm/078b8255-7ea7-4f9c-8ddb-093cf3b908d6/tempfile.png",
"https://replicate.delivery/mgxm/a35a83af-5ca2-482a-9387-eec3d2e335c7/tempfile.png",
"https://replicate.delivery/mgxm/7db822f6-2ed6-4bfa-b2aa-78c366982c39/tempfile.png",
"https://replicate.delivery/mgxm/38f81049-d86b-4bd9-b93a-41fa3b25850b/tempfile.png",
"https://replicate.delivery/mgxm/0fcc4303-c779-493b-92d1-3ec5157f274f/tempfile.png",
"https://replicate.delivery/mgxm/29c8c9dd-d7a0-41d0-97f1-23115f6a795f/tempfile.png",
"https://replicate.delivery/mgxm/e527168a-625f-496c-b949-54e647ebcfda/tempfile.png",
"https://replicate.delivery/mgxm/d75fb245-a66f-4012-b25d-5658acd2ef90/tempfile.png",
"https://replicate.delivery/mgxm/b89eef0b-9952-4c01-aa07-554600250659/tempfile.png",
"https://replicate.delivery/mgxm/213b5ab1-8013-42ad-adc9-30f42295ec34/tempfile.png",
"https://replicate.delivery/mgxm/86b6363c-21cd-4c86-a929-2d0cd098b415/tempfile.png",
"https://replicate.delivery/mgxm/9e225cb0-a6f0-456b-b2ed-116a8d7e6646/tempfile.png",
"https://replicate.delivery/mgxm/918247df-9b61-48d6-9a1c-0b26994f79fd/tempfile.png",
"https://replicate.delivery/mgxm/209549da-c442-42c1-b96f-e010ae14f07a/tempfile.png",
"https://replicate.delivery/mgxm/03247e3f-2265-48ef-9915-4a25b131a727/tempfile.png",
"https://replicate.delivery/mgxm/55ebb21e-006f-4b53-a489-1f968c7d64e6/tempfile.png"
],
"started_at": "2022-03-10T01:16:41.403543Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/4porzbc32fbv7ezhe7u7tprg5i",
"cancel": "https://api.replicate.com/v1/predictions/4porzbc32fbv7ezhe7u7tprg5i/cancel"
},
"version": "0d01ba09e8fa182455bc3ccc7b11b834356645dbcfa2712d6e5a4d5615a7a9f0"
}
---> BasePixrayPredictor Predict
Using seed: 3903845079
Running pixeldrawer with 80x45 grid
0%| | 0.00/244M [00:00<?, ?iB/s]
0%|▏ | 1.19M/244M [00:00<00:21, 11.9MiB/s]
5%|█▉ | 12.6M/244M [00:00<00:14, 16.3MiB/s]
14%|█████▏ | 33.6M/244M [00:00<00:09, 22.6MiB/s]
21%|███████▉ | 50.6M/244M [00:00<00:06, 30.6MiB/s]
27%|██████████▍ | 67.0M/244M [00:00<00:04, 40.6MiB/s]
34%|████████████▊ | 82.4M/244M [00:00<00:03, 52.4MiB/s]
44%|████████████████▉ | 106M/244M [00:00<00:02, 68.7MiB/s]
53%|████████████████████▌ | 128M/244M [00:00<00:01, 87.0MiB/s]
61%|████████████████████████▌ | 149M/244M [00:00<00:00, 106MiB/s]
71%|████████████████████████████▍ | 173M/244M [00:01<00:00, 129MiB/s]
79%|███████████████████████████████▊ | 194M/244M [00:01<00:00, 146MiB/s]
88%|███████████████████████████████████▏ | 214M/244M [00:01<00:00, 161MiB/s]
96%|██████████████████████████████████████▍ | 235M/244M [00:01<00:00, 171MiB/s]
100%|████████████████████████████████████████| 244M/244M [00:01<00:00, 187MiB/s]
Loaded CLIP RN50: 224x224 and 102.01M params
Loaded CLIP ViT-B/32: 224x224 and 151.28M params
Loaded CLIP ViT-B/16: 224x224 and 149.62M params
Using device: cuda:0
Optimising using: Adam
Using text prompts: ['evil medical device from a post apocalyptic veterinary clinic #pixelart']
using custom losses: smoothness:0.5
0it [00:00, ?it/s]
iter: 0, loss: 3.23, losses: 1.01, 0.0859, 0.928, 0.0617, 0.904, 0.0643, 0.1, 0.0725 (-0=>3.227)
0it [00:00, ?it/s]/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
warnings.warn(
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 10, loss: 2.88, losses: 0.916, 0.0816, 0.799, 0.061, 0.798, 0.0613, 0.0905, 0.0707 (-0=>2.878)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.73, losses: 0.874, 0.0823, 0.753, 0.0629, 0.74, 0.0628, 0.0843, 0.0704 (-0=>2.73)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 30, loss: 2.62, losses: 0.848, 0.0824, 0.724, 0.0635, 0.685, 0.0644, 0.0787, 0.0711 (-0=>2.617)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.53, losses: 0.818, 0.0824, 0.695, 0.0662, 0.66, 0.0663, 0.0731, 0.0716 (-3=>2.533)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 50, loss: 2.53, losses: 0.815, 0.0837, 0.7, 0.0643, 0.66, 0.0655, 0.0681, 0.0704 (-2=>2.493)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.46, losses: 0.786, 0.083, 0.676, 0.0673, 0.648, 0.0676, 0.0642, 0.0714 (-1=>2.428)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 70, loss: 2.46, losses: 0.781, 0.0844, 0.682, 0.0658, 0.648, 0.0672, 0.0608, 0.0695 (-1=>2.412)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.46, losses: 0.788, 0.0835, 0.682, 0.0654, 0.645, 0.0668, 0.0579, 0.0716 (-2=>2.376)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 90, loss: 2.41, losses: 0.771, 0.0833, 0.665, 0.067, 0.631, 0.0677, 0.0552, 0.0682 (-3=>2.364)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.35, losses: 0.746, 0.0847, 0.65, 0.067, 0.619, 0.0692, 0.053, 0.0624 (-0=>2.352)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 110, loss: 2.35, losses: 0.748, 0.0844, 0.651, 0.067, 0.616, 0.0691, 0.051, 0.0596 (-1=>2.331)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.36, losses: 0.756, 0.0844, 0.651, 0.0662, 0.618, 0.069, 0.0491, 0.0617 (-11=>2.331)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 130, loss: 2.36, losses: 0.756, 0.0839, 0.654, 0.0666, 0.622, 0.0688, 0.0472, 0.061 (-6=>2.31)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 2.36, losses: 0.753, 0.0854, 0.652, 0.0665, 0.623, 0.0677, 0.0458, 0.0638 (-6=>2.305)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 150, loss: 2.33, losses: 0.741, 0.0847, 0.646, 0.0665, 0.614, 0.0693, 0.0447, 0.0621 (-5=>2.297)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 2.31, losses: 0.734, 0.0842, 0.641, 0.0675, 0.609, 0.0699, 0.0437, 0.061 (-15=>2.297)
0it [00:00, ?it/s]
0it [00:15, ?it/s]
0it [00:00, ?it/s]
iter: 170, loss: 2.3, losses: 0.736, 0.0851, 0.633, 0.0673, 0.606, 0.0703, 0.0428, 0.0597 (-3=>2.289)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 2.31, losses: 0.741, 0.0849, 0.637, 0.0668, 0.61, 0.0689, 0.0418, 0.0622 (-5=>2.28)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 190, loss: 2.32, losses: 0.745, 0.0844, 0.642, 0.0659, 0.613, 0.0683, 0.0409, 0.0629 (-15=>2.28)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 2.26, losses: 0.716, 0.0857, 0.624, 0.0677, 0.599, 0.0703, 0.0401, 0.0618 (-0=>2.265)
0it [00:01, ?it/s]
0it [00:17, ?it/s]
0it [00:00, ?it/s]
iter: 210, loss: 2.33, losses: 0.746, 0.085, 0.646, 0.0659, 0.616, 0.0683, 0.0392, 0.0641 (-1=>2.255)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 2.27, losses: 0.724, 0.0844, 0.628, 0.0674, 0.601, 0.0704, 0.0386, 0.0578 (-5=>2.252)
Dropping learning rate
0it [00:01, ?it/s]
0it [00:17, ?it/s]
0it [00:00, ?it/s]
iter: 230, loss: 2.3, losses: 0.739, 0.085, 0.634, 0.0671, 0.606, 0.0693, 0.0384, 0.0602 (-1=>2.264)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 2.26, losses: 0.716, 0.0849, 0.627, 0.0678, 0.599, 0.07, 0.0383, 0.0565 (-5=>2.25)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 250, loss: 2.25, losses: 0.718, 0.0857, 0.619, 0.0679, 0.592, 0.0703, 0.0382, 0.056 (-0=>2.247)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 260, loss: 2.27, losses: 0.719, 0.0846, 0.628, 0.067, 0.6, 0.0704, 0.0382, 0.0594 (-5=>2.244)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 270, loss: 2.27, losses: 0.724, 0.0859, 0.63, 0.0669, 0.598, 0.0703, 0.0381, 0.0555 (-1=>2.241)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 280, loss: 2.26, losses: 0.72, 0.0853, 0.625, 0.0674, 0.597, 0.0703, 0.0381, 0.0581 (-11=>2.241)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 290, loss: 2.25, losses: 0.716, 0.0855, 0.621, 0.0681, 0.593, 0.0701, 0.0381, 0.0574 (-21=>2.241)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 300, finished (-31=>2.241)
0it [00:00, ?it/s]
0it [00:00, ?it/s]
This example was created by a different version, pixray/text2image-future:0d01ba09.
This model costs approximately $0.14 to run on Replicate, or 7 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia T4 GPU hardware. Predictions typically complete within 11 minutes. The predict time for this model varies significantly based on the inputs.
This model doesn't have a readme.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
---> BasePixrayPredictor Predict
Using seed: 3903845079
Running pixeldrawer with 80x45 grid
0%| | 0.00/244M [00:00<?, ?iB/s]
0%|▏ | 1.19M/244M [00:00<00:21, 11.9MiB/s]
5%|█▉ | 12.6M/244M [00:00<00:14, 16.3MiB/s]
14%|█████▏ | 33.6M/244M [00:00<00:09, 22.6MiB/s]
21%|███████▉ | 50.6M/244M [00:00<00:06, 30.6MiB/s]
27%|██████████▍ | 67.0M/244M [00:00<00:04, 40.6MiB/s]
34%|████████████▊ | 82.4M/244M [00:00<00:03, 52.4MiB/s]
44%|████████████████▉ | 106M/244M [00:00<00:02, 68.7MiB/s]
53%|████████████████████▌ | 128M/244M [00:00<00:01, 87.0MiB/s]
61%|████████████████████████▌ | 149M/244M [00:00<00:00, 106MiB/s]
71%|████████████████████████████▍ | 173M/244M [00:01<00:00, 129MiB/s]
79%|███████████████████████████████▊ | 194M/244M [00:01<00:00, 146MiB/s]
88%|███████████████████████████████████▏ | 214M/244M [00:01<00:00, 161MiB/s]
96%|██████████████████████████████████████▍ | 235M/244M [00:01<00:00, 171MiB/s]
100%|████████████████████████████████████████| 244M/244M [00:01<00:00, 187MiB/s]
Loaded CLIP RN50: 224x224 and 102.01M params
Loaded CLIP ViT-B/32: 224x224 and 151.28M params
Loaded CLIP ViT-B/16: 224x224 and 149.62M params
Using device: cuda:0
Optimising using: Adam
Using text prompts: ['evil medical device from a post apocalyptic veterinary clinic #pixelart']
using custom losses: smoothness:0.5
0it [00:00, ?it/s]
iter: 0, loss: 3.23, losses: 1.01, 0.0859, 0.928, 0.0617, 0.904, 0.0643, 0.1, 0.0725 (-0=>3.227)
0it [00:00, ?it/s]/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
warnings.warn(
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 10, loss: 2.88, losses: 0.916, 0.0816, 0.799, 0.061, 0.798, 0.0613, 0.0905, 0.0707 (-0=>2.878)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.73, losses: 0.874, 0.0823, 0.753, 0.0629, 0.74, 0.0628, 0.0843, 0.0704 (-0=>2.73)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 30, loss: 2.62, losses: 0.848, 0.0824, 0.724, 0.0635, 0.685, 0.0644, 0.0787, 0.0711 (-0=>2.617)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.53, losses: 0.818, 0.0824, 0.695, 0.0662, 0.66, 0.0663, 0.0731, 0.0716 (-3=>2.533)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 50, loss: 2.53, losses: 0.815, 0.0837, 0.7, 0.0643, 0.66, 0.0655, 0.0681, 0.0704 (-2=>2.493)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.46, losses: 0.786, 0.083, 0.676, 0.0673, 0.648, 0.0676, 0.0642, 0.0714 (-1=>2.428)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 70, loss: 2.46, losses: 0.781, 0.0844, 0.682, 0.0658, 0.648, 0.0672, 0.0608, 0.0695 (-1=>2.412)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.46, losses: 0.788, 0.0835, 0.682, 0.0654, 0.645, 0.0668, 0.0579, 0.0716 (-2=>2.376)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 90, loss: 2.41, losses: 0.771, 0.0833, 0.665, 0.067, 0.631, 0.0677, 0.0552, 0.0682 (-3=>2.364)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.35, losses: 0.746, 0.0847, 0.65, 0.067, 0.619, 0.0692, 0.053, 0.0624 (-0=>2.352)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 110, loss: 2.35, losses: 0.748, 0.0844, 0.651, 0.067, 0.616, 0.0691, 0.051, 0.0596 (-1=>2.331)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.36, losses: 0.756, 0.0844, 0.651, 0.0662, 0.618, 0.069, 0.0491, 0.0617 (-11=>2.331)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 130, loss: 2.36, losses: 0.756, 0.0839, 0.654, 0.0666, 0.622, 0.0688, 0.0472, 0.061 (-6=>2.31)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 2.36, losses: 0.753, 0.0854, 0.652, 0.0665, 0.623, 0.0677, 0.0458, 0.0638 (-6=>2.305)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 150, loss: 2.33, losses: 0.741, 0.0847, 0.646, 0.0665, 0.614, 0.0693, 0.0447, 0.0621 (-5=>2.297)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 2.31, losses: 0.734, 0.0842, 0.641, 0.0675, 0.609, 0.0699, 0.0437, 0.061 (-15=>2.297)
0it [00:00, ?it/s]
0it [00:15, ?it/s]
0it [00:00, ?it/s]
iter: 170, loss: 2.3, losses: 0.736, 0.0851, 0.633, 0.0673, 0.606, 0.0703, 0.0428, 0.0597 (-3=>2.289)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 2.31, losses: 0.741, 0.0849, 0.637, 0.0668, 0.61, 0.0689, 0.0418, 0.0622 (-5=>2.28)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 190, loss: 2.32, losses: 0.745, 0.0844, 0.642, 0.0659, 0.613, 0.0683, 0.0409, 0.0629 (-15=>2.28)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 2.26, losses: 0.716, 0.0857, 0.624, 0.0677, 0.599, 0.0703, 0.0401, 0.0618 (-0=>2.265)
0it [00:01, ?it/s]
0it [00:17, ?it/s]
0it [00:00, ?it/s]
iter: 210, loss: 2.33, losses: 0.746, 0.085, 0.646, 0.0659, 0.616, 0.0683, 0.0392, 0.0641 (-1=>2.255)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 2.27, losses: 0.724, 0.0844, 0.628, 0.0674, 0.601, 0.0704, 0.0386, 0.0578 (-5=>2.252)
Dropping learning rate
0it [00:01, ?it/s]
0it [00:17, ?it/s]
0it [00:00, ?it/s]
iter: 230, loss: 2.3, losses: 0.739, 0.085, 0.634, 0.0671, 0.606, 0.0693, 0.0384, 0.0602 (-1=>2.264)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 2.26, losses: 0.716, 0.0849, 0.627, 0.0678, 0.599, 0.07, 0.0383, 0.0565 (-5=>2.25)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 250, loss: 2.25, losses: 0.718, 0.0857, 0.619, 0.0679, 0.592, 0.0703, 0.0382, 0.056 (-0=>2.247)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 260, loss: 2.27, losses: 0.719, 0.0846, 0.628, 0.067, 0.6, 0.0704, 0.0382, 0.0594 (-5=>2.244)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 270, loss: 2.27, losses: 0.724, 0.0859, 0.63, 0.0669, 0.598, 0.0703, 0.0381, 0.0555 (-1=>2.241)
0it [00:01, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 280, loss: 2.26, losses: 0.72, 0.0853, 0.625, 0.0674, 0.597, 0.0703, 0.0381, 0.0581 (-11=>2.241)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 290, loss: 2.25, losses: 0.716, 0.0855, 0.621, 0.0681, 0.593, 0.0701, 0.0381, 0.0574 (-21=>2.241)
0it [00:00, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 300, finished (-31=>2.241)
0it [00:00, ?it/s]
0it [00:00, ?it/s]