Readme
See @pixray posts or join the project
Turn any description into pixel art
Run this model in Node.js with one line of code:
npm install replicate
First, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-text2pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"dribnet/pixray-text2pixel:d838a15c29f59f286c7f1caaf71db26f21f184419e80309c78a2689de319c6af",
{
input: {
aspect: "widescreen",
prompts: "Aliens destroying NYC skyline with lasers. #pixelart",
pixel_scale: 1
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
First, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run dribnet/pixray-text2pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"dribnet/pixray-text2pixel:d838a15c29f59f286c7f1caaf71db26f21f184419e80309c78a2689de319c6af",
input={
"aspect": "widescreen",
"prompts": "Aliens destroying NYC skyline with lasers. #pixelart",
"pixel_scale": 1
}
)
# The dribnet/pixray-text2pixel model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
# https://replicate.com/dribnet/pixray-text2pixel/api#output-schema
print(item)
To learn more, take a look at the guide on getting started with Python.
First, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run dribnet/pixray-text2pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "d838a15c29f59f286c7f1caaf71db26f21f184419e80309c78a2689de319c6af",
"input": {
"aspect": "widescreen",
"prompts": "Aliens destroying NYC skyline with lasers. #pixelart",
"pixel_scale": 1
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/dribnet/pixray-text2pixel@sha256:d838a15c29f59f286c7f1caaf71db26f21f184419e80309c78a2689de319c6af \
-i 'aspect="widescreen"' \
-i 'prompts="Aliens destroying NYC skyline with lasers. #pixelart"' \
-i 'pixel_scale=1'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/dribnet/pixray-text2pixel@sha256:d838a15c29f59f286c7f1caaf71db26f21f184419e80309c78a2689de319c6af
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "aspect": "widescreen",
      "prompts": "Aliens destroying NYC skyline with lasers. #pixelart",
      "pixel_scale": 1
    }
  }' \
  http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Add a payment method to run this model.
Each run costs approximately $0.17. Alternatively, try out our featured models for free.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2021-10-18T14:04:46.052011Z",
"created_at": "2021-10-18T13:57:23.402890Z",
"data_removed": false,
"error": null,
"id": "2v3fn7tdivaxthvcihsv4qocmq",
"input": {
"aspect": "widescreen",
"prompts": "Aliens destroying NYC skyline with lasers. #pixelart",
"pixel_scale": 1
},
"logs": "---> BasePixrayPredictor Predict\nUsing seed:\n3244908265805850817\nRunning pixeldrawer with 80x45 grid\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['Aliens destroying NYC skyline with lasers. #pixelart']\n\n0it [00:00, ?it/s]\n/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n warnings.warn(\niter: 0, loss: 2.98, losses: 0.98, 0.0797, 0.897, 0.0479, 0.925, 0.0489 (-0=>2.978)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.66, losses: 0.904, 0.0797, 0.791, 0.0474, 0.793, 0.0479 (-0=>2.663)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.55, losses: 0.861, 0.083, 0.75, 0.0482, 0.757, 0.0482 (-2=>2.536)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.41, losses: 0.812, 0.0844, 0.711, 0.0496, 0.705, 0.0487 (-2=>2.368)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.28, losses: 0.761, 0.0877, 0.662, 0.0538, 0.664, 0.0509 (-2=>2.276)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.33, losses: 0.793, 0.0868, 0.667, 0.0534, 0.68, 0.05 (-5=>2.23)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.16, losses: 0.725, 0.0875, 0.612, 0.0578, 0.626, 0.0523 (-0=>2.162)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.13, losses: 0.715, 0.0874, 0.601, 0.0588, 0.61, 0.0538 (-0=>2.126)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.12, losses: 0.71, 0.0884, 0.599, 0.0585, 0.607, 0.0546 (-2=>2.117)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.09, losses: 0.705, 
0.0881, 0.592, 0.0599, 0.591, 0.0552 (-2=>2.089)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.09, losses: 0.702, 0.0885, 0.588, 0.0596, 0.594, 0.0547 (-0=>2.087)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.08, losses: 0.7, 0.0874, 0.584, 0.0601, 0.59, 0.0554 (-0=>2.076)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.19, losses: 0.744, 0.0869, 0.618, 0.0577, 0.628, 0.0533 (-10=>2.076)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.08, losses: 0.698, 0.0875, 0.588, 0.0592, 0.588, 0.055 (-0=>2.076)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.2, losses: 0.752, 0.0879, 0.618, 0.0573, 0.628, 0.0532 (-3=>2.065)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.17, losses: 0.74, 0.0879, 0.613, 0.0576, 0.621, 0.054 (-13=>2.065)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.05, losses: 0.687, 0.0876, 0.575, 0.0612, 0.582, 0.0566 (-0=>2.051)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.06, losses: 0.696, 0.0881, 0.575, 0.061, 0.58, 0.0562 (-2=>2.044)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.04, losses: 0.683, 0.0891, 0.571, 0.0618, 0.574, 0.0569 (-0=>2.036)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.05, losses: 0.687, 0.0883, 0.575, 0.0608, 0.579, 0.0561 (-10=>2.036)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.04, losses: 0.683, 0.0887, 0.579, 0.0608, 0.575, 0.0566 (-20=>2.036)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.15, losses: 0.731, 0.0867, 0.606, 0.0583, 0.611, 0.0543 (-2=>2.024)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.03, losses: 0.681, 
0.0889, 0.568, 0.0617, 0.573, 0.0571 (-12=>2.024)\n\n0it [00:00, ?it/s]\nDropping learning rate\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.17, losses: 0.735, 0.087, 0.615, 0.0577, 0.621, 0.0537 (-4=>2.02)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.15, losses: 0.728, 0.0867, 0.61, 0.0584, 0.613, 0.0539 (-14=>2.02)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.14, losses: 0.725, 0.0876, 0.603, 0.059, 0.607, 0.0547 (-24=>2.02)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.02, losses: 0.679, 0.0877, 0.568, 0.0618, 0.566, 0.0576 (-7=>2.015)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.1, losses: 0.712, 0.0872, 0.592, 0.0607, 0.597, 0.0554 (-17=>2.015)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.15, losses: 0.729, 0.0866, 0.608, 0.0584, 0.618, 0.0542 (-27=>2.015)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.16, losses: 0.723, 0.0869, 0.614, 0.0591, 0.619, 0.0542 (-1=>2.013)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-6=>2.008)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]",
"metrics": {
"total_time": 442.649121
},
"output": [
{
"file": "https://replicate.delivery/mgxm/8ffcfae6-0de2-4899-aca7-d9a04e029039/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/738ed44f-eeb9-4b2b-be40-d2efddfa60c7/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/d8eee85c-650d-48ea-85c2-3c81819c9434/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/ef3f4cae-328a-49f1-a298-3404e48e2d58/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/c1d53c1c-7650-40a5-af60-e660d69552eb/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/9ad596f1-8082-43c5-b9f4-da401f5ff320/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/8805d912-3932-47e3-ad4e-795dda711204/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/4a7fc2ac-f9fa-482e-8d56-a0b71e48fe81/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/e7c04761-109f-45b1-a948-537f70f7d015/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/144e752f-1845-47cd-a52c-6175d232309b/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/c11c8442-d1c2-47e1-8589-5731d7a70871/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/0b3b5f5b-ebf4-4849-9604-1a1f6c2dcdc7/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/16f77eb7-0313-4a5b-8caf-24fc205b946f/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/243f275f-2344-4e3a-9c7e-afeca16f2bc6/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/082f35c0-b35c-4214-8708-dad689c00304/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/62411dfb-70b3-488a-874e-58be9e97a935/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/4899efbe-e0bb-4835-8072-92b8ba8a33b6/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/1c9fdeb0-d577-4009-86b3-8273b989dc87/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/041bc6c2-2317-4de7-acf5-bc7f5a16bbe6/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/eb851523-0099-4b1d-8548-830f90b377e4/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/36f01b5e-42aa-4b84-bd61-40ad872745ee/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/16eded9e-240a-4987-8e09-5b16dbb15201/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/65e47c77-187f-4a07-86c2-5561046d2bd8/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/03c870ac-6f45-46e9-94d0-c646cc5eaf3a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/76a863dd-82c5-4236-996e-00db7ddffb57/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3bff070f-9f03-4975-9cab-2244c1ba875c/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/f63b52f6-6f84-47a0-95b1-ab530761997c/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/4131dc96-4240-4335-99b4-0689a2f0c0cd/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/b0df1f88-1ee2-4496-ba11-8becd0b93993/tempfile.png"
}
],
"started_at": "2021-11-30T17:07:40.853064Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/2v3fn7tdivaxthvcihsv4qocmq",
"cancel": "https://api.replicate.com/v1/predictions/2v3fn7tdivaxthvcihsv4qocmq/cancel"
},
"version": "70efb5e487c7bd6bad2068ff465b9dc5a410336c539b316ac4eda6f0d4e56a54"
}
---> BasePixrayPredictor Predict
Using seed:
3244908265805850817
Running pixeldrawer with 80x45 grid
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['Aliens destroying NYC skyline with lasers. #pixelart']
0it [00:00, ?it/s]
/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
warnings.warn(
iter: 0, loss: 2.98, losses: 0.98, 0.0797, 0.897, 0.0479, 0.925, 0.0489 (-0=>2.978)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 10, loss: 2.66, losses: 0.904, 0.0797, 0.791, 0.0474, 0.793, 0.0479 (-0=>2.663)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.55, losses: 0.861, 0.083, 0.75, 0.0482, 0.757, 0.0482 (-2=>2.536)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 30, loss: 2.41, losses: 0.812, 0.0844, 0.711, 0.0496, 0.705, 0.0487 (-2=>2.368)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.28, losses: 0.761, 0.0877, 0.662, 0.0538, 0.664, 0.0509 (-2=>2.276)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 50, loss: 2.33, losses: 0.793, 0.0868, 0.667, 0.0534, 0.68, 0.05 (-5=>2.23)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.16, losses: 0.725, 0.0875, 0.612, 0.0578, 0.626, 0.0523 (-0=>2.162)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 70, loss: 2.13, losses: 0.715, 0.0874, 0.601, 0.0588, 0.61, 0.0538 (-0=>2.126)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.12, losses: 0.71, 0.0884, 0.599, 0.0585, 0.607, 0.0546 (-2=>2.117)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 90, loss: 2.09, losses: 0.705, 0.0881, 0.592, 0.0599, 0.591, 0.0552 (-2=>2.089)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.09, losses: 0.702, 0.0885, 0.588, 0.0596, 0.594, 0.0547 (-0=>2.087)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 110, loss: 2.08, losses: 0.7, 0.0874, 0.584, 0.0601, 0.59, 0.0554 (-0=>2.076)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.19, losses: 0.744, 0.0869, 0.618, 0.0577, 0.628, 0.0533 (-10=>2.076)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 130, loss: 2.08, losses: 0.698, 0.0875, 0.588, 0.0592, 0.588, 0.055 (-0=>2.076)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 2.2, losses: 0.752, 0.0879, 0.618, 0.0573, 0.628, 0.0532 (-3=>2.065)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 150, loss: 2.17, losses: 0.74, 0.0879, 0.613, 0.0576, 0.621, 0.054 (-13=>2.065)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 2.05, losses: 0.687, 0.0876, 0.575, 0.0612, 0.582, 0.0566 (-0=>2.051)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 170, loss: 2.06, losses: 0.696, 0.0881, 0.575, 0.061, 0.58, 0.0562 (-2=>2.044)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 2.04, losses: 0.683, 0.0891, 0.571, 0.0618, 0.574, 0.0569 (-0=>2.036)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 190, loss: 2.05, losses: 0.687, 0.0883, 0.575, 0.0608, 0.579, 0.0561 (-10=>2.036)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 2.04, losses: 0.683, 0.0887, 0.579, 0.0608, 0.575, 0.0566 (-20=>2.036)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 210, loss: 2.15, losses: 0.731, 0.0867, 0.606, 0.0583, 0.611, 0.0543 (-2=>2.024)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 2.03, losses: 0.681, 0.0889, 0.568, 0.0617, 0.573, 0.0571 (-12=>2.024)
0it [00:00, ?it/s]
Dropping learning rate
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 230, loss: 2.17, losses: 0.735, 0.087, 0.615, 0.0577, 0.621, 0.0537 (-4=>2.02)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 2.15, losses: 0.728, 0.0867, 0.61, 0.0584, 0.613, 0.0539 (-14=>2.02)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 250, loss: 2.14, losses: 0.725, 0.0876, 0.603, 0.059, 0.607, 0.0547 (-24=>2.02)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 260, loss: 2.02, losses: 0.679, 0.0877, 0.568, 0.0618, 0.566, 0.0576 (-7=>2.015)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 270, loss: 2.1, losses: 0.712, 0.0872, 0.592, 0.0607, 0.597, 0.0554 (-17=>2.015)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 280, loss: 2.15, losses: 0.729, 0.0866, 0.608, 0.0584, 0.618, 0.0542 (-27=>2.015)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 290, loss: 2.16, losses: 0.723, 0.0869, 0.614, 0.0591, 0.619, 0.0542 (-1=>2.013)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 300, finished (-6=>2.008)
0it [00:00, ?it/s]
0it [00:00, ?it/s]
This example was created by a different version, dribnet/pixray-text2pixel:70efb5e4.
This model costs approximately $0.17 to run on Replicate, or 5 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia T4 GPU hardware. Predictions typically complete within 13 minutes.
See @pixray posts or join the project
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
---> BasePixrayPredictor Predict
Using seed:
3244908265805850817
Running pixeldrawer with 80x45 grid
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['Aliens destroying NYC skyline with lasers. #pixelart']
0it [00:00, ?it/s]
/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
warnings.warn(
iter: 0, loss: 2.98, losses: 0.98, 0.0797, 0.897, 0.0479, 0.925, 0.0489 (-0=>2.978)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 10, loss: 2.66, losses: 0.904, 0.0797, 0.791, 0.0474, 0.793, 0.0479 (-0=>2.663)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.55, losses: 0.861, 0.083, 0.75, 0.0482, 0.757, 0.0482 (-2=>2.536)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 30, loss: 2.41, losses: 0.812, 0.0844, 0.711, 0.0496, 0.705, 0.0487 (-2=>2.368)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.28, losses: 0.761, 0.0877, 0.662, 0.0538, 0.664, 0.0509 (-2=>2.276)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 50, loss: 2.33, losses: 0.793, 0.0868, 0.667, 0.0534, 0.68, 0.05 (-5=>2.23)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.16, losses: 0.725, 0.0875, 0.612, 0.0578, 0.626, 0.0523 (-0=>2.162)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 70, loss: 2.13, losses: 0.715, 0.0874, 0.601, 0.0588, 0.61, 0.0538 (-0=>2.126)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.12, losses: 0.71, 0.0884, 0.599, 0.0585, 0.607, 0.0546 (-2=>2.117)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 90, loss: 2.09, losses: 0.705, 0.0881, 0.592, 0.0599, 0.591, 0.0552 (-2=>2.089)
0it [00:00, ?it/s]
0it [00:12, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.09, losses: 0.702, 0.0885, 0.588, 0.0596, 0.594, 0.0547 (-0=>2.087)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 110, loss: 2.08, losses: 0.7, 0.0874, 0.584, 0.0601, 0.59, 0.0554 (-0=>2.076)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.19, losses: 0.744, 0.0869, 0.618, 0.0577, 0.628, 0.0533 (-10=>2.076)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 130, loss: 2.08, losses: 0.698, 0.0875, 0.588, 0.0592, 0.588, 0.055 (-0=>2.076)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 2.2, losses: 0.752, 0.0879, 0.618, 0.0573, 0.628, 0.0532 (-3=>2.065)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 150, loss: 2.17, losses: 0.74, 0.0879, 0.613, 0.0576, 0.621, 0.054 (-13=>2.065)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 2.05, losses: 0.687, 0.0876, 0.575, 0.0612, 0.582, 0.0566 (-0=>2.051)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 170, loss: 2.06, losses: 0.696, 0.0881, 0.575, 0.061, 0.58, 0.0562 (-2=>2.044)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 2.04, losses: 0.683, 0.0891, 0.571, 0.0618, 0.574, 0.0569 (-0=>2.036)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 190, loss: 2.05, losses: 0.687, 0.0883, 0.575, 0.0608, 0.579, 0.0561 (-10=>2.036)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 2.04, losses: 0.683, 0.0887, 0.579, 0.0608, 0.575, 0.0566 (-20=>2.036)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 210, loss: 2.15, losses: 0.731, 0.0867, 0.606, 0.0583, 0.611, 0.0543 (-2=>2.024)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 2.03, losses: 0.681, 0.0889, 0.568, 0.0617, 0.573, 0.0571 (-12=>2.024)
0it [00:00, ?it/s]
Dropping learning rate
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 230, loss: 2.17, losses: 0.735, 0.087, 0.615, 0.0577, 0.621, 0.0537 (-4=>2.02)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 2.15, losses: 0.728, 0.0867, 0.61, 0.0584, 0.613, 0.0539 (-14=>2.02)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 250, loss: 2.14, losses: 0.725, 0.0876, 0.603, 0.059, 0.607, 0.0547 (-24=>2.02)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 260, loss: 2.02, losses: 0.679, 0.0877, 0.568, 0.0618, 0.566, 0.0576 (-7=>2.015)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 270, loss: 2.1, losses: 0.712, 0.0872, 0.592, 0.0607, 0.597, 0.0554 (-17=>2.015)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 280, loss: 2.15, losses: 0.729, 0.0866, 0.608, 0.0584, 0.618, 0.0542 (-27=>2.015)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 290, loss: 2.16, losses: 0.723, 0.0869, 0.614, 0.0591, 0.619, 0.0542 (-1=>2.013)
0it [00:00, ?it/s]
0it [00:13, ?it/s]
0it [00:00, ?it/s]
iter: 300, finished (-6=>2.008)
0it [00:00, ?it/s]
0it [00:00, ?it/s]