dribnet / pixray-pixel
Prediction
dribnet/pixray-pixel:1111780c2e988818da831f8f77bfced3a2413bb824ee4025a98908603ba97ce3
ID: fmduymupanauvkphdk4pszsmxq
Status: Succeeded
Source: Web
Hardware: –
Total duration: –
Created: –
Input
- aspect: widescreen
- drawer: pixel
- prompts: computer love. #pixelart
{ "aspect": "widescreen", "drawer": "pixel", "prompts": "computer love. #pixelart" }
Install Replicate’s Node.js client library:

npm install replicate

Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "dribnet/pixray-pixel:1111780c2e988818da831f8f77bfced3a2413bb824ee4025a98908603ba97ce3",
  {
    input: {
      aspect: "widescreen",
      drawer: "pixel",
      prompts: "computer love. #pixelart"
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate

Import the client:

import replicate
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "dribnet/pixray-pixel:1111780c2e988818da831f8f77bfced3a2413bb824ee4025a98908603ba97ce3",
    input={
        "aspect": "widescreen",
        "drawer": "pixel",
        "prompts": "computer love. #pixelart"
    }
)

# The dribnet/pixray-pixel model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/dribnet/pixray-pixel/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
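Each streamed item here is an intermediate render; in the Output below, every item is an object with a single file URL. As a rough sketch (assuming the items are shaped like the output schema on this page, and using a hypothetical frames/ directory), you could save each frame as it arrives:

import os
import urllib.request

import replicate

output = replicate.run(
    "dribnet/pixray-pixel:1111780c2e988818da831f8f77bfced3a2413bb824ee4025a98908603ba97ce3",
    input={
        "aspect": "widescreen",
        "drawer": "pixel",
        "prompts": "computer love. #pixelart"
    }
)

# Hypothetical local directory for the intermediate renders.
os.makedirs("frames", exist_ok=True)

for i, item in enumerate(output):
    # Assumption: each streamed item looks like the entries in the output JSON
    # on this page, i.e. {"file": "https://replicate.delivery/.../tempfile.png"}.
    url = item["file"] if isinstance(item, dict) else str(item)
    urllib.request.urlretrieve(url, f"frames/frame_{i:03d}.png")
    print(f"saved frame {i} from {url}")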
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "dribnet/pixray-pixel:1111780c2e988818da831f8f77bfced3a2413bb824ee4025a98908603ba97ce3",
    "input": {
      "aspect": "widescreen",
      "drawer": "pixel",
      "prompts": "computer love. #pixelart"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
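With Prefer: wait the API holds the connection open while the model runs. If you would rather poll, the prediction's urls.get endpoint (shown in the Output below) can be fetched until the status settles. A minimal Python sketch, using only the standard library and the prediction ID from this page; the polling interval is arbitrary:

import json
import os
import time
import urllib.request

# Prediction ID from this page; in practice you would read it from the JSON
# returned when the prediction was created.
prediction_id = "fmduymupanauvkphdk4pszsmxq"
url = f"https://api.replicate.com/v1/predictions/{prediction_id}"
headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

while True:
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as resp:
        prediction = json.load(resp)
    if prediction["status"] in ("succeeded", "failed", "canceled"):
        break
    time.sleep(5)  # arbitrary polling interval

print(prediction["status"])
print(prediction.get("output"))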
Output
{ "completed_at": "2021-10-10T14:48:22.270702Z", "created_at": "2021-10-10T14:37:09.657105Z", "data_removed": false, "error": null, "id": "fmduymupanauvkphdk4pszsmxq", "input": { "aspect": "widescreen", "drawer": "pixel", "prompts": "computer love. #pixelart" }, "logs": "---> BasePixrayPredictor Predict\nUsing seed:\n2998862719339201757\nRunning pixeldrawer with 80x45 grid\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['computer love. #pixelart']\n\n0it [00:00, ?it/s]\n/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n warnings.warn(\niter: 0, loss: 2.89, losses: 0.96, 0.08, 0.877, 0.0475, 0.88, 0.0489 (-0=>2.894)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.69, losses: 0.912, 0.0782, 0.806, 0.048, 0.802, 0.0459 (-1=>2.675)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.59, losses: 0.889, 0.0802, 0.76, 0.0477, 0.772, 0.046 (-5=>2.582)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.49, losses: 0.868, 0.0814, 0.717, 0.0491, 0.729, 0.0461 (-1=>2.425)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.42, losses: 0.848, 0.0828, 0.7, 0.0507, 0.695, 0.0473 (-7=>2.356)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.4, losses: 0.834, 0.0825, 0.697, 0.0508, 0.69, 0.0476 (-1=>2.264)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.25, losses: 0.77, 0.0829, 0.656, 0.0521, 0.636, 0.0486 (-0=>2.245)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.37, losses: 0.813, 0.0826, 0.689, 0.0519, 0.681, 0.048 (-6=>2.232)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.34, losses: 0.804, 0.0838, 0.681, 0.0516, 0.675, 0.0479 (-5=>2.199)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.31, losses: 0.793, 0.0836, 0.673, 0.0522, 0.657, 0.0488 (-15=>2.199)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.2, losses: 0.754, 0.0838, 0.642, 0.0528, 0.623, 0.0497 (-2=>2.196)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.18, losses: 0.746, 0.0841, 0.636, 0.0539, 0.614, 0.0495 (-0=>2.184)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.19, losses: 0.751, 0.0839, 0.636, 0.0526, 0.615, 0.0495 (-9=>2.183)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.26, losses: 0.788, 0.0847, 0.648, 0.0537, 0.638, 0.0492 (-5=>2.16)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.25, losses: 0.777, 0.0837, 0.649, 0.0538, 0.636, 0.0497 (-15=>2.16)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.29, losses: 0.79, 0.0839, 0.661, 0.0522, 0.653, 0.0489 (-25=>2.16)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.28, losses: 0.788, 0.0835, 0.66, 0.0525, 0.65, 0.0487 (-35=>2.16)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.17, losses: 0.74, 0.0843, 0.631, 0.0541, 0.608, 0.05 (-1=>2.153)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it 
[00:00, ?it/s]\niter: 180, loss: 2.27, losses: 0.782, 0.0843, 0.658, 0.0532, 0.643, 0.0492 (-11=>2.153)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.25, losses: 0.775, 0.0836, 0.651, 0.0536, 0.637, 0.0496 (-3=>2.144)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.26, losses: 0.781, 0.0842, 0.654, 0.0535, 0.64, 0.0495 (-13=>2.144)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.26, losses: 0.778, 0.0839, 0.658, 0.0535, 0.641, 0.0496 (-23=>2.144)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.16, losses: 0.734, 0.0835, 0.627, 0.0538, 0.608, 0.0502 (-3=>2.14)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.26, losses: 0.777, 0.0836, 0.654, 0.0531, 0.639, 0.0496 (-5=>2.135)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.26, losses: 0.78, 0.0842, 0.659, 0.0535, 0.639, 0.05 (-15=>2.135)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.16, losses: 0.739, 0.0838, 0.628, 0.0541, 0.606, 0.0498 (-25=>2.135)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.16, losses: 0.74, 0.0836, 0.624, 0.0551, 0.606, 0.0503 (-35=>2.135)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.28, losses: 0.789, 0.0844, 0.654, 0.0536, 0.645, 0.0499 (-6=>2.13)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.3, losses: 0.789, 0.0841, 0.668, 0.0533, 0.655, 0.0494 (-16=>2.13)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.23, losses: 0.765, 0.084, 0.647, 0.0542, 0.629, 0.0502 (-26=>2.13)\n\n0it [00:00, ?it/s]\nDropping learning rate\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, loss: 2.23, losses: 0.767, 0.0841, 0.642, 0.054, 0.629, 0.0499 (-0=>2.226)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 310, loss: 2.26, losses: 0.773, 0.0835, 0.655, 0.054, 0.641, 0.0504 (-4=>2.133)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 320, loss: 2.25, losses: 0.776, 0.0846, 0.654, 0.0541, 0.636, 0.0501 (-14=>2.133)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 330, loss: 2.24, losses: 0.77, 0.0848, 0.651, 0.0542, 0.631, 0.0502 (-5=>2.131)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 340, loss: 2.27, losses: 0.779, 0.0843, 0.658, 0.0534, 0.644, 0.0502 (-1=>2.106)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 350, loss: 2.21, losses: 0.759, 0.0845, 0.639, 0.0543, 0.622, 0.0506 (-11=>2.106)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 360, loss: 2.2, losses: 0.752, 0.0846, 0.635, 0.0552, 0.618, 0.0504 (-21=>2.106)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 370, loss: 2.22, losses: 0.767, 0.0848, 0.639, 0.0545, 0.628, 0.0501 (-31=>2.106)\n\n0it [00:00, ?it/s]\n---> BasePixrayPredictor Predict\nUsing seed:\n7658913081584254051\nRunning pixeldrawer with 80x45 grid\nAll CLIP models already loaded:\n['RN50', 'ViT-B/32', 'ViT-B/16']\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 380, loss: 2.23, losses: 0.77, 0.0846, 0.642, 0.0546, 0.628, 0.0505 (-41=>2.106)\n\n0it [00:00, ?it/s]\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['computer love. 
#pixelart']\n\n0it [00:00, ?it/s]\niter: 0, loss: 2.88, losses: 0.957, 0.0806, 0.874, 0.0471, 0.875, 0.0487 (-0=>2.883)\n\n0it [00:00, ?it/s]\n\n0it [00:14, ?it/s]\n\n0it [00:00, ?it/s]\niter: 390, loss: 2.13, losses: 0.723, 0.0854, 0.619, 0.0553, 0.597, 0.0505 (-7=>2.105)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.7, losses: 0.912, 0.0767, 0.803, 0.0472, 0.813, 0.0451 (-1=>2.671)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 400, finished (-17=>2.105)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]", "metrics": {}, "output": [ { "file": "https://replicate.delivery/mgxm/7902e4d4-2c3c-4f1c-b0e7-bca37421e564/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/9249686d-beb6-47f5-aa07-3e042857e45e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/cb7e3150-7a83-4a45-b4a4-b49b5fdc46a0/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e96f06e5-e4cb-4bc7-b822-6fe1efec86ab/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2fc8e3f6-a98a-47c6-887f-9eccdf2280c2/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/6e73a197-1405-49fc-a555-01c7edfe5d50/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b1bb5f8b-7ad8-4bd5-bd95-0ce6ba14a6c5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b8d394ed-c4a6-4cc0-91c2-8181f7fd5271/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/034f9ac5-63a8-4a59-bcc4-54c92a8400d6/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/17dab3a3-5fca-4837-a38f-bbd6009ab31a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/acea650f-9076-4251-b498-5f8338c4b63a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/495d5a40-1632-4f6c-b082-779c007fd8d2/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e5d20e18-e22f-4c8f-b9d9-a4ab1fed6ccc/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ad143a0f-74a5-4ec9-bd68-514e2d126e0f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2e18765b-b455-440e-974b-b281922f4067/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/631af4fb-3780-4472-9784-795ce58c9fff/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/c6695315-d65d-4cda-9c3c-f52a6938d0f0/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/f66e0f5d-aba8-49ca-abbb-b6ccc1da7892/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/00f2b1f9-8331-410e-9c11-02706ffd88da/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ec2db2f3-6a44-41f5-b4ad-2ac6df693de9/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/69cb9d6a-6e58-4f13-a1ca-843871b012f8/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b0457c70-da78-4208-a5b1-49b8581cd630/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/55cbccf0-dfbc-4d1a-82a9-0a28e9f0267e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/06b1c3d7-1be8-4296-86cc-45f648b6c8bb/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/1fe17aab-e859-4206-bbdc-2152c9f45a93/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/dded31b1-4aaa-4c6b-b787-9466cebeacfd/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/cc4edf4f-438c-49e9-88fb-784ca2efd6c8/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/f5ead865-8bbf-4901-9c76-e70758192752/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/42bc78db-3486-4256-9ee2-10115e4a1734/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d6df0748-fa9c-4e0c-8b75-5123ab1e7bc0/tempfile.png" }, { "file": 
"https://replicate.delivery/mgxm/cf23d441-47ff-44ad-bebd-2d4d5f2f64dd/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/753aaef1-1aa9-43c0-b00e-9b0121d76c92/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/8410f746-4b08-482c-bbe8-eee7eaa013ee/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/48a4aed4-1bd0-4686-9b31-07615ba64ea9/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/3d51ef61-72ed-4cd3-854b-e7ff867b8bd3/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/59d3ef5c-f4e4-4b57-99c2-85a992fd4e51/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/5d25bbb7-6021-4cff-8059-4bb6da9e2c0f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/f90b82af-37c8-4e4f-98b1-597ea6347140/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/c7308751-bdf4-4aca-9e85-021ba3f2aa53/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/da985661-eb32-4023-884f-b4f7331e51b4/tempfile.png" } ], "started_at": null, "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/fmduymupanauvkphdk4pszsmxq", "cancel": "https://api.replicate.com/v1/predictions/fmduymupanauvkphdk4pszsmxq/cancel" }, "version": "1111780c2e988818da831f8f77bfced3a2413bb824ee4025a98908603ba97ce3" }
Prediction
dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be
Input
- aspect: widescreen
- drawer: pixel
- prompts: computer love. #pixelart
{ "aspect": "widescreen", "drawer": "pixel", "prompts": "computer love. #pixelart" }
Install Replicate’s Node.js client library:

npm install replicate

Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
  {
    input: {
      aspect: "widescreen",
      drawer: "pixel",
      prompts: "computer love. #pixelart"
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate

Import the client:

import replicate
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
    input={
        "aspect": "widescreen",
        "drawer": "pixel",
        "prompts": "computer love. #pixelart"
    }
)

# The dribnet/pixray-pixel model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/dribnet/pixray-pixel/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
    "input": {
      "aspect": "widescreen",
      "drawer": "pixel",
      "prompts": "computer love. #pixelart"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2021-10-10T15:05:05.636937Z", "created_at": "2021-10-10T14:57:10.567190Z", "data_removed": false, "error": null, "id": "6defoetqx5b65nfxryqxabirdq", "input": { "aspect": "widescreen", "drawer": "pixel", "prompts": "computer love. #pixelart" }, "logs": "---> BasePixrayPredictor Predict\nUsing seed:\n10291670960213269173\nRunning pixeldrawer with 80x45 grid\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['computer love. #pixelart']\n\n0it [00:00, ?it/s]\n/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n warnings.warn(\niter: 0, loss: 2.89, losses: 0.958, 0.0798, 0.878, 0.0475, 0.88, 0.0492 (-0=>2.892)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.7, losses: 0.909, 0.077, 0.81, 0.0498, 0.811, 0.0473 (-0=>2.704)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.65, losses: 0.9, 0.0796, 0.779, 0.048, 0.793, 0.0467 (-5=>2.615)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.54, losses: 0.878, 0.0804, 0.734, 0.0496, 0.747, 0.0461 (-4=>2.527)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.36, losses: 0.819, 0.0808, 0.68, 0.0522, 0.68, 0.0491 (-0=>2.361)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.39, losses: 0.833, 0.0826, 0.69, 0.0507, 0.688, 0.048 (-3=>2.3)\n\n0it [00:00, ?it/s]\n\n0it [00:12, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.27, losses: 0.781, 0.0831, 0.659, 0.0529, 0.642, 0.0495 (-1=>2.261)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.4, losses: 0.83, 0.0838, 0.695, 0.0513, 0.689, 0.048 (-3=>2.238)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.22, losses: 0.762, 0.0834, 0.646, 0.0533, 0.628, 0.0495 (-0=>2.222)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.32, losses: 0.81, 0.0846, 0.671, 0.052, 0.659, 0.049 (-1=>2.192)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.33, losses: 0.814, 0.0833, 0.67, 0.052, 0.662, 0.0484 (-3=>2.189)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.29, losses: 0.791, 0.0844, 0.664, 0.0527, 0.652, 0.0496 (-3=>2.167)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.18, losses: 0.747, 0.0832, 0.633, 0.0539, 0.611, 0.0508 (-13=>2.167)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.3, losses: 0.796, 0.0839, 0.671, 0.053, 0.651, 0.0494 (-23=>2.167)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.29, losses: 0.797, 0.084, 0.659, 0.0529, 0.644, 0.0493 (-33=>2.167)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.32, losses: 0.802, 0.0833, 0.672, 0.0534, 0.659, 0.0493 (-3=>2.159)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.31, losses: 0.796, 0.083, 0.667, 0.0527, 0.658, 0.0489 (-13=>2.159)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.25, losses: 0.777, 0.0849, 0.649, 0.0543, 0.635, 0.05 (-23=>2.159)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it 
[00:00, ?it/s]\niter: 180, loss: 2.17, losses: 0.751, 0.0836, 0.628, 0.0547, 0.605, 0.051 (-33=>2.159)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.18, losses: 0.751, 0.0833, 0.636, 0.0535, 0.61, 0.0507 (-9=>2.152)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.27, losses: 0.785, 0.083, 0.657, 0.053, 0.646, 0.0492 (-19=>2.152)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.29, losses: 0.79, 0.0839, 0.663, 0.0534, 0.653, 0.0493 (-1=>2.137)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.25, losses: 0.775, 0.0847, 0.648, 0.0546, 0.637, 0.0497 (-5=>2.135)\n\n0it [00:00, ?it/s]\nDropping learning rate\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.23, losses: 0.767, 0.0848, 0.647, 0.0546, 0.627, 0.0505 (-4=>2.167)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.3, losses: 0.798, 0.0824, 0.662, 0.053, 0.653, 0.0489 (-1=>2.133)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.26, losses: 0.78, 0.0838, 0.651, 0.0544, 0.639, 0.0495 (-1=>2.131)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.22, losses: 0.764, 0.0849, 0.64, 0.055, 0.622, 0.0505 (-9=>2.122)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.28, losses: 0.791, 0.083, 0.659, 0.0542, 0.646, 0.0495 (-3=>2.118)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.25, losses: 0.773, 0.0848, 0.654, 0.0543, 0.638, 0.0506 (-13=>2.118)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.25, losses: 0.779, 0.085, 0.646, 0.0544, 0.632, 0.0496 (-23=>2.118)\n\n0it [00:00, ?it/s]\n\n0it [00:13, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-5=>2.117)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]", "metrics": { "total_time": 475.069747 }, "output": [ { "file": "https://replicate.delivery/mgxm/27104066-d53e-416e-8bed-33ed9abcb85e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e3c42482-9329-48a6-97a9-657edb9da42d/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/86c39abf-b453-4fed-b316-5a252f4a758a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2a81654e-6a90-4336-b901-72a8cb4d6e78/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/cc50d190-b4a8-4814-bf14-3dbbad584977/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/c694646a-a541-4270-967e-b37b6953d587/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/8286480a-d2db-43f5-8b8e-7f36f7ee2eb7/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/120e0bab-33b8-4e6c-9f35-c6bd3f345c9a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/bb94e195-a213-4317-99cf-4e7a7d404ad8/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e55c1d86-b394-4426-ab44-936f4399bc94/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/700a9589-2b40-4231-8e41-8454d5ec6bae/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/da3e34e5-1ecd-4042-8adb-8f4b00e10706/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/490393a1-00eb-40fa-ae09-e345a8dcc3be/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2ba593c9-c43b-4be3-92f1-3cd431f122b0/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/cdff67f6-0dfc-4b48-bb01-ae2c9799e8d6/tempfile.png" }, { "file": 
"https://replicate.delivery/mgxm/ad7c31b4-8ab1-4a69-8593-739dafe8a526/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/9163bffe-6d77-4ebb-9491-0986a24f84d9/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/1683882b-1234-4759-a7b8-47059f8d7f7d/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e6bc1ce2-5d9f-4069-ad34-6f91a2f995a5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/7fe0a4b0-ce5c-496a-9f1d-7d168f041a53/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/beded07e-adad-4e87-b4eb-e265383779b8/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/308d56c8-4185-4df7-b7d7-440998274b48/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/1f724770-e74e-43c0-9fc8-dd49add9db97/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ec5678a0-c5d5-46d5-9f90-ef5ffca26dd6/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/84a686f4-8efd-416d-9547-b048248d0df9/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ffcae656-5261-437c-8df8-bf0590566d63/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/4cdb31c8-ecbc-4239-936b-3b791ea27ff6/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/aab642e8-9e01-4d06-b318-025bca3c743f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2c93c4e6-7660-456b-a7cc-747fbbc23532/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/4039227c-199b-470d-a2cc-a9ec561109b6/tempfile.png" } ], "started_at": "2021-11-30T18:44:48.137429Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/6defoetqx5b65nfxryqxabirdq", "cancel": "https://api.replicate.com/v1/predictions/6defoetqx5b65nfxryqxabirdq/cancel" }, "version": "8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be" }
Prediction
dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be
ID: 4zeyg4tz5rcltfdmxr4nur5nvy
Status: Succeeded
Source: Web
Hardware: –
Total duration: –
Created: –
Input
- aspect: widescreen
- drawer: line_sketch
- prompts: A pear on a plate.
{ "aspect": "widescreen", "drawer": "line_sketch", "prompts": "A pear on a plate." }
Install Replicate’s Node.js client library:

npm install replicate

Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
  {
    input: {
      aspect: "widescreen",
      drawer: "line_sketch",
      prompts: "A pear on a plate."
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate

Import the client:

import replicate
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
    input={
        "aspect": "widescreen",
        "drawer": "line_sketch",
        "prompts": "A pear on a plate."
    }
)

# The dribnet/pixray-pixel model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/dribnet/pixray-pixel/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
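The examples on this page use two drawer values, pixel and line_sketch, with otherwise similar inputs. A hedged sketch that reuses the replicate.run call above to produce one result per drawer and print the last file URL of each (the drawer list, and the assumption that the final streamed item is the latest render, are illustrative only):

import replicate

VERSION = "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be"

for drawer in ["pixel", "line_sketch"]:  # the two drawers used on this page
    frames = list(replicate.run(
        VERSION,
        input={
            "aspect": "widescreen",
            "drawer": drawer,
            "prompts": "A pear on a plate."
        }
    ))
    # Assumption: items are shaped like the output JSON on this page ({"file": url})
    # and the last item is the most recent render.
    final = frames[-1]
    print(drawer, final["file"] if isinstance(final, dict) else final)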
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
    "input": {
      "aspect": "widescreen",
      "drawer": "line_sketch",
      "prompts": "A pear on a plate."
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{ "completed_at": "2021-10-10T15:06:09.579643Z", "created_at": "2021-10-10T15:01:05.841067Z", "data_removed": false, "error": null, "id": "4zeyg4tz5rcltfdmxr4nur5nvy", "input": { "aspect": "widescreen", "drawer": "line_sketch", "prompts": "A pear on a plate." }, "logs": "---> BasePixrayPredictor Predict\nUsing seed:\n16682679504546433992\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['A pear on a plate.']\n\n0it [00:00, ?it/s]\n/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n warnings.warn(\niter: 0, loss: 3.09, losses: 1.01, 0.0752, 0.954, 0.0442, 0.964, 0.0471 (-0=>3.095)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.89, losses: 0.953, 0.076, 0.892, 0.0478, 0.875, 0.0484 (-1=>2.883)\n\n0it [00:00, ?it/s]\n\n0it [00:05, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.85, losses: 0.935, 0.0756, 0.889, 0.0477, 0.859, 0.0487 (-1=>2.832)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.78, losses: 0.916, 0.0757, 0.861, 0.048, 0.828, 0.0491 (-4=>2.732)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.78, losses: 0.909, 0.0763, 0.864, 0.0483, 0.834, 0.0495 (-3=>2.708)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.76, losses: 0.909, 0.0768, 0.855, 0.0481, 0.825, 0.0495 (-8=>2.69)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.67, losses: 0.877, 0.0735, 0.825, 0.0486, 0.797, 0.0504 (-2=>2.657)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.72, losses: 0.9, 0.0765, 0.836, 0.049, 0.804, 0.0499 (-1=>2.636)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.71, losses: 0.894, 0.0761, 0.831, 0.0493, 0.811, 0.0506 (-2=>2.633)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.61, losses: 0.868, 0.0747, 0.79, 0.0498, 0.776, 0.0519 (-5=>2.607)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.68, losses: 0.891, 0.0761, 0.814, 0.0491, 0.796, 0.051 (-8=>2.606)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.67, losses: 0.884, 0.0762, 0.813, 0.0494, 0.8, 0.0509 (-18=>2.606)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.67, losses: 0.888, 0.0761, 0.813, 0.0498, 0.794, 0.0513 (-3=>2.578)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.68, losses: 0.884, 0.0758, 0.817, 0.0494, 0.799, 0.0511 (-4=>2.57)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.59, losses: 0.856, 0.0755, 0.785, 0.0505, 0.768, 0.051 (-14=>2.57)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.68, losses: 0.884, 0.0759, 0.815, 0.0494, 0.801, 0.0514 (-1=>2.568)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.65, losses: 0.877, 0.0769, 0.811, 0.0493, 0.789, 0.0514 (-11=>2.568)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.65, losses: 0.878, 0.077, 0.806, 0.0503, 0.785, 0.0515 (-7=>2.548)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.64, 
losses: 0.875, 0.0778, 0.8, 0.0501, 0.781, 0.0515 (-7=>2.531)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.63, losses: 0.87, 0.0765, 0.794, 0.0511, 0.783, 0.051 (-17=>2.531)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.57, losses: 0.852, 0.0762, 0.777, 0.0513, 0.765, 0.052 (-27=>2.531)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.65, losses: 0.878, 0.0766, 0.805, 0.0495, 0.786, 0.0508 (-37=>2.531)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.57, losses: 0.851, 0.0741, 0.774, 0.0512, 0.765, 0.0524 (-47=>2.531)\n\n0it [00:00, ?it/s]\nDropping learning rate\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.63, losses: 0.878, 0.0769, 0.788, 0.0508, 0.782, 0.0519 (-1=>2.622)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.63, losses: 0.869, 0.0771, 0.794, 0.0508, 0.783, 0.0519 (-7=>2.511)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.58, losses: 0.861, 0.0754, 0.777, 0.051, 0.765, 0.0523 (-17=>2.511)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.6, losses: 0.865, 0.0761, 0.787, 0.0501, 0.765, 0.0522 (-5=>2.506)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.51, losses: 0.827, 0.0747, 0.761, 0.0509, 0.747, 0.0529 (-15=>2.506)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.51, losses: 0.835, 0.0739, 0.754, 0.0528, 0.743, 0.0527 (-7=>2.494)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.62, losses: 0.87, 0.0764, 0.793, 0.0491, 0.778, 0.0515 (-17=>2.494)\n\n0it [00:00, ?it/s]\n\n0it [00:07, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-27=>2.494)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]", "metrics": { "total_time": 303.738576 }, "output": [ { "file": "https://replicate.delivery/mgxm/eef8edeb-f447-497f-832d-792f56248b81/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/9eb1531e-2e6c-4ada-b91a-c55ec6bb4acf/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/71f0d129-cd41-4b0f-b4cc-69ddcedf506a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/f082c1fd-fc83-42da-8d77-7a7c794cc9ab/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/dab5065c-2363-4631-9ce4-6cfbf70c1f5d/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/cc6fa7ef-9ea9-42c3-a999-baabb5bc2aa5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ab520cfe-583b-4ca6-98b8-0730be60e601/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2ea96b33-ed76-4645-824c-5d742b736a61/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d932310d-774e-48de-b733-f8404e6fb5cd/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/c3176612-c092-4a3c-9664-5785607ca2cc/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/416c745f-79c8-4c49-a6b4-76c6d9dee7a7/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e091632a-94ca-410d-b946-03c29ba9fff2/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/51d2fb0f-16f5-4a68-b101-9a68134520da/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d38a1d4e-7749-4039-8a86-3fba7e3241cc/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/af7d02cf-f012-412b-bbbe-e3e932a5dfa3/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/8664fb75-1b9f-49f0-967c-e8be5f8bca8f/tempfile.png" }, { "file": 
"https://replicate.delivery/mgxm/c878be01-8205-4946-873d-3acee211ea51/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/1c9a9d1b-74dd-488b-9c9b-70c865d00845/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/3c4a7e5c-cc96-495f-99d4-19c8edd32620/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/bf56cade-f66c-4b2e-865b-658b74b8b9b8/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/72ef5a0f-f4e9-424b-b7dd-9d6e20b3c9b5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/3b6b28a5-7334-4831-b80b-9ef9c11f7aaa/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e10b41f8-7dd0-4dae-97b2-bde1b2e24878/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/8fd3622b-60c8-4f37-9ea6-e8c8cb50549e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/10617ecd-2310-42e4-9b4a-de9994ef0fc2/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/a45489ab-ac73-4be4-8eae-ad44c691b5b6/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/fa1d9c7c-1ae1-4e32-8fd3-9a399f00db02/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/f9cc1b4d-785b-46fc-8dd2-ead79c249e0c/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2e43c1e2-631d-4251-8e0c-40e2b23eb0c0/tempfile.png" } ], "started_at": "2021-12-11T15:45:45.234661Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/4zeyg4tz5rcltfdmxr4nur5nvy", "cancel": "https://api.replicate.com/v1/predictions/4zeyg4tz5rcltfdmxr4nur5nvy/cancel" }, "version": "8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be" }
Prediction
dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be
ID: tqhbnpeq3rfn5k3zq3hgfrst5i
Status: Succeeded
Source: Web
Hardware: –
Total duration: –
Created: –
Input
- aspect: widescreen
- drawer: line_sketch
- prompts: Nude Descending a Staircase
{ "aspect": "widescreen", "drawer": "line_sketch", "prompts": "Nude Descending a Staircase" }
Install Replicate’s Node.js client library:

npm install replicate

Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
  {
    input: {
      aspect: "widescreen",
      drawer: "line_sketch",
      prompts: "Nude Descending a Staircase"
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate

Import the client:

import replicate
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
    input={
        "aspect": "widescreen",
        "drawer": "line_sketch",
        "prompts": "Nude Descending a Staircase"
    }
)

# The dribnet/pixray-pixel model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/dribnet/pixray-pixel/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
Run dribnet/pixray-pixel using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "dribnet/pixray-pixel:8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be",
    "input": {
      "aspect": "widescreen",
      "drawer": "line_sketch",
      "prompts": "Nude Descending a Staircase"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
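Each prediction's Output also includes a urls.cancel endpoint. A minimal sketch for canceling a long pixray run from Python, using only the standard library; it assumes cancellation is an authenticated POST to that URL, as the cancel field and the auth header in the curl examples suggest:

import os
import urllib.request

# Prediction ID from this page; normally taken from the create response.
prediction_id = "tqhbnpeq3rfn5k3zq3hgfrst5i"
cancel_url = f"https://api.replicate.com/v1/predictions/{prediction_id}/cancel"

req = urllib.request.Request(
    cancel_url,
    method="POST",
    headers={"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"},
)
with urllib.request.urlopen(req) as resp:
    # Expect a 2xx status if the cancel request was accepted.
    print(resp.status)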
Output
{ "completed_at": "2021-10-10T15:10:30.464228Z", "created_at": "2021-10-10T15:07:00.288334Z", "data_removed": false, "error": null, "id": "tqhbnpeq3rfn5k3zq3hgfrst5i", "input": { "aspect": "widescreen", "drawer": "line_sketch", "prompts": "Nude Descending a Staircase" }, "logs": "---> BasePixrayPredictor Predict\nUsing seed:\n16913510046358283677\nAll CLIP models already loaded:\n['RN50', 'ViT-B/32', 'ViT-B/16']\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['Nude Descending a Staircase']\n\n0it [00:00, ?it/s]\niter: 0, loss: 2.89, losses: 0.989, 0.075, 0.86, 0.0453, 0.867, 0.0489 (-0=>2.886)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.75, losses: 0.942, 0.0786, 0.828, 0.0476, 0.806, 0.0486 (-0=>2.75)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.67, losses: 0.912, 0.0806, 0.801, 0.0491, 0.783, 0.0475 (-1=>2.616)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.53, losses: 0.874, 0.079, 0.76, 0.0504, 0.718, 0.0478 (-2=>2.521)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.57, losses: 0.882, 0.0823, 0.765, 0.0492, 0.738, 0.0483 (-7=>2.487)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.47, losses: 0.849, 0.0812, 0.741, 0.0502, 0.7, 0.0494 (-0=>2.471)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.52, losses: 0.871, 0.0814, 0.753, 0.0496, 0.716, 0.0489 (-10=>2.471)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.47, losses: 0.843, 0.0809, 0.744, 0.0502, 0.701, 0.049 (-9=>2.437)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.46, losses: 0.843, 0.081, 0.736, 0.05, 0.697, 0.0492 (-5=>2.429)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.49, losses: 0.859, 0.0815, 0.744, 0.05, 0.709, 0.0496 (-1=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.46, losses: 0.841, 0.0812, 0.742, 0.0506, 0.694, 0.0512 (-11=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.44, losses: 0.831, 0.0818, 0.737, 0.0506, 0.694, 0.0494 (-21=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.52, losses: 0.866, 0.0815, 0.758, 0.0504, 0.713, 0.0507 (-31=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.49, losses: 0.854, 0.0827, 0.749, 0.0514, 0.706, 0.0502 (-41=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.48, losses: 0.841, 0.0816, 0.753, 0.0503, 0.701, 0.0505 (-51=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.51, losses: 0.871, 0.0822, 0.753, 0.0497, 0.709, 0.0493 (-61=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.54, losses: 0.868, 0.0828, 0.756, 0.0498, 0.729, 0.0501 (-71=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.55, losses: 0.874, 0.0821, 0.767, 0.0488, 0.728, 0.0496 (-81=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.49, losses: 0.859, 0.0835, 0.745, 0.0504, 0.704, 0.0504 (-91=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.49, losses: 0.857, 0.0825, 0.753, 0.05, 0.699, 0.05 (-101=>2.419)\n\n0it [00:00, 
?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.54, losses: 0.872, 0.0825, 0.759, 0.0501, 0.722, 0.0498 (-111=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.54, losses: 0.876, 0.0822, 0.755, 0.0493, 0.725, 0.049 (-121=>2.419)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.46, losses: 0.839, 0.0821, 0.742, 0.0502, 0.7, 0.0503 (-131=>2.419)\n\n0it [00:00, ?it/s]\nDropping learning rate\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.49, losses: 0.862, 0.0817, 0.742, 0.0494, 0.704, 0.05 (-3=>2.406)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.46, losses: 0.839, 0.0819, 0.739, 0.0505, 0.695, 0.051 (-13=>2.406)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.45, losses: 0.835, 0.0821, 0.737, 0.0499, 0.697, 0.0508 (-7=>2.4)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.5, losses: 0.863, 0.082, 0.742, 0.0495, 0.714, 0.05 (-17=>2.4)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.49, losses: 0.855, 0.0812, 0.746, 0.0501, 0.708, 0.0498 (-9=>2.4)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.48, losses: 0.851, 0.0821, 0.743, 0.0494, 0.701, 0.0506 (-1=>2.398)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.48, losses: 0.852, 0.0817, 0.741, 0.0496, 0.709, 0.0509 (-9=>2.394)\n\n0it [00:00, ?it/s]\n\n0it [00:06, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-19=>2.394)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]", "metrics": { "total_time": 210.175894 }, "output": [ { "file": "https://replicate.delivery/mgxm/f0461a12-765c-4051-8571-c0e9402e4391/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/245c2b79-202a-4cc2-9fba-b81abeecddc7/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/45eae77e-e408-4bf9-b424-ec6a9552b829/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/c6e17996-694f-4000-980b-685141ff66d1/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/83b81075-4206-4907-a74e-80ab3d30652d/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ffa5f0bb-2a25-4ab3-9550-05c14359570e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/a0ce862e-933a-4ae1-afb9-818f37e74192/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d1d15fc2-c6e5-4557-b63b-9ea5782f6959/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/fb386933-18c3-4681-8406-7dbd0af5b08f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/5f91feee-dc6c-43a5-8d1d-92ce167941e5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/24698682-4d27-4943-815f-5ec14d4f941f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d9ab4e48-3f0e-4185-98e2-659b1aa7028a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/191f0d63-29f1-45d1-9b86-47bce43f6988/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e239a338-e562-489e-9727-e968627c9adb/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/0a55301c-6d07-4baf-9215-85e31e87753b/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/ff36a96d-9a35-4ef3-99cb-ad657554a680/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/1653f5d9-655e-4aa2-8773-d54cc619856e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/286d4cd4-5077-410e-80a9-065923dd575b/tempfile.png" }, { "file": 
"https://replicate.delivery/mgxm/87acaa8c-9a34-4332-bf99-516da7bf43dd/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b943f92e-07e2-49ed-b576-f57883a73a26/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/5cccec4f-bcd2-4ee3-baa6-4f0860380f93/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b27203fd-14b3-40c5-a890-94cc582bc999/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/6863fad3-5bc5-4148-be7a-68a7de8ac898/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/23adb17e-a8c4-4bb6-a01a-574dde17aa7f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/0a160f10-0e3d-4d0e-bfe6-87eae588e2c4/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/28d62f67-1d5a-4644-b11d-98fa3dc0a742/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/c31c6140-13f4-4a5f-9df0-50a4d445cde5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/9eaf862b-13bd-44f9-b19f-2cc93d75b4bd/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/45033fbc-69cf-49a0-b3d4-fcdf9bad2287/tempfile.png" } ], "started_at": "2022-02-07T02:01:08.622971Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/tqhbnpeq3rfn5k3zq3hgfrst5i", "cancel": "https://api.replicate.com/v1/predictions/tqhbnpeq3rfn5k3zq3hgfrst5i/cancel" }, "version": "8f42ef6bd4cc784a374b91df72d718539f7b27d8535b5fb13678cfc1830529be" }
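While a prediction is running, its status and the progress logs embedded in the JSON above (the iter/loss lines) can be fetched from the prediction's urls.get endpoint, and a running job can be stopped via urls.cancel. Here is a minimal polling sketch against the HTTP API, assuming REPLICATE_API_TOKEN is set in the environment; the five-second poll interval and the log formatting are illustrative.

import os
import time
import requests

API_TOKEN = os.environ["REPLICATE_API_TOKEN"]
# The "get" URL from the prediction's urls field above.
GET_URL = "https://api.replicate.com/v1/predictions/tqhbnpeq3rfn5k3zq3hgfrst5i"

while True:
    resp = requests.get(GET_URL, headers={"Authorization": f"Bearer {API_TOKEN}"}, timeout=30)
    resp.raise_for_status()
    prediction = resp.json()
    # Show the current status and the most recent log line, if any.
    log_lines = (prediction.get("logs") or "").strip().splitlines()
    last_line = log_lines[-1] if log_lines else "(no logs yet)"
    print(prediction["status"], "-", last_line)
    if prediction["status"] in ("succeeded", "failed", "canceled"):
        break
    time.sleep(5)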
Want to make some of these yourself?
Run this model