Prediction
dribnet/pixray:ebddb054
Input
- prompts
- japan bookstore
- settings
{
"prompts": "japan bookstore",
"settings": "\n"
}
Install Replicate’s Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client:
import Replicate from "replicate";

// Create an API client authenticated with the token from the environment.
const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN });
Run dribnet/pixray using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Inputs for the pixray model: a text prompt plus optional extra settings
// (here just a blank settings string).
const input = {
  prompts: "japan bookstore",
  settings: "\n",
};

// Run the pinned model version and wait for the prediction to finish.
const output = await replicate.run(
  "dribnet/pixray:ebddb054d51a1ec5f4853440f27f10a12b52ff4989f4db00738ea7aa2dc5dc18",
  { input }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client:
import replicate
Run dribnet/pixray using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Pinned model version to run.
PIXRAY_VERSION = "dribnet/pixray:ebddb054d51a1ec5f4853440f27f10a12b52ff4989f4db00738ea7aa2dc5dc18"

# Start the prediction with a text prompt and (empty) extra settings.
output = replicate.run(
    PIXRAY_VERSION,
    input={
        "prompts": "japan bookstore",
        "settings": "\n",
    },
)

# dribnet/pixray streams output while it runs: replicate.run returns an
# iterator, so consume results as they arrive.
# https://replicate.com/dribnet/pixray/api#output-schema
for item in output:
    print(item)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run dribnet/pixray using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via the Replicate HTTP API.
# - "Prefer: wait" asks the API to hold the connection open and return the
#   finished prediction instead of responding immediately with a pending one.
# - $'...' (ANSI-C quoting) is used so that \\n in the JSON body reaches the
#   server as the two characters \n, i.e. a JSON-encoded newline.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "ebddb054d51a1ec5f4853440f27f10a12b52ff4989f4db00738ea7aa2dc5dc18",
"input": {
"prompts": "japan bookstore",
"settings": "\\n"
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Output
{
"completed_at": "2022-01-02T06:23:22.210308Z",
"created_at": "2022-01-02T06:19:50.251079Z",
"data_removed": false,
"error": null,
"id": "jiqeiybms5bardvun6hdageds4",
"input": {
"prompts": "japan bookstore",
"settings": "\n"
},
"logs": "---> BasePixrayPredictor Predict\nUsing seed:\n12077557150837813484\nreusing cached copy of model\nmodels/vqgan_imagenet_f16_16384.ckpt\nAll CLIP models already loaded:\n['ViT-B/32', 'ViT-B/16']\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['japan bookstore']\n\n0it [00:00, ?it/s]\niter: 0, loss: 2.03, losses: 0.967, 0.0459, 0.967, 0.0472 (-0=>2.027)\n\n0it [00:00, ?it/s]\n/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n warnings.warn(\niter: 10, loss: 1.73, losses: 0.818, 0.0518, 0.806, 0.0541 (-0=>1.729)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 1.65, losses: 0.777, 0.0534, 0.767, 0.0545 (-0=>1.652)\n\n0it [00:00, ?it/s]\niter: 30, loss: 1.62, losses: 0.767, 0.0526, 0.752, 0.0525 (-0=>1.623)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 1.6, losses: 0.75, 0.0566, 0.741, 0.0557 (-0=>1.603)\n\n0it [00:00, ?it/s]\niter: 50, loss: 1.62, losses: 0.759, 0.0546, 0.756, 0.0547 (-10=>1.603)\n\n0it [00:09, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 1.61, losses: 0.747, 0.0563, 0.748, 0.0542 (-4=>1.589)\n\n0it [00:00, ?it/s]\niter: 70, loss: 1.57, losses: 0.735, 0.0572, 0.725, 0.0566 (-0=>1.574)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 1.57, losses: 0.732, 0.0565, 0.722, 0.0549 (-0=>1.565)\n\n0it [00:00, ?it/s]\niter: 90, loss: 1.61, losses: 0.749, 0.0542, 0.753, 0.052 (-10=>1.565)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 1.59, losses: 0.745, 0.0556, 0.74, 0.0525 (-1=>1.564)\n\n0it [00:00, ?it/s]\niter: 110, loss: 1.59, losses: 0.748, 0.0549, 0.739, 0.0527 (-6=>1.56)\n\n0it [00:08, ?it/s]\n\n0it 
[00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 1.58, losses: 0.741, 0.0561, 0.729, 0.0536 (-7=>1.554)\n\n0it [00:00, ?it/s]\niter: 130, loss: 1.56, losses: 0.728, 0.0574, 0.716, 0.0538 (-7=>1.543)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 1.6, losses: 0.749, 0.0548, 0.743, 0.0531 (-17=>1.543)\n\n0it [00:00, ?it/s]\niter: 150, loss: 1.59, losses: 0.745, 0.0548, 0.736, 0.0527 (-4=>1.539)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 1.59, losses: 0.745, 0.0547, 0.736, 0.053 (-14=>1.539)\n\n0it [00:00, ?it/s]\niter: 170, loss: 1.55, losses: 0.727, 0.0573, 0.711, 0.0531 (-24=>1.539)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 1.54, losses: 0.726, 0.0556, 0.705, 0.0525 (-3=>1.531)\n\n0it [00:00, ?it/s]\nDropping learning rate\niter: 190, loss: 1.57, losses: 0.74, 0.0547, 0.727, 0.053 (-3=>1.539)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 1.58, losses: 0.739, 0.0559, 0.73, 0.0539 (-9=>1.531)\n\n0it [00:00, ?it/s]\niter: 210, loss: 1.57, losses: 0.734, 0.0563, 0.722, 0.0527 (-7=>1.527)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 1.58, losses: 0.744, 0.0544, 0.732, 0.0529 (-1=>1.525)\n\n0it [00:00, ?it/s]\niter: 230, loss: 1.56, losses: 0.734, 0.0553, 0.721, 0.0522 (-11=>1.525)\n\n0it [00:08, ?it/s]\n\n0it [00:16, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 1.53, losses: 0.719, 0.0579, 0.698, 0.0544 (-7=>1.52)\n\n0it [00:00, ?it/s]\niter: 250, finished (-17=>1.52)\n\n0it [00:08, ?it/s]\n\n0it [00:08, ?it/s]",
"metrics": {
"predict_time": 211.784682,
"total_time": 211.959229
},
"output": [
{
"file": "https://replicate.delivery/mgxm/2918d9ee-d687-4b5d-ab4e-e4d15775f7eb/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/dfc7a1f8-8d33-429e-9d65-d3dbcc7d9019/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/094376c9-5637-431a-b08b-e129fcd9d96a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/41e4ca8d-2b69-4775-8a28-39f596e870fb/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/a5ba325e-239d-4c27-b94f-dfe9f5cde62a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/c400ebd9-fea6-4cdf-a91f-b9939ca6a131/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/148d304b-728a-456e-a68c-9778e25e1d35/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/698088a7-06f1-47d1-97f8-068aee216879/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/dbea3456-aa4c-4014-9d1e-de44203a4c92/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/83c96ed8-b643-4725-95aa-3795cdcd3771/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/8b91a8af-c32c-4e89-ad0f-5b8a195ef7f5/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/5054ea39-2b97-4483-bd72-d2a17100393c/tempfile.png"
}
],
"started_at": "2022-01-02T06:19:50.425626Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/jiqeiybms5bardvun6hdageds4",
"cancel": "https://api.replicate.com/v1/predictions/jiqeiybms5bardvun6hdageds4/cancel"
},
"version": "ebddb054d51a1ec5f4853440f27f10a12b52ff4989f4db00738ea7aa2dc5dc18"
}
Generated in
---> BasePixrayPredictor Predict
Using seed:
12077557150837813484
reusing cached copy of model
models/vqgan_imagenet_f16_16384.ckpt
All CLIP models already loaded:
['ViT-B/32', 'ViT-B/16']
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['japan bookstore']
0it [00:00, ?it/s]
iter: 0, loss: 2.03, losses: 0.967, 0.0459, 0.967, 0.0472 (-0=>2.027)
0it [00:00, ?it/s]
/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
warnings.warn(
iter: 10, loss: 1.73, losses: 0.818, 0.0518, 0.806, 0.0541 (-0=>1.729)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 1.65, losses: 0.777, 0.0534, 0.767, 0.0545 (-0=>1.652)
0it [00:00, ?it/s]
iter: 30, loss: 1.62, losses: 0.767, 0.0526, 0.752, 0.0525 (-0=>1.623)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 1.6, losses: 0.75, 0.0566, 0.741, 0.0557 (-0=>1.603)
0it [00:00, ?it/s]
iter: 50, loss: 1.62, losses: 0.759, 0.0546, 0.756, 0.0547 (-10=>1.603)
0it [00:09, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 1.61, losses: 0.747, 0.0563, 0.748, 0.0542 (-4=>1.589)
0it [00:00, ?it/s]
iter: 70, loss: 1.57, losses: 0.735, 0.0572, 0.725, 0.0566 (-0=>1.574)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 1.57, losses: 0.732, 0.0565, 0.722, 0.0549 (-0=>1.565)
0it [00:00, ?it/s]
iter: 90, loss: 1.61, losses: 0.749, 0.0542, 0.753, 0.052 (-10=>1.565)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 1.59, losses: 0.745, 0.0556, 0.74, 0.0525 (-1=>1.564)
0it [00:00, ?it/s]
iter: 110, loss: 1.59, losses: 0.748, 0.0549, 0.739, 0.0527 (-6=>1.56)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 1.58, losses: 0.741, 0.0561, 0.729, 0.0536 (-7=>1.554)
0it [00:00, ?it/s]
iter: 130, loss: 1.56, losses: 0.728, 0.0574, 0.716, 0.0538 (-7=>1.543)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 1.6, losses: 0.749, 0.0548, 0.743, 0.0531 (-17=>1.543)
0it [00:00, ?it/s]
iter: 150, loss: 1.59, losses: 0.745, 0.0548, 0.736, 0.0527 (-4=>1.539)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 1.59, losses: 0.745, 0.0547, 0.736, 0.053 (-14=>1.539)
0it [00:00, ?it/s]
iter: 170, loss: 1.55, losses: 0.727, 0.0573, 0.711, 0.0531 (-24=>1.539)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 1.54, losses: 0.726, 0.0556, 0.705, 0.0525 (-3=>1.531)
0it [00:00, ?it/s]
Dropping learning rate
iter: 190, loss: 1.57, losses: 0.74, 0.0547, 0.727, 0.053 (-3=>1.539)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 1.58, losses: 0.739, 0.0559, 0.73, 0.0539 (-9=>1.531)
0it [00:00, ?it/s]
iter: 210, loss: 1.57, losses: 0.734, 0.0563, 0.722, 0.0527 (-7=>1.527)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 1.58, losses: 0.744, 0.0544, 0.732, 0.0529 (-1=>1.525)
0it [00:00, ?it/s]
iter: 230, loss: 1.56, losses: 0.734, 0.0553, 0.721, 0.0522 (-11=>1.525)
0it [00:08, ?it/s]
0it [00:16, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 1.53, losses: 0.719, 0.0579, 0.698, 0.0544 (-7=>1.52)
0it [00:00, ?it/s]
iter: 250, finished (-17=>1.52)
0it [00:08, ?it/s]
0it [00:08, ?it/s]