Readme
This model doesn't have a readme.
bare pixray for API use
Run this model in Node.js with one line of code:
npm install replicate
First, set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";

// Create an API client authenticated via the REPLICATE_API_TOKEN environment variable.
const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN });
Run pixray/api using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Pixray takes its configuration as a single YAML-formatted "settings" string.
const input = {
  settings: "prompts: 'a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.'\nquality: best\ndrawer: vqgan\naspect: square\nvqgan_model: wikiart_16384\ncustom_loss: aesthetic\n",
};

// Run the pinned model version and log its output (a list of generated image URLs).
const output = await replicate.run(
  "pixray/api:1f798d587c8c726f7ffedfb80908ab625d742b7277c141a9eb233df8661939a5",
  { input }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
First, set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run pixray/api using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Run the pinned model version; pixray takes its configuration as a single
# YAML-formatted "settings" string.
output = replicate.run(
    "pixray/api:1f798d587c8c726f7ffedfb80908ab625d742b7277c141a9eb233df8661939a5",
    input={
        "settings": "prompts: 'a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.'\nquality: best\ndrawer: vqgan\naspect: square\nvqgan_model: wikiart_16384\ncustom_loss: aesthetic\n"
    }
)
# The pixray/api model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/pixray/api/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
First, set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run pixray/api using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via the Replicate HTTP API; "Prefer: wait" holds the
# connection open until the prediction finishes (or times out).
# The $'…' quoting lets \n and \' escapes inside the JSON body work in the shell.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "1f798d587c8c726f7ffedfb80908ab625d742b7277c141a9eb233df8661939a5",
"input": {
"settings": "prompts: \'a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.\'\\nquality: best\\ndrawer: vqgan\\naspect: square\\nvqgan_model: wikiart_16384\\ncustom_loss: aesthetic\\n"
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
# Download the pinned model image and run one prediction locally with Cog.
# The $'…' quoting lets the \n and \' escapes inside the settings value work in the shell.
cog predict r8.im/pixray/api@sha256:1f798d587c8c726f7ffedfb80908ab625d742b7277c141a9eb233df8661939a5 \
-i $'settings="prompts: \'a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.\'\\nquality: best\\ndrawer: vqgan\\naspect: square\\nvqgan_model: wikiart_16384\\ncustom_loss: aesthetic\\n"'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
# Start the model's HTTP server locally (requires an NVIDIA GPU and the
# NVIDIA container toolkit for --gpus=all).
docker run -d -p 5000:5000 --gpus=all r8.im/pixray/api@sha256:1f798d587c8c726f7ffedfb80908ab625d742b7277c141a9eb233df8661939a5
# Send a prediction request to the local server. The original one-liner had
# its line continuations collapsed into literal "\ " sequences, which breaks
# the command; restored as a proper multi-line invocation.
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{ "input": { "settings": "prompts: \'a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.\'\\nquality: best\\ndrawer: vqgan\\naspect: square\\nvqgan_model: wikiart_16384\\ncustom_loss: aesthetic\\n" } }' \
  http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Add a payment method to run this model.
Each run costs approximately $0.11. Alternatively, try out our featured models for free.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2022-02-07T23:21:38.406843Z",
"created_at": "2022-02-07T23:05:38.293169Z",
"data_removed": false,
"error": null,
"id": "hb5z7ctvynftdoixyjh534w4qu",
"input": {
"settings": "prompts: 'a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.'\nquality: best\ndrawer: vqgan\naspect: square\nvqgan_model: wikiart_16384\ncustom_loss: aesthetic\n"
},
"logs": "---> BasePixrayPredictor Predict\nUsing seed:\n11914490315746453492\nWorking with z of shape (1, 256, 16, 16) = 65536 dimensions.\nloaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth\nVQLPIPSWithDiscriminator running with hinge loss.\nRestored from models/vqgan_wikiart_16384.ckpt\nLoaded CLIP RN50x4: 288x288 and 178.30M params\nLoaded CLIP ViT-B/32: 224x224 and 151.28M params\nLoaded CLIP ViT-B/16: 224x224 and 149.62M params\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.']\nusing custom losses: aesthetic\n\n0it [00:00, ?it/s]\niter: 0, loss: 3.66, losses: 0.843, 0.0817, 0.98, 0.0605, 0.962, 0.0651, 0.67 (-0=>3.663)\n\n0it [00:01, ?it/s]\niter: 10, loss: 3.07, losses: 0.711, 0.0857, 0.839, 0.0636, 0.816, 0.0634, 0.489 (-0=>3.068)\n\n0it [00:26, ?it/s]\n\n0it [00:50, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.75, losses: 0.646, 0.0879, 0.778, 0.0662, 0.741, 0.0637, 0.368 (-1=>2.728)\n\n0it [00:01, ?it/s]\niter: 30, loss: 2.59, losses: 0.572, 0.0926, 0.728, 0.0686, 0.702, 0.0645, 0.363 (-0=>2.59)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.51, losses: 0.555, 0.0916, 0.717, 0.0697, 0.692, 0.0672, 0.319 (-2=>2.475)\n\n0it [00:01, ?it/s]\niter: 50, loss: 2.44, losses: 0.543, 0.0893, 0.708, 0.0702, 0.669, 0.0684, 0.292 (-3=>2.401)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.38, losses: 0.534, 0.0881, 0.682, 0.0704, 0.662, 0.0671, 0.273 (-1=>2.305)\n\n0it [00:01, ?it/s]\niter: 70, loss: 2.3, losses: 0.509, 0.0895, 0.671, 0.0727, 0.651, 0.0691, 0.238 (-5=>2.282)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.28, losses: 0.504, 0.0912, 0.66, 0.0709, 0.635, 0.07, 0.25 (-4=>2.264)\n\n0it [00:01, ?it/s]\niter: 90, loss: 2.25, losses: 0.488, 0.0919, 0.66, 0.073, 0.623, 0.0719, 0.238 (-7=>2.207)\n\n0it [00:28, ?it/s]\n\n0it [00:53, 
?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.27, losses: 0.496, 0.0932, 0.658, 0.0729, 0.624, 0.0717, 0.255 (-17=>2.207)\n\n0it [00:01, ?it/s]\niter: 110, loss: 2.28, losses: 0.487, 0.0932, 0.667, 0.0716, 0.631, 0.0702, 0.255 (-8=>2.189)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.16, losses: 0.486, 0.0916, 0.633, 0.074, 0.609, 0.0727, 0.195 (-0=>2.16)\n\n0it [00:01, ?it/s]\niter: 130, loss: 2.18, losses: 0.477, 0.0904, 0.641, 0.0747, 0.605, 0.0747, 0.222 (-10=>2.16)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.2, losses: 0.467, 0.0919, 0.647, 0.0748, 0.617, 0.0745, 0.223 (-20=>2.16)\n\n0it [00:01, ?it/s]\niter: 150, loss: 2.19, losses: 0.473, 0.0926, 0.641, 0.0736, 0.61, 0.0746, 0.221 (-6=>2.156)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.18, losses: 0.484, 0.0902, 0.641, 0.0727, 0.614, 0.0736, 0.206 (-3=>2.136)\n\n0it [00:01, ?it/s]\niter: 170, loss: 2.18, losses: 0.474, 0.0916, 0.638, 0.0722, 0.618, 0.0739, 0.21 (-6=>2.128)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.21, losses: 0.462, 0.0913, 0.652, 0.0741, 0.625, 0.0746, 0.228 (-16=>2.128)\n\n0it [00:01, ?it/s]\niter: 190, loss: 2.19, losses: 0.473, 0.0909, 0.637, 0.074, 0.622, 0.0736, 0.219 (-7=>2.124)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.15, losses: 0.462, 0.0917, 0.636, 0.0755, 0.611, 0.0747, 0.198 (-2=>2.094)\n\n0it [00:01, ?it/s]\niter: 210, loss: 2.19, losses: 0.461, 0.0909, 0.645, 0.0737, 0.618, 0.074, 0.225 (-12=>2.094)\n\n0it [00:28, ?it/s]\n---> BasePixrayPredictor Predict\nUsing seed:\n3383883393288688316\nWorking with z of shape (1, 256, 16, 16) = 65536 dimensions.\nloaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth\nVQLPIPSWithDiscriminator running with hinge loss.\nRestored from models/vqgan_wikiart_16384.ckpt\n\n0it [00:53, ?it/s]\n\n0it [00:00, 
?it/s]\niter: 220, loss: 2.14, losses: 0.468, 0.0934, 0.629, 0.0744, 0.615, 0.0755, 0.18 (-22=>2.094)\n\n0it [00:01, ?it/s]\nLoaded CLIP RN50x4: 288x288 and 178.30M params\nLoaded CLIP ViT-B/32: 224x224 and 151.28M params\nLoaded CLIP ViT-B/16: 224x224 and 149.62M params\nCaught SIGTERM, exiting...\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.']\nusing custom losses: aesthetic\n\n0it [00:00, ?it/s]\niter: 0, loss: 3.7, losses: 0.837, 0.0829, 0.982, 0.0609, 0.96, 0.0644, 0.713 (-0=>3.7)\n\n0it [00:01, ?it/s]\niter: 230, loss: 2.17, losses: 0.466, 0.0911, 0.639, 0.074, 0.617, 0.0744, 0.206 (-3=>2.08)\n\n0it [00:28, ?it/s]\niter: 10, loss: 2.79, losses: 0.663, 0.0885, 0.791, 0.0675, 0.783, 0.0647, 0.328 (-0=>2.785)\n\n0it [00:27, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.12, losses: 0.468, 0.0919, 0.631, 0.0749, 0.605, 0.0761, 0.178 (-13=>2.08)\n\n0it [00:01, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.53, losses: 0.585, 0.0894, 0.71, 0.0687, 0.726, 0.0664, 0.287 (-0=>2.533)\n\n0it [00:01, ?it/s]\niter: 250, loss: 2.14, losses: 0.468, 0.0914, 0.629, 0.073, 0.607, 0.075, 0.201 (-23=>2.08)\n\n0it [00:28, ?it/s]\niter: 30, loss: 2.38, losses: 0.519, 0.0932, 0.678, 0.0723, 0.665, 0.0712, 0.281 (-1=>2.379)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.19, losses: 0.477, 0.0916, 0.638, 0.0751, 0.609, 0.0741, 0.22 (-33=>2.08)\n\n0it [00:01, ?it/s]\nDropping learning rate\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.25, losses: 0.479, 0.0956, 0.659, 0.0732, 0.64, 0.0729, 0.231 (-0=>2.25)\n\n0it [00:01, ?it/s]\niter: 270, loss: 2.07, losses: 0.451, 0.0942, 0.624, 0.076, 0.587, 0.077, 0.159 (-0=>2.069)\n\n0it [00:28, ?it/s]\niter: 50, loss: 2.21, losses: 0.478, 0.0951, 0.65, 0.0719, 0.627, 0.0723, 0.22 (-0=>2.215)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, 
?it/s]\niter: 280, loss: 2.07, losses: 0.462, 0.0913, 0.611, 0.077, 0.592, 0.0763, 0.162 (-9=>2.051)\n\n0it [00:01, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.22, losses: 0.482, 0.0947, 0.641, 0.0727, 0.629, 0.0734, 0.226 (-1=>2.198)\n\n0it [00:01, ?it/s]\niter: 290, loss: 2.04, losses: 0.446, 0.0906, 0.612, 0.0752, 0.59, 0.0778, 0.144 (-0=>2.036)\n\n0it [00:28, ?it/s]\niter: 70, loss: 2.18, losses: 0.463, 0.0964, 0.646, 0.0734, 0.621, 0.0748, 0.202 (-2=>2.149)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, loss: 2.11, losses: 0.443, 0.0911, 0.628, 0.074, 0.613, 0.075, 0.187 (-10=>2.036)\n\n0it [00:01, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.18, losses: 0.47, 0.0946, 0.64, 0.0733, 0.613, 0.0755, 0.212 (-4=>2.104)\n\n0it [00:01, ?it/s]\niter: 310, loss: 2.13, losses: 0.468, 0.0939, 0.632, 0.0744, 0.603, 0.0755, 0.184 (-20=>2.036)\n\n0it [00:28, ?it/s]\niter: 90, loss: 2.08, losses: 0.451, 0.0943, 0.617, 0.0746, 0.598, 0.076, 0.174 (-0=>2.085)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 320, loss: 2.09, losses: 0.473, 0.093, 0.615, 0.0746, 0.595, 0.076, 0.163 (-30=>2.036)\n\n0it [00:01, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.12, losses: 0.446, 0.0965, 0.626, 0.0738, 0.602, 0.078, 0.201 (-10=>2.085)\n\n0it [00:01, ?it/s]\niter: 330, loss: 2.09, losses: 0.453, 0.091, 0.623, 0.0753, 0.596, 0.0769, 0.177 (-40=>2.036)\n\n0it [00:28, ?it/s]\niter: 110, loss: 2.18, losses: 0.465, 0.0956, 0.631, 0.0748, 0.616, 0.0743, 0.225 (-20=>2.085)\n\n0it [00:28, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 340, loss: 2.07, losses: 0.46, 0.0909, 0.608, 0.0768, 0.587, 0.0764, 0.174 (-50=>2.036)\n\n0it [00:01, ?it/s]\n\n0it [00:53, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.13, losses: 0.449, 0.0964, 0.632, 0.074, 0.603, 0.0759, 0.199 (-2=>2.066)\n\n0it [00:01, ?it/s]\niter: 350, finished (-60=>2.036)\n\n0it [00:27, 
?it/s]\n\n0it [00:27, ?it/s]",
"metrics": {
"predict_time": 959.955818,
"total_time": 960.113674
},
"output": [
{
"file": "https://replicate.delivery/mgxm/b4f9b0f6-75e6-4ba2-a0d0-d9dce8b69052/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/011e40b5-4e1f-4202-9b71-2eadabeab48f/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/0119cfaa-bb01-4829-9e46-77e196779d01/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/847355e3-ce3e-41bb-aea0-49201a7cf7e3/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/519aa258-57fe-4424-8e40-b3a3c7e4e34e/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/a2f4ca8e-ebfd-4438-bac5-dbb28f107bd5/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/710eac9e-5c56-44a9-912b-366cde72d690/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/a0e646eb-4659-46de-b458-042d1e045f64/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/a17d7c2f-de2f-4b58-bbbf-c67ae7943e36/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/7f8831a8-5232-41d0-bec3-56bd0034e48f/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/8705a4a2-1069-4dff-84ae-22fa575a261b/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3ff0e75d-c23d-4a21-8e98-26715e882c53/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/84f6dc4d-87af-44f9-9d92-524bbd9c3d51/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/acacde31-c2c8-4cef-b809-1ce25afd1b32/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/cebd49fb-0441-4cc5-ab08-d4b4fbd72808/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/8bdce1a1-5447-4e07-9c0c-6701f168f49e/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/34211b18-54e9-478d-8512-5f7d2912c0a5/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/261457bf-d315-4b0b-81bd-d861db3d97e0/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/9bd04b8b-98c7-49f8-8770-546d5cb65922/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/7e0198ed-4b94-4339-b1ae-2e17b371cd4a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/379e8de5-567c-4732-96a1-3697813fe6a2/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/0195547c-b501-49ba-96b8-f000f957a587/tempfile.png"
}
],
"started_at": "2022-02-07T23:05:38.451025Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/hb5z7ctvynftdoixyjh534w4qu",
"cancel": "https://api.replicate.com/v1/predictions/hb5z7ctvynftdoixyjh534w4qu/cancel"
},
"version": "6addca4edde007986704548c11ec6e606ffc6121ebe66e2ba021475ee586fc09"
}
---> BasePixrayPredictor Predict
Using seed:
11914490315746453492
Working with z of shape (1, 256, 16, 16) = 65536 dimensions.
loaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth
VQLPIPSWithDiscriminator running with hinge loss.
Restored from models/vqgan_wikiart_16384.ckpt
Loaded CLIP RN50x4: 288x288 and 178.30M params
Loaded CLIP ViT-B/32: 224x224 and 151.28M params
Loaded CLIP ViT-B/16: 224x224 and 149.62M params
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.']
using custom losses: aesthetic
0it [00:00, ?it/s]
iter: 0, loss: 3.66, losses: 0.843, 0.0817, 0.98, 0.0605, 0.962, 0.0651, 0.67 (-0=>3.663)
0it [00:01, ?it/s]
iter: 10, loss: 3.07, losses: 0.711, 0.0857, 0.839, 0.0636, 0.816, 0.0634, 0.489 (-0=>3.068)
0it [00:26, ?it/s]
0it [00:50, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.75, losses: 0.646, 0.0879, 0.778, 0.0662, 0.741, 0.0637, 0.368 (-1=>2.728)
0it [00:01, ?it/s]
iter: 30, loss: 2.59, losses: 0.572, 0.0926, 0.728, 0.0686, 0.702, 0.0645, 0.363 (-0=>2.59)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.51, losses: 0.555, 0.0916, 0.717, 0.0697, 0.692, 0.0672, 0.319 (-2=>2.475)
0it [00:01, ?it/s]
iter: 50, loss: 2.44, losses: 0.543, 0.0893, 0.708, 0.0702, 0.669, 0.0684, 0.292 (-3=>2.401)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.38, losses: 0.534, 0.0881, 0.682, 0.0704, 0.662, 0.0671, 0.273 (-1=>2.305)
0it [00:01, ?it/s]
iter: 70, loss: 2.3, losses: 0.509, 0.0895, 0.671, 0.0727, 0.651, 0.0691, 0.238 (-5=>2.282)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.28, losses: 0.504, 0.0912, 0.66, 0.0709, 0.635, 0.07, 0.25 (-4=>2.264)
0it [00:01, ?it/s]
iter: 90, loss: 2.25, losses: 0.488, 0.0919, 0.66, 0.073, 0.623, 0.0719, 0.238 (-7=>2.207)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.27, losses: 0.496, 0.0932, 0.658, 0.0729, 0.624, 0.0717, 0.255 (-17=>2.207)
0it [00:01, ?it/s]
iter: 110, loss: 2.28, losses: 0.487, 0.0932, 0.667, 0.0716, 0.631, 0.0702, 0.255 (-8=>2.189)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.16, losses: 0.486, 0.0916, 0.633, 0.074, 0.609, 0.0727, 0.195 (-0=>2.16)
0it [00:01, ?it/s]
iter: 130, loss: 2.18, losses: 0.477, 0.0904, 0.641, 0.0747, 0.605, 0.0747, 0.222 (-10=>2.16)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 2.2, losses: 0.467, 0.0919, 0.647, 0.0748, 0.617, 0.0745, 0.223 (-20=>2.16)
0it [00:01, ?it/s]
iter: 150, loss: 2.19, losses: 0.473, 0.0926, 0.641, 0.0736, 0.61, 0.0746, 0.221 (-6=>2.156)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 2.18, losses: 0.484, 0.0902, 0.641, 0.0727, 0.614, 0.0736, 0.206 (-3=>2.136)
0it [00:01, ?it/s]
iter: 170, loss: 2.18, losses: 0.474, 0.0916, 0.638, 0.0722, 0.618, 0.0739, 0.21 (-6=>2.128)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 2.21, losses: 0.462, 0.0913, 0.652, 0.0741, 0.625, 0.0746, 0.228 (-16=>2.128)
0it [00:01, ?it/s]
iter: 190, loss: 2.19, losses: 0.473, 0.0909, 0.637, 0.074, 0.622, 0.0736, 0.219 (-7=>2.124)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 2.15, losses: 0.462, 0.0917, 0.636, 0.0755, 0.611, 0.0747, 0.198 (-2=>2.094)
0it [00:01, ?it/s]
iter: 210, loss: 2.19, losses: 0.461, 0.0909, 0.645, 0.0737, 0.618, 0.074, 0.225 (-12=>2.094)
0it [00:28, ?it/s]
---> BasePixrayPredictor Predict
Using seed:
3383883393288688316
Working with z of shape (1, 256, 16, 16) = 65536 dimensions.
loaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth
VQLPIPSWithDiscriminator running with hinge loss.
Restored from models/vqgan_wikiart_16384.ckpt
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 2.14, losses: 0.468, 0.0934, 0.629, 0.0744, 0.615, 0.0755, 0.18 (-22=>2.094)
0it [00:01, ?it/s]
Loaded CLIP RN50x4: 288x288 and 178.30M params
Loaded CLIP ViT-B/32: 224x224 and 151.28M params
Loaded CLIP ViT-B/16: 224x224 and 149.62M params
Caught SIGTERM, exiting...
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.']
using custom losses: aesthetic
0it [00:00, ?it/s]
iter: 0, loss: 3.7, losses: 0.837, 0.0829, 0.982, 0.0609, 0.96, 0.0644, 0.713 (-0=>3.7)
0it [00:01, ?it/s]
iter: 230, loss: 2.17, losses: 0.466, 0.0911, 0.639, 0.074, 0.617, 0.0744, 0.206 (-3=>2.08)
0it [00:28, ?it/s]
iter: 10, loss: 2.79, losses: 0.663, 0.0885, 0.791, 0.0675, 0.783, 0.0647, 0.328 (-0=>2.785)
0it [00:27, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 2.12, losses: 0.468, 0.0919, 0.631, 0.0749, 0.605, 0.0761, 0.178 (-13=>2.08)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.53, losses: 0.585, 0.0894, 0.71, 0.0687, 0.726, 0.0664, 0.287 (-0=>2.533)
0it [00:01, ?it/s]
iter: 250, loss: 2.14, losses: 0.468, 0.0914, 0.629, 0.073, 0.607, 0.075, 0.201 (-23=>2.08)
0it [00:28, ?it/s]
iter: 30, loss: 2.38, losses: 0.519, 0.0932, 0.678, 0.0723, 0.665, 0.0712, 0.281 (-1=>2.379)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 260, loss: 2.19, losses: 0.477, 0.0916, 0.638, 0.0751, 0.609, 0.0741, 0.22 (-33=>2.08)
0it [00:01, ?it/s]
Dropping learning rate
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.25, losses: 0.479, 0.0956, 0.659, 0.0732, 0.64, 0.0729, 0.231 (-0=>2.25)
0it [00:01, ?it/s]
iter: 270, loss: 2.07, losses: 0.451, 0.0942, 0.624, 0.076, 0.587, 0.077, 0.159 (-0=>2.069)
0it [00:28, ?it/s]
iter: 50, loss: 2.21, losses: 0.478, 0.0951, 0.65, 0.0719, 0.627, 0.0723, 0.22 (-0=>2.215)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 280, loss: 2.07, losses: 0.462, 0.0913, 0.611, 0.077, 0.592, 0.0763, 0.162 (-9=>2.051)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.22, losses: 0.482, 0.0947, 0.641, 0.0727, 0.629, 0.0734, 0.226 (-1=>2.198)
0it [00:01, ?it/s]
iter: 290, loss: 2.04, losses: 0.446, 0.0906, 0.612, 0.0752, 0.59, 0.0778, 0.144 (-0=>2.036)
0it [00:28, ?it/s]
iter: 70, loss: 2.18, losses: 0.463, 0.0964, 0.646, 0.0734, 0.621, 0.0748, 0.202 (-2=>2.149)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 300, loss: 2.11, losses: 0.443, 0.0911, 0.628, 0.074, 0.613, 0.075, 0.187 (-10=>2.036)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.18, losses: 0.47, 0.0946, 0.64, 0.0733, 0.613, 0.0755, 0.212 (-4=>2.104)
0it [00:01, ?it/s]
iter: 310, loss: 2.13, losses: 0.468, 0.0939, 0.632, 0.0744, 0.603, 0.0755, 0.184 (-20=>2.036)
0it [00:28, ?it/s]
iter: 90, loss: 2.08, losses: 0.451, 0.0943, 0.617, 0.0746, 0.598, 0.076, 0.174 (-0=>2.085)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 320, loss: 2.09, losses: 0.473, 0.093, 0.615, 0.0746, 0.595, 0.076, 0.163 (-30=>2.036)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.12, losses: 0.446, 0.0965, 0.626, 0.0738, 0.602, 0.078, 0.201 (-10=>2.085)
0it [00:01, ?it/s]
iter: 330, loss: 2.09, losses: 0.453, 0.091, 0.623, 0.0753, 0.596, 0.0769, 0.177 (-40=>2.036)
0it [00:28, ?it/s]
iter: 110, loss: 2.18, losses: 0.465, 0.0956, 0.631, 0.0748, 0.616, 0.0743, 0.225 (-20=>2.085)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 340, loss: 2.07, losses: 0.46, 0.0909, 0.608, 0.0768, 0.587, 0.0764, 0.174 (-50=>2.036)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.13, losses: 0.449, 0.0964, 0.632, 0.074, 0.603, 0.0759, 0.199 (-2=>2.066)
0it [00:01, ?it/s]
iter: 350, finished (-60=>2.036)
0it [00:27, ?it/s]
0it [00:27, ?it/s]
This example was created by a different version, pixray/api:6addca4e.
This model costs approximately $0.11 to run on Replicate, or 9 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia T4 GPU hardware. Predictions typically complete within 9 minutes. The predict time for this model varies significantly based on the inputs.
This model doesn't have a readme.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
---> BasePixrayPredictor Predict
Using seed:
11914490315746453492
Working with z of shape (1, 256, 16, 16) = 65536 dimensions.
loaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth
VQLPIPSWithDiscriminator running with hinge loss.
Restored from models/vqgan_wikiart_16384.ckpt
Loaded CLIP RN50x4: 288x288 and 178.30M params
Loaded CLIP ViT-B/32: 224x224 and 151.28M params
Loaded CLIP ViT-B/16: 224x224 and 149.62M params
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.']
using custom losses: aesthetic
0it [00:00, ?it/s]
iter: 0, loss: 3.66, losses: 0.843, 0.0817, 0.98, 0.0605, 0.962, 0.0651, 0.67 (-0=>3.663)
0it [00:01, ?it/s]
iter: 10, loss: 3.07, losses: 0.711, 0.0857, 0.839, 0.0636, 0.816, 0.0634, 0.489 (-0=>3.068)
0it [00:26, ?it/s]
0it [00:50, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.75, losses: 0.646, 0.0879, 0.778, 0.0662, 0.741, 0.0637, 0.368 (-1=>2.728)
0it [00:01, ?it/s]
iter: 30, loss: 2.59, losses: 0.572, 0.0926, 0.728, 0.0686, 0.702, 0.0645, 0.363 (-0=>2.59)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.51, losses: 0.555, 0.0916, 0.717, 0.0697, 0.692, 0.0672, 0.319 (-2=>2.475)
0it [00:01, ?it/s]
iter: 50, loss: 2.44, losses: 0.543, 0.0893, 0.708, 0.0702, 0.669, 0.0684, 0.292 (-3=>2.401)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.38, losses: 0.534, 0.0881, 0.682, 0.0704, 0.662, 0.0671, 0.273 (-1=>2.305)
0it [00:01, ?it/s]
iter: 70, loss: 2.3, losses: 0.509, 0.0895, 0.671, 0.0727, 0.651, 0.0691, 0.238 (-5=>2.282)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.28, losses: 0.504, 0.0912, 0.66, 0.0709, 0.635, 0.07, 0.25 (-4=>2.264)
0it [00:01, ?it/s]
iter: 90, loss: 2.25, losses: 0.488, 0.0919, 0.66, 0.073, 0.623, 0.0719, 0.238 (-7=>2.207)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.27, losses: 0.496, 0.0932, 0.658, 0.0729, 0.624, 0.0717, 0.255 (-17=>2.207)
0it [00:01, ?it/s]
iter: 110, loss: 2.28, losses: 0.487, 0.0932, 0.667, 0.0716, 0.631, 0.0702, 0.255 (-8=>2.189)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.16, losses: 0.486, 0.0916, 0.633, 0.074, 0.609, 0.0727, 0.195 (-0=>2.16)
0it [00:01, ?it/s]
iter: 130, loss: 2.18, losses: 0.477, 0.0904, 0.641, 0.0747, 0.605, 0.0747, 0.222 (-10=>2.16)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 140, loss: 2.2, losses: 0.467, 0.0919, 0.647, 0.0748, 0.617, 0.0745, 0.223 (-20=>2.16)
0it [00:01, ?it/s]
iter: 150, loss: 2.19, losses: 0.473, 0.0926, 0.641, 0.0736, 0.61, 0.0746, 0.221 (-6=>2.156)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 160, loss: 2.18, losses: 0.484, 0.0902, 0.641, 0.0727, 0.614, 0.0736, 0.206 (-3=>2.136)
0it [00:01, ?it/s]
iter: 170, loss: 2.18, losses: 0.474, 0.0916, 0.638, 0.0722, 0.618, 0.0739, 0.21 (-6=>2.128)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 180, loss: 2.21, losses: 0.462, 0.0913, 0.652, 0.0741, 0.625, 0.0746, 0.228 (-16=>2.128)
0it [00:01, ?it/s]
iter: 190, loss: 2.19, losses: 0.473, 0.0909, 0.637, 0.074, 0.622, 0.0736, 0.219 (-7=>2.124)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 200, loss: 2.15, losses: 0.462, 0.0917, 0.636, 0.0755, 0.611, 0.0747, 0.198 (-2=>2.094)
0it [00:01, ?it/s]
iter: 210, loss: 2.19, losses: 0.461, 0.0909, 0.645, 0.0737, 0.618, 0.074, 0.225 (-12=>2.094)
0it [00:28, ?it/s]
---> BasePixrayPredictor Predict
Using seed:
3383883393288688316
Working with z of shape (1, 256, 16, 16) = 65536 dimensions.
loaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth
VQLPIPSWithDiscriminator running with hinge loss.
Restored from models/vqgan_wikiart_16384.ckpt
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 220, loss: 2.14, losses: 0.468, 0.0934, 0.629, 0.0744, 0.615, 0.0755, 0.18 (-22=>2.094)
0it [00:01, ?it/s]
Loaded CLIP RN50x4: 288x288 and 178.30M params
Loaded CLIP ViT-B/32: 224x224 and 151.28M params
Loaded CLIP ViT-B/16: 224x224 and 149.62M params
Caught SIGTERM, exiting...
Using device:
cuda:0
Optimising using:
Adam
Using text prompts:
['a day glo acrylic portrait of a cyberpunk empress, by Yasutomo Oka.']
using custom losses: aesthetic
0it [00:00, ?it/s]
iter: 0, loss: 3.7, losses: 0.837, 0.0829, 0.982, 0.0609, 0.96, 0.0644, 0.713 (-0=>3.7)
0it [00:01, ?it/s]
iter: 230, loss: 2.17, losses: 0.466, 0.0911, 0.639, 0.074, 0.617, 0.0744, 0.206 (-3=>2.08)
0it [00:28, ?it/s]
iter: 10, loss: 2.79, losses: 0.663, 0.0885, 0.791, 0.0675, 0.783, 0.0647, 0.328 (-0=>2.785)
0it [00:27, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 240, loss: 2.12, losses: 0.468, 0.0919, 0.631, 0.0749, 0.605, 0.0761, 0.178 (-13=>2.08)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 20, loss: 2.53, losses: 0.585, 0.0894, 0.71, 0.0687, 0.726, 0.0664, 0.287 (-0=>2.533)
0it [00:01, ?it/s]
iter: 250, loss: 2.14, losses: 0.468, 0.0914, 0.629, 0.073, 0.607, 0.075, 0.201 (-23=>2.08)
0it [00:28, ?it/s]
iter: 30, loss: 2.38, losses: 0.519, 0.0932, 0.678, 0.0723, 0.665, 0.0712, 0.281 (-1=>2.379)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 260, loss: 2.19, losses: 0.477, 0.0916, 0.638, 0.0751, 0.609, 0.0741, 0.22 (-33=>2.08)
0it [00:01, ?it/s]
Dropping learning rate
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 40, loss: 2.25, losses: 0.479, 0.0956, 0.659, 0.0732, 0.64, 0.0729, 0.231 (-0=>2.25)
0it [00:01, ?it/s]
iter: 270, loss: 2.07, losses: 0.451, 0.0942, 0.624, 0.076, 0.587, 0.077, 0.159 (-0=>2.069)
0it [00:28, ?it/s]
iter: 50, loss: 2.21, losses: 0.478, 0.0951, 0.65, 0.0719, 0.627, 0.0723, 0.22 (-0=>2.215)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 280, loss: 2.07, losses: 0.462, 0.0913, 0.611, 0.077, 0.592, 0.0763, 0.162 (-9=>2.051)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 60, loss: 2.22, losses: 0.482, 0.0947, 0.641, 0.0727, 0.629, 0.0734, 0.226 (-1=>2.198)
0it [00:01, ?it/s]
iter: 290, loss: 2.04, losses: 0.446, 0.0906, 0.612, 0.0752, 0.59, 0.0778, 0.144 (-0=>2.036)
0it [00:28, ?it/s]
iter: 70, loss: 2.18, losses: 0.463, 0.0964, 0.646, 0.0734, 0.621, 0.0748, 0.202 (-2=>2.149)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 300, loss: 2.11, losses: 0.443, 0.0911, 0.628, 0.074, 0.613, 0.075, 0.187 (-10=>2.036)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 80, loss: 2.18, losses: 0.47, 0.0946, 0.64, 0.0733, 0.613, 0.0755, 0.212 (-4=>2.104)
0it [00:01, ?it/s]
iter: 310, loss: 2.13, losses: 0.468, 0.0939, 0.632, 0.0744, 0.603, 0.0755, 0.184 (-20=>2.036)
0it [00:28, ?it/s]
iter: 90, loss: 2.08, losses: 0.451, 0.0943, 0.617, 0.0746, 0.598, 0.076, 0.174 (-0=>2.085)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 320, loss: 2.09, losses: 0.473, 0.093, 0.615, 0.0746, 0.595, 0.076, 0.163 (-30=>2.036)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 100, loss: 2.12, losses: 0.446, 0.0965, 0.626, 0.0738, 0.602, 0.078, 0.201 (-10=>2.085)
0it [00:01, ?it/s]
iter: 330, loss: 2.09, losses: 0.453, 0.091, 0.623, 0.0753, 0.596, 0.0769, 0.177 (-40=>2.036)
0it [00:28, ?it/s]
iter: 110, loss: 2.18, losses: 0.465, 0.0956, 0.631, 0.0748, 0.616, 0.0743, 0.225 (-20=>2.085)
0it [00:28, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 340, loss: 2.07, losses: 0.46, 0.0909, 0.608, 0.0768, 0.587, 0.0764, 0.174 (-50=>2.036)
0it [00:01, ?it/s]
0it [00:53, ?it/s]
0it [00:00, ?it/s]
iter: 120, loss: 2.13, losses: 0.449, 0.0964, 0.632, 0.074, 0.603, 0.0759, 0.199 (-2=>2.066)
0it [00:01, ?it/s]
iter: 350, finished (-60=>2.036)
0it [00:27, ?it/s]
0it [00:27, ?it/s]