Turn any description into wallpaper tiles
Run this model in Node.js with a few lines of code. First, install Replicate's Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Then import and set up the client:
import Replicate from "replicate";
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-tiler using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "dribnet/pixray-tiler:416d8c260a1181579a2dae2898ede45f91726ddaed13d2da7ea1482f3bf8c931",
  {
    input: {
      mirror: false,
      prompts: "colorful marble texture",
      pixelart: true,
      settings: "\n"
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Run this model in Python. First, install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Then import the client:
import replicate
Run dribnet/pixray-tiler using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "dribnet/pixray-tiler:416d8c260a1181579a2dae2898ede45f91726ddaed13d2da7ea1482f3bf8c931",
    input={
        "mirror": False,
        "prompts": "colorful marble texture",
        "pixelart": True,
        "settings": "\n"
    }
)

# The dribnet/pixray-tiler model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/dribnet/pixray-tiler/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
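Each item the model yields is an object with a "file" URL pointing at a PNG of the tile at that point in the run (see the example output further down this page). Below is a minimal sketch for saving those files locally; it assumes that output shape, and the filenames and the isinstance fallback are illustrative choices, not part of the model.
import urllib.request

import replicate

# Hypothetical end-to-end sketch: run the model, then save each yielded tile.
# Assumes each item is a dict with a "file" URL, matching the example
# prediction output shown further down this page.
output = replicate.run(
    "dribnet/pixray-tiler:416d8c260a1181579a2dae2898ede45f91726ddaed13d2da7ea1482f3bf8c931",
    input={"prompts": "colorful marble texture", "pixelart": True},
)

for i, item in enumerate(output):
    # Fall back to str(item) in case the client yields plain URLs instead of dicts.
    url = item["file"] if isinstance(item, dict) else str(item)
    filename = f"tile_{i:03d}.png"
    urllib.request.urlretrieve(url, filename)  # download the PNG to disk
    print(f"saved {filename}")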
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run dribnet/pixray-tiler using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "416d8c260a1181579a2dae2898ede45f91726ddaed13d2da7ea1482f3bf8c931",
    "input": {
      "mirror": false,
      "prompts": "colorful marble texture",
      "pixelart": true,
      "settings": "\\n"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
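The Prefer: wait header asks the API to hold the request open until the prediction finishes, but a run that takes several minutes can outlast it. Below is a hedged Python sketch of the alternative flow: create the prediction, then poll the urls.get endpoint that appears in the example response further down. The field names follow that response; the 10-second polling interval is an arbitrary choice.
import os
import time

import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

# Create the prediction without waiting for it to finish.
body = {
    "version": "416d8c260a1181579a2dae2898ede45f91726ddaed13d2da7ea1482f3bf8c931",
    "input": {"prompts": "colorful marble texture", "pixelart": True},
}
prediction = requests.post(
    "https://api.replicate.com/v1/predictions", headers=headers, json=body
).json()

# Poll the prediction's "get" URL until it reaches a terminal status.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(10)  # arbitrary polling interval
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["status"])
print(prediction["output"])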
Example output (the full prediction record returned by the API):
{
"completed_at": "2021-11-23T12:56:32.093772Z",
"created_at": "2021-11-23T12:44:48.587564Z",
"data_removed": false,
"error": null,
"id": "5oklpy3n3fasfnp2moeqzzil2u",
"input": {
"prompts": "colorful marble texture",
"pixelart": true
},
"logs": "---> BasePixrayPredictor Predict\nUsing seed:\n9379428758220384575\nRunning pixeldrawer with 64x64 grid\nAll CLIP models already loaded:\n['RN50', 'ViT-B/32', 'ViT-B/16']\n--> RN50 normal encoding colorful marble texture #pixelart\n--> ViT-B/32 normal encoding colorful marble texture #pixelart\n--> ViT-B/16 normal encoding colorful marble texture #pixelart\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['colorful marble texture #pixelart']\n\n0it [00:00, ?it/s]\niter: 0, loss: 2.71, losses: 0, 0.903, 0.0783, 0.808, 0.0476, 0.828, 0.0496 (-0=>2.715)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.53, losses: 0, 0.863, 0.0774, 0.739, 0.0482, 0.759, 0.0484 (-0=>2.535)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.44, losses: 0, 0.841, 0.0782, 0.7, 0.05, 0.723, 0.0483 (-0=>2.441)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.35, losses: 0, 0.803, 0.0771, 0.682, 0.0489, 0.69, 0.048 (-0=>2.349)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.37, losses: 0, 0.807, 0.0786, 0.683, 0.0485, 0.704, 0.0478 (-2=>2.322)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.34, losses: 0, 0.794, 0.0789, 0.674, 0.0495, 0.699, 0.0484 (-8=>2.315)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.31, losses: 0, 0.777, 0.0796, 0.667, 0.0494, 0.688, 0.0484 (-2=>2.281)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.31, losses: 0, 0.773, 0.079, 0.667, 0.0493, 0.69, 0.0493 (-4=>2.264)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.29, losses: 0, 0.763, 0.0803, 0.659, 0.0501, 0.683, 0.0499 (-8=>2.248)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.23, losses: 0, 0.738, 0.0787, 0.652, 0.0488, 0.663, 0.0489 (-0=>2.229)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.25, losses: 0, 0.753, 0.0792, 0.652, 0.0501, 0.671, 0.05 (-10=>2.229)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.27, losses: 0, 0.761, 0.0793, 0.654, 0.0494, 0.678, 0.0492 (-6=>2.221)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.26, losses: 0, 0.76, 0.0795, 0.655, 0.0489, 0.67, 0.049 (-16=>2.221)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.3, losses: 0, 0.774, 0.0778, 0.665, 0.0488, 0.687, 0.0486 (-8=>2.199)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.28, losses: 0, 0.763, 0.0793, 0.655, 0.0508, 0.68, 0.0504 (-18=>2.199)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.19, losses: 0, 0.721, 0.0778, 0.642, 0.05, 0.649, 0.0512 (-4=>2.189)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.23, losses: 0, 0.748, 0.0805, 0.643, 0.0506, 0.661, 0.0504 (-14=>2.189)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.22, losses: 0, 0.743, 0.0792, 0.644, 0.0501, 0.656, 0.0502 (-8=>2.176)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.17, losses: 0, 0.717, 0.078, 0.632, 0.0524, 0.637, 0.0522 (-0=>2.168)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.23, losses: 0, 0.742, 0.0798, 0.646, 0.0505, 0.664, 0.0506 (-10=>2.168)\n\n0it 
[00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.21, losses: 0, 0.74, 0.0779, 0.643, 0.0493, 0.653, 0.0507 (-4=>2.146)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.24, losses: 0, 0.747, 0.0793, 0.652, 0.0497, 0.665, 0.0501 (-4=>2.141)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.19, losses: 0, 0.732, 0.0775, 0.64, 0.0494, 0.642, 0.0506 (-14=>2.141)\n\n0it [00:00, ?it/s]\nDropping learning rate\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.27, losses: 0, 0.761, 0.0801, 0.661, 0.0499, 0.67, 0.0507 (-4=>2.162)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.24, losses: 0, 0.747, 0.079, 0.647, 0.0503, 0.665, 0.0506 (-2=>2.146)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.16, losses: 0, 0.707, 0.0794, 0.634, 0.0515, 0.634, 0.0523 (-2=>2.142)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.2, losses: 0, 0.73, 0.0807, 0.641, 0.0518, 0.647, 0.0515 (-12=>2.142)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.21, losses: 0, 0.73, 0.079, 0.645, 0.0495, 0.653, 0.051 (-22=>2.142)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.15, losses: 0, 0.71, 0.0778, 0.631, 0.0496, 0.626, 0.0539 (-32=>2.142)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.14, losses: 0, 0.696, 0.0792, 0.629, 0.0515, 0.631, 0.0521 (-0=>2.138)\n\n0it [00:00, ?it/s]\n\n0it [00:15, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-10=>2.138)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]",
"metrics": {
"total_time": 703.506208
},
"output": [
{
"file": "https://replicate.delivery/mgxm/99efe411-5ba5-4860-9d88-5b084ad2338d/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/648b9027-48b8-41f0-a674-41ac58aa03ea/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/efaa3800-46cb-4d99-a7ea-3c5040fce497/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/c6de75bf-e50b-484e-86d9-8fbc08fffb19/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3f18c624-e1e6-4b18-96bd-24df3be76557/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/18b1848a-87c5-4cd0-b27d-f5a2ff0b205a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/0f2cb309-5923-488c-8597-868c1c2bf0ed/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/fcff04df-76b5-476a-af2d-665f695e12d1/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/304cc3c1-f4f8-4ea7-989b-7c0962c19613/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/babdf857-1450-43de-9a19-019ca419674a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/b1d50b71-4c85-4aac-a35e-3ac8d237170e/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3978f96d-332c-4119-94df-9ed9c7a62e32/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3fb7c681-8a33-427d-ba88-4110a1081848/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/b74dfe2d-8780-4c6a-87cf-0df21912e409/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/1020888c-4dbf-4e8b-bad4-f975e8df0850/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/bf0df153-44c8-4e4f-8813-0d7625f09ce3/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/f20d0da8-b499-4dcf-a929-02248581c7be/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3aac5d71-876b-4cef-98fb-28d1bf65b053/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/979bf604-2422-4dc0-b02a-ebe582d10452/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/36369e07-f75d-4c7c-b76d-5664d02eef7f/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/56423b6f-49ed-432f-b31a-ac15c08c0c29/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/44d8bcbc-dd3d-4358-87d5-8e37d394fe4c/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/3db04cb4-f7f6-4821-a8cd-1840384cec0d/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/8dfd6065-06f6-4dcc-af0a-03113b0e8e8a/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/4e5c83a3-ed51-4adf-8637-b7f30a0388af/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/da00917d-f7a4-4dae-b56c-4ae973a81ece/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/11f76249-e321-448f-a1a7-cb34fd39d775/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/d28bed53-b90e-4faf-abe1-b93fcfb35c00/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/45b51831-8345-4ba9-9e64-645df7e88bbf/tempfile.png"
},
{
"file": "https://replicate.delivery/mgxm/f9d1eba2-2fb4-483f-8f43-f179c3e13d27/tempfile.png"
}
],
"started_at": "2022-01-18T17:34:12.394074Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/5oklpy3n3fasfnp2moeqzzil2u",
"cancel": "https://api.replicate.com/v1/predictions/5oklpy3n3fasfnp2moeqzzil2u/cancel"
},
"version": "03b32f4a28b311ac1a8b2eb63fdde6e42fda5aee7cfd4b53cfdcb5001a430c8d"
}
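The last URL in the output list above is the finished tile. Since the point of the model is wallpaper tiles, one quick way to eyeball how seamlessly a result wraps is to repeat it in a grid. A minimal sketch using requests and Pillow follows; the 3x3 grid and the output filename are arbitrary illustrative choices.
from io import BytesIO

import requests
from PIL import Image

# Use the final tile from the example output above; any of the URLs works.
tile_url = "https://replicate.delivery/mgxm/f9d1eba2-2fb4-483f-8f43-f179c3e13d27/tempfile.png"
tile = Image.open(BytesIO(requests.get(tile_url).content))

# Repeat the tile in a 3x3 grid so edge seams are easy to spot.
cols, rows = 3, 3
wallpaper = Image.new("RGB", (tile.width * cols, tile.height * rows))
for x in range(cols):
    for y in range(rows):
        wallpaper.paste(tile, (x * tile.width, y * tile.height))
wallpaper.save("wallpaper_preview.png")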
This example was created by a different version, dribnet/pixray-tiler:03b32f4a.
This model costs approximately $0.14 to run on Replicate, or 7 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia T4 GPU hardware. Predictions typically complete within 11 minutes. The predict time for this model varies significantly based on the inputs.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
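Given a cold boot on top of a typical 11-minute run, it can be more convenient to start the prediction, stash its id, and fetch the result later rather than block on a single call. Below is a hedged sketch using the Python client's predictions interface (predictions.create and predictions.get); the "later, even from another process" framing is the illustrative part.
import replicate

# Kick off the prediction without blocking; a cold model will boot first.
prediction = replicate.predictions.create(
    version="416d8c260a1181579a2dae2898ede45f91726ddaed13d2da7ea1482f3bf8c931",
    input={"prompts": "colorful marble texture", "pixelart": True},
)
print("started:", prediction.id, prediction.status)

# Later (even from another process), look the prediction up by id.
later = replicate.predictions.get(prediction.id)
print(later.status)
if later.status == "succeeded":
    print(later.output)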