dribnet/pixray-text2pixel-0x42

Uses pixray to generate an image from a text prompt

Image generation with CLIP + VQGAN / PixelDraw



Turn any description into pixel art

A pixray tool for 24x24 pixelart

Homage to the Pixel: text prompt to 6 color squares
Prediction

dribnet/pixray-text2pixel-0x42:d22600e086598aacf688b94b15eae093985e77352b0e4bb9ecf31b8214b9c7cc

Input

{
  "aspect": "widescreen",
  "prompts": "No bots. No Spam. Be Kind. ❤️",
  "quality": "better"
}
Install Replicate’s Node.js client library:

npm install replicate
Import and set up the client:

import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run dribnet/pixray-text2pixel-0x42 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "dribnet/pixray-text2pixel-0x42:d22600e086598aacf688b94b15eae093985e77352b0e4bb9ecf31b8214b9c7cc",
  {
    input: {
      aspect: "widescreen",
      prompts: "No bots. No Spam. Be Kind. ❤️",
      quality: "better"
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:

pip install replicate
Import the client:

import replicate
Run dribnet/pixray-text2pixel-0x42 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "dribnet/pixray-text2pixel-0x42:d22600e086598aacf688b94b15eae093985e77352b0e4bb9ecf31b8214b9c7cc",
    input={
        "aspect": "widescreen",
        "prompts": "No bots. No Spam. Be Kind. ❤️",
        "quality": "better"
    }
)

# The dribnet/pixray-text2pixel-0x42 model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
    # https://replicate.com/dribnet/pixray-text2pixel-0x42/api#output-schema
    print(item)
To learn more, take a look at the guide on getting started with Python.
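If you want to keep the generated frames rather than just print their URLs, a minimal sketch along these lines works, continuing from the output value in the snippet above. It assumes each output item is a dict with a "file" URL, matching the example prediction output shown further down, and uses only the Python standard library; the frames directory and filename pattern are arbitrary choices.

import os
import urllib.request

os.makedirs("frames", exist_ok=True)

# Assumption: each output item looks like
# {"file": "https://replicate.delivery/.../tempfile.png"},
# as in the example prediction output below.
for i, item in enumerate(output):
    url = item["file"]
    path = os.path.join("frames", f"frame_{i:03d}.png")
    urllib.request.urlretrieve(url, path)
    print(f"saved {path} from {url}")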
Run dribnet/pixray-text2pixel-0x42 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "dribnet/pixray-text2pixel-0x42:d22600e086598aacf688b94b15eae093985e77352b0e4bb9ecf31b8214b9c7cc",
    "input": {
      "aspect": "widescreen",
      "prompts": "No bots. No Spam. Be Kind. ❤️",
      "quality": "better"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
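The Prefer: wait header above blocks until the prediction finishes. If you would rather create the prediction and then poll it, the sketch below shows one way to do that in Python with only the standard library. The api_request helper, the 5-second poll interval, and the exact set of terminal statuses are illustrative assumptions; the endpoint, payload, and the status / urls.get fields mirror the curl example above and the example prediction under Output.

import json
import os
import time
import urllib.request

API_TOKEN = os.environ["REPLICATE_API_TOKEN"]

def api_request(url, payload=None):
    # Illustrative helper: send an authenticated JSON request and decode the response.
    data = json.dumps(payload).encode("utf-8") if payload is not None else None
    req = urllib.request.Request(
        url,
        data=data,
        headers={
            "Authorization": f"Bearer {API_TOKEN}",
            "Content-Type": "application/json",
        },
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

# Create the prediction with the same payload as the curl example above.
prediction = api_request(
    "https://api.replicate.com/v1/predictions",
    {
        "version": "dribnet/pixray-text2pixel-0x42:d22600e086598aacf688b94b15eae093985e77352b0e4bb9ecf31b8214b9c7cc",
        "input": {
            "aspect": "widescreen",
            "prompts": "No bots. No Spam. Be Kind. ❤️",
            "quality": "better",
        },
    },
)

# Poll the prediction's own "get" URL until it reaches a terminal status.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(5)
    prediction = api_request(prediction["urls"]["get"])

print(prediction["status"])
print(prediction.get("output"))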
Output
{ "completed_at": "2021-12-09T23:16:23.390059Z", "created_at": "2021-12-09T23:02:53.066894Z", "data_removed": false, "error": null, "id": "an6xkrotebdlbjvpudtbpvgn44", "input": { "aspect": "widescreen", "prompts": "No bots. No Spam. Be Kind. ❤️", "quality": "better" }, "logs": "---> BasePixrayPredictor Predict\nUsing seed:\n17099965292127082386\nreusing cached copy of model\nmodels/vqgan_imagenet_f16_16384.ckpt\nUsing device:\ncuda:0\nOptimising using:\nAdam\nUsing text prompts:\n['No bots. No Spam. Be Kind. ❤️']\n\n0it [00:00, ?it/s]\niter: 0, loss: 3.03, losses: 1.01, 0.0759, 0.922, 0.0467, 0.927, 0.0476 (-0=>3.031)\n\n0it [00:00, ?it/s]\n/root/.pyenv/versions/3.8.12/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n warnings.warn(\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 10, loss: 2.9, losses: 0.987, 0.0776, 0.853, 0.0491, 0.882, 0.0475 (-0=>2.897)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 20, loss: 2.83, losses: 0.95, 0.0811, 0.836, 0.0506, 0.866, 0.0474 (-0=>2.831)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 30, loss: 2.8, losses: 0.933, 0.0829, 0.829, 0.0524, 0.858, 0.047 (-0=>2.802)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 40, loss: 2.82, losses: 0.946, 0.0821, 0.835, 0.0502, 0.858, 0.0466 (-6=>2.797)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 50, loss: 2.81, losses: 0.946, 0.081, 0.832, 0.0503, 0.85, 0.0476 (-4=>2.774)\n\n0it [00:00, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 60, loss: 2.8, losses: 0.942, 0.0829, 0.828, 0.0508, 0.848, 0.047 (-6=>2.766)\n\n0it [00:00, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 70, loss: 2.74, losses: 0.924, 0.0836, 0.811, 0.0536, 0.823, 0.0495 (-0=>2.744)\n\n0it [00:00, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 80, loss: 2.73, losses: 0.92, 0.0824, 0.806, 0.053, 0.823, 0.0484 (-0=>2.733)\n\n0it [00:00, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 90, loss: 2.79, losses: 0.941, 0.0845, 0.827, 0.0501, 0.836, 0.0485 (-6=>2.712)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 100, loss: 2.75, losses: 0.934, 0.0837, 0.811, 0.0534, 0.822, 0.0502 (-8=>2.71)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 110, loss: 2.75, losses: 0.939, 0.0839, 0.814, 0.0523, 0.811, 0.0523 (-1=>2.69)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 120, loss: 2.73, losses: 0.932, 0.0875, 0.805, 0.054, 0.803, 0.0529 (-4=>2.677)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 130, loss: 2.66, losses: 0.906, 0.0857, 0.773, 0.0582, 0.779, 0.0553 (-0=>2.657)\n\n0it [00:00, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 140, loss: 2.64, losses: 0.901, 0.0876, 0.77, 0.0575, 0.769, 0.0555 (-0=>2.64)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 150, loss: 2.62, losses: 0.888, 0.0889, 0.764, 0.0594, 0.765, 0.0577 (-0=>2.623)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 160, loss: 2.7, losses: 0.924, 0.0864, 0.786, 0.0551, 0.793, 0.054 (-10=>2.623)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 170, loss: 2.62, losses: 0.889, 0.0884, 0.764, 0.0586, 0.766, 0.0565 (-0=>2.623)\n\n0it 
[00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 180, loss: 2.62, losses: 0.887, 0.0894, 0.758, 0.0599, 0.772, 0.0571 (-10=>2.623)\n\n0it [00:00, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 190, loss: 2.69, losses: 0.914, 0.0882, 0.784, 0.0573, 0.787, 0.0549 (-20=>2.623)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 200, loss: 2.69, losses: 0.91, 0.0882, 0.787, 0.0567, 0.788, 0.0562 (-30=>2.623)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 210, loss: 2.69, losses: 0.92, 0.0874, 0.786, 0.0578, 0.786, 0.0567 (-8=>2.611)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 220, loss: 2.7, losses: 0.926, 0.086, 0.793, 0.055, 0.792, 0.053 (-18=>2.611)\n\n0it [00:01, ?it/s]\nDropping learning rate\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 230, loss: 2.69, losses: 0.913, 0.0869, 0.79, 0.0564, 0.788, 0.0572 (-3=>2.608)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 240, loss: 2.68, losses: 0.912, 0.0865, 0.786, 0.0566, 0.781, 0.056 (-2=>2.6)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 250, loss: 2.7, losses: 0.915, 0.0866, 0.791, 0.056, 0.791, 0.0559 (-2=>2.589)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 260, loss: 2.69, losses: 0.916, 0.0875, 0.791, 0.0558, 0.789, 0.0553 (-4=>2.584)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 270, loss: 2.66, losses: 0.903, 0.0867, 0.776, 0.0581, 0.775, 0.0576 (-14=>2.584)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 280, loss: 2.64, losses: 0.894, 0.0885, 0.771, 0.0604, 0.765, 0.0585 (-24=>2.584)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 290, loss: 2.67, losses: 0.907, 0.0886, 0.782, 0.0586, 0.777, 0.0581 (-34=>2.584)\n\n0it [00:01, ?it/s]\n\n0it [00:11, ?it/s]\n\n0it [00:00, ?it/s]\niter: 300, finished (-44=>2.584)\n\n0it [00:00, ?it/s]\n\n0it [00:00, ?it/s]", "metrics": { "predict_time": 354.132628, "total_time": 810.323165 }, "output": [ { "file": "https://replicate.delivery/mgxm/c8a14106-b2b5-4cea-a544-1ee981906d53/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d8abc932-9a15-463b-adba-f360957cbf3c/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/45d81a3a-7d92-4854-ad50-dfb722ca2f8c/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/1a24659d-673c-403e-898a-e890301936d9/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2aed8101-92ac-47cf-a163-fd575f363c9a/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/eaafadf5-8f66-48da-8e1a-bc2e095ac30f/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/8ba75fdc-27ed-46cb-a8ad-ce05aef457cc/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/24a26c7a-76f7-49d6-8e49-cf16b37a2716/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/cfb23953-db88-4a5b-86f9-de09b4c522aa/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/22628133-060d-47c7-ba8d-ad3955291d27/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2788d274-4191-453f-a8d8-b00962a77dd2/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/0276a98b-8b0e-46b1-b4bb-99d54fcbc391/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/6d3094a7-3cb6-408b-934c-36eaa484bd9d/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b3b8229b-67f6-44cd-8a3c-d05da2e4a34e/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2aa6c916-d6d0-4e28-845a-82af9e40ac6a/tempfile.png" }, { "file": 
"https://replicate.delivery/mgxm/e683c5b1-09cd-4e73-9494-e0d9a678bbb9/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/8c695985-f61f-4619-bb46-39c34d0ad3ea/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/647210a9-8fbf-4d85-a812-05609e1e49db/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/d1c6179f-458a-4ba0-bc44-d9569411d1fd/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/e52fd62c-26a1-4841-9ec1-af3e0a64874b/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/00c8d6e7-bef0-444f-9cd3-9baa9836f6b0/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/eb88e34f-0cc7-4bb1-946b-24f5fd6a8850/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/87f421b8-4a60-4fd7-8851-ecfad32d80f5/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/18a8a02d-f0d3-425e-9638-90e88a01d1f3/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/2fd28ba8-68e7-4003-99c7-4083448bf864/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/b97b9d26-4d91-4f09-bfb5-6133b6b58432/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/6b4dfb49-f851-45e6-8c09-83e62782efb0/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/4e35db82-f63a-4ab7-9302-47b9a43110b1/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/60e2c4bb-e8e0-40ee-be60-b018d4f466f8/tempfile.png" }, { "file": "https://replicate.delivery/mgxm/3f86ab3b-225c-4056-b39e-50c26ee85a71/tempfile.png" } ], "started_at": "2021-12-09T23:10:29.257431Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/an6xkrotebdlbjvpudtbpvgn44", "cancel": "https://api.replicate.com/v1/predictions/an6xkrotebdlbjvpudtbpvgn44/cancel" }, "version": "d22600e086598aacf688b94b15eae093985e77352b0e4bb9ecf31b8214b9c7cc" }
Generated in 354.1 seconds
Want to make some of these yourself?
Run this model