stability-ai
/
stable-diffusion
A latent text-to-image diffusion model capable of generating photo-realistic images given any text input
Prediction
stability-ai/stable-diffusion:ac732df8
Input
- prompt
- multicolor hyperspace
- num_outputs
- "1"
- guidance_scale
- 7.5
- num_inference_steps
- 50
{ "prompt": "multicolor hyperspace", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { prompt: "multicolor hyperspace", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "prompt": "multicolor hyperspace", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "prompt": "multicolor hyperspace", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'prompt="multicolor hyperspace"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "prompt": "multicolor hyperspace", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-08-22T22:48:08Z", "created_at": "2022-08-22T22:47:56.927485Z", "data_removed": false, "error": "", "id": "qffyxjvmbvfdbao7vvv2oss2gq", "input": { "prompt": "multicolor hyperspace", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 }, "logs": "Using seed: 1841\r\n\r\n0it [00:00, ?it/s]\r\n2it [00:00, 10.34it/s]\r\n4it [00:00, 9.29it/s]\r\n6it [00:00, 9.19it/s]\r\n7it [00:00, 9.12it/s]\r\n8it [00:00, 9.29it/s]\r\n9it [00:00, 9.25it/s]\r\n10it [00:01, 9.12it/s]\r\n11it [00:01, 9.22it/s]\r\n12it [00:01, 9.17it/s]\r\n13it [00:01, 9.27it/s]\r\n14it [00:01, 9.35it/s]\r\n15it [00:01, 9.18it/s]\r\n16it [00:01, 9.23it/s]\r\n17it [00:01, 8.99it/s]\r\n19it [00:02, 9.45it/s]\r\n20it [00:02, 9.16it/s]\r\n21it [00:02, 9.19it/s]\r\n23it [00:02, 9.27it/s]\r\n24it [00:02, 9.23it/s]\r\n25it [00:02, 9.34it/s]\r\n26it [00:02, 9.29it/s]\r\n27it [00:02, 9.29it/s]\r\n28it [00:03, 9.20it/s]\r\n29it [00:03, 9.15it/s]\r\n30it [00:03, 9.23it/s]\r\n32it [00:03, 9.36it/s]\r\n33it [00:03, 9.26it/s]\r\n34it [00:03, 9.19it/s]\r\n35it [00:03, 9.24it/s]\r\n36it [00:03, 9.30it/s]\r\n37it [00:04, 9.13it/s]\r\n38it [00:04, 9.26it/s]\r\n39it [00:04, 9.22it/s]\r\n40it [00:04, 9.10it/s]\r\n41it [00:04, 9.06it/s]\r\n42it [00:04, 9.13it/s]\r\n43it [00:04, 9.24it/s]\r\n44it [00:04, 9.41it/s]\r\n45it [00:04, 9.31it/s]\r\n46it [00:04, 9.21it/s]\r\n47it [00:05, 8.73it/s]\r\n48it [00:05, 9.01it/s]\r\n49it [00:05, 9.25it/s]\r\n50it [00:05, 8.80it/s]\r\n51it [00:05, 8.90it/s]\r\n52it [00:05, 9.15it/s]\r\n53it [00:05, 8.95it/s]\r\n54it [00:05, 9.10it/s]\r\n55it [00:05, 9.17it/s]\r\n57it [00:06, 9.49it/s]\r\n58it [00:06, 9.48it/s]\r\n59it [00:06, 9.24it/s]\r\n60it [00:06, 9.33it/s]\r\n61it [00:06, 9.20it/s]\r\n62it [00:06, 9.26it/s]\r\n63it [00:06, 9.22it/s]\r\n64it [00:06, 9.26it/s]\r\n65it [00:07, 9.07it/s]\r\n67it [00:07, 9.31it/s]\r\n68it [00:07, 9.21it/s]\r\n69it [00:07, 9.30it/s]\r\n70it [00:07, 9.24it/s]\r\n71it [00:07, 9.21it/s]\r\n72it [00:07, 9.20it/s]\r\n73it 
[00:07, 9.21it/s]\r\n74it [00:08, 9.21it/s]\r\n75it [00:08, 9.24it/s]\r\n76it [00:08, 9.09it/s]\r\n78it [00:08, 9.14it/s]\r\n79it [00:08, 9.27it/s]\r\n80it [00:08, 9.17it/s]\r\n82it [00:08, 9.38it/s]\r\n83it [00:09, 9.15it/s]\r\n84it [00:09, 9.28it/s]\r\n85it [00:09, 9.22it/s]\r\n86it [00:09, 9.15it/s]\r\n87it [00:09, 9.37it/s]\r\n88it [00:09, 9.17it/s]\r\n89it [00:09, 9.28it/s]\r\n90it [00:09, 9.24it/s]\r\n91it [00:09, 9.21it/s]\r\n92it [00:09, 9.04it/s]\r\n93it [00:10, 9.28it/s]\r\n94it [00:10, 9.34it/s]\r\n95it [00:10, 9.25it/s]\r\n96it [00:10, 9.18it/s]\r\n97it [00:10, 9.13it/s]\r\n98it [00:10, 9.12it/s]\r\n99it [00:10, 9.04it/s]\r\n100it [00:10, 9.21it/s]\r\n100it [00:10, 9.22it/s]", "metrics": { "predict_time": 11, "total_time": 11.072515 }, "output": [ "https://replicate.delivery/mgxm/980bca24-f8d8-41bb-9f52-fe84bca9c251/out-0.png" ], "started_at": "2022-08-22T22:47:57Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/qffyxjvmbvfdbao7vvv2oss2gq", "cancel": "https://api.replicate.com/v1/predictions/qffyxjvmbvfdbao7vvv2oss2gq/cancel" }, "version": "d70beb400d223e6432425a5299910329c6050c6abcf97b8c70537d6a1fcb269a" }
Generated inUsing seed: 1841 0it [00:00, ?it/s] 2it [00:00, 10.34it/s] 4it [00:00, 9.29it/s] 6it [00:00, 9.19it/s] 7it [00:00, 9.12it/s] 8it [00:00, 9.29it/s] 9it [00:00, 9.25it/s] 10it [00:01, 9.12it/s] 11it [00:01, 9.22it/s] 12it [00:01, 9.17it/s] 13it [00:01, 9.27it/s] 14it [00:01, 9.35it/s] 15it [00:01, 9.18it/s] 16it [00:01, 9.23it/s] 17it [00:01, 8.99it/s] 19it [00:02, 9.45it/s] 20it [00:02, 9.16it/s] 21it [00:02, 9.19it/s] 23it [00:02, 9.27it/s] 24it [00:02, 9.23it/s] 25it [00:02, 9.34it/s] 26it [00:02, 9.29it/s] 27it [00:02, 9.29it/s] 28it [00:03, 9.20it/s] 29it [00:03, 9.15it/s] 30it [00:03, 9.23it/s] 32it [00:03, 9.36it/s] 33it [00:03, 9.26it/s] 34it [00:03, 9.19it/s] 35it [00:03, 9.24it/s] 36it [00:03, 9.30it/s] 37it [00:04, 9.13it/s] 38it [00:04, 9.26it/s] 39it [00:04, 9.22it/s] 40it [00:04, 9.10it/s] 41it [00:04, 9.06it/s] 42it [00:04, 9.13it/s] 43it [00:04, 9.24it/s] 44it [00:04, 9.41it/s] 45it [00:04, 9.31it/s] 46it [00:04, 9.21it/s] 47it [00:05, 8.73it/s] 48it [00:05, 9.01it/s] 49it [00:05, 9.25it/s] 50it [00:05, 8.80it/s] 51it [00:05, 8.90it/s] 52it [00:05, 9.15it/s] 53it [00:05, 8.95it/s] 54it [00:05, 9.10it/s] 55it [00:05, 9.17it/s] 57it [00:06, 9.49it/s] 58it [00:06, 9.48it/s] 59it [00:06, 9.24it/s] 60it [00:06, 9.33it/s] 61it [00:06, 9.20it/s] 62it [00:06, 9.26it/s] 63it [00:06, 9.22it/s] 64it [00:06, 9.26it/s] 65it [00:07, 9.07it/s] 67it [00:07, 9.31it/s] 68it [00:07, 9.21it/s] 69it [00:07, 9.30it/s] 70it [00:07, 9.24it/s] 71it [00:07, 9.21it/s] 72it [00:07, 9.20it/s] 73it [00:07, 9.21it/s] 74it [00:08, 9.21it/s] 75it [00:08, 9.24it/s] 76it [00:08, 9.09it/s] 78it [00:08, 9.14it/s] 79it [00:08, 9.27it/s] 80it [00:08, 9.17it/s] 82it [00:08, 9.38it/s] 83it [00:09, 9.15it/s] 84it [00:09, 9.28it/s] 85it [00:09, 9.22it/s] 86it [00:09, 9.15it/s] 87it [00:09, 9.37it/s] 88it [00:09, 9.17it/s] 89it [00:09, 9.28it/s] 90it [00:09, 9.24it/s] 91it [00:09, 9.21it/s] 92it [00:09, 9.04it/s] 93it [00:10, 9.28it/s] 94it [00:10, 9.34it/s] 95it [00:10, 9.25it/s] 
96it [00:10, 9.18it/s] 97it [00:10, 9.13it/s] 98it [00:10, 9.12it/s] 99it [00:10, 9.04it/s] 100it [00:10, 9.21it/s] 100it [00:10, 9.22it/s]
Prediction
stability-ai/stable-diffusion:ac732df8
Input
- prompt
- a gentleman otter in a 19th century portrait
- num_outputs
- 1
- guidance_scale
- 7.5
- num_inference_steps
- 100
{ "prompt": "a gentleman otter in a 19th century portrait", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 100 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { prompt: "a gentleman otter in a 19th century portrait", num_outputs: 1, guidance_scale: 7.5, num_inference_steps: 100 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "prompt": "a gentleman otter in a 19th century portrait", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 100 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "prompt": "a gentleman otter in a 19th century portrait", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 100 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'prompt="a gentleman otter in a 19th century portrait"' \ -i 'num_outputs=1' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=100'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "prompt": "a gentleman otter in a 19th century portrait", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 100 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-08-22T22:53:49.771482Z", "created_at": "2022-08-22T22:53:37.724733Z", "data_removed": false, "error": null, "id": "rktab6ur7ve3dfuvh7efbo3j54", "input": { "prompt": "a gentleman otter in a 19th century portrait", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 100 }, "logs": "Using seed: 45586\n\n0it [00:00, ?it/s]\n2it [00:00, 9.55it/s]\n3it [00:00, 8.54it/s]\n5it [00:00, 9.26it/s]\n6it [00:00, 9.27it/s]\n7it [00:00, 9.27it/s]\n8it [00:00, 9.26it/s]\n9it [00:00, 9.23it/s]\n10it [00:01, 9.12it/s]\n11it [00:01, 9.32it/s]\n12it [00:01, 9.31it/s]\n13it [00:01, 9.32it/s]\n14it [00:01, 9.30it/s]\n15it [00:01, 9.13it/s]\n16it [00:01, 9.24it/s]\n17it [00:01, 9.34it/s]\n18it [00:01, 9.29it/s]\n19it [00:02, 9.30it/s]\n20it [00:02, 9.23it/s]\n21it [00:02, 9.26it/s]\n22it [00:02, 9.29it/s]\n23it [00:02, 9.28it/s]\n24it [00:02, 9.13it/s]\n25it [00:02, 9.33it/s]\n26it [00:02, 9.30it/s]\n27it [00:02, 9.32it/s]\n28it [00:03, 9.29it/s]\n29it [00:03, 9.19it/s]\n30it [00:03, 9.27it/s]\n31it [00:03, 9.30it/s]\n32it [00:03, 9.31it/s]\n33it [00:03, 9.20it/s]\n34it [00:03, 9.33it/s]\n35it [00:03, 9.25it/s]\n36it [00:03, 9.34it/s]\n37it [00:03, 9.30it/s]\n38it [00:04, 9.16it/s]\n39it [00:04, 9.31it/s]\n40it [00:04, 9.32it/s]\n41it [00:04, 9.29it/s]\n42it [00:04, 9.32it/s]\n43it [00:04, 9.12it/s]\n44it [00:04, 9.31it/s]\n45it [00:04, 9.35it/s]\n46it [00:04, 9.33it/s]\n47it [00:05, 9.29it/s]\n48it [00:05, 9.33it/s]\n49it [00:05, 9.33it/s]\n50it [00:05, 9.26it/s]\n51it [00:05, 9.27it/s]\n52it [00:05, 9.19it/s]\n53it [00:05, 9.30it/s]\n54it [00:05, 9.20it/s]\n55it [00:05, 9.27it/s]\n56it [00:06, 9.31it/s]\n57it [00:06, 9.30it/s]\n58it [00:06, 9.23it/s]\n59it [00:06, 9.30it/s]\n60it [00:06, 9.28it/s]\n61it [00:06, 9.26it/s]\n62it [00:06, 9.34it/s]\n63it [00:06, 9.13it/s]\n64it [00:06, 9.36it/s]\n65it [00:07, 9.33it/s]\n66it [00:07, 9.32it/s]\n67it [00:07, 9.31it/s]\n68it [00:07, 9.29it/s]\n69it [00:07, 9.32it/s]\n70it [00:07, 9.22it/s]\n71it [00:07, 
9.30it/s]\n72it [00:07, 9.31it/s]\n73it [00:07, 9.30it/s]\n74it [00:07, 9.31it/s]\n75it [00:08, 9.31it/s]\n76it [00:08, 9.29it/s]\n77it [00:08, 9.20it/s]\n78it [00:08, 9.30it/s]\n79it [00:08, 9.31it/s]\n80it [00:08, 9.18it/s]\n81it [00:08, 9.27it/s]\n82it [00:08, 9.32it/s]\n83it [00:08, 9.29it/s]\n84it [00:09, 9.24it/s]\n85it [00:09, 9.26it/s]\n86it [00:09, 9.28it/s]\n87it [00:09, 9.26it/s]\n88it [00:09, 9.28it/s]\n89it [00:09, 9.12it/s]\n90it [00:09, 9.32it/s]\n91it [00:09, 9.32it/s]\n92it [00:09, 8.92it/s]\n94it [00:10, 9.32it/s]\n95it [00:10, 9.37it/s]\n96it [00:10, 9.33it/s]\n97it [00:10, 9.34it/s]\n98it [00:10, 9.33it/s]\n99it [00:10, 9.33it/s]\n100it [00:10, 9.31it/s]\n100it [00:10, 9.27it/s]", "metrics": { "predict_time": 11.81052, "total_time": 12.046749 }, "output": [ "https://replicate.delivery/mgxm/8d6a8069-b91f-4e61-8136-fa7c0775532c/out-0.png" ], "started_at": "2022-08-22T22:53:37.960962Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/rktab6ur7ve3dfuvh7efbo3j54", "cancel": "https://api.replicate.com/v1/predictions/rktab6ur7ve3dfuvh7efbo3j54/cancel" }, "version": "d70beb400d223e6432425a5299910329c6050c6abcf97b8c70537d6a1fcb269a" }
Generated inUsing seed: 45586 0it [00:00, ?it/s] 2it [00:00, 9.55it/s] 3it [00:00, 8.54it/s] 5it [00:00, 9.26it/s] 6it [00:00, 9.27it/s] 7it [00:00, 9.27it/s] 8it [00:00, 9.26it/s] 9it [00:00, 9.23it/s] 10it [00:01, 9.12it/s] 11it [00:01, 9.32it/s] 12it [00:01, 9.31it/s] 13it [00:01, 9.32it/s] 14it [00:01, 9.30it/s] 15it [00:01, 9.13it/s] 16it [00:01, 9.24it/s] 17it [00:01, 9.34it/s] 18it [00:01, 9.29it/s] 19it [00:02, 9.30it/s] 20it [00:02, 9.23it/s] 21it [00:02, 9.26it/s] 22it [00:02, 9.29it/s] 23it [00:02, 9.28it/s] 24it [00:02, 9.13it/s] 25it [00:02, 9.33it/s] 26it [00:02, 9.30it/s] 27it [00:02, 9.32it/s] 28it [00:03, 9.29it/s] 29it [00:03, 9.19it/s] 30it [00:03, 9.27it/s] 31it [00:03, 9.30it/s] 32it [00:03, 9.31it/s] 33it [00:03, 9.20it/s] 34it [00:03, 9.33it/s] 35it [00:03, 9.25it/s] 36it [00:03, 9.34it/s] 37it [00:03, 9.30it/s] 38it [00:04, 9.16it/s] 39it [00:04, 9.31it/s] 40it [00:04, 9.32it/s] 41it [00:04, 9.29it/s] 42it [00:04, 9.32it/s] 43it [00:04, 9.12it/s] 44it [00:04, 9.31it/s] 45it [00:04, 9.35it/s] 46it [00:04, 9.33it/s] 47it [00:05, 9.29it/s] 48it [00:05, 9.33it/s] 49it [00:05, 9.33it/s] 50it [00:05, 9.26it/s] 51it [00:05, 9.27it/s] 52it [00:05, 9.19it/s] 53it [00:05, 9.30it/s] 54it [00:05, 9.20it/s] 55it [00:05, 9.27it/s] 56it [00:06, 9.31it/s] 57it [00:06, 9.30it/s] 58it [00:06, 9.23it/s] 59it [00:06, 9.30it/s] 60it [00:06, 9.28it/s] 61it [00:06, 9.26it/s] 62it [00:06, 9.34it/s] 63it [00:06, 9.13it/s] 64it [00:06, 9.36it/s] 65it [00:07, 9.33it/s] 66it [00:07, 9.32it/s] 67it [00:07, 9.31it/s] 68it [00:07, 9.29it/s] 69it [00:07, 9.32it/s] 70it [00:07, 9.22it/s] 71it [00:07, 9.30it/s] 72it [00:07, 9.31it/s] 73it [00:07, 9.30it/s] 74it [00:07, 9.31it/s] 75it [00:08, 9.31it/s] 76it [00:08, 9.29it/s] 77it [00:08, 9.20it/s] 78it [00:08, 9.30it/s] 79it [00:08, 9.31it/s] 80it [00:08, 9.18it/s] 81it [00:08, 9.27it/s] 82it [00:08, 9.32it/s] 83it [00:08, 9.29it/s] 84it [00:09, 9.24it/s] 85it [00:09, 9.26it/s] 86it [00:09, 9.28it/s] 87it [00:09, 9.26it/s] 
88it [00:09, 9.28it/s] 89it [00:09, 9.12it/s] 90it [00:09, 9.32it/s] 91it [00:09, 9.32it/s] 92it [00:09, 8.92it/s] 94it [00:10, 9.32it/s] 95it [00:10, 9.37it/s] 96it [00:10, 9.33it/s] 97it [00:10, 9.34it/s] 98it [00:10, 9.33it/s] 99it [00:10, 9.33it/s] 100it [00:10, 9.31it/s] 100it [00:10, 9.27it/s]
Prediction
stability-ai/stable-diffusion:ac732df8
Input
- prompt
- pencil sketch of robots playing poker
- num_outputs
- "1"
- guidance_scale
- 7.5
- num_inference_steps
- 100
{ "prompt": "pencil sketch of robots playing poker", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 100 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { prompt: "pencil sketch of robots playing poker", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 100 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "prompt": "pencil sketch of robots playing poker", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 100 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "prompt": "pencil sketch of robots playing poker", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 100 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'prompt="pencil sketch of robots playing poker"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=100'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "prompt": "pencil sketch of robots playing poker", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 100 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-08-23T00:17:13.340058Z", "created_at": "2022-08-23T00:17:01.433071Z", "data_removed": false, "error": null, "id": "bhyzpqdgnbhsljpnxp45hbv5ay", "input": { "prompt": "pencil sketch of robots playing poker", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 100 }, "logs": "Using seed: 52932\n\n0it [00:00, ?it/s]\n2it [00:00, 9.74it/s]\n3it [00:00, 9.69it/s]\n4it [00:00, 9.13it/s]\n5it [00:00, 9.39it/s]\n6it [00:00, 9.45it/s]\n7it [00:00, 9.32it/s]\n8it [00:00, 8.85it/s]\n10it [00:01, 9.34it/s]\n11it [00:01, 9.37it/s]\n12it [00:01, 9.22it/s]\n13it [00:01, 9.39it/s]\n14it [00:01, 9.19it/s]\n15it [00:01, 9.37it/s]\n16it [00:01, 9.13it/s]\n18it [00:01, 9.37it/s]\n19it [00:02, 9.36it/s]\n20it [00:02, 8.97it/s]\n22it [00:02, 9.38it/s]\n23it [00:02, 9.27it/s]\n24it [00:02, 9.34it/s]\n25it [00:02, 9.14it/s]\n26it [00:02, 9.28it/s]\n27it [00:02, 9.33it/s]\n28it [00:03, 9.21it/s]\n29it [00:03, 9.13it/s]\n31it [00:03, 9.32it/s]\n32it [00:03, 9.32it/s]\n33it [00:03, 9.10it/s]\n34it [00:03, 9.24it/s]\n35it [00:03, 9.36it/s]\n36it [00:03, 9.33it/s]\n37it [00:03, 9.12it/s]\n38it [00:04, 9.35it/s]\n39it [00:04, 9.33it/s]\n40it [00:04, 9.22it/s]\n41it [00:04, 9.07it/s]\n42it [00:04, 9.30it/s]\n43it [00:04, 9.26it/s]\n44it [00:04, 9.36it/s]\n45it [00:04, 9.31it/s]\n46it [00:04, 9.08it/s]\n47it [00:05, 9.33it/s]\n48it [00:05, 9.33it/s]\n49it [00:05, 9.32it/s]\n50it [00:05, 9.28it/s]\n51it [00:05, 9.27it/s]\n52it [00:05, 9.01it/s]\n54it [00:05, 9.26it/s]\n55it [00:05, 9.28it/s]\n56it [00:06, 9.20it/s]\n57it [00:06, 9.27it/s]\n58it [00:06, 9.26it/s]\n59it [00:06, 9.14it/s]\n60it [00:06, 9.36it/s]\n61it [00:06, 9.19it/s]\n62it [00:06, 9.25it/s]\n63it [00:06, 9.32it/s]\n64it [00:06, 9.31it/s]\n65it [00:07, 9.25it/s]\n66it [00:07, 9.07it/s]\n68it [00:07, 9.34it/s]\n69it [00:07, 9.28it/s]\n70it [00:07, 9.18it/s]\n71it [00:07, 9.24it/s]\n72it [00:07, 9.24it/s]\n73it [00:07, 9.29it/s]\n74it [00:07, 9.22it/s]\n75it [00:08, 9.07it/s]\n76it [00:08, 
9.31it/s]\n77it [00:08, 9.33it/s]\n78it [00:08, 9.04it/s]\n79it [00:08, 9.11it/s]\n80it [00:08, 9.22it/s]\n81it [00:08, 9.32it/s]\n82it [00:08, 9.34it/s]\n83it [00:08, 9.21it/s]\n84it [00:09, 9.23it/s]\n85it [00:09, 9.29it/s]\n86it [00:09, 9.27it/s]\n87it [00:09, 9.30it/s]\n88it [00:09, 9.15it/s]\n89it [00:09, 9.36it/s]\n90it [00:09, 9.29it/s]\n91it [00:09, 9.29it/s]\n92it [00:09, 8.94it/s]\n94it [00:10, 9.35it/s]\n95it [00:10, 9.31it/s]\n96it [00:10, 9.29it/s]\n97it [00:10, 9.29it/s]\n98it [00:10, 9.01it/s]\n100it [00:10, 9.34it/s]\n100it [00:10, 9.26it/s]", "metrics": { "predict_time": 11.67104, "total_time": 11.906987 }, "output": [ "https://replicate.delivery/mgxm/891fd1f9-302e-4d1f-a462-c0c2624ffbca/out-0.png" ], "started_at": "2022-08-23T00:17:01.669018Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/bhyzpqdgnbhsljpnxp45hbv5ay", "cancel": "https://api.replicate.com/v1/predictions/bhyzpqdgnbhsljpnxp45hbv5ay/cancel" }, "version": "714ee02be40e197b6d107bcf1fa0533e876ccc727122e7dc15a62a428dcda125" }
Generated inUsing seed: 52932 0it [00:00, ?it/s] 2it [00:00, 9.74it/s] 3it [00:00, 9.69it/s] 4it [00:00, 9.13it/s] 5it [00:00, 9.39it/s] 6it [00:00, 9.45it/s] 7it [00:00, 9.32it/s] 8it [00:00, 8.85it/s] 10it [00:01, 9.34it/s] 11it [00:01, 9.37it/s] 12it [00:01, 9.22it/s] 13it [00:01, 9.39it/s] 14it [00:01, 9.19it/s] 15it [00:01, 9.37it/s] 16it [00:01, 9.13it/s] 18it [00:01, 9.37it/s] 19it [00:02, 9.36it/s] 20it [00:02, 8.97it/s] 22it [00:02, 9.38it/s] 23it [00:02, 9.27it/s] 24it [00:02, 9.34it/s] 25it [00:02, 9.14it/s] 26it [00:02, 9.28it/s] 27it [00:02, 9.33it/s] 28it [00:03, 9.21it/s] 29it [00:03, 9.13it/s] 31it [00:03, 9.32it/s] 32it [00:03, 9.32it/s] 33it [00:03, 9.10it/s] 34it [00:03, 9.24it/s] 35it [00:03, 9.36it/s] 36it [00:03, 9.33it/s] 37it [00:03, 9.12it/s] 38it [00:04, 9.35it/s] 39it [00:04, 9.33it/s] 40it [00:04, 9.22it/s] 41it [00:04, 9.07it/s] 42it [00:04, 9.30it/s] 43it [00:04, 9.26it/s] 44it [00:04, 9.36it/s] 45it [00:04, 9.31it/s] 46it [00:04, 9.08it/s] 47it [00:05, 9.33it/s] 48it [00:05, 9.33it/s] 49it [00:05, 9.32it/s] 50it [00:05, 9.28it/s] 51it [00:05, 9.27it/s] 52it [00:05, 9.01it/s] 54it [00:05, 9.26it/s] 55it [00:05, 9.28it/s] 56it [00:06, 9.20it/s] 57it [00:06, 9.27it/s] 58it [00:06, 9.26it/s] 59it [00:06, 9.14it/s] 60it [00:06, 9.36it/s] 61it [00:06, 9.19it/s] 62it [00:06, 9.25it/s] 63it [00:06, 9.32it/s] 64it [00:06, 9.31it/s] 65it [00:07, 9.25it/s] 66it [00:07, 9.07it/s] 68it [00:07, 9.34it/s] 69it [00:07, 9.28it/s] 70it [00:07, 9.18it/s] 71it [00:07, 9.24it/s] 72it [00:07, 9.24it/s] 73it [00:07, 9.29it/s] 74it [00:07, 9.22it/s] 75it [00:08, 9.07it/s] 76it [00:08, 9.31it/s] 77it [00:08, 9.33it/s] 78it [00:08, 9.04it/s] 79it [00:08, 9.11it/s] 80it [00:08, 9.22it/s] 81it [00:08, 9.32it/s] 82it [00:08, 9.34it/s] 83it [00:08, 9.21it/s] 84it [00:09, 9.23it/s] 85it [00:09, 9.29it/s] 86it [00:09, 9.27it/s] 87it [00:09, 9.30it/s] 88it [00:09, 9.15it/s] 89it [00:09, 9.36it/s] 90it [00:09, 9.29it/s] 91it [00:09, 9.29it/s] 92it [00:09, 8.94it/s] 
94it [00:10, 9.35it/s] 95it [00:10, 9.31it/s] 96it [00:10, 9.29it/s] 97it [00:10, 9.29it/s] 98it [00:10, 9.01it/s] 100it [00:10, 9.34it/s] 100it [00:10, 9.26it/s]
Prediction
stability-ai/stable-diffusion:ac732df8
Input
- prompt
- phase shift into an era of human+AI art collab
- num_outputs
- "1"
- guidance_scale
- 7.5
- num_inference_steps
- "301"
{ "prompt": "phase shift into an era of human+AI art collab", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": "301" }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { prompt: "phase shift into an era of human+AI art collab", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: "301" } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client:import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "prompt": "phase shift into an era of human+AI art collab", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": "301" } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "prompt": "phase shift into an era of human+AI art collab", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": "301" } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog:brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'prompt="phase shift into an era of human+AI art collab"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps="301"'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "prompt": "phase shift into an era of human+AI art collab", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": "301" } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-08-23T00:26:28Z", "created_at": "2022-08-23T00:25:55.134698Z", "data_removed": false, "error": "", "id": "rjcl54wakbbrfcajzllmidreya", "input": { "prompt": "phase shift into an era of human+AI art collab", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": "301" }, "logs": "Using seed: 48356\r\n\r\n0it [00:00, ?it/s]\r\n2it [00:00, 9.30it/s]\r\n3it [00:00, 8.75it/s]\r\n4it [00:00, 9.08it/s]\r\n5it [00:00, 8.94it/s]\r\n6it [00:00, 8.71it/s]\r\n8it [00:00, 9.05it/s]\r\n9it [00:00, 9.06it/s]\r\n11it [00:01, 9.47it/s]\r\n12it [00:01, 9.07it/s]\r\n14it [00:01, 9.23it/s]\r\n15it [00:01, 9.13it/s]\r\n16it [00:01, 8.91it/s]\r\n18it [00:01, 9.37it/s]\r\n19it [00:02, 9.02it/s]\r\n20it [00:02, 9.01it/s]\r\n21it [00:02, 9.23it/s]\r\n22it [00:02, 8.90it/s]\r\n24it [00:02, 9.47it/s]\r\n25it [00:02, 9.42it/s]\r\n26it [00:02, 9.30it/s]\r\n27it [00:02, 9.07it/s]\r\n28it [00:03, 9.25it/s]\r\n29it [00:03, 9.30it/s]\r\n30it [00:03, 9.05it/s]\r\n31it [00:03, 9.10it/s]\r\n32it [00:03, 9.06it/s]\r\n33it [00:03, 9.31it/s]\r\n34it [00:03, 9.34it/s]\r\n35it [00:03, 9.25it/s]\r\n36it [00:03, 9.09it/s]\r\n37it [00:04, 9.16it/s]\r\n38it [00:04, 9.39it/s]\r\n39it [00:04, 9.09it/s]\r\n40it [00:04, 8.47it/s]\r\n42it [00:04, 8.98it/s]\r\n44it [00:04, 9.19it/s]\r\n45it [00:04, 9.31it/s]\r\n46it [00:05, 8.86it/s]\r\n47it [00:05, 9.04it/s]\r\n48it [00:05, 9.13it/s]\r\n49it [00:05, 8.96it/s]\r\n51it [00:05, 9.37it/s]\r\n52it [00:05, 9.38it/s]\r\n53it [00:05, 9.42it/s]\r\n54it [00:05, 9.42it/s]\r\n55it [00:06, 9.05it/s]\r\n56it [00:06, 9.01it/s]\r\n57it [00:06, 9.14it/s]\r\n58it [00:06, 9.14it/s]\r\n60it [00:06, 9.23it/s]\r\n62it [00:06, 9.23it/s]\r\n63it [00:06, 9.28it/s]\r\n65it [00:07, 9.36it/s]\r\n66it [00:07, 8.99it/s]\r\n68it [00:07, 9.22it/s]\r\n69it [00:07, 9.11it/s]\r\n70it [00:07, 9.27it/s]\r\n71it [00:07, 9.36it/s]\r\n72it [00:07, 9.13it/s]\r\n74it [00:08, 9.22it/s]\r\n75it [00:08, 9.12it/s]\r\n77it [00:08, 9.38it/s]\r\n78it [00:08, 8.97it/s]\r\n79it 
[00:08, 9.14it/s]\r\n81it [00:08, 9.24it/s]\r\n82it [00:08, 9.26it/s]\r\n83it [00:09, 9.21it/s]\r\n84it [00:09, 8.90it/s]\r\n85it [00:09, 9.16it/s]\r\n87it [00:09, 9.47it/s]\r\n88it [00:09, 9.24it/s]\r\n89it [00:09, 9.16it/s]\r\n91it [00:09, 9.20it/s]\r\n92it [00:10, 8.66it/s]\r\n94it [00:10, 9.38it/s]\r\n95it [00:10, 9.33it/s]\r\n96it [00:10, 9.14it/s]\r\n97it [00:10, 8.78it/s]\r\n98it [00:10, 8.86it/s]\r\n99it [00:10, 8.95it/s]\r\n100it [00:10, 9.15it/s]\r\n102it [00:11, 9.40it/s]\r\n103it [00:11, 8.88it/s]\r\n104it [00:11, 8.90it/s]\r\n105it [00:11, 9.09it/s]\r\n106it [00:11, 9.32it/s]\r\n107it [00:11, 9.33it/s]\r\n108it [00:11, 8.95it/s]\r\n110it [00:12, 8.96it/s]\r\n112it [00:12, 9.30it/s]\r\n113it [00:12, 9.25it/s]\r\n114it [00:12, 9.19it/s]\r\n115it [00:12, 9.17it/s]\r\n117it [00:12, 9.01it/s]\r\n119it [00:12, 9.34it/s]\r\n120it [00:13, 9.23it/s]\r\n121it [00:13, 9.31it/s]\r\n122it [00:13, 9.16it/s]\r\n123it [00:13, 8.96it/s]\r\n124it [00:13, 9.14it/s]\r\n126it [00:13, 9.35it/s]\r\n128it [00:13, 9.47it/s]\r\n129it [00:14, 9.06it/s]\r\n130it [00:14, 9.22it/s]\r\n131it [00:14, 9.27it/s]\r\n132it [00:14, 9.15it/s]\r\n133it [00:14, 9.29it/s]\r\n134it [00:14, 9.26it/s]\r\n135it [00:14, 9.20it/s]\r\n137it [00:14, 9.34it/s]\r\n138it [00:15, 9.29it/s]\r\n140it [00:15, 9.31it/s]\r\n141it [00:15, 9.08it/s]\r\n142it [00:15, 9.19it/s]\r\n143it [00:15, 9.18it/s]\r\n145it [00:15, 9.34it/s]\r\n146it [00:15, 9.37it/s]\r\n147it [00:16, 9.26it/s]\r\n148it [00:16, 9.34it/s]\r\n150it [00:16, 9.20it/s]\r\n152it [00:16, 9.35it/s]\r\n154it [00:16, 9.44it/s]\r\n155it [00:16, 9.18it/s]\r\n156it [00:16, 9.34it/s]\r\n157it [00:17, 9.17it/s]\r\n158it [00:17, 9.37it/s]\r\n159it [00:17, 8.99it/s]\r\n160it [00:17, 9.22it/s]\r\n161it [00:17, 9.34it/s]\r\n162it [00:17, 9.12it/s]\r\n163it [00:17, 9.07it/s]\r\n164it [00:17, 9.16it/s]\r\n165it [00:17, 9.13it/s]\r\n167it [00:18, 9.23it/s]\r\n168it [00:18, 9.31it/s]\r\n169it [00:18, 9.16it/s]\r\n171it [00:18, 9.38it/s]\r\n172it [00:18, 
9.16it/s]\r\n174it [00:18, 9.16it/s]\r\n175it [00:19, 9.17it/s]\r\n177it [00:19, 9.21it/s]\r\n178it [00:19, 9.28it/s]\r\n179it [00:19, 9.25it/s]\r\n180it [00:19, 9.38it/s]\r\n181it [00:19, 8.94it/s]\r\n182it [00:19, 9.03it/s]\r\n184it [00:20, 9.40it/s]\r\n185it [00:20, 9.31it/s]\r\n186it [00:20, 9.12it/s]\r\n187it [00:20, 9.12it/s]\r\n189it [00:20, 9.14it/s]\r\n190it [00:20, 9.20it/s]\r\n191it [00:20, 9.04it/s]\r\n192it [00:20, 9.15it/s]\r\n193it [00:21, 9.32it/s]\r\n195it [00:21, 9.46it/s]\r\n196it [00:21, 9.12it/s]\r\n197it [00:21, 9.22it/s]\r\n198it [00:21, 9.31it/s]\r\n200it [00:21, 9.40it/s]\r\n201it [00:21, 9.16it/s]\r\n203it [00:22, 9.14it/s]\r\n204it [00:22, 9.25it/s]\r\n205it [00:22, 9.10it/s]\r\n207it [00:22, 9.47it/s]\r\n208it [00:22, 9.12it/s]\r\n209it [00:22, 9.09it/s]\r\n210it [00:22, 9.03it/s]\r\n212it [00:23, 9.32it/s]\r\n213it [00:23, 9.41it/s]\r\n214it [00:23, 9.46it/s]\r\n215it [00:23, 9.24it/s]\r\n216it [00:23, 9.07it/s]\r\n217it [00:23, 9.17it/s]\r\n218it [00:23, 9.39it/s]\r\n219it [00:23, 9.33it/s]\r\n221it [00:24, 9.29it/s]\r\n222it [00:24, 9.33it/s]\r\n223it [00:24, 9.43it/s]\r\n224it [00:24, 9.02it/s]\r\n226it [00:24, 9.14it/s]\r\n227it [00:24, 9.23it/s]\r\n228it [00:24, 9.10it/s]\r\n229it [00:24, 9.22it/s]\r\n230it [00:25, 9.22it/s]\r\n231it [00:25, 9.30it/s]\r\n232it [00:25, 9.24it/s]\r\n233it [00:25, 9.16it/s]\r\n235it [00:25, 9.54it/s]\r\n236it [00:25, 9.54it/s]\r\n237it [00:25, 9.46it/s]\r\n238it [00:25, 9.00it/s]\r\n239it [00:25, 9.17it/s]\r\n240it [00:26, 9.11it/s]\r\n241it [00:26, 9.14it/s]\r\n242it [00:26, 9.09it/s]\r\n243it [00:26, 9.25it/s]\r\n244it [00:26, 9.01it/s]\r\n246it [00:26, 9.20it/s]\r\n247it [00:26, 9.18it/s]\r\n249it [00:27, 9.37it/s]\r\n250it [00:27, 9.47it/s]\r\n251it [00:27, 9.16it/s]\r\n252it [00:27, 8.83it/s]\r\n253it [00:27, 9.04it/s]\r\n255it [00:27, 9.16it/s]\r\n256it [00:27, 9.20it/s]\r\n258it [00:28, 9.52it/s]\r\n259it [00:28, 9.43it/s]\r\n260it [00:28, 9.43it/s]\r\n261it [00:28, 9.40it/s]\r\n262it [00:28, 
9.21it/s]\r\n263it [00:28, 9.34it/s]\r\n264it [00:28, 9.30it/s]\r\n265it [00:28, 9.33it/s]\r\n266it [00:28, 9.24it/s]\r\n267it [00:29, 9.31it/s]\r\n268it [00:29, 9.29it/s]\r\n269it [00:29, 9.30it/s]\r\n270it [00:29, 9.15it/s]\r\n271it [00:29, 9.33it/s]\r\n272it [00:29, 9.33it/s]\r\n273it [00:29, 9.05it/s]\r\n274it [00:29, 9.26it/s]\r\n275it [00:29, 9.21it/s]\r\n276it [00:29, 9.32it/s]\r\n277it [00:30, 9.35it/s]\r\n278it [00:30, 9.04it/s]\r\n279it [00:30, 9.09it/s]\r\n281it [00:30, 9.10it/s]\r\n282it [00:30, 8.97it/s]\r\n284it [00:30, 9.10it/s]\r\n285it [00:30, 9.11it/s]\r\n286it [00:31, 9.09it/s]\r\n288it [00:31, 9.26it/s]\r\n289it [00:31, 9.30it/s]\r\n290it [00:31, 9.16it/s]\r\n291it [00:31, 9.29it/s]\r\n292it [00:31, 9.28it/s]\r\n293it [00:31, 9.40it/s]\r\n294it [00:31, 9.12it/s]\r\n296it [00:32, 9.48it/s]\r\n297it [00:32, 9.44it/s]\r\n298it [00:32, 9.40it/s]\r\n299it [00:32, 9.22it/s]\r\n300it [00:32, 9.40it/s]\r\n301it [00:32, 9.34it/s]\r\n301it [00:32, 9.21it/s]", "metrics": { "predict_time": 33, "total_time": 32.865302 }, "output": [ "https://replicate.delivery/mgxm/129d24dc-e39e-4240-8978-88420cc1f910/out-0.png" ], "started_at": "2022-08-23T00:25:55Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/rjcl54wakbbrfcajzllmidreya", "cancel": "https://api.replicate.com/v1/predictions/rjcl54wakbbrfcajzllmidreya/cancel" }, "version": "714ee02be40e197b6d107bcf1fa0533e876ccc727122e7dc15a62a428dcda125" }
Generated inUsing seed: 48356 0it [00:00, ?it/s] 2it [00:00, 9.30it/s] 3it [00:00, 8.75it/s] 4it [00:00, 9.08it/s] 5it [00:00, 8.94it/s] 6it [00:00, 8.71it/s] 8it [00:00, 9.05it/s] 9it [00:00, 9.06it/s] 11it [00:01, 9.47it/s] 12it [00:01, 9.07it/s] 14it [00:01, 9.23it/s] 15it [00:01, 9.13it/s] 16it [00:01, 8.91it/s] 18it [00:01, 9.37it/s] 19it [00:02, 9.02it/s] 20it [00:02, 9.01it/s] 21it [00:02, 9.23it/s] 22it [00:02, 8.90it/s] 24it [00:02, 9.47it/s] 25it [00:02, 9.42it/s] 26it [00:02, 9.30it/s] 27it [00:02, 9.07it/s] 28it [00:03, 9.25it/s] 29it [00:03, 9.30it/s] 30it [00:03, 9.05it/s] 31it [00:03, 9.10it/s] 32it [00:03, 9.06it/s] 33it [00:03, 9.31it/s] 34it [00:03, 9.34it/s] 35it [00:03, 9.25it/s] 36it [00:03, 9.09it/s] 37it [00:04, 9.16it/s] 38it [00:04, 9.39it/s] 39it [00:04, 9.09it/s] 40it [00:04, 8.47it/s] 42it [00:04, 8.98it/s] 44it [00:04, 9.19it/s] 45it [00:04, 9.31it/s] 46it [00:05, 8.86it/s] 47it [00:05, 9.04it/s] 48it [00:05, 9.13it/s] 49it [00:05, 8.96it/s] 51it [00:05, 9.37it/s] 52it [00:05, 9.38it/s] 53it [00:05, 9.42it/s] 54it [00:05, 9.42it/s] 55it [00:06, 9.05it/s] 56it [00:06, 9.01it/s] 57it [00:06, 9.14it/s] 58it [00:06, 9.14it/s] 60it [00:06, 9.23it/s] 62it [00:06, 9.23it/s] 63it [00:06, 9.28it/s] 65it [00:07, 9.36it/s] 66it [00:07, 8.99it/s] 68it [00:07, 9.22it/s] 69it [00:07, 9.11it/s] 70it [00:07, 9.27it/s] 71it [00:07, 9.36it/s] 72it [00:07, 9.13it/s] 74it [00:08, 9.22it/s] 75it [00:08, 9.12it/s] 77it [00:08, 9.38it/s] 78it [00:08, 8.97it/s] 79it [00:08, 9.14it/s] 81it [00:08, 9.24it/s] 82it [00:08, 9.26it/s] 83it [00:09, 9.21it/s] 84it [00:09, 8.90it/s] 85it [00:09, 9.16it/s] 87it [00:09, 9.47it/s] 88it [00:09, 9.24it/s] 89it [00:09, 9.16it/s] 91it [00:09, 9.20it/s] 92it [00:10, 8.66it/s] 94it [00:10, 9.38it/s] 95it [00:10, 9.33it/s] 96it [00:10, 9.14it/s] 97it [00:10, 8.78it/s] 98it [00:10, 8.86it/s] 99it [00:10, 8.95it/s] 100it [00:10, 9.15it/s] 102it [00:11, 9.40it/s] 103it [00:11, 8.88it/s] 104it [00:11, 8.90it/s] 105it [00:11, 
9.09it/s] 106it [00:11, 9.32it/s] 107it [00:11, 9.33it/s] 108it [00:11, 8.95it/s] 110it [00:12, 8.96it/s] 112it [00:12, 9.30it/s] 113it [00:12, 9.25it/s] 114it [00:12, 9.19it/s] 115it [00:12, 9.17it/s] 117it [00:12, 9.01it/s] 119it [00:12, 9.34it/s] 120it [00:13, 9.23it/s] 121it [00:13, 9.31it/s] 122it [00:13, 9.16it/s] 123it [00:13, 8.96it/s] 124it [00:13, 9.14it/s] 126it [00:13, 9.35it/s] 128it [00:13, 9.47it/s] 129it [00:14, 9.06it/s] 130it [00:14, 9.22it/s] 131it [00:14, 9.27it/s] 132it [00:14, 9.15it/s] 133it [00:14, 9.29it/s] 134it [00:14, 9.26it/s] 135it [00:14, 9.20it/s] 137it [00:14, 9.34it/s] 138it [00:15, 9.29it/s] 140it [00:15, 9.31it/s] 141it [00:15, 9.08it/s] 142it [00:15, 9.19it/s] 143it [00:15, 9.18it/s] 145it [00:15, 9.34it/s] 146it [00:15, 9.37it/s] 147it [00:16, 9.26it/s] 148it [00:16, 9.34it/s] 150it [00:16, 9.20it/s] 152it [00:16, 9.35it/s] 154it [00:16, 9.44it/s] 155it [00:16, 9.18it/s] 156it [00:16, 9.34it/s] 157it [00:17, 9.17it/s] 158it [00:17, 9.37it/s] 159it [00:17, 8.99it/s] 160it [00:17, 9.22it/s] 161it [00:17, 9.34it/s] 162it [00:17, 9.12it/s] 163it [00:17, 9.07it/s] 164it [00:17, 9.16it/s] 165it [00:17, 9.13it/s] 167it [00:18, 9.23it/s] 168it [00:18, 9.31it/s] 169it [00:18, 9.16it/s] 171it [00:18, 9.38it/s] 172it [00:18, 9.16it/s] 174it [00:18, 9.16it/s] 175it [00:19, 9.17it/s] 177it [00:19, 9.21it/s] 178it [00:19, 9.28it/s] 179it [00:19, 9.25it/s] 180it [00:19, 9.38it/s] 181it [00:19, 8.94it/s] 182it [00:19, 9.03it/s] 184it [00:20, 9.40it/s] 185it [00:20, 9.31it/s] 186it [00:20, 9.12it/s] 187it [00:20, 9.12it/s] 189it [00:20, 9.14it/s] 190it [00:20, 9.20it/s] 191it [00:20, 9.04it/s] 192it [00:20, 9.15it/s] 193it [00:21, 9.32it/s] 195it [00:21, 9.46it/s] 196it [00:21, 9.12it/s] 197it [00:21, 9.22it/s] 198it [00:21, 9.31it/s] 200it [00:21, 9.40it/s] 201it [00:21, 9.16it/s] 203it [00:22, 9.14it/s] 204it [00:22, 9.25it/s] 205it [00:22, 9.10it/s] 207it [00:22, 9.47it/s] 208it [00:22, 9.12it/s] 209it [00:22, 9.09it/s] 210it [00:22, 
9.03it/s] 212it [00:23, 9.32it/s] 213it [00:23, 9.41it/s] 214it [00:23, 9.46it/s] 215it [00:23, 9.24it/s] 216it [00:23, 9.07it/s] 217it [00:23, 9.17it/s] 218it [00:23, 9.39it/s] 219it [00:23, 9.33it/s] 221it [00:24, 9.29it/s] 222it [00:24, 9.33it/s] 223it [00:24, 9.43it/s] 224it [00:24, 9.02it/s] 226it [00:24, 9.14it/s] 227it [00:24, 9.23it/s] 228it [00:24, 9.10it/s] 229it [00:24, 9.22it/s] 230it [00:25, 9.22it/s] 231it [00:25, 9.30it/s] 232it [00:25, 9.24it/s] 233it [00:25, 9.16it/s] 235it [00:25, 9.54it/s] 236it [00:25, 9.54it/s] 237it [00:25, 9.46it/s] 238it [00:25, 9.00it/s] 239it [00:25, 9.17it/s] 240it [00:26, 9.11it/s] 241it [00:26, 9.14it/s] 242it [00:26, 9.09it/s] 243it [00:26, 9.25it/s] 244it [00:26, 9.01it/s] 246it [00:26, 9.20it/s] 247it [00:26, 9.18it/s] 249it [00:27, 9.37it/s] 250it [00:27, 9.47it/s] 251it [00:27, 9.16it/s] 252it [00:27, 8.83it/s] 253it [00:27, 9.04it/s] 255it [00:27, 9.16it/s] 256it [00:27, 9.20it/s] 258it [00:28, 9.52it/s] 259it [00:28, 9.43it/s] 260it [00:28, 9.43it/s] 261it [00:28, 9.40it/s] 262it [00:28, 9.21it/s] 263it [00:28, 9.34it/s] 264it [00:28, 9.30it/s] 265it [00:28, 9.33it/s] 266it [00:28, 9.24it/s] 267it [00:29, 9.31it/s] 268it [00:29, 9.29it/s] 269it [00:29, 9.30it/s] 270it [00:29, 9.15it/s] 271it [00:29, 9.33it/s] 272it [00:29, 9.33it/s] 273it [00:29, 9.05it/s] 274it [00:29, 9.26it/s] 275it [00:29, 9.21it/s] 276it [00:29, 9.32it/s] 277it [00:30, 9.35it/s] 278it [00:30, 9.04it/s] 279it [00:30, 9.09it/s] 281it [00:30, 9.10it/s] 282it [00:30, 8.97it/s] 284it [00:30, 9.10it/s] 285it [00:30, 9.11it/s] 286it [00:31, 9.09it/s] 288it [00:31, 9.26it/s] 289it [00:31, 9.30it/s] 290it [00:31, 9.16it/s] 291it [00:31, 9.29it/s] 292it [00:31, 9.28it/s] 293it [00:31, 9.40it/s] 294it [00:31, 9.12it/s] 296it [00:32, 9.48it/s] 297it [00:32, 9.44it/s] 298it [00:32, 9.40it/s] 299it [00:32, 9.22it/s] 300it [00:32, 9.40it/s] 301it [00:32, 9.34it/s] 301it [00:32, 9.21it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 · ID: sqyie5bdmrewdoxi6ehhxlesfe · Status: Succeeded · Source: Web · Hardware: – · Total duration: – · Created: – · Input
- width
- 512
- height
- 512
- prompt
- eye
- init_image
- https://replicate.delivery/mgxm/7e0ffe6c-04fd-4440-92b2-e4376b4620a5/out-0_1.png
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 512, "height": 512, "prompt": "eye", "init_image": "https://replicate.delivery/mgxm/7e0ffe6c-04fd-4440-92b2-e4376b4620a5/out-0_1.png", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library:npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "eye", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client:import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "eye", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "eye", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog:brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="eye"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "eye", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-08-25T20:07:51.275348Z", "created_at": "2022-08-25T20:07:46.549858Z", "data_removed": false, "error": null, "id": "sqyie5bdmrewdoxi6ehhxlesfe", "input": { "width": 512, "height": 512, "prompt": "eye", "init_image": "https://replicate.delivery/mgxm/7e0ffe6c-04fd-4440-92b2-e4376b4620a5/out-0_1.png", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 2494\n\n0it [00:00, ?it/s]\n2it [00:00, 13.33it/s]\n4it [00:00, 13.49it/s]\n6it [00:00, 13.57it/s]\n8it [00:00, 13.75it/s]\n10it [00:00, 13.87it/s]\n12it [00:00, 14.01it/s]\n14it [00:01, 14.07it/s]\n16it [00:01, 14.11it/s]\n18it [00:01, 14.08it/s]\n20it [00:01, 13.88it/s]\n22it [00:01, 13.72it/s]\n24it [00:01, 13.70it/s]\n26it [00:01, 13.66it/s]\n28it [00:02, 13.73it/s]\n30it [00:02, 13.78it/s]\n32it [00:02, 13.83it/s]\n34it [00:02, 13.78it/s]\n36it [00:02, 13.86it/s]\n38it [00:02, 13.91it/s]\n40it [00:02, 13.96it/s]\n42it [00:03, 14.01it/s]\n44it [00:03, 14.07it/s]\n46it [00:03, 14.10it/s]\n48it [00:03, 14.07it/s]\n49it [00:03, 13.89it/s]", "metrics": { "predict_time": 4.547854, "total_time": 4.72549 }, "output": [ "https://replicate.delivery/mgxm/4ee3ab76-7864-4604-8897-9d330214d225/out-0.png" ], "started_at": "2022-08-25T20:07:46.727494Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/sqyie5bdmrewdoxi6ehhxlesfe", "cancel": "https://api.replicate.com/v1/predictions/sqyie5bdmrewdoxi6ehhxlesfe/cancel" }, "version": "3080f37ef32771c9984d65033cbe71caa96c69680008bae64cf691724a6df04c" }
Generated inUsing seed: 2494 0it [00:00, ?it/s] 2it [00:00, 13.33it/s] 4it [00:00, 13.49it/s] 6it [00:00, 13.57it/s] 8it [00:00, 13.75it/s] 10it [00:00, 13.87it/s] 12it [00:00, 14.01it/s] 14it [00:01, 14.07it/s] 16it [00:01, 14.11it/s] 18it [00:01, 14.08it/s] 20it [00:01, 13.88it/s] 22it [00:01, 13.72it/s] 24it [00:01, 13.70it/s] 26it [00:01, 13.66it/s] 28it [00:02, 13.73it/s] 30it [00:02, 13.78it/s] 32it [00:02, 13.83it/s] 34it [00:02, 13.78it/s] 36it [00:02, 13.86it/s] 38it [00:02, 13.91it/s] 40it [00:02, 13.96it/s] 42it [00:03, 14.01it/s] 44it [00:03, 14.07it/s] 46it [00:03, 14.10it/s] 48it [00:03, 14.07it/s] 49it [00:03, 13.89it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 · ID: ljnv74ftfbfvtlqrvsz5zarakm · Status: Succeeded · Source: Web · Hardware: – · Total duration: – · Created: – · Input
- mask
- https://replicate.delivery/mgxm/24034da1-d461-4c60-ba7b-1e9e7184a3f1/time-surrealism-watch-oil-wallpaper-preview-mask.jpeg
- width
- 512
- height
- 512
- prompt
- modern apple watches with colorful hd displays in a surrealist style
- init_image
- https://replicate.delivery/mgxm/dff721db-bf78-4e04-a3a1-c412fe6e76b0/time-surrealism-watch-oil-wallpaper-preview.jpeg
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.5
- num_inference_steps
- 50
{ "mask": "https://replicate.delivery/mgxm/24034da1-d461-4c60-ba7b-1e9e7184a3f1/time-surrealism-watch-oil-wallpaper-preview-mask.jpeg", "width": 512, "height": 512, "prompt": "modern apple watches with colorful hd displays in a surrealist style", "init_image": "https://replicate.delivery/mgxm/dff721db-bf78-4e04-a3a1-c412fe6e76b0/time-surrealism-watch-oil-wallpaper-preview.jpeg", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.5, "num_inference_steps": 50 }
Install Replicate’s Node.js client library:npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "modern apple watches with colorful hd displays in a surrealist style", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client:import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "modern apple watches with colorful hd displays in a surrealist style", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "modern apple watches with colorful hd displays in a surrealist style", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog:brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="modern apple watches with colorful hd displays in a surrealist style"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "modern apple watches with colorful hd displays in a surrealist style", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-08-25T20:16:53.793840Z", "created_at": "2022-08-25T20:16:44.657704Z", "data_removed": false, "error": null, "id": "ljnv74ftfbfvtlqrvsz5zarakm", "input": { "mask": "https://replicate.delivery/mgxm/24034da1-d461-4c60-ba7b-1e9e7184a3f1/time-surrealism-watch-oil-wallpaper-preview-mask.jpeg", "width": 512, "height": 512, "prompt": "modern apple watches with colorful hd displays in a surrealist style", "init_image": "https://replicate.delivery/mgxm/dff721db-bf78-4e04-a3a1-c412fe6e76b0/time-surrealism-watch-oil-wallpaper-preview.jpeg", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.5, "num_inference_steps": 50 }, "logs": "Using seed: 2463\n\n0it [00:00, ?it/s]\n1it [00:00, 6.17it/s]\n3it [00:00, 10.31it/s]\n5it [00:00, 11.95it/s]\n7it [00:00, 12.66it/s]\n9it [00:00, 13.17it/s]\n11it [00:00, 13.45it/s]\n13it [00:01, 13.53it/s]\n15it [00:01, 13.72it/s]\n17it [00:01, 13.86it/s]\n19it [00:01, 13.91it/s]\n21it [00:01, 13.95it/s]\n23it [00:01, 14.04it/s]\n25it [00:01, 14.05it/s]\n27it [00:02, 14.01it/s]\n29it [00:02, 14.08it/s]\n31it [00:02, 14.12it/s]\n33it [00:02, 14.05it/s]\n34it [00:02, 13.50it/s]", "metrics": { "predict_time": 8.948849, "total_time": 9.136136 }, "output": [ "https://replicate.delivery/mgxm/bb35c491-8794-43d6-81e1-41211831535d/out-0.png" ], "started_at": "2022-08-25T20:16:44.844991Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/ljnv74ftfbfvtlqrvsz5zarakm", "cancel": "https://api.replicate.com/v1/predictions/ljnv74ftfbfvtlqrvsz5zarakm/cancel" }, "version": "3080f37ef32771c9984d65033cbe71caa96c69680008bae64cf691724a6df04c" }
Generated inUsing seed: 2463 0it [00:00, ?it/s] 1it [00:00, 6.17it/s] 3it [00:00, 10.31it/s] 5it [00:00, 11.95it/s] 7it [00:00, 12.66it/s] 9it [00:00, 13.17it/s] 11it [00:00, 13.45it/s] 13it [00:01, 13.53it/s] 15it [00:01, 13.72it/s] 17it [00:01, 13.86it/s] 19it [00:01, 13.91it/s] 21it [00:01, 13.95it/s] 23it [00:01, 14.04it/s] 25it [00:01, 14.05it/s] 27it [00:02, 14.01it/s] 29it [00:02, 14.08it/s] 31it [00:02, 14.12it/s] 33it [00:02, 14.05it/s] 34it [00:02, 13.50it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — Input
- width
- 512
- height
- 512
- prompt
- Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 512, "height": 512, "prompt": "Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library:npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client:import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-10-04T12:00:48.971279Z", "created_at": "2022-10-04T12:00:44.118403Z", "data_removed": false, "error": null, "id": "kgpqs7h2ffcfrouxeoua6tpkjy", "input": { "width": 512, "height": 512, "prompt": "Beautiful digital matte pastel paint sunflowers poppies chillwave greg rutkowski artstation", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 16090\n\n0it [00:00, ?it/s]\n2it [00:00, 12.31it/s]\n4it [00:00, 12.86it/s]\n6it [00:00, 12.71it/s]\n8it [00:00, 12.99it/s]\n10it [00:00, 13.06it/s]\n12it [00:00, 13.03it/s]\n14it [00:01, 13.17it/s]\n16it [00:01, 13.04it/s]\n18it [00:01, 13.26it/s]\n20it [00:01, 12.85it/s]\n22it [00:01, 13.09it/s]\n24it [00:01, 13.22it/s]\n26it [00:01, 13.42it/s]\n28it [00:02, 13.12it/s]\n30it [00:02, 13.09it/s]\n32it [00:02, 13.24it/s]\n34it [00:02, 13.01it/s]\n36it [00:02, 13.22it/s]\n38it [00:02, 13.25it/s]\n40it [00:03, 12.86it/s]\n42it [00:03, 13.15it/s]\n44it [00:03, 13.31it/s]\n46it [00:03, 13.29it/s]\n48it [00:03, 13.03it/s]\n50it [00:03, 13.20it/s]\n50it [00:03, 13.11it/s]", "metrics": { "predict_time": 4.69074, "total_time": 4.852876 }, "output": [ "https://replicate.delivery/mgxm/0d5e6ce0-dd85-4dd8-a087-a9c59876b456/out-0.png" ], "started_at": "2022-10-04T12:00:44.280539Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/kgpqs7h2ffcfrouxeoua6tpkjy", "cancel": "https://api.replicate.com/v1/predictions/kgpqs7h2ffcfrouxeoua6tpkjy/cancel" }, "version": "a9758cbfbd5f3c2094457d996681af52552901775aa2d6dd0b17fd15df959bef" }
Generated inUsing seed: 16090 0it [00:00, ?it/s] 2it [00:00, 12.31it/s] 4it [00:00, 12.86it/s] 6it [00:00, 12.71it/s] 8it [00:00, 12.99it/s] 10it [00:00, 13.06it/s] 12it [00:00, 13.03it/s] 14it [00:01, 13.17it/s] 16it [00:01, 13.04it/s] 18it [00:01, 13.26it/s] 20it [00:01, 12.85it/s] 22it [00:01, 13.09it/s] 24it [00:01, 13.22it/s] 26it [00:01, 13.42it/s] 28it [00:02, 13.12it/s] 30it [00:02, 13.09it/s] 32it [00:02, 13.24it/s] 34it [00:02, 13.01it/s] 36it [00:02, 13.22it/s] 38it [00:02, 13.25it/s] 40it [00:03, 12.86it/s] 42it [00:03, 13.15it/s] 44it [00:03, 13.31it/s] 46it [00:03, 13.29it/s] 48it [00:03, 13.03it/s] 50it [00:03, 13.20it/s] 50it [00:03, 13.11it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — ID: kmp3c6s2kjdqtboq22pyokrksi · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created — Input
- width
- 512
- height
- 512
- prompt
- multicolor hyperspace
- scheduler
- K-LMS
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "multicolor hyperspace", scheduler: "K-LMS", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="multicolor hyperspace"' \ -i 'scheduler="K-LMS"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-10-28T14:10:01.303206Z", "created_at": "2022-10-28T14:09:57.451115Z", "data_removed": false, "error": null, "id": "kmp3c6s2kjdqtboq22pyokrksi", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 33234\n\n0it [00:00, ?it/s]\n2it [00:00, 13.23it/s]\n4it [00:00, 13.24it/s]\n6it [00:00, 13.82it/s]\n8it [00:00, 14.17it/s]\n10it [00:00, 14.39it/s]\n12it [00:00, 14.55it/s]\n14it [00:00, 14.65it/s]\n16it [00:01, 14.63it/s]\n18it [00:01, 14.68it/s]\n20it [00:01, 14.71it/s]\n22it [00:01, 14.65it/s]\n24it [00:01, 14.65it/s]\n26it [00:01, 14.68it/s]\n28it [00:01, 14.06it/s]\n30it [00:02, 14.37it/s]\n32it [00:02, 14.52it/s]\n34it [00:02, 14.62it/s]\n36it [00:02, 14.68it/s]\n38it [00:02, 14.67it/s]\n40it [00:02, 14.72it/s]\n42it [00:02, 14.71it/s]\n44it [00:03, 14.74it/s]\n46it [00:03, 14.73it/s]\n48it [00:03, 14.71it/s]\n50it [00:03, 14.67it/s]\n50it [00:03, 14.53it/s]", "metrics": { "predict_time": 3.810126, "total_time": 3.852091 }, "output": [ "https://replicate.delivery/pbxt/AE5fg6Nbehm5fIkWbIVNsrK1jUEqRr8btVZwoQSEgMemLlpfB/out-0.png" ], "started_at": "2022-10-28T14:09:57.493080Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/kmp3c6s2kjdqtboq22pyokrksi", "cancel": "https://api.replicate.com/v1/predictions/kmp3c6s2kjdqtboq22pyokrksi/cancel" }, "version": "5b703f0fa41880f918ab1b12c88a25b468c18639be17515259fb66a83f4ad0a4" }
Generated inUsing seed: 33234 0it [00:00, ?it/s] 2it [00:00, 13.23it/s] 4it [00:00, 13.24it/s] 6it [00:00, 13.82it/s] 8it [00:00, 14.17it/s] 10it [00:00, 14.39it/s] 12it [00:00, 14.55it/s] 14it [00:00, 14.65it/s] 16it [00:01, 14.63it/s] 18it [00:01, 14.68it/s] 20it [00:01, 14.71it/s] 22it [00:01, 14.65it/s] 24it [00:01, 14.65it/s] 26it [00:01, 14.68it/s] 28it [00:01, 14.06it/s] 30it [00:02, 14.37it/s] 32it [00:02, 14.52it/s] 34it [00:02, 14.62it/s] 36it [00:02, 14.68it/s] 38it [00:02, 14.67it/s] 40it [00:02, 14.72it/s] 42it [00:02, 14.71it/s] 44it [00:03, 14.74it/s] 46it [00:03, 14.73it/s] 48it [00:03, 14.71it/s] 50it [00:03, 14.67it/s] 50it [00:03, 14.53it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — ID: rsytjp72cbghhh7gr4ax4rnwly · Status: Succeeded · Source: Web · Hardware: A100 (40GB) · Total duration · Created by @bfirsh — Input
- width
- 512
- height
- 512
- prompt
- multicolor hyperspace
- scheduler
- K-LMS
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "multicolor hyperspace", scheduler: "K-LMS", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="multicolor hyperspace"' \ -i 'scheduler="K-LMS"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-11-03T01:17:52.861177Z", "created_at": "2022-11-03T01:17:48.718920Z", "data_removed": false, "error": null, "id": "rsytjp72cbghhh7gr4ax4rnwly", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 45688\n\n 0%| | 0/50 [00:00<?, ?it/s]\n 2%|▏ | 1/50 [00:00<00:05, 9.47it/s]\n 6%|▌ | 3/50 [00:00<00:03, 12.33it/s]\n 10%|█ | 5/50 [00:00<00:03, 12.32it/s]\n 14%|█▍ | 7/50 [00:00<00:03, 12.99it/s]\n 18%|█▊ | 9/50 [00:00<00:03, 13.40it/s]\n 22%|██▏ | 11/50 [00:00<00:02, 13.62it/s]\n 26%|██▌ | 13/50 [00:00<00:02, 13.30it/s]\n 30%|███ | 15/50 [00:01<00:02, 13.48it/s]\n 34%|███▍ | 17/50 [00:01<00:02, 13.55it/s]\n 38%|███▊ | 19/50 [00:01<00:02, 13.66it/s]\n 42%|████▏ | 21/50 [00:01<00:02, 13.75it/s]\n 46%|████▌ | 23/50 [00:01<00:01, 13.76it/s]\n 50%|█████ | 25/50 [00:01<00:01, 13.35it/s]\n 54%|█████▍ | 27/50 [00:02<00:01, 13.50it/s]\n 58%|█████▊ | 29/50 [00:02<00:01, 13.60it/s]\n 62%|██████▏ | 31/50 [00:02<00:01, 13.69it/s]\n 66%|██████▌ | 33/50 [00:02<00:01, 13.74it/s]\n 70%|███████ | 35/50 [00:02<00:01, 13.78it/s]\n 74%|███████▍ | 37/50 [00:02<00:00, 13.46it/s]\n 78%|███████▊ | 39/50 [00:02<00:00, 13.57it/s]\n 82%|████████▏ | 41/50 [00:03<00:00, 13.60it/s]\n 86%|████████▌ | 43/50 [00:03<00:00, 13.68it/s]\n 90%|█████████ | 45/50 [00:03<00:00, 13.63it/s]\n 94%|█████████▍| 47/50 [00:03<00:00, 13.73it/s]\n 98%|█████████▊| 49/50 [00:03<00:00, 13.65it/s]\n100%|██████████| 50/50 [00:03<00:00, 13.49it/s]", "metrics": { "predict_time": 4.108314, "total_time": 4.142257 }, "output": [ "https://replicate.delivery/pbxt/Uy5Ma1EruBKkOReB5TwrX9P1ekJjnSftkujvBqkDneSCMywfB/out-0.png" ], "started_at": "2022-11-03T01:17:48.752863Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/rsytjp72cbghhh7gr4ax4rnwly", "cancel": 
"https://api.replicate.com/v1/predictions/rsytjp72cbghhh7gr4ax4rnwly/cancel" }, "version": "8abccf52e7cba9f6e82317253f4a3549082e966db5584e92c808ece132037776" }
Generated inUsing seed: 45688 0%| | 0/50 [00:00<?, ?it/s] 2%|▏ | 1/50 [00:00<00:05, 9.47it/s] 6%|▌ | 3/50 [00:00<00:03, 12.33it/s] 10%|█ | 5/50 [00:00<00:03, 12.32it/s] 14%|█▍ | 7/50 [00:00<00:03, 12.99it/s] 18%|█▊ | 9/50 [00:00<00:03, 13.40it/s] 22%|██▏ | 11/50 [00:00<00:02, 13.62it/s] 26%|██▌ | 13/50 [00:00<00:02, 13.30it/s] 30%|███ | 15/50 [00:01<00:02, 13.48it/s] 34%|███▍ | 17/50 [00:01<00:02, 13.55it/s] 38%|███▊ | 19/50 [00:01<00:02, 13.66it/s] 42%|████▏ | 21/50 [00:01<00:02, 13.75it/s] 46%|████▌ | 23/50 [00:01<00:01, 13.76it/s] 50%|█████ | 25/50 [00:01<00:01, 13.35it/s] 54%|█████▍ | 27/50 [00:02<00:01, 13.50it/s] 58%|█████▊ | 29/50 [00:02<00:01, 13.60it/s] 62%|██████▏ | 31/50 [00:02<00:01, 13.69it/s] 66%|██████▌ | 33/50 [00:02<00:01, 13.74it/s] 70%|███████ | 35/50 [00:02<00:01, 13.78it/s] 74%|███████▍ | 37/50 [00:02<00:00, 13.46it/s] 78%|███████▊ | 39/50 [00:02<00:00, 13.57it/s] 82%|████████▏ | 41/50 [00:03<00:00, 13.60it/s] 86%|████████▌ | 43/50 [00:03<00:00, 13.68it/s] 90%|█████████ | 45/50 [00:03<00:00, 13.63it/s] 94%|█████████▍| 47/50 [00:03<00:00, 13.73it/s] 98%|█████████▊| 49/50 [00:03<00:00, 13.65it/s] 100%|██████████| 50/50 [00:03<00:00, 13.49it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — Input
- width
- 512
- height
- 512
- prompt
- Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo
- scheduler
- K_EULER_ANCESTRAL
- num_outputs
- "1"
- guidance_scale
- 7.5
- negative_prompt
- yellow color, nose
- prompt_strength
- 0.8
- num_inference_steps
- "30"
{ "width": 512, "height": 512, "prompt": "Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "yellow color, nose", "prompt_strength": 0.8, "num_inference_steps": "30" }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo", scheduler: "K_EULER_ANCESTRAL", num_outputs: "1", guidance_scale: 7.5, negative_prompt: "yellow color, nose", num_inference_steps: "30" } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "yellow color, nose", "num_inference_steps": "30" } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "yellow color, nose", "num_inference_steps": "30" } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo"' \ -i 'scheduler="K_EULER_ANCESTRAL"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'negative_prompt="yellow color, nose"' \ -i 'num_inference_steps="30"'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "yellow color, nose", "num_inference_steps": "30" } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-11-22T23:15:24.895155Z", "created_at": "2022-11-22T23:15:22.302509Z", "data_removed": false, "error": null, "id": "4irm2hlalvdmlkryu4df63pwnq", "input": { "width": 512, "height": 512, "prompt": "Ragdoll cat king wearing a golden crown, intricate, elegant, highly detailed, centered, digital painting, artstation, concept art, smooth, sharp focus, illustration, artgerm, Tomasz Alen Kopera, Peter Mohrbacher, donato giancola, Joseph Christian Leyendecker, WLOP, Boris Vallejo", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "yellow color, nose", "prompt_strength": 0.8, "num_inference_steps": "30" }, "logs": "Using seed: 55251\n 0%| | 0/30 [00:00<?, ?it/s]\n 7%|▋ | 2/30 [00:00<00:02, 13.41it/s]\n 13%|█▎ | 4/30 [00:00<00:01, 13.90it/s]\n 20%|██ | 6/30 [00:00<00:01, 13.63it/s]\n 27%|██▋ | 8/30 [00:00<00:01, 13.90it/s]\n 33%|███▎ | 10/30 [00:00<00:01, 14.11it/s]\n 40%|████ | 12/30 [00:00<00:01, 14.27it/s]\n 47%|████▋ | 14/30 [00:00<00:01, 14.26it/s]\n 53%|█████▎ | 16/30 [00:01<00:00, 14.44it/s]\n 60%|██████ | 18/30 [00:01<00:00, 14.55it/s]\n 67%|██████▋ | 20/30 [00:01<00:00, 14.66it/s]\n 73%|███████▎ | 22/30 [00:01<00:00, 14.68it/s]\n 80%|████████ | 24/30 [00:01<00:00, 14.74it/s]\n 87%|████████▋ | 26/30 [00:01<00:00, 14.78it/s]\n 93%|█████████▎| 28/30 [00:01<00:00, 14.74it/s]\n100%|██████████| 30/30 [00:02<00:00, 14.53it/s]\n100%|██████████| 30/30 [00:02<00:00, 14.41it/s]", "metrics": { "predict_time": 2.56023, "total_time": 2.592646 }, "output": [ "https://replicate.delivery/pbxt/aooDunhAPcbUBh979fEblu4qqohEtlG0dPJUf1aLuUkMowCQA/out-0.png" ], "started_at": "2022-11-22T23:15:22.334925Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/4irm2hlalvdmlkryu4df63pwnq", "cancel": "https://api.replicate.com/v1/predictions/4irm2hlalvdmlkryu4df63pwnq/cancel" }, "version": "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478" }
Generated inUsing seed: 55251 0%| | 0/30 [00:00<?, ?it/s] 7%|▋ | 2/30 [00:00<00:02, 13.41it/s] 13%|█▎ | 4/30 [00:00<00:01, 13.90it/s] 20%|██ | 6/30 [00:00<00:01, 13.63it/s] 27%|██▋ | 8/30 [00:00<00:01, 13.90it/s] 33%|███▎ | 10/30 [00:00<00:01, 14.11it/s] 40%|████ | 12/30 [00:00<00:01, 14.27it/s] 47%|████▋ | 14/30 [00:00<00:01, 14.26it/s] 53%|█████▎ | 16/30 [00:01<00:00, 14.44it/s] 60%|██████ | 18/30 [00:01<00:00, 14.55it/s] 67%|██████▋ | 20/30 [00:01<00:00, 14.66it/s] 73%|███████▎ | 22/30 [00:01<00:00, 14.68it/s] 80%|████████ | 24/30 [00:01<00:00, 14.74it/s] 87%|████████▋ | 26/30 [00:01<00:00, 14.78it/s] 93%|█████████▎| 28/30 [00:01<00:00, 14.74it/s] 100%|██████████| 30/30 [00:02<00:00, 14.53it/s] 100%|██████████| 30/30 [00:02<00:00, 14.41it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — Input
- width
- 512
- height
- 512
- prompt
- multicolor hyperspace
- scheduler
- K-LMS
- num_outputs
- "1"
- guidance_scale
- 7.5
- negative_prompt
- green
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "green", "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "multicolor hyperspace", scheduler: "K-LMS", num_outputs: "1", guidance_scale: 7.5, negative_prompt: "green", num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "green", "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "green", "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="multicolor hyperspace"' \ -i 'scheduler="K-LMS"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'negative_prompt="green"' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "green", "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-11-22T23:34:08.104586Z", "created_at": "2022-11-22T23:34:03.421109Z", "data_removed": false, "error": null, "id": "uu3pugtcofbn3cgac6k4aymef4", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K-LMS", "num_outputs": "1", "guidance_scale": 7.5, "negative_prompt": "green", "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 63632\n 0%| | 0/50 [00:00<?, ?it/s]\n 4%|▍ | 2/50 [00:00<00:03, 13.41it/s]\n 8%|▊ | 4/50 [00:00<00:03, 13.57it/s]\n 12%|█▏ | 6/50 [00:00<00:03, 13.48it/s]\n 16%|█▌ | 8/50 [00:00<00:03, 13.60it/s]\n 20%|██ | 10/50 [00:00<00:02, 13.67it/s]\n 24%|██▍ | 12/50 [00:00<00:02, 13.68it/s]\n 28%|██▊ | 14/50 [00:01<00:02, 13.71it/s]\n 32%|███▏ | 16/50 [00:01<00:02, 13.68it/s]\n 36%|███▌ | 18/50 [00:01<00:02, 13.62it/s]\n 40%|████ | 20/50 [00:01<00:02, 13.47it/s]\n 44%|████▍ | 22/50 [00:01<00:02, 13.56it/s]\n 48%|████▊ | 24/50 [00:01<00:01, 13.62it/s]\n 52%|█████▏ | 26/50 [00:01<00:01, 13.53it/s]\n 56%|█████▌ | 28/50 [00:02<00:01, 13.54it/s]\n 60%|██████ | 30/50 [00:02<00:01, 13.55it/s]\n 64%|██████▍ | 32/50 [00:02<00:01, 13.49it/s]\n 68%|██████▊ | 34/50 [00:02<00:01, 13.46it/s]\n 72%|███████▏ | 36/50 [00:02<00:01, 13.52it/s]\n 76%|███████▌ | 38/50 [00:02<00:00, 13.57it/s]\n 80%|████████ | 40/50 [00:02<00:00, 13.59it/s]\n 84%|████████▍ | 42/50 [00:03<00:00, 13.33it/s]\n 88%|████████▊ | 44/50 [00:03<00:00, 13.40it/s]\n 92%|█████████▏| 46/50 [00:03<00:00, 13.36it/s]\n 96%|█████████▌| 48/50 [00:03<00:00, 13.50it/s]\n100%|██████████| 50/50 [00:03<00:00, 13.57it/s]\n100%|██████████| 50/50 [00:03<00:00, 13.54it/s]", "metrics": { "predict_time": 4.647599, "total_time": 4.683477 }, "output": [ "https://replicate.delivery/pbxt/2zVI7LtEO0aeKSruhERO0PQCVEsbFivhEyXyR0LDMSg3cYBIA/out-0.png" ], "started_at": "2022-11-22T23:34:03.456987Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/uu3pugtcofbn3cgac6k4aymef4", "cancel": 
"https://api.replicate.com/v1/predictions/uu3pugtcofbn3cgac6k4aymef4/cancel" }, "version": "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478" }
Generated inUsing seed: 63632 0%| | 0/50 [00:00<?, ?it/s] 4%|▍ | 2/50 [00:00<00:03, 13.41it/s] 8%|▊ | 4/50 [00:00<00:03, 13.57it/s] 12%|█▏ | 6/50 [00:00<00:03, 13.48it/s] 16%|█▌ | 8/50 [00:00<00:03, 13.60it/s] 20%|██ | 10/50 [00:00<00:02, 13.67it/s] 24%|██▍ | 12/50 [00:00<00:02, 13.68it/s] 28%|██▊ | 14/50 [00:01<00:02, 13.71it/s] 32%|███▏ | 16/50 [00:01<00:02, 13.68it/s] 36%|███▌ | 18/50 [00:01<00:02, 13.62it/s] 40%|████ | 20/50 [00:01<00:02, 13.47it/s] 44%|████▍ | 22/50 [00:01<00:02, 13.56it/s] 48%|████▊ | 24/50 [00:01<00:01, 13.62it/s] 52%|█████▏ | 26/50 [00:01<00:01, 13.53it/s] 56%|█████▌ | 28/50 [00:02<00:01, 13.54it/s] 60%|██████ | 30/50 [00:02<00:01, 13.55it/s] 64%|██████▍ | 32/50 [00:02<00:01, 13.49it/s] 68%|██████▊ | 34/50 [00:02<00:01, 13.46it/s] 72%|███████▏ | 36/50 [00:02<00:01, 13.52it/s] 76%|███████▌ | 38/50 [00:02<00:00, 13.57it/s] 80%|████████ | 40/50 [00:02<00:00, 13.59it/s] 84%|████████▍ | 42/50 [00:03<00:00, 13.33it/s] 88%|████████▊ | 44/50 [00:03<00:00, 13.40it/s] 92%|█████████▏| 46/50 [00:03<00:00, 13.36it/s] 96%|█████████▌| 48/50 [00:03<00:00, 13.50it/s] 100%|██████████| 50/50 [00:03<00:00, 13.57it/s] 100%|██████████| 50/50 [00:03<00:00, 13.54it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — ID: 4hwtj7bbmvetvgdah5o4xln3qy · Status: Succeeded · Source: Web · Hardware: – · Total duration: – · Created: – — Input
- width
- 512
- height
- 512
- prompt
- multicolor hyperspace
- scheduler
- K_EULER_ANCESTRAL
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 512, height: 512, prompt: "multicolor hyperspace", scheduler: "K_EULER_ANCESTRAL", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=512' \ -i 'height=512' \ -i 'prompt="multicolor hyperspace"' \ -i 'scheduler="K_EULER_ANCESTRAL"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-11-22T23:34:36.399360Z", "created_at": "2022-11-22T23:34:31.934216Z", "data_removed": false, "error": null, "id": "4hwtj7bbmvetvgdah5o4xln3qy", "input": { "width": 512, "height": 512, "prompt": "multicolor hyperspace", "scheduler": "K_EULER_ANCESTRAL", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 63752\n 0%| | 0/50 [00:00<?, ?it/s]\n 4%|▍ | 2/50 [00:00<00:03, 13.59it/s]\n 8%|▊ | 4/50 [00:00<00:03, 14.13it/s]\n 12%|█▏ | 6/50 [00:00<00:03, 14.41it/s]\n 16%|█▌ | 8/50 [00:00<00:02, 14.53it/s]\n 20%|██ | 10/50 [00:00<00:02, 14.27it/s]\n 24%|██▍ | 12/50 [00:00<00:02, 14.30it/s]\n 28%|██▊ | 14/50 [00:00<00:02, 14.22it/s]\n 32%|███▏ | 16/50 [00:01<00:02, 14.37it/s]\n 36%|███▌ | 18/50 [00:01<00:02, 14.43it/s]\n 40%|████ | 20/50 [00:01<00:02, 14.41it/s]\n 44%|████▍ | 22/50 [00:01<00:01, 14.39it/s]\n 48%|████▊ | 24/50 [00:01<00:01, 14.17it/s]\n 52%|█████▏ | 26/50 [00:01<00:01, 14.20it/s]\n 56%|█████▌ | 28/50 [00:01<00:01, 14.17it/s]\n 60%|██████ | 30/50 [00:02<00:01, 14.35it/s]\n 64%|██████▍ | 32/50 [00:02<00:01, 14.44it/s]\n 68%|██████▊ | 34/50 [00:02<00:01, 14.43it/s]\n 72%|███████▏ | 36/50 [00:02<00:00, 14.47it/s]\n 76%|███████▌ | 38/50 [00:02<00:00, 14.49it/s]\n 80%|████████ | 40/50 [00:02<00:00, 14.47it/s]\n 84%|████████▍ | 42/50 [00:02<00:00, 14.34it/s]\n 88%|████████▊ | 44/50 [00:03<00:00, 14.43it/s]\n 92%|█████████▏| 46/50 [00:03<00:00, 14.48it/s]\n 96%|█████████▌| 48/50 [00:03<00:00, 14.52it/s]\n100%|██████████| 50/50 [00:03<00:00, 14.54it/s]\n100%|██████████| 50/50 [00:03<00:00, 14.38it/s]", "metrics": { "predict_time": 4.430653, "total_time": 4.465144 }, "output": [ "https://replicate.delivery/pbxt/DogdQ8movv5LNlAaHGXrRU9eS1hZMs0OZpIthOsyTbeL6wCQA/out-0.png" ], "started_at": "2022-11-22T23:34:31.968707Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/4hwtj7bbmvetvgdah5o4xln3qy", "cancel": 
"https://api.replicate.com/v1/predictions/4hwtj7bbmvetvgdah5o4xln3qy/cancel" }, "version": "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478" }
Generated inUsing seed: 63752 0%| | 0/50 [00:00<?, ?it/s] 4%|▍ | 2/50 [00:00<00:03, 13.59it/s] 8%|▊ | 4/50 [00:00<00:03, 14.13it/s] 12%|█▏ | 6/50 [00:00<00:03, 14.41it/s] 16%|█▌ | 8/50 [00:00<00:02, 14.53it/s] 20%|██ | 10/50 [00:00<00:02, 14.27it/s] 24%|██▍ | 12/50 [00:00<00:02, 14.30it/s] 28%|██▊ | 14/50 [00:00<00:02, 14.22it/s] 32%|███▏ | 16/50 [00:01<00:02, 14.37it/s] 36%|███▌ | 18/50 [00:01<00:02, 14.43it/s] 40%|████ | 20/50 [00:01<00:02, 14.41it/s] 44%|████▍ | 22/50 [00:01<00:01, 14.39it/s] 48%|████▊ | 24/50 [00:01<00:01, 14.17it/s] 52%|█████▏ | 26/50 [00:01<00:01, 14.20it/s] 56%|█████▌ | 28/50 [00:01<00:01, 14.17it/s] 60%|██████ | 30/50 [00:02<00:01, 14.35it/s] 64%|██████▍ | 32/50 [00:02<00:01, 14.44it/s] 68%|██████▊ | 34/50 [00:02<00:01, 14.43it/s] 72%|███████▏ | 36/50 [00:02<00:00, 14.47it/s] 76%|███████▌ | 38/50 [00:02<00:00, 14.49it/s] 80%|████████ | 40/50 [00:02<00:00, 14.47it/s] 84%|████████▍ | 42/50 [00:02<00:00, 14.34it/s] 88%|████████▊ | 44/50 [00:03<00:00, 14.43it/s] 92%|█████████▏| 46/50 [00:03<00:00, 14.48it/s] 96%|█████████▌| 48/50 [00:03<00:00, 14.52it/s] 100%|██████████| 50/50 [00:03<00:00, 14.54it/s] 100%|██████████| 50/50 [00:03<00:00, 14.38it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — Input
- width
- 768
- height
- 768
- prompt
- an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed
- scheduler
- K_EULER
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 768, height: 768, prompt: "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", scheduler: "K_EULER", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=768' \ -i 'height=768' \ -i 'prompt="an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed"' \ -i 'scheduler="K_EULER"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-12-06T16:04:42.659888Z", "created_at": "2022-12-06T16:04:35.607941Z", "data_removed": false, "error": null, "id": "cxpa7egscbbtjbsx4r6623v5n4", "input": { "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 17203\n 0%| | 0/50 [00:00<?, ?it/s]\n 2%|▏ | 1/50 [00:00<00:07, 6.90it/s]\n 4%|▍ | 2/50 [00:00<00:06, 7.27it/s]\n 6%|▌ | 3/50 [00:00<00:05, 7.86it/s]\n 8%|▊ | 4/50 [00:00<00:05, 8.16it/s]\n 10%|█ | 5/50 [00:00<00:05, 8.34it/s]\n 12%|█▏ | 6/50 [00:00<00:05, 8.46it/s]\n 14%|█▍ | 7/50 [00:00<00:05, 8.53it/s]\n 16%|█▌ | 8/50 [00:00<00:04, 8.58it/s]\n 18%|█▊ | 9/50 [00:01<00:04, 8.62it/s]\n 20%|██ | 10/50 [00:01<00:04, 8.65it/s]\n 22%|██▏ | 11/50 [00:01<00:04, 8.67it/s]\n 24%|██▍ | 12/50 [00:01<00:04, 8.68it/s]\n 26%|██▌ | 13/50 [00:01<00:04, 8.68it/s]\n 28%|██▊ | 14/50 [00:01<00:04, 8.69it/s]\n 30%|███ | 15/50 [00:01<00:04, 8.69it/s]\n 32%|███▏ | 16/50 [00:01<00:03, 8.69it/s]\n 34%|███▍ | 17/50 [00:02<00:03, 8.69it/s]\n 36%|███▌ | 18/50 [00:02<00:03, 8.70it/s]\n 38%|███▊ | 19/50 [00:02<00:03, 8.70it/s]\n 40%|████ | 20/50 [00:02<00:03, 8.70it/s]\n 42%|████▏ | 21/50 [00:02<00:03, 8.70it/s]\n 44%|████▍ | 22/50 [00:02<00:03, 8.70it/s]\n 46%|████▌ | 23/50 [00:02<00:03, 8.70it/s]\n 48%|████▊ | 24/50 [00:02<00:02, 8.70it/s]\n 50%|█████ | 25/50 [00:02<00:02, 8.69it/s]\n 52%|█████▏ | 26/50 [00:03<00:02, 8.70it/s]\n 54%|█████▍ | 27/50 [00:03<00:02, 8.70it/s]\n 56%|█████▌ | 28/50 [00:03<00:02, 8.71it/s]\n 58%|█████▊ | 29/50 [00:03<00:02, 8.71it/s]\n 60%|██████ | 30/50 [00:03<00:02, 8.70it/s]\n 62%|██████▏ | 31/50 [00:03<00:02, 8.67it/s]\n 64%|██████▍ | 32/50 [00:03<00:02, 8.68it/s]\n 66%|██████▌ | 33/50 [00:03<00:01, 8.69it/s]\n 68%|██████▊ | 34/50 [00:03<00:01, 8.69it/s]\n 70%|███████ | 35/50 [00:04<00:01, 8.70it/s]\n 72%|███████▏ | 36/50 
[00:04<00:01, 8.71it/s]\n 74%|███████▍ | 37/50 [00:04<00:01, 8.71it/s]\n 76%|███████▌ | 38/50 [00:04<00:01, 8.71it/s]\n 78%|███████▊ | 39/50 [00:04<00:01, 8.71it/s]\n 80%|████████ | 40/50 [00:04<00:01, 8.67it/s]\n 82%|████████▏ | 41/50 [00:04<00:01, 8.69it/s]\n 84%|████████▍ | 42/50 [00:04<00:00, 8.70it/s]\n 86%|████████▌ | 43/50 [00:04<00:00, 8.71it/s]\n 88%|████████▊ | 44/50 [00:05<00:00, 8.72it/s]\n 90%|█████████ | 45/50 [00:05<00:00, 8.72it/s]\n 92%|█████████▏| 46/50 [00:05<00:00, 8.72it/s]\n 94%|█████████▍| 47/50 [00:05<00:00, 8.72it/s]\n 96%|█████████▌| 48/50 [00:05<00:00, 8.72it/s]\n 98%|█████████▊| 49/50 [00:05<00:00, 8.72it/s]\n100%|██████████| 50/50 [00:05<00:00, 8.73it/s]\n100%|██████████| 50/50 [00:05<00:00, 8.63it/s]", "metrics": { "predict_time": 7.019087, "total_time": 7.051947 }, "output": [ "https://replicate.delivery/pbxt/f4nlztv3uz1iFC4AEf2wBYQGTezdVeysvtZUtwfsvZOJDN6AC/out-0.png" ], "started_at": "2022-12-06T16:04:35.640801Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/cxpa7egscbbtjbsx4r6623v5n4", "cancel": "https://api.replicate.com/v1/predictions/cxpa7egscbbtjbsx4r6623v5n4/cancel" }, "version": "0827b64897df7b6e8c04625167bbb275b9db0f14ab09e2454b9824141963c966" }
Generated inUsing seed: 17203 0%| | 0/50 [00:00<?, ?it/s] 2%|▏ | 1/50 [00:00<00:07, 6.90it/s] 4%|▍ | 2/50 [00:00<00:06, 7.27it/s] 6%|▌ | 3/50 [00:00<00:05, 7.86it/s] 8%|▊ | 4/50 [00:00<00:05, 8.16it/s] 10%|█ | 5/50 [00:00<00:05, 8.34it/s] 12%|█▏ | 6/50 [00:00<00:05, 8.46it/s] 14%|█▍ | 7/50 [00:00<00:05, 8.53it/s] 16%|█▌ | 8/50 [00:00<00:04, 8.58it/s] 18%|█▊ | 9/50 [00:01<00:04, 8.62it/s] 20%|██ | 10/50 [00:01<00:04, 8.65it/s] 22%|██▏ | 11/50 [00:01<00:04, 8.67it/s] 24%|██▍ | 12/50 [00:01<00:04, 8.68it/s] 26%|██▌ | 13/50 [00:01<00:04, 8.68it/s] 28%|██▊ | 14/50 [00:01<00:04, 8.69it/s] 30%|███ | 15/50 [00:01<00:04, 8.69it/s] 32%|███▏ | 16/50 [00:01<00:03, 8.69it/s] 34%|███▍ | 17/50 [00:02<00:03, 8.69it/s] 36%|███▌ | 18/50 [00:02<00:03, 8.70it/s] 38%|███▊ | 19/50 [00:02<00:03, 8.70it/s] 40%|████ | 20/50 [00:02<00:03, 8.70it/s] 42%|████▏ | 21/50 [00:02<00:03, 8.70it/s] 44%|████▍ | 22/50 [00:02<00:03, 8.70it/s] 46%|████▌ | 23/50 [00:02<00:03, 8.70it/s] 48%|████▊ | 24/50 [00:02<00:02, 8.70it/s] 50%|█████ | 25/50 [00:02<00:02, 8.69it/s] 52%|█████▏ | 26/50 [00:03<00:02, 8.70it/s] 54%|█████▍ | 27/50 [00:03<00:02, 8.70it/s] 56%|█████▌ | 28/50 [00:03<00:02, 8.71it/s] 58%|█████▊ | 29/50 [00:03<00:02, 8.71it/s] 60%|██████ | 30/50 [00:03<00:02, 8.70it/s] 62%|██████▏ | 31/50 [00:03<00:02, 8.67it/s] 64%|██████▍ | 32/50 [00:03<00:02, 8.68it/s] 66%|██████▌ | 33/50 [00:03<00:01, 8.69it/s] 68%|██████▊ | 34/50 [00:03<00:01, 8.69it/s] 70%|███████ | 35/50 [00:04<00:01, 8.70it/s] 72%|███████▏ | 36/50 [00:04<00:01, 8.71it/s] 74%|███████▍ | 37/50 [00:04<00:01, 8.71it/s] 76%|███████▌ | 38/50 [00:04<00:01, 8.71it/s] 78%|███████▊ | 39/50 [00:04<00:01, 8.71it/s] 80%|████████ | 40/50 [00:04<00:01, 8.67it/s] 82%|████████▏ | 41/50 [00:04<00:01, 8.69it/s] 84%|████████▍ | 42/50 [00:04<00:00, 8.70it/s] 86%|████████▌ | 43/50 [00:04<00:00, 8.71it/s] 88%|████████▊ | 44/50 [00:05<00:00, 8.72it/s] 90%|█████████ | 45/50 [00:05<00:00, 8.72it/s] 92%|█████████▏| 46/50 [00:05<00:00, 8.72it/s] 94%|█████████▍| 
47/50 [00:05<00:00, 8.72it/s] 96%|█████████▌| 48/50 [00:05<00:00, 8.72it/s] 98%|█████████▊| 49/50 [00:05<00:00, 8.72it/s] 100%|██████████| 50/50 [00:05<00:00, 8.73it/s] 100%|██████████| 50/50 [00:05<00:00, 8.63it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — Input
- width
- 768
- height
- 768
- prompt
- an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed
- scheduler
- K_EULER
- num_outputs
- "1"
- guidance_scale
- 7.5
- prompt_strength
- 0.8
- num_inference_steps
- 50
{ "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { width: 768, height: 768, prompt: "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", scheduler: "K_EULER", num_outputs: "1", guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'width=768' \ -i 'height=768' \ -i 'prompt="an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed"' \ -i 'scheduler="K_EULER"' \ -i 'num_outputs="1"' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2022-12-09T02:48:58.002360Z", "created_at": "2022-12-09T02:47:38.477014Z", "data_removed": false, "error": null, "id": "3wztszjpcjbabi6iwgqukwyegq", "input": { "width": 768, "height": 768, "prompt": "an astronaut riding a horse on mars artstation, hd, dramatic lighting, detailed", "scheduler": "K_EULER", "num_outputs": "1", "guidance_scale": 7.5, "prompt_strength": 0.8, "num_inference_steps": 50 }, "logs": "Using seed: 57140\n 0%| | 0/50 [00:00<?, ?it/s]\n 2%|▏ | 1/50 [00:00<00:12, 3.84it/s]\n 4%|▍ | 2/50 [00:00<00:12, 3.84it/s]\n 6%|▌ | 3/50 [00:00<00:12, 3.84it/s]\n 8%|▊ | 4/50 [00:01<00:11, 3.84it/s]\n 10%|█ | 5/50 [00:01<00:11, 3.84it/s]\n 12%|█▏ | 6/50 [00:01<00:11, 3.83it/s]\n 14%|█▍ | 7/50 [00:01<00:11, 3.83it/s]\n 16%|█▌ | 8/50 [00:02<00:10, 3.83it/s]\n 18%|█▊ | 9/50 [00:02<00:10, 3.83it/s]\n 20%|██ | 10/50 [00:02<00:10, 3.83it/s]\n 22%|██▏ | 11/50 [00:02<00:10, 3.83it/s]\n 24%|██▍ | 12/50 [00:03<00:09, 3.82it/s]\n 26%|██▌ | 13/50 [00:03<00:09, 3.82it/s]\n 28%|██▊ | 14/50 [00:03<00:09, 3.82it/s]\n 30%|███ | 15/50 [00:03<00:09, 3.82it/s]\n 32%|███▏ | 16/50 [00:04<00:08, 3.82it/s]\n 34%|███▍ | 17/50 [00:04<00:08, 3.82it/s]\n 36%|███▌ | 18/50 [00:04<00:08, 3.83it/s]\n 38%|███▊ | 19/50 [00:04<00:08, 3.83it/s]\n 40%|████ | 20/50 [00:05<00:07, 3.83it/s]\n 42%|████▏ | 21/50 [00:05<00:07, 3.83it/s]\n 44%|████▍ | 22/50 [00:05<00:07, 3.83it/s]\n 46%|████▌ | 23/50 [00:06<00:07, 3.83it/s]\n 48%|████▊ | 24/50 [00:06<00:06, 3.83it/s]\n 50%|█████ | 25/50 [00:06<00:06, 3.83it/s]\n 52%|█████▏ | 26/50 [00:06<00:06, 3.83it/s]\n 54%|█████▍ | 27/50 [00:07<00:06, 3.83it/s]\n 56%|█████▌ | 28/50 [00:07<00:05, 3.83it/s]\n 58%|█████▊ | 29/50 [00:07<00:05, 3.83it/s]\n 60%|██████ | 30/50 [00:07<00:05, 3.83it/s]\n 62%|██████▏ | 31/50 [00:08<00:04, 3.83it/s]\n 64%|██████▍ | 32/50 [00:08<00:04, 3.83it/s]\n 66%|██████▌ | 33/50 [00:08<00:04, 3.84it/s]\n 68%|██████▊ | 34/50 [00:08<00:04, 3.84it/s]\n 70%|███████ | 35/50 [00:09<00:03, 3.84it/s]\n 72%|███████▏ | 36/50 
[00:09<00:03, 3.84it/s]\n 74%|███████▍ | 37/50 [00:09<00:03, 3.84it/s]\n 76%|███████▌ | 38/50 [00:09<00:03, 3.84it/s]\n 78%|███████▊ | 39/50 [00:10<00:02, 3.84it/s]\n 80%|████████ | 40/50 [00:10<00:02, 3.84it/s]\n 82%|████████▏ | 41/50 [00:10<00:02, 3.83it/s]\n 84%|████████▍ | 42/50 [00:10<00:02, 3.83it/s]\n 86%|████████▌ | 43/50 [00:11<00:01, 3.83it/s]\n 88%|████████▊ | 44/50 [00:11<00:01, 3.83it/s]\n 90%|█████████ | 45/50 [00:11<00:01, 3.83it/s]\n 92%|█████████▏| 46/50 [00:12<00:01, 3.83it/s]\n 94%|█████████▍| 47/50 [00:12<00:00, 3.83it/s]\n 96%|█████████▌| 48/50 [00:12<00:00, 3.84it/s]\n 98%|█████████▊| 49/50 [00:12<00:00, 3.84it/s]\n100%|██████████| 50/50 [00:13<00:00, 3.83it/s]\n100%|██████████| 50/50 [00:13<00:00, 3.83it/s]", "metrics": { "predict_time": 14.28122, "total_time": 79.525346 }, "output": [ "https://replicate.delivery/pbxt/XdWzrhW73EIAIxuxl92iE5qVHdiFMnNh3axdwp56EdSGUBCE/out-0.png" ], "started_at": "2022-12-09T02:48:43.721140Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/3wztszjpcjbabi6iwgqukwyegq", "cancel": "https://api.replicate.com/v1/predictions/3wztszjpcjbabi6iwgqukwyegq/cancel" }, "version": "6359a0cab3ca6e4d3320c33d79096161208e9024d174b2311e5a21b6c7e1131c" }
Generated inUsing seed: 57140 0%| | 0/50 [00:00<?, ?it/s] 2%|▏ | 1/50 [00:00<00:12, 3.84it/s] 4%|▍ | 2/50 [00:00<00:12, 3.84it/s] 6%|▌ | 3/50 [00:00<00:12, 3.84it/s] 8%|▊ | 4/50 [00:01<00:11, 3.84it/s] 10%|█ | 5/50 [00:01<00:11, 3.84it/s] 12%|█▏ | 6/50 [00:01<00:11, 3.83it/s] 14%|█▍ | 7/50 [00:01<00:11, 3.83it/s] 16%|█▌ | 8/50 [00:02<00:10, 3.83it/s] 18%|█▊ | 9/50 [00:02<00:10, 3.83it/s] 20%|██ | 10/50 [00:02<00:10, 3.83it/s] 22%|██▏ | 11/50 [00:02<00:10, 3.83it/s] 24%|██▍ | 12/50 [00:03<00:09, 3.82it/s] 26%|██▌ | 13/50 [00:03<00:09, 3.82it/s] 28%|██▊ | 14/50 [00:03<00:09, 3.82it/s] 30%|███ | 15/50 [00:03<00:09, 3.82it/s] 32%|███▏ | 16/50 [00:04<00:08, 3.82it/s] 34%|███▍ | 17/50 [00:04<00:08, 3.82it/s] 36%|███▌ | 18/50 [00:04<00:08, 3.83it/s] 38%|███▊ | 19/50 [00:04<00:08, 3.83it/s] 40%|████ | 20/50 [00:05<00:07, 3.83it/s] 42%|████▏ | 21/50 [00:05<00:07, 3.83it/s] 44%|████▍ | 22/50 [00:05<00:07, 3.83it/s] 46%|████▌ | 23/50 [00:06<00:07, 3.83it/s] 48%|████▊ | 24/50 [00:06<00:06, 3.83it/s] 50%|█████ | 25/50 [00:06<00:06, 3.83it/s] 52%|█████▏ | 26/50 [00:06<00:06, 3.83it/s] 54%|█████▍ | 27/50 [00:07<00:06, 3.83it/s] 56%|█████▌ | 28/50 [00:07<00:05, 3.83it/s] 58%|█████▊ | 29/50 [00:07<00:05, 3.83it/s] 60%|██████ | 30/50 [00:07<00:05, 3.83it/s] 62%|██████▏ | 31/50 [00:08<00:04, 3.83it/s] 64%|██████▍ | 32/50 [00:08<00:04, 3.83it/s] 66%|██████▌ | 33/50 [00:08<00:04, 3.84it/s] 68%|██████▊ | 34/50 [00:08<00:04, 3.84it/s] 70%|███████ | 35/50 [00:09<00:03, 3.84it/s] 72%|███████▏ | 36/50 [00:09<00:03, 3.84it/s] 74%|███████▍ | 37/50 [00:09<00:03, 3.84it/s] 76%|███████▌ | 38/50 [00:09<00:03, 3.84it/s] 78%|███████▊ | 39/50 [00:10<00:02, 3.84it/s] 80%|████████ | 40/50 [00:10<00:02, 3.84it/s] 82%|████████▏ | 41/50 [00:10<00:02, 3.83it/s] 84%|████████▍ | 42/50 [00:10<00:02, 3.83it/s] 86%|████████▌ | 43/50 [00:11<00:01, 3.83it/s] 88%|████████▊ | 44/50 [00:11<00:01, 3.83it/s] 90%|█████████ | 45/50 [00:11<00:01, 3.83it/s] 92%|█████████▏| 46/50 [00:12<00:01, 3.83it/s] 94%|█████████▍| 
47/50 [00:12<00:00, 3.83it/s] 96%|█████████▌| 48/50 [00:12<00:00, 3.84it/s] 98%|█████████▊| 49/50 [00:12<00:00, 3.84it/s] 100%|██████████| 50/50 [00:13<00:00, 3.83it/s] 100%|██████████| 50/50 [00:13<00:00, 3.83it/s]
Prediction
stability-ai/stable-diffusion:ac732df8 — Input
- prompt
- an astronaut riding a horse on mars, hd, dramatic lighting
- scheduler
- K_EULER
- num_outputs
- 1
- guidance_scale
- 7.5
- image_dimensions
- 512x512
- num_inference_steps
- 50
{ "prompt": "an astronaut riding a horse on mars, hd, dramatic lighting", "scheduler": "K_EULER", "num_outputs": 1, "guidance_scale": 7.5, "image_dimensions": "512x512", "num_inference_steps": 50 }
Install Replicate’s Node.js client library: npm install replicate
Set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import and set up the client: import Replicate from "replicate"; const replicate = new Replicate({ auth: process.env.REPLICATE_API_TOKEN, });
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", { input: { prompt: "an astronaut riding a horse on mars, hd, dramatic lighting", scheduler: "K_EULER", num_outputs: 1, guidance_scale: 7.5, num_inference_steps: 50 } } ); console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library: pip install replicate
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Import the client: import replicate
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", input={ "prompt": "an astronaut riding a horse on mars, hd, dramatic lighting", "scheduler": "K_EULER", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 50 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run stability-ai/stable-diffusion using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4", "input": { "prompt": "an astronaut riding a horse on mars, hd, dramatic lighting", "scheduler": "K_EULER", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog: brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4 \ -i 'prompt="an astronaut riding a horse on mars, hd, dramatic lighting"' \ -i 'scheduler="K_EULER"' \ -i 'num_outputs=1' \ -i 'guidance_scale=7.5' \ -i 'num_inference_steps=50'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/stability-ai/stable-diffusion@sha256:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "prompt": "an astronaut riding a horse on mars, hd, dramatic lighting", "scheduler": "K_EULER", "num_outputs": 1, "guidance_scale": 7.5, "num_inference_steps": 50 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2023-04-10T15:45:33.474978Z", "created_at": "2023-04-10T15:45:31.434998Z", "data_removed": false, "error": null, "id": "uvb7ynit4bhpjds3vn4bx7npeq", "input": { "prompt": "an astronaut riding a horse on mars, hd, dramatic lighting", "scheduler": "K_EULER", "num_outputs": 1, "guidance_scale": 7.5, "image_dimensions": "512x512", "num_inference_steps": 50 }, "logs": "Using seed: 52443\ninput_shape: torch.Size([1, 77])\n 0%| | 0/50 [00:00<?, ?it/s]\n 10%|█ | 5/50 [00:00<00:01, 41.06it/s]\n 20%|██ | 10/50 [00:00<00:00, 41.32it/s]\n 30%|███ | 15/50 [00:00<00:00, 43.77it/s]\n 42%|████▏ | 21/50 [00:00<00:00, 46.80it/s]\n 54%|█████▍ | 27/50 [00:00<00:00, 48.53it/s]\n 66%|██████▌ | 33/50 [00:00<00:00, 49.63it/s]\n 78%|███████▊ | 39/50 [00:00<00:00, 50.45it/s]\n 90%|█████████ | 45/50 [00:00<00:00, 50.90it/s]\n100%|██████████| 50/50 [00:01<00:00, 48.51it/s]", "metrics": { "predict_time": 1.889956, "total_time": 2.03998 }, "output": [ "https://replicate.delivery/pbxt/sWeZFZou6v3CPKuoJbqX46ugPaHT1DcsWYx0srPmGrMOCPYIA/out-0.png" ], "started_at": "2023-04-10T15:45:31.585022Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/uvb7ynit4bhpjds3vn4bx7npeq", "cancel": "https://api.replicate.com/v1/predictions/uvb7ynit4bhpjds3vn4bx7npeq/cancel" }, "version": "db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf" }
Generated inUsing seed: 52443 input_shape: torch.Size([1, 77]) 0%| | 0/50 [00:00<?, ?it/s] 10%|█ | 5/50 [00:00<00:01, 41.06it/s] 20%|██ | 10/50 [00:00<00:00, 41.32it/s] 30%|███ | 15/50 [00:00<00:00, 43.77it/s] 42%|████▏ | 21/50 [00:00<00:00, 46.80it/s] 54%|█████▍ | 27/50 [00:00<00:00, 48.53it/s] 66%|██████▌ | 33/50 [00:00<00:00, 49.63it/s] 78%|███████▊ | 39/50 [00:00<00:00, 50.45it/s] 90%|█████████ | 45/50 [00:00<00:00, 50.90it/s] 100%|██████████| 50/50 [00:01<00:00, 48.51it/s]
Want to make some of these yourself?
Run this model