Prompt
Enable maximum speed optimization
Default: false
Number of images to generate
Default: 1
Number of inference steps
Default: 50
Guidance scale
Default: 7.5
Seed
Default: 42
Image height
Default: 1024
Image width
Default: 1024
Run this model in Node.js with one line of code:
npm install replicate
REPLICATE_API_TOKEN
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run prunaai/sdxl-cheetah using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "prunaai/sdxl-cheetah:91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807",
  {
    input: {
      seed: 0,
      prompt: "a beautiful unicorn",
      go_fast_af: false,
      num_images: 1,
      image_width: 1024,
      image_height: 1024,
      guidance_scale: 7.5,
      num_inference_steps: 30
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
import replicate
output = replicate.run(
    "prunaai/sdxl-cheetah:91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807",
    input={
        "seed": 0,
        "prompt": "a beautiful unicorn",
        "go_fast_af": False,
        "num_images": 1,
        "image_width": 1024,
        "image_height": 1024,
        "guidance_scale": 7.5,
        "num_inference_steps": 30
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807",
    "input": {
      "seed": 0,
      "prompt": "a beautiful unicorn",
      "go_fast_af": false,
      "num_images": 1,
      "image_width": 1024,
      "image_height": 1024,
      "guidance_scale": 7.5,
      "num_inference_steps": 30
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/prunaai/sdxl-cheetah@sha256:91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807 \
  -i 'seed=0' \
  -i 'prompt="a beautiful unicorn"' \
  -i 'go_fast_af=false' \
  -i 'num_images=1' \
  -i 'image_width=1024' \
  -i 'image_height=1024' \
  -i 'guidance_scale=7.5' \
  -i 'num_inference_steps=30'
To learn more, take a look at the Cog documentation.
docker run -d -p 5000:5000 --gpus=all r8.im/prunaai/sdxl-cheetah@sha256:91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807

curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "seed": 0,
      "prompt": "a beautiful unicorn",
      "go_fast_af": false,
      "num_images": 1,
      "image_width": 1024,
      "image_height": 1024,
      "guidance_scale": 7.5,
      "num_inference_steps": 30
    }
  }' \
  http://localhost:5000/predictions
docker run -d -p 5000:5000 --gpus=all r8.im/prunaai/sdxl-cheetah@sha256:91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "seed": 0,
      "prompt": "a beautiful unicorn",
      "go_fast_af": false,
      "num_images": 1,
      "image_width": 1024,
      "image_height": 1024,
      "guidance_scale": 7.5,
      "num_inference_steps": 30
    }
  }' \
  http://localhost:5000/predictions
{
  "completed_at": "2025-03-28T22:59:05.889721Z",
  "created_at": "2025-03-28T22:59:04.030000Z",
  "data_removed": false,
  "error": null,
  "id": "7bnne5qpvsrj40cnvvx9z3bv8r",
  "input": {
    "seed": 0,
    "prompt": "a beautiful unicorn",
    "go_fast_af": false,
    "num_images": 1,
    "image_width": 1024,
    "image_height": 1024,
    "guidance_scale": 7.5,
    "num_inference_steps": 30
  },
  "logs": "0%|          | 0/30 [00:00<?, ?it/s]\n 50%|█████     | 15/30 [00:00<00:00, 36.16it/s]\n100%|██████████| 30/30 [00:00<00:00, 34.93it/s]\n100%|██████████| 30/30 [00:00<00:00, 35.09it/s]",
  "metrics": {
    "predict_time": 1.8470771849999998,
    "total_time": 1.859721
  },
  "output": "https://replicate.delivery/yhqm/lyq6dRHDSda3NVXEroV4xSLuLhofqlrJEp7aJIGbyT1c3mOKA/output.png",
  "started_at": "2025-03-28T22:59:04.042644Z",
  "status": "succeeded",
  "urls": {
    "stream": "https://stream.replicate.com/v1/files/yswh-fhnt4fn73fdipqf7o6wqhatk4xwdusp2jkhhsgkllymocncfulra",
    "get": "https://api.replicate.com/v1/predictions/7bnne5qpvsrj40cnvvx9z3bv8r",
    "cancel": "https://api.replicate.com/v1/predictions/7bnne5qpvsrj40cnvvx9z3bv8r/cancel"
  },
  "version": "91b608b2a471e40d5a9425dcd9610c066430236d313ab84e65d26c02a585e807"
}
0%| | 0/30 [00:00<?, ?it/s] 50%|█████ | 15/30 [00:00<00:00, 36.16it/s] 100%|██████████| 30/30 [00:00<00:00, 34.93it/s] 100%|██████████| 30/30 [00:00<00:00, 35.09it/s]
View more examples
This model runs on Nvidia A100 (80GB) GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model doesn't have a readme.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.