Failed to load versions. Head to the versions page to see all versions for this model.
You're looking at a specific version of this model. Jump to the model overview.
prunaai/flux.1-cheetah:40395798
Input
Run this model in Node.js with one line of code:
npm install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
// Create a Replicate API client.
// Authenticates with the REPLICATE_API_TOKEN environment variable set above.
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run prunaai/flux.1-cheetah using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Run the pinned model version and wait for the prediction to finish.
// NOTE(review): input semantics below are inferred from the example values —
// confirm against the model's schema page.
const output = await replicate.run(
"prunaai/flux.1-cheetah:40395798681414feaa3bcf8c39bd0e4c12e6b53ce1ec984d52a77e635469d8e5",
{
input: {
seed: -1, // presumably -1 selects a random seed — verify in the schema
prompt: "a tiny astronaut hatching from an egg on the moon",
guidance: 3.5,
go_faster: true,
go_fast_af: false,
image_size: 1024,
aspect_ratio: "1:1",
output_format: "webp",
output_quality: 80, // quality only applies to lossy formats such as webp
num_inference_steps: 28
}
}
);
// output is a URL (or file-like object) pointing at the generated image.
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run prunaai/flux.1-cheetah using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Run the pinned model version and block until the prediction finishes.
# NOTE(review): input semantics below are inferred from the example values —
# confirm against the model's schema page.
output = replicate.run(
"prunaai/flux.1-cheetah:40395798681414feaa3bcf8c39bd0e4c12e6b53ce1ec984d52a77e635469d8e5",
input={
"seed": -1,  # presumably -1 selects a random seed — verify in the schema
"prompt": "a tiny astronaut hatching from an egg on the moon",
"guidance": 3.5,
"go_faster": True,
"go_fast_af": False,
"image_size": 1024,
"aspect_ratio": "1:1",
"output_format": "webp",
"output_quality": 80,  # quality only applies to lossy formats such as webp
"num_inference_steps": 28
}
)
# output is a URL (or file-like object) pointing at the generated image.
print(output)
To learn more, take a look at the guide on getting started with Python.
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run prunaai/flux.1-cheetah using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction via Replicate's HTTP API.
# "Prefer: wait" asks the server to hold the response until the prediction
# finishes instead of returning immediately with a pending record.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "40395798681414feaa3bcf8c39bd0e4c12e6b53ce1ec984d52a77e635469d8e5",
"input": {
"seed": -1,
"prompt": "a tiny astronaut hatching from an egg on the moon",
"guidance": 3.5,
"go_faster": true,
"go_fast_af": false,
"image_size": 1024,
"aspect_ratio": "1:1",
"output_format": "webp",
"output_quality": 80,
"num_inference_steps": 28
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
Output
{
"completed_at": "2025-03-29T16:01:56.445981Z",
"created_at": "2025-03-29T16:01:54.469000Z",
"data_removed": false,
"error": null,
"id": "vqfmppk74nrm80cnwahs0973k4",
"input": {
"seed": -1,
"prompt": "a tiny astronaut hatching from an egg on the moon",
"guidance": 3.5,
"go_faster": true,
"go_fast_af": false,
"image_size": 1024,
"aspect_ratio": "1:1",
"output_format": "webp",
"output_quality": 80,
"num_inference_steps": 28
},
"logs": "Using txt2img pipeline\nSetting cache speed factor: 0.4\nRunning prediction with args: ['prompt', 'height', 'width', 'guidance_scale', 'num_inference_steps', 'generator']\n 0%| | 0/28 [00:00<?, ?it/s]\n 54%|█████▎ | 15/28 [00:01<00:00, 13.12it/s]\n100%|██████████| 28/28 [00:01<00:00, 17.30it/s]",
"metrics": {
"predict_time": 1.962889675,
"total_time": 1.976981
},
"output": "https://replicate.delivery/xezq/oyo2ufOW2QQNFKnpJTVNef8vSuZW2enVbdTpGbrqGB6Q3y1RB/output_-1_0.webp",
"started_at": "2025-03-29T16:01:54.483091Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/bcwr-mxxb5li23bpprcczmmotp5y77gxcg3at4yl2bbawzixrki43hgqq",
"get": "https://api.replicate.com/v1/predictions/vqfmppk74nrm80cnwahs0973k4",
"cancel": "https://api.replicate.com/v1/predictions/vqfmppk74nrm80cnwahs0973k4/cancel"
},
"version": "40395798681414feaa3bcf8c39bd0e4c12e6b53ce1ec984d52a77e635469d8e5"
}
Using txt2img pipeline
Setting cache speed factor: 0.4
Running prediction with args: ['prompt', 'height', 'width', 'guidance_scale', 'num_inference_steps', 'generator']
0%| | 0/28 [00:00<?, ?it/s]
54%|█████▎ | 15/28 [00:01<00:00, 13.12it/s]
100%|██████████| 28/28 [00:01<00:00, 17.30it/s]