Readme
This model doesn't have a readme.
Run this model in Node.js with one line of code.
First, install Replicate's Node.js client library:
npm install replicate
Then set your REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import { writeFile } from "node:fs/promises";
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run gougouccnu/stable-audio-open-1.0 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "gougouccnu/stable-audio-open-1.0:0f69810d42dd9098b8d20d0c8db087fbfd5ccd1704eef0a99a48c7325ab7fc3a",
  {
    input: {
      seed: -1,
      steps: 100,
      prompt: "A toilet flushing.",
      cfg_scale: 6,
      sigma_max: 500,
      sigma_min: 0.03,
      batch_size: 1,
      sampler_type: "dpmpp-3m-sde",
      seconds_start: 0,
      seconds_total: 8,
      negative_prompt: "",
      init_noise_level: 1
    }
  }
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"

// To write the generated audio file to disk:
await writeFile("output.wav", output);
To learn more, take a look at the guide on getting started with Node.js.
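replicate.run waits for the prediction to finish before returning. If you prefer not to block, you can start a prediction and poll it yourself. The following is a minimal sketch using the client's predictions API (predictions.create and predictions.get); treat it as a starting point rather than the canonical recipe:

// Start a prediction without waiting for it to finish.
let prediction = await replicate.predictions.create({
  version: "0f69810d42dd9098b8d20d0c8db087fbfd5ccd1704eef0a99a48c7325ab7fc3a",
  input: { prompt: "A toilet flushing.", seconds_total: 8 },
});

// Poll every couple of seconds until it reaches a terminal state.
while (!["succeeded", "failed", "canceled"].includes(prediction.status)) {
  await new Promise((resolve) => setTimeout(resolve, 2000));
  prediction = await replicate.predictions.get(prediction.id);
}

console.log(prediction.output); // URL of the generated .wav file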
Run this model in Python with one line of code.
First, install Replicate's Python client library:
pip install replicate
Then set your REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run gougouccnu/stable-audio-open-1.0 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "gougouccnu/stable-audio-open-1.0:0f69810d42dd9098b8d20d0c8db087fbfd5ccd1704eef0a99a48c7325ab7fc3a",
    input={
        "seed": -1,
        "steps": 100,
        "prompt": "A toilet flushing.",
        "cfg_scale": 6,
        "sigma_max": 500,
        "sigma_min": 0.03,
        "batch_size": 1,
        "sampler_type": "dpmpp-3m-sde",
        "seconds_start": 0,
        "seconds_total": 8,
        "negative_prompt": "",
        "init_noise_level": 1
    }
)
# To access the file URL:
print(output.url())
#=> "http://example.com"
# To write the generated audio file to disk:
with open("output.wav", "wb") as file:
    file.write(output.read())
To learn more, take a look at the guide on getting started with Python.
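As with the Node.js client, replicate.run blocks until the prediction finishes. For longer generations you can create the prediction and poll it yourself. Here is a minimal sketch using the client's predictions API (predictions.create and Prediction.reload); treat the details as an illustration, not the only way to do it:

import time
import replicate

# Start a prediction without blocking.
prediction = replicate.predictions.create(
    version="0f69810d42dd9098b8d20d0c8db087fbfd5ccd1704eef0a99a48c7325ab7fc3a",
    input={"prompt": "A toilet flushing.", "seconds_total": 8},
)

# Poll every couple of seconds until it reaches a terminal state.
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction.reload()

print(prediction.status)  # e.g. "succeeded"
print(prediction.output)  # URL of the generated .wav file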
Set your REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run gougouccnu/stable-audio-open-1.0 using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "gougouccnu/stable-audio-open-1.0:0f69810d42dd9098b8d20d0c8db087fbfd5ccd1704eef0a99a48c7325ab7fc3a",
    "input": {
      "seed": -1,
      "steps": 100,
      "prompt": "A toilet flushing.",
      "cfg_scale": 6,
      "sigma_max": 500,
      "sigma_min": 0.03,
      "batch_size": 1,
      "sampler_type": "dpmpp-3m-sde",
      "seconds_start": 0,
      "seconds_total": 8,
      "negative_prompt": "",
      "init_noise_level": 1
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
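The Prefer: wait header asks the API to hold the request open until the prediction completes. If it times out, or if you drop the header, the response includes a urls.get link (visible in the example prediction below) that you can poll for the final status and output, for example:

# Fetch the current state of a prediction by id.
curl -s \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  https://api.replicate.com/v1/predictions/vsaagqva15rg80cm8jfaafk59r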
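Here is an example prediction for this model, as returned by the API: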
{
"completed_at": "2025-01-08T06:39:50.890854Z",
"created_at": "2025-01-08T06:33:40.105000Z",
"data_removed": false,
"error": null,
"id": "vsaagqva15rg80cm8jfaafk59r",
"input": {
"seed": -1,
"steps": 100,
"prompt": "A toilet flushing.",
"cfg_scale": 6,
"sigma_max": 500,
"sigma_min": 0.03,
"batch_size": 1,
"sampler_type": "dpmpp-3m-sde",
"seconds_start": 0,
"seconds_total": 8,
"negative_prompt": "",
"init_noise_level": 1
},
"logs": "Prompt: A toilet flushing.\n2887216820\n/src/stable_audio_tools/models/conditioners.py:314: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\nwith torch.cuda.amp.autocast(dtype=torch.float16) and torch.set_grad_enabled(self.enable_grad):\n/src/stable_audio_tools/inference/sampling.py:177: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\nwith torch.cuda.amp.autocast():\n 0%| | 0/100 [00:00<?, ?it/s]/root/.pyenv/versions/3.10.15/lib/python3.10/contextlib.py:103: FutureWarning: `torch.backends.cuda.sdp_kernel()` is deprecated. In the future, this context manager will be removed. Please see `torch.nn.attention.sdpa_kernel()` for the new context manager, with updated signature.\nself.gen = func(*args, **kwds)\n 1%| | 1/100 [00:00<00:23, 4.27it/s]\n 3%|▎ | 3/100 [00:00<00:11, 8.17it/s]\n 5%|▌ | 5/100 [00:00<00:09, 10.50it/s]\n 7%|▋ | 7/100 [00:00<00:07, 11.79it/s]\n 9%|▉ | 9/100 [00:00<00:07, 12.69it/s]\n 11%|█ | 11/100 [00:00<00:06, 13.32it/s]\n 13%|█▎ | 13/100 [00:01<00:06, 13.46it/s]\n 15%|█▌ | 15/100 [00:01<00:06, 13.65it/s]\n 17%|█▋ | 17/100 [00:01<00:05, 14.15it/s]\n 19%|█▉ | 19/100 [00:01<00:05, 14.50it/s]\n 21%|██ | 21/100 [00:01<00:05, 14.73it/s]\n 23%|██▎ | 23/100 [00:01<00:05, 14.91it/s]\n 25%|██▌ | 25/100 [00:01<00:05, 14.90it/s]\n 27%|██▋ | 27/100 [00:02<00:04, 14.98it/s]\n 29%|██▉ | 29/100 [00:02<00:04, 14.95it/s]\n 31%|███ | 31/100 [00:02<00:04, 15.03it/s]\n 33%|███▎ | 33/100 [00:02<00:04, 15.05it/s]\n 35%|███▌ | 35/100 [00:02<00:04, 15.13it/s]\n 37%|███▋ | 37/100 [00:02<00:04, 15.13it/s]\n 39%|███▉ | 39/100 [00:02<00:04, 15.15it/s]\n 41%|████ | 41/100 [00:02<00:03, 15.20it/s]\n 43%|████▎ | 43/100 [00:03<00:03, 15.31it/s]\n 45%|████▌ | 45/100 [00:03<00:03, 15.23it/s]\n 47%|████▋ | 47/100 [00:03<00:03, 15.33it/s]\n 49%|████▉ | 49/100 [00:03<00:03, 15.30it/s]\n 51%|█████ | 51/100 [00:03<00:03, 15.38it/s]\n 53%|█████▎ | 53/100 [00:03<00:03, 15.29it/s]\n 55%|█████▌ | 55/100 [00:03<00:02, 15.31it/s]\n 57%|█████▋ | 57/100 [00:03<00:02, 15.25it/s]\n 59%|█████▉ | 59/100 [00:04<00:02, 15.20it/s]\n 61%|██████ | 61/100 [00:04<00:02, 15.03it/s]\n 63%|██████▎ | 63/100 [00:04<00:02, 15.17it/s]\n 65%|██████▌ | 65/100 [00:04<00:02, 14.64it/s]\n 67%|██████▋ | 67/100 [00:04<00:02, 14.60it/s]\n 69%|██████▉ | 69/100 [00:04<00:02, 14.43it/s]\n 71%|███████ | 71/100 [00:04<00:01, 14.57it/s]\n 73%|███████▎ | 73/100 [00:05<00:01, 14.57it/s]\n 75%|███████▌ | 75/100 [00:05<00:01, 14.81it/s]\n 77%|███████▋ | 77/100 [00:05<00:01, 14.81it/s]\n 79%|███████▉ | 79/100 [00:05<00:01, 14.98it/s]\n 81%|████████ | 81/100 [00:05<00:01, 15.16it/s]\n 83%|████████▎ | 83/100 [00:05<00:01, 15.24it/s]\n 85%|████████▌ | 85/100 [00:05<00:00, 15.34it/s]\n 87%|████████▋ | 87/100 [00:06<00:00, 15.44it/s]\n 89%|████████▉ | 89/100 [00:06<00:00, 15.35it/s]\n 91%|█████████ | 91/100 [00:06<00:00, 15.42it/s]\n 93%|█████████▎| 93/100 [00:06<00:00, 15.37it/s]\n 95%|█████████▌| 95/100 [00:06<00:00, 15.34it/s]\n 97%|█████████▋| 97/100 [00:06<00:00, 15.43it/s]/root/.pyenv/versions/3.10.15/lib/python3.10/site-packages/torchsde/_brownian/brownian_interval.py:599: UserWarning: Should have ta>=t0 but got ta=0.029999999329447746 and t0=0.03.\nwarnings.warn(f\"Should have ta>=t0 but got ta={ta} and t0={self._start}.\")\n 99%|█████████▉| 99/100 [00:06<00:00, 15.43it/s]\n100%|██████████| 100/100 [00:06<00:00, 14.61it/s]",
"metrics": {
"predict_time": 7.731288225,
"total_time": 370.785854
},
"output": "https://replicate.delivery/czjl/S1zDW6lofGTxHKaGE4iQPjY1wMnZkeEB4ExfAfIebMfhtPvAF/output.wav",
"started_at": "2025-01-08T06:39:43.159566Z",
"status": "succeeded",
"urls": {
"stream": "https://stream.replicate.com/v1/files/fddq-krtvscrikk4erqado76o2g3jca7oki47cfvkprqrwzuaaq6k4ztq",
"get": "https://api.replicate.com/v1/predictions/vsaagqva15rg80cm8jfaafk59r",
"cancel": "https://api.replicate.com/v1/predictions/vsaagqva15rg80cm8jfaafk59r/cancel"
},
"version": "0f69810d42dd9098b8d20d0c8db087fbfd5ccd1704eef0a99a48c7325ab7fc3a"
}
This model runs on Nvidia T4 GPU hardware. We don't yet have enough runs of this model to provide performance information.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
This model runs on T4 hardware, which costs $0.000225 per second.
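For a rough sense of cost: the example prediction above reports a predict_time of about 7.73 seconds, which at $0.000225 per second comes to roughly $0.0017 for an 8-second clip (assuming only predict time, not setup or queue time, is billed).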