Pitch correction on your voice.
Input parameters:
- audio_file: Audio input file.
- scale: Strategy for normalizing audio. Default: "closest"
- output_format: Output format for the generated audio. Default: "wav"
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run nateraw/autotune using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "nateraw/autotune:53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd",
  {
    input: {
      scale: "Gb:min",
      audio_file: "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
      output_format: "wav"
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:

pip install replicate

Then run the model in Python:
import replicate
output = replicate.run(
    "nateraw/autotune:53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd",
    input={
        "scale": "Gb:min",
        "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
        "output_format": "wav"
    }
)
print(output)
To learn more, take a look at the guide on getting started with Python.
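The model returns a link to the pitch-corrected audio file. A minimal sketch for saving it to disk, assuming the returned output is a URL string as in the example prediction shown later on this page:

import urllib.request

# `output` is the value returned by replicate.run(...) above.
# Assumption: it is a URL string (as in the example prediction below);
# newer client versions may return a file-like object instead, in which
# case you can write output.read() to a file directly.
urllib.request.urlretrieve(str(output), "pitch_corrected.wav")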
You can also run nateraw/autotune using Replicate's HTTP API:

curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd",
    "input": {
      "scale": "Gb:min",
      "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
      "output_format": "wav"
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
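The "Prefer: wait" header asks the API to hold the response until the prediction completes. If you omit it, the API responds immediately with a prediction object whose urls.get endpoint you can poll, as in the example prediction response further down this page. A minimal sketch in Python, assuming the requests package is installed:

import os
import time

import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

# Create the prediction (no "Prefer: wait", so the call returns right away).
prediction = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers=headers,
    json={
        "version": "53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd",
        "input": {
            "scale": "Gb:min",
            "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
            "output_format": "wav",
        },
    },
).json()

# Poll the prediction's "get" URL until it reaches a terminal status.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["output"])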
To run the model on your own machine, first install Cog. With Homebrew:

brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/nateraw/autotune@sha256:53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd \
  -i 'scale="Gb:min"' \
  -i 'audio_file="https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav"' \
  -i 'output_format="wav"'
To learn more, take a look at the Cog documentation.
You can also start the model as an HTTP server with Docker and send prediction requests to it:
docker run -d -p 5000:5000 r8.im/nateraw/autotune@sha256:53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd
curl -s -X POST \
  -H "Content-Type: application/json" \
  -d $'{
    "input": {
      "scale": "Gb:min",
      "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
      "output_format": "wav"
    }
  }' \
  http://localhost:5000/predictions
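The same request can be sent from Python. A minimal sketch using the requests package, assuming the local Cog server responds with a JSON prediction object that has "status" and "output" fields similar to the hosted API response shown below:

import requests

# Send a prediction request to the local Cog server started with `docker run` above.
resp = requests.post(
    "http://localhost:5000/predictions",
    json={
        "input": {
            "scale": "Gb:min",
            "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
            "output_format": "wav",
        }
    },
)
resp.raise_for_status()

# Assumption: the local server's response mirrors the hosted prediction format.
prediction = resp.json()
print(prediction.get("status"), prediction.get("output"))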
{ "completed_at": "2024-04-19T06:23:28.241776Z", "created_at": "2024-04-19T06:23:24.297000Z", "data_removed": false, "error": null, "id": "pchg10s4h5rgc0ceym39nxnkm8", "input": { "scale": "Gb:min", "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav", "output_format": "wav" }, "logs": null, "metrics": { "predict_time": 3.927591, "total_time": 3.944776 }, "output": "https://replicate.delivery/pbxt/6jyhVrZFPhoRAh28hwEtKkqPIKxYRELUBwnse0xR8Abwf7rSA/tmpldzs_qfinate_is_singing_Gb_minor_pitch_corrected.wav", "started_at": "2024-04-19T06:23:24.314185Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/pchg10s4h5rgc0ceym39nxnkm8", "cancel": "https://api.replicate.com/v1/predictions/pchg10s4h5rgc0ceym39nxnkm8/cancel" }, "version": "53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd" }
This model runs on CPU hardware. We don't yet have enough runs of this model to provide performance information.
This model doesn't have a readme.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
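If you would rather not block while a cold model boots, you can create a prediction and poll its status instead of calling replicate.run. A minimal sketch with the Python client; the 2-second poll interval is an arbitrary choice:

import time

import replicate

prediction = replicate.predictions.create(
    version="53d58aea27ccd949e5f9d77e4b2a74ffe90e1fa534295b257cea50f011e233dd",
    input={
        "scale": "Gb:min",
        "audio_file": "https://replicate.delivery/pbxt/KlypmfOy3eCR7nfLP2XFRvXbujUFjjoUfQtIXjVrfdmfpbty/nate_is_singing_Gb_minor.wav",
        "output_format": "wav",
    },
)

# Poll until the model (cold or warm) finishes the prediction.
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction.reload()

print(prediction.output)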