Example input:
{
  "prompt": "What are you wanting to chat about today?",
  "voice": "A male speaker with a low-pitched narrator story voice, expressive energetic voice delivers words fast pace in a open space with very clear audio."
}
Install Replicate's Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_7Lr**********************************
This is your API token. Keep it to yourself.
Import and set up the client:
import Replicate from "replicate";
import { writeFile } from "node:fs/promises";
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run lee101/guided-text-to-speech using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "lee101/guided-text-to-speech:fc0617a394340824a7dd1aa78f76e92c061449abd48e67ee9dbe30a6448c8be2",
  {
    input: {
      prompt: "What are you wanting to chat about today?",
      voice: "A male speaker with a low-pitched narrator story voice, expressive energetic voice delivers words fast pace in a open space with very clear audio."
    }
  }
);
// To access the file URL:
console.log(output.url()); //=> "http://example.com"
// To write the audio file to disk (this model returns an MP3):
await writeFile("output.mp3", output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_7Lr**********************************
This is your API token. Keep it to yourself.
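The Python client picks the token up from the REPLICATE_API_TOKEN environment variable automatically. A minimal sanity check before making any API calls (the assertion itself is just illustrative, not part of the client):
import os
# Fail fast if the token is missing; otherwise the client raises an
# authentication error on the first API call.
assert os.environ.get("REPLICATE_API_TOKEN"), "REPLICATE_API_TOKEN is not set"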
Import the client:
import replicate
Run lee101/guided-text-to-speech using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "lee101/guided-text-to-speech:fc0617a394340824a7dd1aa78f76e92c061449abd48e67ee9dbe30a6448c8be2",
    input={
        "prompt": "What are you wanting to chat about today?",
        "voice": "A male speaker with a low-pitched narrator story voice, expressive energetic voice delivers words fast pace in a open space with very clear audio."
    }
)
# To access the file URL:
print(output.url())
#=> "http://example.com"
# To write the audio file to disk (this model returns an MP3):
with open("output.mp3", "wb") as file:
    file.write(output.read())
To learn more, take a look at the guide on getting started with Python.
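If you want the full prediction object (id, status, logs, metrics) rather than just the output, you can create the prediction explicitly and wait for it to finish. A minimal sketch with the Python client, assuming the same version hash and inputs as above; the resulting object mirrors the JSON response shown further down:
import replicate
# Create the prediction without blocking, then wait for it to finish.
prediction = replicate.predictions.create(
    version="fc0617a394340824a7dd1aa78f76e92c061449abd48e67ee9dbe30a6448c8be2",
    input={
        "prompt": "What are you wanting to chat about today?",
        "voice": "A male speaker with a low-pitched narrator story voice, expressive energetic voice delivers words fast pace in a open space with very clear audio."
    }
)
prediction.wait()            # poll until the prediction reaches a terminal state
print(prediction.status)     # "succeeded" on success
print(prediction.output)     # URL of the generated MP3
print(prediction.metrics)    # e.g. {"predict_time": 23.16, ...}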
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_7Lr**********************************
This is your API token. Keep it to yourself.
Run lee101/guided-text-to-speech using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "lee101/guided-text-to-speech:fc0617a394340824a7dd1aa78f76e92c061449abd48e67ee9dbe30a6448c8be2",
    "input": {
      "prompt": "What are you wanting to chat about today?",
      "voice": "A male speaker with a low-pitched narrator story voice, expressive energetic voice delivers words fast pace in a open space with very clear audio."
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs. The request above returns a prediction object like the following:
{
"id": "my3t4g8q0xrg80cfsefbt2n48r",
"model": "lee101/guided-text-to-speech",
"version": "fc0617a394340824a7dd1aa78f76e92c061449abd48e67ee9dbe30a6448c8be2",
"input": {
"prompt": "What are you wanting to chat about today?",
"voice": "A male speaker with a low-pitched narrator story voice, expressive energetic voice delivers words fast pace in a open space with very clear audio."
},
"logs": "Using the model-agnostic default `max_length` (=2580) to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.\nCalling `sample` directly is deprecated and will be removed in v4.41. Use `generate` or a custom generation loop instead.\n--- Logging error ---\nTraceback (most recent call last):\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/logging/__init__.py\", line 1110, in emit\nmsg = self.format(record)\n^^^^^^^^^^^^^^^^^^^\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/logging/__init__.py\", line 953, in format\nreturn fmt.format(record)\n^^^^^^^^^^^^^^^^^^\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/logging/__init__.py\", line 687, in format\nrecord.message = record.getMessage()\n^^^^^^^^^^^^^^^^^^^\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/logging/__init__.py\", line 377, in getMessage\nmsg = msg % self.args\n~~~~^~~~~~~~~~~\nTypeError: not all arguments converted during string formatting\nCall stack:\nFile \"<string>\", line 1, in <module>\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/multiprocessing/spawn.py\", line 122, in spawn_main\nexitcode = _main(fd, parent_sentinel)\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/multiprocessing/spawn.py\", line 135, in _main\nreturn self._bootstrap(parent_sentinel)\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/multiprocessing/process.py\", line 314, in _bootstrap\nself.run()\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/cog/server/worker.py\", line 179, in run\nself._loop()\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/cog/server/worker.py\", line 211, in _loop\nself._predict(ev.payload)\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/cog/server/worker.py\", line 221, in _predict\nresult = predict(**payload)\nFile \"/src/predict.py\", line 14, in predict\nsample_rate, audio_arr = gen_tts(prompt, voice)\nFile \"/src/parlerlib.py\", line 90, in gen_tts\ngeneration = model.generate(\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\nreturn func(*args, **kwargs)\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/parler_tts/modeling_parler_tts.py\", line 2608, in generate\noutputs = self.sample(\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2584, in sample\nreturn self._sample(*args, **kwargs)\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2730, in _sample\nlogger.warning_once(\nFile \"/root/.pyenv/versions/3.11.9/lib/python3.11/site-packages/transformers/utils/logging.py\", line 329, in warning_once\nself.warning(*args, **kwargs)\nMessage: '`eos_token_id` is deprecated in this function and will be removed in v4.41, use `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead. Otherwise make sure to set `model.generation_config.eos_token_id`'\nArguments: (<class 'FutureWarning'>,)",
"output": "https://replicate.delivery/czjl/Dkba34f2BQzjVCHHa2uVbIjye7UvnlJeXrcyMdtEMmJhEWzlA/tmpqr0mdm_w.mp3",
"data_removed": false,
"error": null,
"source": "web",
"status": "succeeded",
"created_at": "2024-05-30T22:28:06.535Z",
"started_at": "2024-05-30T22:32:25.818798Z",
"completed_at": "2024-05-30T22:32:48.981541Z",
"urls": {
"cancel": "https://api.replicate.com/v1/predictions/my3t4g8q0xrg80cfsefbt2n48r/cancel",
"get": "https://api.replicate.com/v1/predictions/my3t4g8q0xrg80cfsefbt2n48r",
"web": "https://replicate.com/p/my3t4g8q0xrg80cfsefbt2n48r"
},
"metrics": {
"predict_time": 23.162743,
"total_time": 282.446541
}
}
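If you drop the Prefer: wait header, or the prediction outlives the wait window, you can poll the urls.get endpoint from the response until the prediction reaches a terminal state and then download the output, which is a plain URL to the generated MP3. A minimal sketch using only the Python standard library, reusing the id from the example response above:
import json
import os
import time
import urllib.request

API_TOKEN = os.environ["REPLICATE_API_TOKEN"]
GET_URL = "https://api.replicate.com/v1/predictions/my3t4g8q0xrg80cfsefbt2n48r"  # urls.get from the response

def get_prediction(url):
    # GET the prediction with the same Bearer token used to create it.
    request = urllib.request.Request(url, headers={"Authorization": f"Bearer {API_TOKEN}"})
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read())

prediction = get_prediction(GET_URL)
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(1)
    prediction = get_prediction(GET_URL)

if prediction["status"] == "succeeded":
    # The output is a publicly fetchable URL, so no auth header is needed here.
    urllib.request.urlretrieve(prediction["output"], "output.mp3")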