Example input:
{
  "debug": false,
  "max_new_tokens": 128,
  "min_new_tokens": 10,
  "prompt": "[PROMPT] a spooky ghost, in the style of",
  "stop_sequences": "[/PROMPT]",
  "temperature": 0.75,
  "top_k": 50,
  "top_p": 0.9
}
Install Replicate's Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_cQc**********************************
This is your API token. Keep it to yourself.
import Replicate from "replicate";
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/llama2-prompter using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
  "fofr/llama2-prompter:4f815ea4e4d6d070cd00469d1960c303f15b9b5634a8faa0a0f0136a93a8acd5",
  {
    input: {
      debug: false,
      max_new_tokens: 128,
      min_new_tokens: 10,
      prompt: "[PROMPT] a spooky ghost, in the style of",
      stop_sequences: "[/PROMPT]",
      temperature: 0.75,
      top_k: 50,
      top_p: 0.9
    }
  }
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_cQc**********************************
This is your API token. Keep it to yourself.
import replicate
Run fofr/llama2-prompter using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
    "fofr/llama2-prompter:4f815ea4e4d6d070cd00469d1960c303f15b9b5634a8faa0a0f0136a93a8acd5",
    input={
        "debug": False,
        "max_new_tokens": 128,
        "min_new_tokens": 10,
        "prompt": "[PROMPT] a spooky ghost, in the style of",
        "stop_sequences": "[/PROMPT]",
        "temperature": 0.75,
        "top_k": 50,
        "top_p": 0.9
    }
)
# The fofr/llama2-prompter model can stream output as it's running.
# replicate.run() returns an iterator, and you can iterate over that output as it streams.
for item in output:
    # https://replicate.com/fofr/llama2-prompter/api#output-schema
    print(item, end="")
To learn more, take a look at the guide on getting started with Python.
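If you would rather not block while the model runs, the prediction can also be created in the background and polled. This is a minimal sketch, assuming a recent version of the replicate Python client and its predictions.create / reload methods; the input values simply reuse the example above.
import time
import replicate

# Create the prediction without waiting for it to finish.
# predictions.create takes the bare version hash rather than "owner/name:hash".
prediction = replicate.predictions.create(
    version="4f815ea4e4d6d070cd00469d1960c303f15b9b5634a8faa0a0f0136a93a8acd5",
    input={
        "prompt": "[PROMPT] a spooky ghost, in the style of",
        "stop_sequences": "[/PROMPT]",
        "max_new_tokens": 128,
    },
)

# Poll until the prediction reaches a terminal state.
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(1)
    prediction.reload()

if prediction.status == "succeeded":
    # For this model the output is a list of token strings; join them into one prompt.
    print("".join(prediction.output))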
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=r8_cQc**********************************
This is your API token. Keep it to yourself.
Run fofr/llama2-prompter using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
  -H "Authorization: Bearer $REPLICATE_API_TOKEN" \
  -H "Content-Type: application/json" \
  -H "Prefer: wait" \
  -d $'{
    "version": "fofr/llama2-prompter:4f815ea4e4d6d070cd00469d1960c303f15b9b5634a8faa0a0f0136a93a8acd5",
    "input": {
      "debug": false,
      "max_new_tokens": 128,
      "min_new_tokens": 10,
      "prompt": "[PROMPT] a spooky ghost, in the style of",
      "stop_sequences": "[/PROMPT]",
      "temperature": 0.75,
      "top_k": 50,
      "top_p": 0.9
    }
  }' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
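The Prefer: wait header asks the API to hold the response open until the prediction finishes. If you skip it, or the prediction takes longer than the wait window, you can fetch the prediction again via its "get" URL. A minimal sketch of that step, here in Python with the requests library and the ID from the example prediction below:
import os
import requests

# The prediction's "get" URL comes from its "urls" field (see the JSON below);
# the ID here is the one from the example prediction.
url = "https://api.replicate.com/v1/predictions/bvbyjmjbjdjxj2z2d76zq7j32q"
headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

prediction = requests.get(url, headers=headers).json()
print(prediction["status"])
if prediction["status"] == "succeeded":
    # Output is a list of token strings; join them to get the generated prompt.
    print("".join(prediction["output"]))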
Example output:
[PROMPT] a spooky ghost, in the style of realistic illustrations, fantasy art, cg, shiny, cobwebs, 32k uhd, dark gray and amber
The full prediction object looks like this:
{
  "id": "bvbyjmjbjdjxj2z2d76zq7j32q",
  "model": "fofr/llama2-prompter",
  "version": "4f815ea4e4d6d070cd00469d1960c303f15b9b5634a8faa0a0f0136a93a8acd5",
  "input": {
    "debug": false,
    "max_new_tokens": 128,
    "min_new_tokens": 10,
    "prompt": "[PROMPT] a spooky ghost, in the style of",
    "stop_sequences": "[/PROMPT]",
    "temperature": 0.75,
    "top_k": 50,
    "top_p": 0.9
  },
  "logs": "Your formatted prompt is:\n[PROMPT] a spooky ghost, in the style of",
  "output": [
    "\n",
    "[P",
    "ROM",
    "PT",
    "]",
    " a",
    " sp",
    "ook",
    "y",
    " g",
    "host",
    ",",
    " in",
    " the",
    " style",
    " of",
    " real",
    "istic",
    " illustr",
    "ations",
    ",",
    " fant",
    "asy",
    " art",
    ",",
    " c",
    "g",
    ",",
    " sh",
    "iny",
    ",",
    " c",
    "ob",
    "web",
    "s",
    ",",
    " ",
    "3",
    "2",
    "k",
    " u",
    "hd",
    ",",
    " dark",
    " gray",
    " and",
    " am",
    "ber",
    ""
  ],
  "data_removed": false,
  "error": null,
  "source": "web",
  "status": "succeeded",
  "created_at": "2023-09-01T10:38:04.484528Z",
  "started_at": "2023-09-01T10:38:04.438772Z",
  "completed_at": "2023-09-01T10:38:08.248884Z",
  "urls": {
    "cancel": "https://api.replicate.com/v1/predictions/bvbyjmjbjdjxj2z2d76zq7j32q/cancel",
    "get": "https://api.replicate.com/v1/predictions/bvbyjmjbjdjxj2z2d76zq7j32q"
  },
  "metrics": {
    "predict_time": 3.810112,
    "total_time": 3.764356
  }
}
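The "urls" field above also exposes a cancel endpoint. As a hedged sketch (again Python with requests), an in-flight prediction could be stopped by POSTing to that URL:
import os
import requests

# Cancel URL for the example prediction above; only useful while it is still running.
cancel_url = "https://api.replicate.com/v1/predictions/bvbyjmjbjdjxj2z2d76zq7j32q/cancel"
headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

response = requests.post(cancel_url, headers=headers)
print(response.status_code)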