You're looking at a specific version of this model. Jump to the model overview.
tomasmcm /claude2-alpaca-13b:49b2d4cc
Input
Run this model in Node.js with one line of code:
npm install replicate
First, set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
// Replicate API client; authenticates with the REPLICATE_API_TOKEN
// environment variable set above.
import Replicate from "replicate";

const auth = process.env.REPLICATE_API_TOKEN;
const replicate = new Replicate({ auth });
Run tomasmcm/claude2-alpaca-13b using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Model inputs: Alpaca-style instruction prompt, generation stops at the
// "###" section marker.
const input = {
  stop: "###",
  top_k: -1,
  top_p: 0.95,
  prompt: "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nGenerate a haiku about AI.\n\n### Response:\n",
  max_tokens: 128,
  temperature: 0.8,
  presence_penalty: 0,
  frequency_penalty: 0,
};

// Run the pinned model version and print the generated text.
const output = await replicate.run(
  "tomasmcm/claude2-alpaca-13b:49b2d4cc082625d10463f56426bd012ebd75c11e3d6c2b54f7532c6ddd46b944",
  { input },
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
First, set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run tomasmcm/claude2-alpaca-13b using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Model inputs: Alpaca-style instruction prompt, generation stops at the
# "###" section marker.
model_input = {
    "stop": "###",
    "top_k": -1,
    "top_p": 0.95,
    "prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nGenerate a haiku about AI.\n\n### Response:\n",
    "max_tokens": 128,
    "temperature": 0.8,
    "presence_penalty": 0,
    "frequency_penalty": 0,
}

# Run the pinned model version and print the generated text.
output = replicate.run(
    "tomasmcm/claude2-alpaca-13b:49b2d4cc082625d10463f56426bd012ebd75c11e3d6c2b54f7532c6ddd46b944",
    input=model_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
First, set the REPLICATE_API_TOKEN environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run tomasmcm/claude2-alpaca-13b using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction for the pinned model version; the "Prefer: wait"
# header makes the API block until the prediction settles.
curl --silent --request POST \
  --header "Authorization: Bearer $REPLICATE_API_TOKEN" \
  --header "Content-Type: application/json" \
  --header "Prefer: wait" \
  --data $'{
"version": "49b2d4cc082625d10463f56426bd012ebd75c11e3d6c2b54f7532c6ddd46b944",
"input": {
"stop": "###",
"top_k": -1,
"top_p": 0.95,
"prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\\n\\n### Instruction:\\nGenerate a haiku about AI.\\n\\n### Response:\\n",
"max_tokens": 128,
"temperature": 0.8,
"presence_penalty": 0,
"frequency_penalty": 0
}
}' \
  https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
Output
{
"completed_at": "2023-12-10T15:30:32.768997Z",
"created_at": "2023-12-10T15:30:32.277634Z",
"data_removed": false,
"error": null,
"id": "gr6wdltb5pkbqjztz42rfubbim",
"input": {
"stop": "###",
"top_k": -1,
"top_p": 0.95,
"prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nGenerate a haiku about AI.\n\n### Response:\n",
"max_tokens": 128,
"temperature": 0.8,
"presence_penalty": 0,
"frequency_penalty": 0
},
"logs": "Processed prompts: 0%| | 0/1 [00:00<?, ?it/s]\nProcessed prompts: 100%|██████████| 1/1 [00:00<00:00, 2.23it/s]\nProcessed prompts: 100%|██████████| 1/1 [00:00<00:00, 2.23it/s]\nGenerated 16 tokens in 0.44997215270996094 seconds.",
"metrics": {
"predict_time": 0.455228,
"total_time": 0.491363
},
"output": "Artificial intelligence\nIs a powerful technology\nThat can help us.",
"started_at": "2023-12-10T15:30:32.313769Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/gr6wdltb5pkbqjztz42rfubbim",
"cancel": "https://api.replicate.com/v1/predictions/gr6wdltb5pkbqjztz42rfubbim/cancel"
},
"version": "49b2d4cc082625d10463f56426bd012ebd75c11e3d6c2b54f7532c6ddd46b944"
}
Processed prompts: 0%| | 0/1 [00:00<?, ?it/s]
Processed prompts: 100%|██████████| 1/1 [00:00<00:00, 2.23it/s]
Processed prompts: 100%|██████████| 1/1 [00:00<00:00, 2.23it/s]
Generated 16 tokens in 0.44997215270996094 seconds.