You're looking at a specific version of this model.
Input
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run nohamoamary/image-captioning-with-visual-attention using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"nohamoamary/image-captioning-with-visual-attention:9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986",
{
input: {}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
To run this model in Python, install Replicate's client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run nohamoamary/image-captioning-with-visual-attention using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"nohamoamary/image-captioning-with-visual-attention:9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986",
input={}
)
print(output)
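The input object above is left empty. Judging from the example prediction under Output below, the model appears to take a single field named "input" holding an image URL; treat that field name as an assumption here and confirm it against the model's schema. A minimal sketch:
import replicate

# Sketch only: the field name "input" and the image URL are taken from the
# example prediction shown under Output; check the model's schema before use.
output = replicate.run(
    "nohamoamary/image-captioning-with-visual-attention:9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986",
    input={
        "input": "https://replicate.delivery/mgxm/0375d10a-8374-4ef6-b9f0-d40a253b0687/18.jpeg"
    }
)
print(output)  # e.g. [{'text': 'man hikes on foggy cliff'}]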
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run nohamoamary/image-captioning-with-visual-attention using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986",
"input": {}
}' \
https://api.replicate.com/v1/predictions
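The same request can be made from Python with the requests library; a minimal sketch that mirrors the curl call above, keeping the empty input object (see the model's schema for the actual input fields):
import os
import requests

# Mirrors the curl example: Bearer auth, "Prefer: wait" to block until the
# prediction finishes, and the same version hash and (empty) input.
resp = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers={
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
        "Prefer": "wait",
    },
    json={
        "version": "9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986",
        "input": {},
    },
)
print(resp.json())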
To learn more, take a look at Replicate’s HTTP API reference docs.
brew install cog
If you don’t have Homebrew, there are other installation options available.
Pull and run nohamoamary/image-captioning-with-visual-attention using Cog (this will download the full model and run it in your local environment):
cog predict r8.im/nohamoamary/image-captioning-with-visual-attention@sha256:9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986 \
To learn more, take a look at the Cog documentation.
Pull and run nohamoamary/image-captioning-with-visual-attention using Docker (this will download the full model and run it in your local environment):
docker run -d -p 5000:5000 r8.im/nohamoamary/image-captioning-with-visual-attention@sha256:9bb60a6baa58801aa7cd4c4fafc95fcf1531bf59b84962aff5a718f4d1f58986
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {}
}' \
http://localhost:5000/predictions
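Once the container is running, you can also send the prediction request from Python; a minimal sketch with the requests library, again keeping the empty input object from the curl example:
import requests

# POST the same JSON body as the curl example above to the locally running
# container; fill in the model's actual input fields per its schema.
resp = requests.post(
    "http://localhost:5000/predictions",
    json={"input": {}},
)
print(resp.json())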
Each run costs approximately $0.00022.
Output
{
"completed_at": "2022-01-23T20:31:03.709606Z",
"created_at": "2022-01-23T20:30:27.842247Z",
"data_removed": false,
"error": null,
"id": "3mekk6icijf33ccccdhh45oo4a",
"input": {
"input": "https://replicate.delivery/mgxm/0375d10a-8374-4ef6-b9f0-d40a253b0687/18.jpeg"
},
"logs": "2022-01-23 20:31:00.725074: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudnn.so.8\n2022-01-23 20:31:01.363218: I tensorflow/stream_executor/cuda/cuda_dnn.cc:359] Loaded cuDNN version 8101\n2022-01-23 20:31:02.099783: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublas.so.11\n2022-01-23 20:31:02.600324: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublasLt.so.11",
"metrics": {
"predict_time": 2.636301,
"total_time": 35.867359
},
"output": [
{
"text": "man hikes on foggy cliff"
}
],
"started_at": "2022-01-23T20:31:01.073305Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/3mekk6icijf33ccccdhh45oo4a",
"cancel": "https://api.replicate.com/v1/predictions/3mekk6icijf33ccccdhh45oo4a/cancel"
},
"version": "508d006dfaff69aa9c997eb7d778c891f34777ad368ca4d3e9958b6e3abc2bd3"
}
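The caption itself is in the output field, a list of objects each carrying a text key. A small sketch of extracting it from a prediction response shaped like the JSON above:
# Prediction dict trimmed to the fields used here, shaped like the example above.
prediction = {
    "status": "succeeded",
    "output": [{"text": "man hikes on foggy cliff"}],
}

if prediction["status"] == "succeeded":
    print(prediction["output"][0]["text"])  # man hikes on foggy cliff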
This example was created by a different version, nohamoamary/image-captioning-with-visual-attention:508d006d.