blue_pencil-XL meets ANIMAGINE XL 3.0 / ANIMAGINE XL 3.1, the top-ranked model on Civitai.
To run this model in Node.js, first install Replicate's Node.js client library:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run charlesmccarthy/anima_pencil-xl using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"charlesmccarthy/anima_pencil-xl:7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3",
{
input: {
vae: "sdxl-vae-fp16-fix",
seed: -1,
model: "Anima_Pencil-XL-v4.safetensors",
steps: 35,
width: 1184,
height: 864,
prompt: "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
cfg_scale: 7,
scheduler: "DPM++ 2M SDE Karras",
batch_size: 1,
negative_prompt: "unaestheticXL_Sky3.1, animal, cat, dog, big breasts",
guidance_rescale: 0.7
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
To run the model with Python, first install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run charlesmccarthy/anima_pencil-xl using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"charlesmccarthy/anima_pencil-xl:7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3",
input={
"vae": "sdxl-vae-fp16-fix",
"seed": -1,
"model": "Anima_Pencil-XL-v4.safetensors",
"steps": 35,
"width": 1184,
"height": 864,
"prompt": "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
"cfg_scale": 7,
"scheduler": "DPM++ 2M SDE Karras",
"batch_size": 1,
"negative_prompt": "unaestheticXL_Sky3.1, animal, cat, dog, big breasts",
"guidance_rescale": 0.7
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
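If you want to save the generated image locally, here is a minimal sketch that continues from the snippet above. It assumes the output is a list of image URL strings, as in the example prediction later on this page; depending on the client version, items may instead be file-like objects.
import urllib.request

# `output` comes from the replicate.run call above.
# Assumption: each item is an image URL string, as in the example prediction below.
for i, url in enumerate(output):
    urllib.request.urlretrieve(url, f"output_{i}.png")
    print(f"saved output_{i}.png")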
To call the model directly over Replicate's HTTP API, set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run charlesmccarthy/anima_pencil-xl using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3",
"input": {
"vae": "sdxl-vae-fp16-fix",
"seed": -1,
"model": "Anima_Pencil-XL-v4.safetensors",
"steps": 35,
"width": 1184,
"height": 864,
"prompt": "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
"cfg_scale": 7,
"scheduler": "DPM++ 2M SDE Karras",
"batch_size": 1,
"negative_prompt": "unaestheticXL_Sky3.1, animal, cat, dog, big breasts",
"guidance_rescale": 0.7
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
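The same request can be made from Python without the client library. This is a minimal sketch mirroring the curl call above; it assumes the third-party requests package is installed (pip install requests), though any HTTP client works.
import os
import requests  # assumption: `pip install requests`

resp = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers={
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
        # Ask the API to hold the connection open until the prediction finishes.
        "Prefer": "wait",
    },
    json={
        "version": "7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3",
        "input": {
            "prompt": "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
            "width": 1184,
            "height": 864,
            "steps": 35,
        },
    },
)
prediction = resp.json()
print(prediction["status"], prediction.get("output"))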
You can also run this model on your own machine with Cog. Install Cog with Homebrew:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/charlesmccarthy/anima_pencil-xl@sha256:7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3 \
-i 'vae="sdxl-vae-fp16-fix"' \
-i 'seed=-1' \
-i 'model="Anima_Pencil-XL-v4.safetensors"' \
-i 'steps=35' \
-i 'width=1184' \
-i 'height=864' \
-i 'prompt="1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset"' \
-i 'cfg_scale=7' \
-i 'scheduler="DPM++ 2M SDE Karras"' \
-i 'batch_size=1' \
-i 'negative_prompt="unaestheticXL_Sky3.1, animal, cat, dog, big breasts"' \
-i 'guidance_rescale=0.7'
To learn more, take a look at the Cog documentation.
Alternatively, run this to download the model and start it as a local HTTP server with Docker, then send it a prediction request:
docker run -d -p 5000:5000 --gpus=all r8.im/charlesmccarthy/anima_pencil-xl@sha256:7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {
"vae": "sdxl-vae-fp16-fix",
"seed": -1,
"model": "Anima_Pencil-XL-v4.safetensors",
"steps": 35,
"width": 1184,
"height": 864,
"prompt": "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
"cfg_scale": 7,
"scheduler": "DPM++ 2M SDE Karras",
"batch_size": 1,
"negative_prompt": "unaestheticXL_Sky3.1, animal, cat, dog, big breasts",
"guidance_rescale": 0.7
}
}' \
http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
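Once the container is up, the local endpoint accepts the same kind of request. Here is a minimal Python sketch of the curl call above (again assuming the requests package is installed):
import requests  # assumption: `pip install requests`

# The container started above serves predictions on port 5000.
resp = requests.post(
    "http://localhost:5000/predictions",
    headers={"Content-Type": "application/json"},
    json={
        "input": {
            "prompt": "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
            "width": 1184,
            "height": 864,
            "steps": 35,
        }
    },
)
print(resp.json())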
Example prediction (raw API response):
{
"completed_at": "2024-06-02T23:50:27.669548Z",
"created_at": "2024-06-02T23:48:22.368000Z",
"data_removed": false,
"error": null,
"id": "jacgrzn6w1rgp0cfvdd9ysk4nc",
"input": {
"vae": "sdxl-vae-fp16-fix",
"seed": -1,
"model": "Anima_Pencil-XL-v4.safetensors",
"steps": 35,
"width": 1184,
"height": 864,
"prompt": "1girl, cat girl, cat ears, cat tail, yellow eyes, white hair, bob cut, from side, scenery, sunset",
"cfg_scale": 7,
"scheduler": "DPM++ 2M SDE Karras",
"batch_size": 1,
"negative_prompt": "unaestheticXL_Sky3.1, animal, cat, dog, big breasts",
"guidance_rescale": 0.7
},
"logs": "0%| | 0/35 [00:00<?, ?it/s]/root/.pyenv/versions/3.10.14/lib/python3.10/site-packages/torch/nn/modules/conv.py:456: UserWarning: Plan failed with a cudnnException: CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR: cudnnFinalize Descriptor Failed cudnn_status: CUDNN_STATUS_NOT_SUPPORTED (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:919.)\nreturn F.conv2d(input, weight, bias, self.stride,\n 3%|▎ | 1/35 [00:01<01:06, 1.96s/it]\n 6%|▌ | 2/35 [00:02<00:28, 1.15it/s]\n 9%|▊ | 3/35 [00:02<00:17, 1.79it/s]\n 11%|█▏ | 4/35 [00:02<00:12, 2.42it/s]\n 14%|█▍ | 5/35 [00:02<00:09, 3.01it/s]\n 17%|█▋ | 6/35 [00:02<00:08, 3.52it/s]\n 20%|██ | 7/35 [00:03<00:07, 3.95it/s]\n 23%|██▎ | 8/35 [00:03<00:06, 4.29it/s]\n 26%|██▌ | 9/35 [00:03<00:05, 4.55it/s]\n 29%|██▊ | 10/35 [00:03<00:05, 4.75it/s]\n 31%|███▏ | 11/35 [00:03<00:04, 4.89it/s]\n 34%|███▍ | 12/35 [00:03<00:04, 4.99it/s]\n 37%|███▋ | 13/35 [00:04<00:04, 5.06it/s]\n 40%|████ | 14/35 [00:04<00:04, 5.12it/s]\n 43%|████▎ | 15/35 [00:04<00:03, 5.16it/s]\n 46%|████▌ | 16/35 [00:04<00:03, 5.18it/s]\n 49%|████▊ | 17/35 [00:04<00:03, 5.20it/s]\n 51%|█████▏ | 18/35 [00:05<00:03, 5.21it/s]\n 54%|█████▍ | 19/35 [00:05<00:03, 5.22it/s]\n 57%|█████▋ | 20/35 [00:05<00:02, 5.23it/s]\n 60%|██████ | 21/35 [00:05<00:02, 5.23it/s]\n 63%|██████▎ | 22/35 [00:05<00:02, 5.23it/s]\n 66%|██████▌ | 23/35 [00:06<00:02, 5.23it/s]\n 69%|██████▊ | 24/35 [00:06<00:02, 5.23it/s]\n 71%|███████▏ | 25/35 [00:06<00:01, 5.23it/s]\n 74%|███████▍ | 26/35 [00:06<00:01, 5.23it/s]\n 77%|███████▋ | 27/35 [00:06<00:01, 5.23it/s]\n 80%|████████ | 28/35 [00:07<00:01, 5.23it/s]\n 83%|████████▎ | 29/35 [00:07<00:01, 5.23it/s]\n 86%|████████▌ | 30/35 [00:07<00:00, 5.23it/s]\n 89%|████████▊ | 31/35 [00:07<00:00, 5.23it/s]\n 91%|█████████▏| 32/35 [00:07<00:00, 5.23it/s]\n 94%|█████████▍| 33/35 [00:07<00:00, 5.23it/s]\n 97%|█████████▋| 34/35 [00:08<00:00, 5.23it/s]\n100%|██████████| 35/35 [00:08<00:00, 5.23it/s]\n100%|██████████| 35/35 [00:08<00:00, 4.19it/s]",
"metrics": {
"predict_time": 10.308153,
"total_time": 125.301548
},
"output": [
"https://replicate.delivery/pbxt/4lQPeySMkJV8dak0feO6NKDFGmZX1e1icJvOsWZOfYneQ3quE/0.png"
],
"started_at": "2024-06-02T23:50:17.361395Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/jacgrzn6w1rgp0cfvdd9ysk4nc",
"cancel": "https://api.replicate.com/v1/predictions/jacgrzn6w1rgp0cfvdd9ysk4nc/cancel"
},
"version": "7e1d2d606d603ef3e4a4ecab1e7d1cf257efe2e5b3e5215a89f14c05113cceb3"
}
Logs from this prediction:
0%| | 0/35 [00:00<?, ?it/s]/root/.pyenv/versions/3.10.14/lib/python3.10/site-packages/torch/nn/modules/conv.py:456: UserWarning: Plan failed with a cudnnException: CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR: cudnnFinalize Descriptor Failed cudnn_status: CUDNN_STATUS_NOT_SUPPORTED (Triggered internally at ../aten/src/ATen/native/cudnn/Conv_v8.cpp:919.)
return F.conv2d(input, weight, bias, self.stride,
3%|▎ | 1/35 [00:01<01:06, 1.96s/it]
6%|▌ | 2/35 [00:02<00:28, 1.15it/s]
9%|▊ | 3/35 [00:02<00:17, 1.79it/s]
11%|█▏ | 4/35 [00:02<00:12, 2.42it/s]
14%|█▍ | 5/35 [00:02<00:09, 3.01it/s]
17%|█▋ | 6/35 [00:02<00:08, 3.52it/s]
20%|██ | 7/35 [00:03<00:07, 3.95it/s]
23%|██▎ | 8/35 [00:03<00:06, 4.29it/s]
26%|██▌ | 9/35 [00:03<00:05, 4.55it/s]
29%|██▊ | 10/35 [00:03<00:05, 4.75it/s]
31%|███▏ | 11/35 [00:03<00:04, 4.89it/s]
34%|███▍ | 12/35 [00:03<00:04, 4.99it/s]
37%|███▋ | 13/35 [00:04<00:04, 5.06it/s]
40%|████ | 14/35 [00:04<00:04, 5.12it/s]
43%|████▎ | 15/35 [00:04<00:03, 5.16it/s]
46%|████▌ | 16/35 [00:04<00:03, 5.18it/s]
49%|████▊ | 17/35 [00:04<00:03, 5.20it/s]
51%|█████▏ | 18/35 [00:05<00:03, 5.21it/s]
54%|█████▍ | 19/35 [00:05<00:03, 5.22it/s]
57%|█████▋ | 20/35 [00:05<00:02, 5.23it/s]
60%|██████ | 21/35 [00:05<00:02, 5.23it/s]
63%|██████▎ | 22/35 [00:05<00:02, 5.23it/s]
66%|██████▌ | 23/35 [00:06<00:02, 5.23it/s]
69%|██████▊ | 24/35 [00:06<00:02, 5.23it/s]
71%|███████▏ | 25/35 [00:06<00:01, 5.23it/s]
74%|███████▍ | 26/35 [00:06<00:01, 5.23it/s]
77%|███████▋ | 27/35 [00:06<00:01, 5.23it/s]
80%|████████ | 28/35 [00:07<00:01, 5.23it/s]
83%|████████▎ | 29/35 [00:07<00:01, 5.23it/s]
86%|████████▌ | 30/35 [00:07<00:00, 5.23it/s]
89%|████████▊ | 31/35 [00:07<00:00, 5.23it/s]
91%|█████████▏| 32/35 [00:07<00:00, 5.23it/s]
94%|█████████▍| 33/35 [00:07<00:00, 5.23it/s]
97%|█████████▋| 34/35 [00:08<00:00, 5.23it/s]
100%|██████████| 35/35 [00:08<00:00, 5.23it/s]
100%|██████████| 35/35 [00:08<00:00, 4.19it/s]
This model costs approximately $0.032 to run on Replicate, or 31 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia L40S GPU hardware. Predictions typically complete within 34 seconds. The predict time for this model varies significantly based on the inputs.
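As a quick sanity check on the "31 runs per $1" figure, here is a back-of-the-envelope sketch using the approximate per-run price quoted above (actual cost varies with your inputs):
cost_per_run = 0.032                            # approximate USD per run, quoted above
print(f"{1 / cost_per_run:.0f} runs per $1")    # ≈ 31
print(f"100 runs ≈ ${100 * cost_per_run:.2f}")  # ≈ $3.20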