fermatresearch/sdxl-controlnet-lora:a4fb8402
You're looking at a specific version of this model; see the model overview for the latest version.
This version has been disabled because it consistently fails to complete setup.
Input
Run this model in Node.js. First, install Replicate's Node.js client library:
npm install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run fermatresearch/sdxl-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"fermatresearch/sdxl-controlnet-lora:a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4",
{
input: {
seed: null,
image: "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
prompt: "shot in the style of sksfer, a woman",
refine: "base_image_refiner",
scheduler: "K_EULER",
lora_scale: 0.95,
num_outputs: 1,
lora_weights: "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
refine_steps: 20,
guidance_scale: 7.5,
apply_watermark: true,
condition_scale: 0.5,
negative_prompt: "",
num_inference_steps: 40
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Run this model in Python. First, install Replicate's Python client library:
pip install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run fermatresearch/sdxl-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"fermatresearch/sdxl-controlnet-lora:a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4",
input={
"seed": null,
"image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
"prompt": "shot in the style of sksfer, a woman",
"refine": "base_image_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.95,
"num_outputs": 1,
"lora_weights": "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
"refine_steps": 20,
"guidance_scale": 7.5,
"apply_watermark": True,
"condition_scale": 0.5,
"negative_prompt": "",
"num_inference_steps": 40
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
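For this model, the output is a list of image URLs (see the example output below). Here is a minimal sketch of saving the results to disk using only the standard library; note that newer versions of the Python client may wrap outputs in file objects rather than plain URL strings, so treat the exact output shape as an assumption:
import urllib.request

# `output` comes from the replicate.run(...) call above and is assumed to be
# a list of image URLs, one per requested output (num_outputs=1 here).
for i, url in enumerate(output):
    filename = f"out-{i}.png"  # hypothetical local filename
    urllib.request.urlretrieve(str(url), filename)
    print(f"saved {url} -> {filename}")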
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run fermatresearch/sdxl-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4",
"input": {
"seed": null,
"image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
"prompt": "shot in the style of sksfer, a woman",
"refine": "base_image_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.95,
"num_outputs": 1,
"lora_weights": "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
"refine_steps": 20,
"guidance_scale": 7.5,
"apply_watermark": true,
"condition_scale": 0.5,
"negative_prompt": "",
"num_inference_steps": 40
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
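If you would rather call the HTTP API from Python than shell out to curl, the same request can be made with the third-party requests package (an assumption; any HTTP client works). A minimal sketch, trimmed to a few inputs and using the same Prefer: wait header so the response already contains the finished prediction:
import os
import requests

resp = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers={
        "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
        "Content-Type": "application/json",
        "Prefer": "wait",  # block until the prediction completes
    },
    json={
        "version": "a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4",
        "input": {
            "image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
            "prompt": "shot in the style of sksfer, a woman",
            "lora_weights": "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
            "num_inference_steps": 40,
        },
    },
)
prediction = resp.json()
print(prediction["status"], prediction.get("output"))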
To run this model locally with Cog, first install it:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/fermatresearch/sdxl-controlnet-lora@sha256:a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4 \
-i 'image="https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png"' \
-i 'prompt="shot in the style of sksfer, a woman"' \
-i 'refine="base_image_refiner"' \
-i 'scheduler="K_EULER"' \
-i 'lora_scale=0.95' \
-i 'num_outputs=1' \
-i 'lora_weights="https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar"' \
-i 'refine_steps=20' \
-i 'guidance_scale=7.5' \
-i 'apply_watermark=true' \
-i 'condition_scale=0.5' \
-i 'negative_prompt=""' \
-i 'num_inference_steps=40'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/fermatresearch/sdxl-controlnet-lora@sha256:a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {
"seed": null,
"image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
"prompt": "shot in the style of sksfer, a woman",
"refine": "base_image_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.95,
"num_outputs": 1,
"lora_weights": "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
"refine_steps": 20,
"guidance_scale": 7.5,
"apply_watermark": true,
"condition_scale": 0.5,
"negative_prompt": "",
"num_inference_steps": 40
}
}' \
http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
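With the container from the docker run command above listening on port 5000, the local endpoint can also be called from Python; a minimal sketch using the requests package (an assumption), with the same payload shape as the curl command:
import requests

resp = requests.post(
    "http://localhost:5000/predictions",
    json={
        "input": {
            "image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
            "prompt": "shot in the style of sksfer, a woman",
            "lora_weights": "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
            "num_inference_steps": 40,
        }
    },
)
result = resp.json()
# The local server typically returns output images inline (e.g. as data URIs)
# rather than hosted URLs; inspect result["output"] to see what you got.
print(result["status"])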
Each run costs approximately $0.020.
Output
{
"completed_at": "2023-10-20T09:17:12.161874Z",
"created_at": "2023-10-20T09:16:49.580032Z",
"data_removed": false,
"error": null,
"id": "nfobo4lbzipkb2d5y4ufmk5gea",
"input": {
"seed": null,
"image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
"prompt": "shot in the style of sksfer, a woman",
"refine": "base_image_refiner",
"scheduler": "K_EULER",
"lora_scale": 0.95,
"num_outputs": 1,
"lora_weights": "https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar",
"refine_steps": 20,
"guidance_scale": 7.5,
"apply_watermark": true,
"condition_scale": 0.5,
"negative_prompt": "",
"num_inference_steps": 40
},
"logs": "Using seed: 57790\nloading custom weights\nweights not in cache\nEnsuring enough disk space...\nFree disk space: 1462651285504\nDownloading weights: https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar\ndownloading https://pbxt.replicate.delivery/3wwmvGfvB4weYkJMAR2JJNMXu7RPtd8Hc5ONP3IP23fioXfGB/trained_model.tar\nb'Downloaded 186 MB bytes in 1.558s (119 MB/s)\\nExtracted 186 MB in 0.060s (3.1 GB/s)\\n'\nDownloaded weights in 1.97196626663208 seconds\nLoading fine-tuned model\nDoes not have Unet. assume we are using LoRA\nLoading Unet LoRA\nPrompt: shot in the style of <s0><s1>, a woman\nOriginal width:1024, height:1024\nAspect Ratio: 1.00\nnew_width:1024, new_height:1024\ntxt2img mode\n 0%| | 0/40 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/diffusers/models/attention_processor.py:1468: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using `LoraLoaderMixin.load_lora_weights`\ndeprecate(\n 2%|▎ | 1/40 [00:00<00:14, 2.72it/s]\n 5%|▌ | 2/40 [00:00<00:14, 2.71it/s]\n 8%|▊ | 3/40 [00:01<00:13, 2.71it/s]\n 10%|█ | 4/40 [00:01<00:13, 2.71it/s]\n 12%|█▎ | 5/40 [00:01<00:12, 2.70it/s]\n 15%|█▌ | 6/40 [00:02<00:12, 2.70it/s]\n 18%|█▊ | 7/40 [00:02<00:12, 2.70it/s]\n 20%|██ | 8/40 [00:02<00:11, 2.71it/s]\n 22%|██▎ | 9/40 [00:03<00:11, 2.71it/s]\n 25%|██▌ | 10/40 [00:03<00:11, 2.71it/s]\n 28%|██▊ | 11/40 [00:04<00:10, 2.72it/s]\n 30%|███ | 12/40 [00:04<00:10, 2.71it/s]\n 32%|███▎ | 13/40 [00:04<00:09, 2.72it/s]\n 35%|███▌ | 14/40 [00:05<00:09, 2.72it/s]\n 38%|███▊ | 15/40 [00:05<00:09, 2.72it/s]\n 40%|████ | 16/40 [00:05<00:08, 2.72it/s]\n 42%|████▎ | 17/40 [00:06<00:08, 2.72it/s]\n 45%|████▌ | 18/40 [00:06<00:08, 2.72it/s]\n 48%|████▊ | 19/40 [00:07<00:07, 2.71it/s]\n 50%|█████ | 20/40 [00:07<00:07, 2.71it/s]\n 52%|█████▎ | 21/40 [00:07<00:06, 2.71it/s]\n 55%|█████▌ | 22/40 [00:08<00:06, 2.71it/s]\n 57%|█████▊ | 23/40 [00:08<00:06, 2.71it/s]\n 60%|██████ | 24/40 [00:08<00:05, 2.71it/s]\n 62%|██████▎ | 25/40 [00:09<00:05, 2.71it/s]\n 65%|██████▌ | 26/40 [00:09<00:05, 2.71it/s]\n 68%|██████▊ | 27/40 [00:09<00:04, 2.71it/s]\n 70%|███████ | 28/40 [00:10<00:04, 2.71it/s]\n 72%|███████▎ | 29/40 [00:10<00:04, 2.71it/s]\n 75%|███████▌ | 30/40 [00:11<00:03, 2.71it/s]\n 78%|███████▊ | 31/40 [00:11<00:03, 2.71it/s]\n 80%|████████ | 32/40 [00:11<00:02, 2.71it/s]\n 82%|████████▎ | 33/40 [00:12<00:02, 2.71it/s]\n 85%|████████▌ | 34/40 [00:12<00:02, 2.71it/s]\n 88%|████████▊ | 35/40 [00:12<00:01, 2.71it/s]\n 90%|█████████ | 36/40 [00:13<00:01, 2.71it/s]\n 92%|█████████▎| 37/40 [00:13<00:01, 2.71it/s]\n 95%|█████████▌| 38/40 [00:14<00:00, 2.71it/s]\n 98%|█████████▊| 39/40 [00:14<00:00, 2.71it/s]\n100%|██████████| 40/40 [00:14<00:00, 2.71it/s]\n100%|██████████| 40/40 [00:14<00:00, 2.71it/s]\n 0%| | 0/6 [00:00<?, ?it/s]\n 17%|█▋ | 1/6 [00:00<00:01, 4.33it/s]\n 33%|███▎ | 2/6 [00:00<00:00, 4.30it/s]\n 50%|█████ | 3/6 [00:00<00:00, 4.29it/s]\n 67%|██████▋ | 4/6 [00:00<00:00, 4.28it/s]\n 83%|████████▎ | 5/6 [00:01<00:00, 4.27it/s]\n100%|██████████| 6/6 [00:01<00:00, 4.27it/s]\n100%|██████████| 6/6 [00:01<00:00, 4.28it/s]",
"metrics": {
"predict_time": 22.601503,
"total_time": 22.581842
},
"output": [
"https://replicate.delivery/pbxt/xqJDSe1pbrVPRatKXkTu3CbJj2TQutaeQPxA5UeKlxOv8efNC/out-0.png"
],
"started_at": "2023-10-20T09:16:49.560371Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/nfobo4lbzipkb2d5y4ufmk5gea",
"cancel": "https://api.replicate.com/v1/predictions/nfobo4lbzipkb2d5y4ufmk5gea/cancel"
},
"version": "a4fb84022361602a2401d74435229e90da63ea4a2aab40ebf79afd7af5a081d4"
}
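Without the Prefer: wait header, a prediction is created asynchronously and you poll the urls.get endpoint shown above until status reaches a terminal value (succeeded, failed, or canceled). A minimal polling sketch using the requests package (an assumption), with the prediction object shaped as in the example:
import os
import time
import requests

headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

def wait_for(prediction):
    # Re-fetch the prediction from its "get" URL until it finishes.
    while prediction["status"] not in ("succeeded", "failed", "canceled"):
        time.sleep(1)
        prediction = requests.get(prediction["urls"]["get"], headers=headers).json()
    return prediction

# For the example above, wait_for(prediction)["output"] would be the list of
# output image URLs.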