fofr / sdxl-multi-controlnet-lora
Multi-ControlNet, LoRA loading, img2img, and inpainting
Prediction
fofr/sdxl-multi-controlnet-lora:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18
ID: unv4n4tbrwdybd4xah6msqf66a
Status: Succeeded
Source: Web
Hardware: A40 (Large)
Input
- width: 768
- height: 768
- prompt: A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm
- refine: no_refiner
- scheduler: K_EULER
- lora_scale: 0.8
- num_outputs: 1
- controlnet_1: soft_edge_hed
- controlnet_2: none
- controlnet_3: none
- lora_weights: https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar
- guidance_scale: 7.5
- apply_watermark: false
- high_noise_frac: 0.8
- negative_prompt: soft, rainbow
- prompt_strength: 0.8
- sizing_strategy: width_height
- controlnet_1_end: 1
- controlnet_2_end: 1
- controlnet_3_end: 1
- controlnet_1_start: 0
- controlnet_2_start: 0
- controlnet_3_start: 0
- num_inference_steps: 30
- controlnet_1_conditioning_scale: 0.8
- controlnet_2_conditioning_scale: 0.8
- controlnet_3_conditioning_scale: 0.75
{ "image": "https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "high_noise_frac": 0.8, "negative_prompt": "soft, rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 }
Install Replicate’s Node.js client library:
npm install replicate
Import and set up the client:
import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "fofr/sdxl-multi-controlnet-lora:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18", { input: { image: "https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png", width: 768, height: 768, prompt: "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", refine: "no_refiner", scheduler: "K_EULER", lora_scale: 0.8, num_outputs: 1, controlnet_1: "soft_edge_hed", controlnet_2: "none", controlnet_3: "none", lora_weights: "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", guidance_scale: 7.5, apply_watermark: false, high_noise_frac: 0.8, negative_prompt: "soft, rainbow", prompt_strength: 0.8, sizing_strategy: "width_height", controlnet_1_end: 1, controlnet_2_end: 1, controlnet_3_end: 1, controlnet_1_image: "https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", controlnet_1_start: 0, controlnet_2_start: 0, controlnet_3_start: 0, num_inference_steps: 30, controlnet_1_conditioning_scale: 0.8, controlnet_2_conditioning_scale: 0.8, controlnet_3_conditioning_scale: 0.75 } } ); // To access the file URL: console.log(output[0].url()); //=> "http://example.com" // To write the file to disk: fs.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Import the client:
import replicate
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "fofr/sdxl-multi-controlnet-lora:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18", input={ "image": "https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": False, "high_noise_frac": 0.8, "negative_prompt": "soft, rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
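The example above only prints the returned value. As the output JSON further down shows, this model returns the preprocessed ControlNet image(s) followed by the generated image(s), so you will usually want to write them to disk. A minimal sketch, assuming the client hands back either plain URL strings or file-like objects with a .url attribute (which of the two you get depends on your client version):

import urllib.request

for i, item in enumerate(output):
    # Newer clients return file-like objects with a .url attribute; older ones return plain URL strings.
    url = item if isinstance(item, str) else item.url
    with urllib.request.urlopen(url) as resp, open(f"output-{i}.png", "wb") as f:
        f.write(resp.read())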
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "fofr/sdxl-multi-controlnet-lora:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18", "input": { "image": "https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "high_noise_frac": 0.8, "negative_prompt": "soft, rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
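The curl request above uses the Prefer: wait header to block until the prediction finishes. If you would rather create the prediction and poll it yourself, here is a rough sketch against the same HTTP endpoints using the third-party requests library; the polling URL comes from the urls.get field visible in the output JSON below, and the input is trimmed to the prompt for brevity (in practice, pass the full input object shown above):

import os
import time
import requests

headers = {"Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}"}

# Create the prediction without waiting for it to finish.
prediction = requests.post(
    "https://api.replicate.com/v1/predictions",
    headers=headers,
    json={
        "version": "fofr/sdxl-multi-controlnet-lora:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18",
        "input": {"prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm"},
    },
).json()

# Poll the "get" URL (the same field that appears under "urls" in the output JSON below).
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["status"], prediction.get("output"))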
You can run this model locally using Cog. First, install Cog:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/fofr/sdxl-multi-controlnet-lora@sha256:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18 \ -i 'image="https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png"' \ -i 'width=768' \ -i 'height=768' \ -i 'prompt="A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm"' \ -i 'refine="no_refiner"' \ -i 'scheduler="K_EULER"' \ -i 'lora_scale=0.8' \ -i 'num_outputs=1' \ -i 'controlnet_1="soft_edge_hed"' \ -i 'controlnet_2="none"' \ -i 'controlnet_3="none"' \ -i 'lora_weights="https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar"' \ -i 'guidance_scale=7.5' \ -i 'apply_watermark=false' \ -i 'high_noise_frac=0.8' \ -i 'negative_prompt="soft, rainbow"' \ -i 'prompt_strength=0.8' \ -i 'sizing_strategy="width_height"' \ -i 'controlnet_1_end=1' \ -i 'controlnet_2_end=1' \ -i 'controlnet_3_end=1' \ -i 'controlnet_1_image="https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png"' \ -i 'controlnet_1_start=0' \ -i 'controlnet_2_start=0' \ -i 'controlnet_3_start=0' \ -i 'num_inference_steps=30' \ -i 'controlnet_1_conditioning_scale=0.8' \ -i 'controlnet_2_conditioning_scale=0.8' \ -i 'controlnet_3_conditioning_scale=0.75'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/fofr/sdxl-multi-controlnet-lora@sha256:a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "image": "https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "high_noise_frac": 0.8, "negative_prompt": "soft, rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
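Once the container from the docker run command above is up, you can also call it from Python instead of curl. A small sketch, with the caveat that whether the local Cog server returns output as URLs or as base64 data URIs depends on the Cog version, so treat the response handling as an assumption:

import requests

response = requests.post(
    "http://localhost:5000/predictions",
    json={
        "input": {
            "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm",
            "num_inference_steps": 30,
        }
    },
    timeout=600,
)
response.raise_for_status()
result = response.json()
# "output" may hold URLs or base64 data URIs depending on the Cog version.
print(result.get("status"), result.get("output"))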
Output
{ "completed_at": "2023-11-15T10:24:18.612369Z", "created_at": "2023-11-15T10:24:09.646918Z", "data_removed": false, "error": null, "id": "unv4n4tbrwdybd4xah6msqf66a", "input": { "image": "https://replicate.delivery/pbxt/JsfNMvUfNDY4hh03Joa4yZXGgWqkk6myOWEncVJdjImv5y1d/out-3-14.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "high_noise_frac": 0.8, "negative_prompt": "soft, rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfNNRrcxL39mumIcKPKRsPf75Bm1sDhU0eOHr8ukiZqczQk/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 }, "logs": "Using seed: 61519\nUsing given dimensions\nskipping loading .. weights already loaded\nPrompt: A <s0><s1> photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm\nProcessing image with soft_edge_hed\nLoading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]\nLoading pipeline components...: 100%|██████████| 7/7 [00:00<00:00, 15567.41it/s]\nYou have 1 ControlNets and you have passed 1 prompts. The conditionings will be fixed across the prompts.\n 0%| | 0/24 [00:00<?, ?it/s]\n 4%|▍ | 1/24 [00:00<00:04, 4.61it/s]\n 8%|▊ | 2/24 [00:00<00:04, 4.60it/s]\n 12%|█▎ | 3/24 [00:00<00:04, 4.60it/s]\n 17%|█▋ | 4/24 [00:00<00:04, 4.60it/s]\n 21%|██ | 5/24 [00:01<00:04, 4.62it/s]\n 25%|██▌ | 6/24 [00:01<00:03, 4.62it/s]\n 29%|██▉ | 7/24 [00:01<00:03, 4.63it/s]\n 33%|███▎ | 8/24 [00:01<00:03, 4.63it/s]\n 38%|███▊ | 9/24 [00:01<00:03, 4.63it/s]\n 42%|████▏ | 10/24 [00:02<00:03, 4.63it/s]\n 46%|████▌ | 11/24 [00:02<00:02, 4.63it/s]\n 50%|█████ | 12/24 [00:02<00:02, 4.63it/s]\n 54%|█████▍ | 13/24 [00:02<00:02, 4.62it/s]\n 58%|█████▊ | 14/24 [00:03<00:02, 4.62it/s]\n 62%|██████▎ | 15/24 [00:03<00:01, 4.62it/s]\n 67%|██████▋ | 16/24 [00:03<00:01, 4.63it/s]\n 71%|███████ | 17/24 [00:03<00:01, 4.63it/s]\n 75%|███████▌ | 18/24 [00:03<00:01, 4.62it/s]\n 79%|███████▉ | 19/24 [00:04<00:01, 4.62it/s]\n 83%|████████▎ | 20/24 [00:04<00:00, 4.62it/s]\n 88%|████████▊ | 21/24 [00:04<00:00, 4.62it/s]\n 92%|█████████▏| 22/24 [00:04<00:00, 4.62it/s]\n 96%|█████████▌| 23/24 [00:04<00:00, 4.62it/s]\n100%|██████████| 24/24 [00:05<00:00, 4.62it/s]\n100%|██████████| 24/24 [00:05<00:00, 4.62it/s]", "metrics": { "predict_time": 8.914106, "total_time": 8.965451 }, "output": [ "https://replicate.delivery/pbxt/XKhUYwvpoppzNJkUYhJ9pGsxflUJChmVBOay9KzeF6zR5k4RA/control-0.png", "https://replicate.delivery/pbxt/tcEOrqX3qXL9CdCtgpkv5gTzEaAXAOMzlYpDqScfDdQpcS8IA/out-0.png" ], "started_at": "2023-11-15T10:24:09.698263Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/unv4n4tbrwdybd4xah6msqf66a", "cancel": "https://api.replicate.com/v1/predictions/unv4n4tbrwdybd4xah6msqf66a/cancel" }, "version": "a7e9ded4a0bf23d05e7bdf65cd53a2bc0549b802cf7740f03351487371e53f18" }
Prediction
fofr/sdxl-multi-controlnet-lora:6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4
ID: 34t5jv3byxdwpcnklkbkamfhey
Status: Succeeded
Source: Web
Hardware: A40 (Large)
Input
- width: 768
- height: 768
- prompt: A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm
- refine: base_image_refiner
- scheduler: K_EULER
- lora_scale: 0.8
- num_outputs: 1
- controlnet_1: soft_edge_hed
- controlnet_2: depth_leres
- controlnet_3: none
- lora_weights: https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar
- refine_steps: 20
- guidance_scale: 7.5
- apply_watermark: false
- negative_prompt: soft, rainbow
- prompt_strength: 0.85
- sizing_strategy: width_height
- controlnet_1_end: 1
- controlnet_2_end: 1
- controlnet_3_end: 1
- controlnet_1_start: 0
- controlnet_2_start: 0
- controlnet_3_start: 0
- num_inference_steps: 30
- controlnet_1_conditioning_scale: 0.4
- controlnet_2_conditioning_scale: 0.4
- controlnet_3_conditioning_scale: 0.75
{ "image": "https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "base_image_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "depth_leres", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "refine_steps": 20, "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "soft, rainbow", "prompt_strength": 0.85, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_image": "https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.4, "controlnet_2_conditioning_scale": 0.4, "controlnet_3_conditioning_scale": 0.75 }
Install Replicate’s Node.js client library:
npm install replicate
Import and set up the client:
import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "fofr/sdxl-multi-controlnet-lora:6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4", { input: { image: "https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png", width: 768, height: 768, prompt: "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", refine: "base_image_refiner", scheduler: "K_EULER", lora_scale: 0.8, num_outputs: 1, controlnet_1: "soft_edge_hed", controlnet_2: "depth_leres", controlnet_3: "none", lora_weights: "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", refine_steps: 20, guidance_scale: 7.5, apply_watermark: false, negative_prompt: "soft, rainbow", prompt_strength: 0.85, sizing_strategy: "width_height", controlnet_1_end: 1, controlnet_2_end: 1, controlnet_3_end: 1, controlnet_1_image: "https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", controlnet_1_start: 0, controlnet_2_image: "https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", controlnet_2_start: 0, controlnet_3_start: 0, num_inference_steps: 30, controlnet_1_conditioning_scale: 0.4, controlnet_2_conditioning_scale: 0.4, controlnet_3_conditioning_scale: 0.75 } } ); // To access the file URL: console.log(output[0].url()); //=> "http://example.com" // To write the file to disk: fs.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Import the client:
import replicate
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "fofr/sdxl-multi-controlnet-lora:6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4", input={ "image": "https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "base_image_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "depth_leres", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "refine_steps": 20, "guidance_scale": 7.5, "apply_watermark": False, "negative_prompt": "soft, rainbow", "prompt_strength": 0.85, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_image": "https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.4, "controlnet_2_conditioning_scale": 0.4, "controlnet_3_conditioning_scale": 0.75 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
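This prediction stacks two ControlNets (soft_edge_hed and depth_leres) and applies each across the whole denoising schedule via controlnet_N_start = 0 and controlnet_N_end = 1. The common convention (as with control_guidance_start/end in diffusers) maps these fractions onto step indices; below is a hypothetical helper illustrating that mapping, with the caveat that the exact behaviour inside this model is an assumption, not something confirmed by this page:

# Sketch: how start/end fractions are commonly mapped onto denoising steps.
def active_steps(start_frac: float, end_frac: float, num_steps: int) -> range:
    """Return the step indices during which a ControlNet would be applied."""
    first = int(round(start_frac * (num_steps - 1)))
    last = int(round(end_frac * (num_steps - 1)))
    return range(first, last + 1)

# With start=0 and end=1 over 30 steps, guidance covers every step:
print(list(active_steps(0, 1, 30))[:3], "...", list(active_steps(0, 1, 30))[-1])
# A hypothetical start of 0.5 would only engage the ControlNet for the second half:
print(list(active_steps(0.5, 1, 30))[0])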
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "fofr/sdxl-multi-controlnet-lora:6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4", "input": { "image": "https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "base_image_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "depth_leres", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "refine_steps": 20, "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "soft, rainbow", "prompt_strength": 0.85, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_image": "https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.4, "controlnet_2_conditioning_scale": 0.4, "controlnet_3_conditioning_scale": 0.75 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/fofr/sdxl-multi-controlnet-lora@sha256:6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4 \ -i 'image="https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png"' \ -i 'width=768' \ -i 'height=768' \ -i 'prompt="A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm"' \ -i 'refine="base_image_refiner"' \ -i 'scheduler="K_EULER"' \ -i 'lora_scale=0.8' \ -i 'num_outputs=1' \ -i 'controlnet_1="soft_edge_hed"' \ -i 'controlnet_2="depth_leres"' \ -i 'controlnet_3="none"' \ -i 'lora_weights="https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar"' \ -i 'refine_steps=20' \ -i 'guidance_scale=7.5' \ -i 'apply_watermark=false' \ -i 'negative_prompt="soft, rainbow"' \ -i 'prompt_strength=0.85' \ -i 'sizing_strategy="width_height"' \ -i 'controlnet_1_end=1' \ -i 'controlnet_2_end=1' \ -i 'controlnet_3_end=1' \ -i 'controlnet_1_image="https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png"' \ -i 'controlnet_1_start=0' \ -i 'controlnet_2_image="https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png"' \ -i 'controlnet_2_start=0' \ -i 'controlnet_3_start=0' \ -i 'num_inference_steps=30' \ -i 'controlnet_1_conditioning_scale=0.4' \ -i 'controlnet_2_conditioning_scale=0.4' \ -i 'controlnet_3_conditioning_scale=0.75'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/fofr/sdxl-multi-controlnet-lora@sha256:6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "image": "https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "base_image_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "depth_leres", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "refine_steps": 20, "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "soft, rainbow", "prompt_strength": 0.85, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_image": "https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.4, "controlnet_2_conditioning_scale": 0.4, "controlnet_3_conditioning_scale": 0.75 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2023-11-15T13:52:34.751582Z", "created_at": "2023-11-15T13:52:20.688953Z", "data_removed": false, "error": null, "id": "34t5jv3byxdwpcnklkbkamfhey", "input": { "image": "https://replicate.delivery/pbxt/JsfQRpixpfcqxm6Sl73977DgXAy8T0RAWP1qxAAh5PChkLG9/out-0-64.png", "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm", "refine": "base_image_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "depth_leres", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "refine_steps": 20, "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "soft, rainbow", "prompt_strength": 0.85, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfQSHT5S2SO8Le9byiyt8FgSRq1qexfeR0yt2ZAbBGaqcn2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_image": "https://replicate.delivery/pbxt/JsfQSLslasaelHKvWbKoi7zWse2KFcrilLAsDzfbXeDGtJp2/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.4, "controlnet_2_conditioning_scale": 0.4, "controlnet_3_conditioning_scale": 0.75 }, "logs": "Using seed: 22214\nUsing given dimensions\nskipping loading .. weights already loaded\nPrompt: A <s0><s1> photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, 18mm\nProcessing image with soft_edge_hed\nProcessing image with depth_leres\nLoading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]\nLoading pipeline components...: 100%|██████████| 7/7 [00:00<00:00, 7879.80it/s]\nYou have 2 ControlNets and you have passed 1 prompts. 
The conditionings will be fixed across the prompts.\n 0%| | 0/25 [00:00<?, ?it/s]\n 4%|▍ | 1/25 [00:00<00:05, 4.37it/s]\n 8%|▊ | 2/25 [00:00<00:05, 4.36it/s]\n 12%|█▏ | 3/25 [00:00<00:05, 4.36it/s]\n 16%|█▌ | 4/25 [00:00<00:04, 4.36it/s]\n 20%|██ | 5/25 [00:01<00:04, 4.36it/s]\n 24%|██▍ | 6/25 [00:01<00:04, 4.36it/s]\n 28%|██▊ | 7/25 [00:01<00:04, 4.36it/s]\n 32%|███▏ | 8/25 [00:01<00:03, 4.36it/s]\n 36%|███▌ | 9/25 [00:02<00:03, 4.36it/s]\n 40%|████ | 10/25 [00:02<00:03, 4.36it/s]\n 44%|████▍ | 11/25 [00:02<00:03, 4.36it/s]\n 48%|████▊ | 12/25 [00:02<00:02, 4.36it/s]\n 52%|█████▏ | 13/25 [00:02<00:02, 4.36it/s]\n 56%|█████▌ | 14/25 [00:03<00:02, 4.36it/s]\n 60%|██████ | 15/25 [00:03<00:02, 4.35it/s]\n 64%|██████▍ | 16/25 [00:03<00:02, 4.35it/s]\n 68%|██████▊ | 17/25 [00:03<00:01, 4.34it/s]\n 72%|███████▏ | 18/25 [00:04<00:01, 4.34it/s]\n 76%|███████▌ | 19/25 [00:04<00:01, 4.35it/s]\n 80%|████████ | 20/25 [00:04<00:01, 4.35it/s]\n 84%|████████▍ | 21/25 [00:04<00:00, 4.35it/s]\n 88%|████████▊ | 22/25 [00:05<00:00, 4.35it/s]\n 92%|█████████▏| 23/25 [00:05<00:00, 4.35it/s]\n 96%|█████████▌| 24/25 [00:05<00:00, 4.35it/s]\n100%|██████████| 25/25 [00:05<00:00, 4.35it/s]\n100%|██████████| 25/25 [00:05<00:00, 4.35it/s]\n 0%| | 0/6 [00:00<?, ?it/s]\n 17%|█▋ | 1/6 [00:00<00:00, 8.05it/s]\n 33%|███▎ | 2/6 [00:00<00:00, 8.00it/s]\n 50%|█████ | 3/6 [00:00<00:00, 7.97it/s]\n 67%|██████▋ | 4/6 [00:00<00:00, 7.97it/s]\n 83%|████████▎ | 5/6 [00:00<00:00, 7.96it/s]\n100%|██████████| 6/6 [00:00<00:00, 7.95it/s]\n100%|██████████| 6/6 [00:00<00:00, 7.97it/s]", "metrics": { "predict_time": 14.027109, "total_time": 14.062629 }, "output": [ "https://replicate.delivery/pbxt/RxZ70el8KT2iNqzq7WGY7XuTPGpFJLWBe8Txd3ewkBVD5PxjA/control-0.png", "https://replicate.delivery/pbxt/fOTiZQ2JWj2KMCO8jbVnrNzZjpt4AkrfXMgYabLkrteE5PxjA/control-1.png", "https://replicate.delivery/pbxt/MjfBA747maWhCa2Dc9EHuWY5NDesXzLiMfRMOohzlt2E5PxjA/out-0.png" ], "started_at": "2023-11-15T13:52:20.724473Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/34t5jv3byxdwpcnklkbkamfhey", "cancel": "https://api.replicate.com/v1/predictions/34t5jv3byxdwpcnklkbkamfhey/cancel" }, "version": "6dce202b5f8c1ebc617969547ee6ba5a32aaaa5da792002322f905675dd611c4" }
Prediction
fofr/sdxl-multi-controlnet-lora:4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278
ID: qzr4q2tbveegedi7eawwj5u4zm
Status: Succeeded
Source: Web
Hardware: A40 (Large)
Input
- width: 768
- height: 768
- prompt: A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm
- refine: no_refiner
- scheduler: K_EULER
- lora_scale: 0.8
- num_outputs: 1
- controlnet_1: soft_edge_hed
- controlnet_2: none
- controlnet_3: none
- lora_weights: https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar
- guidance_scale: 7.5
- apply_watermark: false
- negative_prompt: rainbow
- prompt_strength: 0.8
- sizing_strategy: width_height
- controlnet_1_end: 1
- controlnet_2_end: 1
- controlnet_3_end: 1
- controlnet_1_start: 0
- controlnet_2_start: 0
- controlnet_3_start: 0
- num_inference_steps: 30
- controlnet_1_conditioning_scale: 0.8
- controlnet_2_conditioning_scale: 0.8
- controlnet_3_conditioning_scale: 0.75
{ "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 }
Install Replicate’s Node.js client library:
npm install replicate
Import and set up the client:
import Replicate from "replicate";

const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run( "fofr/sdxl-multi-controlnet-lora:4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278", { input: { width: 768, height: 768, prompt: "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm", refine: "no_refiner", scheduler: "K_EULER", lora_scale: 0.8, num_outputs: 1, controlnet_1: "soft_edge_hed", controlnet_2: "none", controlnet_3: "none", lora_weights: "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", guidance_scale: 7.5, apply_watermark: false, negative_prompt: "rainbow", prompt_strength: 0.8, sizing_strategy: "width_height", controlnet_1_end: 1, controlnet_2_end: 1, controlnet_3_end: 1, controlnet_1_image: "https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", controlnet_1_start: 0, controlnet_2_start: 0, controlnet_3_start: 0, num_inference_steps: 30, controlnet_1_conditioning_scale: 0.8, controlnet_2_conditioning_scale: 0.8, controlnet_3_conditioning_scale: 0.75 } } ); // To access the file URL: console.log(output[0].url()); //=> "http://example.com" // To write the file to disk: fs.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate’s Python client library:
pip install replicate
Import the client:
import replicate
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run( "fofr/sdxl-multi-controlnet-lora:4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278", input={ "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": False, "negative_prompt": "rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 } ) print(output)
To learn more, take a look at the guide on getting started with Python.
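The output below shows a cold start: the LoRA weights are downloaded and loaded before inference, so total_time (~150 s) is far larger than predict_time (~17 s). If you would rather not block inside replicate.run while that happens, here is a sketch using the Python client's predictions API to start the run and check on it later; treat the exact method signatures as assumptions for your client version:

import time
import replicate

# Start the prediction without waiting for the result.
prediction = replicate.predictions.create(
    version="4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278",
    input={
        "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm",
        "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar",
    },
)

while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(5)          # do other work here instead of sleeping
    prediction.reload()    # refresh the status from the API

print(prediction.status, prediction.output)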
Run fofr/sdxl-multi-controlnet-lora using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \ -H "Content-Type: application/json" \ -H "Prefer: wait" \ -d $'{ "version": "fofr/sdxl-multi-controlnet-lora:4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278", "input": { "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 } }' \ https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
You can run this model locally using Cog. First, install Cog:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Run this to download the model and run it in your local environment:
cog predict r8.im/fofr/sdxl-multi-controlnet-lora@sha256:4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278 \ -i 'width=768' \ -i 'height=768' \ -i 'prompt="A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm"' \ -i 'refine="no_refiner"' \ -i 'scheduler="K_EULER"' \ -i 'lora_scale=0.8' \ -i 'num_outputs=1' \ -i 'controlnet_1="soft_edge_hed"' \ -i 'controlnet_2="none"' \ -i 'controlnet_3="none"' \ -i 'lora_weights="https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar"' \ -i 'guidance_scale=7.5' \ -i 'apply_watermark=false' \ -i 'negative_prompt="rainbow"' \ -i 'prompt_strength=0.8' \ -i 'sizing_strategy="width_height"' \ -i 'controlnet_1_end=1' \ -i 'controlnet_2_end=1' \ -i 'controlnet_3_end=1' \ -i 'controlnet_1_image="https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png"' \ -i 'controlnet_1_start=0' \ -i 'controlnet_2_start=0' \ -i 'controlnet_3_start=0' \ -i 'num_inference_steps=30' \ -i 'controlnet_1_conditioning_scale=0.8' \ -i 'controlnet_2_conditioning_scale=0.8' \ -i 'controlnet_3_conditioning_scale=0.75'
To learn more, take a look at the Cog documentation.
Run this to download the model and run it in your local environment:
docker run -d -p 5000:5000 --gpus=all r8.im/fofr/sdxl-multi-controlnet-lora@sha256:4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278
curl -s -X POST \ -H "Content-Type: application/json" \ -d $'{ "input": { "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 } }' \ http://localhost:5000/predictions
To learn more, take a look at the Cog documentation.
Output
{ "completed_at": "2023-11-20T13:40:09.613224Z", "created_at": "2023-11-20T13:37:39.019445Z", "data_removed": false, "error": null, "id": "qzr4q2tbveegedi7eawwj5u4zm", "input": { "width": 768, "height": 768, "prompt": "A TOK photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm", "refine": "no_refiner", "scheduler": "K_EULER", "lora_scale": 0.8, "num_outputs": 1, "controlnet_1": "soft_edge_hed", "controlnet_2": "none", "controlnet_3": "none", "lora_weights": "https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar", "guidance_scale": 7.5, "apply_watermark": false, "negative_prompt": "rainbow", "prompt_strength": 0.8, "sizing_strategy": "width_height", "controlnet_1_end": 1, "controlnet_2_end": 1, "controlnet_3_end": 1, "controlnet_1_image": "https://replicate.delivery/pbxt/JsfCQE8k1lsCinW1yo76yKMQe6R5MRt9WLL3H5T5Ypc5wasq/020e656d-0c71-45c3-a7f5-1facf7d52d4f.png", "controlnet_1_start": 0, "controlnet_2_start": 0, "controlnet_3_start": 0, "num_inference_steps": 30, "controlnet_1_conditioning_scale": 0.8, "controlnet_2_conditioning_scale": 0.8, "controlnet_3_conditioning_scale": 0.75 }, "logs": "Using seed: 22801\nUsing given dimensions\nEnsuring enough disk space...\nFree disk space: 1784535842816\nDownloading weights: https://replicate.delivery/pbxt/hKhpVe6O7EwXNCiWORev3OEDRCoWeMlqZMLQDEvwDyHV3hvjA/trained_model.tar\nb'Downloaded 186 MB bytes in 0.786s (237 MB/s)\\nExtracted 186 MB in 0.070s (2.6 GB/s)\\n'\nDownloaded weights in 1.5510847568511963 seconds\nLoading fine-tuned model\nDoes not have Unet. assume we are using LoRA\nLoading Unet LoRA\nPrompt: A <s0><s1> photo, extreme macro photo of a golden astronaut riding a unicorn statue, in a museum, bokeh, 50mm\nProcessing image with soft_edge_hed\nLoading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]\nLoading pipeline components...: 100%|██████████| 7/7 [00:00<00:00, 13617.87it/s]\nYou have 1 ControlNets and you have passed 1 prompts. The conditionings will be fixed across the prompts.\n 0%| | 0/30 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.18/lib/python3.9/site-packages/diffusers/models/attention_processor.py:1468: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. 
This will be done automatically when using `LoraLoaderMixin.load_lora_weights`\ndeprecate(\n 3%|▎ | 1/30 [00:02<01:20, 2.76s/it]\n 7%|▋ | 2/30 [00:02<00:35, 1.26s/it]\n 10%|█ | 3/30 [00:03<00:21, 1.27it/s]\n 13%|█▎ | 4/30 [00:03<00:14, 1.78it/s]\n 17%|█▋ | 5/30 [00:03<00:10, 2.29it/s]\n 20%|██ | 6/30 [00:03<00:08, 2.76it/s]\n 23%|██▎ | 7/30 [00:04<00:07, 3.18it/s]\n 27%|██▋ | 8/30 [00:04<00:06, 3.53it/s]\n 30%|███ | 9/30 [00:04<00:05, 3.82it/s]\n 33%|███▎ | 10/30 [00:04<00:04, 4.04it/s]\n 37%|███▋ | 11/30 [00:04<00:04, 4.21it/s]\n 40%|████ | 12/30 [00:05<00:04, 4.32it/s]\n 43%|████▎ | 13/30 [00:05<00:03, 4.37it/s]\n 47%|████▋ | 14/30 [00:05<00:03, 4.45it/s]\n 50%|█████ | 15/30 [00:05<00:03, 4.51it/s]\n 53%|█████▎ | 16/30 [00:06<00:03, 4.51it/s]\n 57%|█████▋ | 17/30 [00:06<00:02, 4.56it/s]\n 60%|██████ | 18/30 [00:06<00:02, 4.58it/s]\n 63%|██████▎ | 19/30 [00:06<00:02, 4.60it/s]\n 67%|██████▋ | 20/30 [00:06<00:02, 4.62it/s]\n 70%|███████ | 21/30 [00:07<00:01, 4.63it/s]\n 73%|███████▎ | 22/30 [00:07<00:01, 4.64it/s]\n 77%|███████▋ | 23/30 [00:07<00:01, 4.64it/s]\n 80%|████████ | 24/30 [00:07<00:01, 4.64it/s]\n 83%|████████▎ | 25/30 [00:07<00:01, 4.65it/s]\n 87%|████████▋ | 26/30 [00:08<00:00, 4.65it/s]\n 90%|█████████ | 27/30 [00:08<00:00, 4.65it/s]\n 93%|█████████▎| 28/30 [00:08<00:00, 4.65it/s]\n 97%|█████████▋| 29/30 [00:08<00:00, 4.65it/s]\n100%|██████████| 30/30 [00:09<00:00, 4.65it/s]\n100%|██████████| 30/30 [00:09<00:00, 3.32it/s]", "metrics": { "predict_time": 16.948953, "total_time": 150.593779 }, "output": [ "https://replicate.delivery/pbxt/8486bdfefoHlkpbI4m98dnsm2azyIgIB8MVq3ilVpBgwdi0jA/control-0.png", "https://replicate.delivery/pbxt/GR5kmreA4fjf9JZIFh0GhIoEGEnJj6SmwYTszYVezXWm7EpHB/out-0.png" ], "started_at": "2023-11-20T13:39:52.664271Z", "status": "succeeded", "urls": { "get": "https://api.replicate.com/v1/predictions/qzr4q2tbveegedi7eawwj5u4zm", "cancel": "https://api.replicate.com/v1/predictions/qzr4q2tbveegedi7eawwj5u4zm/cancel" }, "version": "4af60df9514adb91683d0bd7a6980dee8c4d74e3a0c1896eeade402ce2ad9278" }