lucataco/realvisxl2-lora-inference:9b5a0c77
Input
Run this model in Node.js with one line of code:
npm install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
import fs from "node:fs";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run lucataco/realvisxl2-lora-inference using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"lucataco/realvisxl2-lora-inference:9b5a0c77cd4f6bdb53a2c3d05b4774df02876d21dd7d37f13f518c03e996945b",
{
input: {
seed: 6995,
width: 1024,
height: 1024,
prompt: "A photo of TOK",
refine: "no_refiner",
lora_url: "https://replicate.delivery/pbxt/L5zHkM0OHX4ZF1Ipnaiok6GHGvrRgZHBqbz2JjtBAtWz8mdE/trained_model.tar",
scheduler: "DPMSolverMultistep",
lora_scale: 0.6,
num_outputs: 1,
guidance_scale: 7.5,
apply_watermark: true,
high_noise_frac: 0.8,
negative_prompt: "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
prompt_strength: 0.8,
num_inference_steps: 50
}
}
);
// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"
// To write the file to disk:
await fs.promises.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
To run this model in Python, install Replicate's Python client library:
pip install replicate
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run lucataco/realvisxl2-lora-inference using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"lucataco/realvisxl2-lora-inference:9b5a0c77cd4f6bdb53a2c3d05b4774df02876d21dd7d37f13f518c03e996945b",
input={
"seed": 6995,
"width": 1024,
"height": 1024,
"prompt": "A photo of TOK",
"refine": "no_refiner",
"lora_url": "https://replicate.delivery/pbxt/L5zHkM0OHX4ZF1Ipnaiok6GHGvrRgZHBqbz2JjtBAtWz8mdE/trained_model.tar",
"scheduler": "DPMSolverMultistep",
"lora_scale": 0.6,
"num_outputs": 1,
"guidance_scale": 7.5,
"apply_watermark": True,
"high_noise_frac": 0.8,
"negative_prompt": "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
"prompt_strength": 0.8,
"num_inference_steps": 50
}
)
print(output)
To learn more, take a look at the guide on getting started with Python.
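If you want to save the generated images rather than just print their URLs, a minimal sketch like the following works. It assumes the output is a list of image URLs, as in the example prediction shown under Output below; newer versions of the Python client may return file-like objects instead.
import replicate
import urllib.request

output = replicate.run(
    "lucataco/realvisxl2-lora-inference:9b5a0c77cd4f6bdb53a2c3d05b4774df02876d21dd7d37f13f518c03e996945b",
    input={"prompt": "A photo of TOK"},  # pass the other inputs from the example above as needed
)

# Download each result (e.g. out-0.png) next to the script.
for i, url in enumerate(output):
    urllib.request.urlretrieve(url, f"out-{i}.png")
    print(f"Saved out-{i}.png from {url}")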
To run this model with the HTTP API, first set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run lucataco/realvisxl2-lora-inference using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "lucataco/realvisxl2-lora-inference:9b5a0c77cd4f6bdb53a2c3d05b4774df02876d21dd7d37f13f518c03e996945b",
"input": {
"seed": 6995,
"width": 1024,
"height": 1024,
"prompt": "A photo of TOK",
"refine": "no_refiner",
"lora_url": "https://replicate.delivery/pbxt/L5zHkM0OHX4ZF1Ipnaiok6GHGvrRgZHBqbz2JjtBAtWz8mdE/trained_model.tar",
"scheduler": "DPMSolverMultistep",
"lora_scale": 0.6,
"num_outputs": 1,
"guidance_scale": 7.5,
"apply_watermark": true,
"high_noise_frac": 0.8,
"negative_prompt": "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
"prompt_strength": 0.8,
"num_inference_steps": 50
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
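The cURL request above returns a prediction object like the one shown under Output. Here is a minimal Python sketch of the same flow, using the third-party requests package (an assumption; any HTTP client works). With the "Prefer: wait" header the response is usually a finished prediction, but the sketch also polls the prediction's get URL in case it is still processing.
import os
import time
import requests

headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
    "Prefer": "wait",
}
body = {
    "version": "lucataco/realvisxl2-lora-inference:9b5a0c77cd4f6bdb53a2c3d05b4774df02876d21dd7d37f13f518c03e996945b",
    "input": {"prompt": "A photo of TOK"},  # add the other inputs from the example above as needed
}

# Create the prediction.
prediction = requests.post(
    "https://api.replicate.com/v1/predictions", headers=headers, json=body
).json()

# Poll the prediction's get URL until it reaches a terminal status.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(2)
    prediction = requests.get(prediction["urls"]["get"], headers=headers).json()

print(prediction["status"], prediction.get("output"))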
Output
{
"completed_at": "2023-11-08T22:28:45.440078Z",
"created_at": "2023-11-08T22:26:49.691869Z",
"data_removed": false,
"error": null,
"id": "75pamcdbkupkj2mueyccx6auqe",
"input": {
"seed": 6995,
"width": 1024,
"height": 1024,
"prompt": "A photo of TOK",
"refine": "no_refiner",
"lora_url": "https://replicate.delivery/pbxt/L5zHkM0OHX4ZF1Ipnaiok6GHGvrRgZHBqbz2JjtBAtWz8mdE/trained_model.tar",
"scheduler": "DPMSolverMultistep",
"lora_scale": 0.6,
"num_outputs": 1,
"guidance_scale": 7.5,
"apply_watermark": true,
"high_noise_frac": 0.8,
"negative_prompt": "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
"prompt_strength": 0.8,
"num_inference_steps": 50
},
"logs": "LORA\nLoading ssd txt2img pipeline...\nLoading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]\nLoading pipeline components...: 29%|██▊ | 2/7 [00:00<00:00, 18.02it/s]\nLoading pipeline components...: 57%|█████▋ | 4/7 [00:00<00:00, 12.25it/s]\nLoading pipeline components...: 86%|████████▌ | 6/7 [00:00<00:00, 6.93it/s]\nLoading pipeline components...: 100%|██████████| 7/7 [00:01<00:00, 5.83it/s]\nLoading pipeline components...: 100%|██████████| 7/7 [00:01<00:00, 6.97it/s]\nLoading ssd lora weights...\nLoading fine-tuned model\nDoes not have Unet. Assume we are using LoRA\nLoading Unet LoRA\nUsing seed: 6995\nPrompt: A photo of <s0><s1>\ntxt2img mode\n 0%| | 0/50 [00:00<?, ?it/s]/root/.pyenv/versions/3.11.6/lib/python3.11/site-packages/diffusers/models/attention_processor.py:1815: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using `LoraLoaderMixin.load_lora_weights`\ndeprecate(\n 2%|▏ | 1/50 [00:00<00:18, 2.59it/s]\n 4%|▍ | 2/50 [00:00<00:13, 3.65it/s]\n 6%|▌ | 3/50 [00:00<00:12, 3.68it/s]\n 8%|▊ | 4/50 [00:01<00:12, 3.70it/s]\n 10%|█ | 5/50 [00:01<00:12, 3.70it/s]\n 12%|█▏ | 6/50 [00:01<00:11, 3.70it/s]\n 14%|█▍ | 7/50 [00:01<00:11, 3.71it/s]\n 16%|█▌ | 8/50 [00:02<00:11, 3.71it/s]\n 18%|█▊ | 9/50 [00:02<00:11, 3.71it/s]\n 20%|██ | 10/50 [00:02<00:10, 3.70it/s]\n 22%|██▏ | 11/50 [00:03<00:10, 3.71it/s]\n 24%|██▍ | 12/50 [00:03<00:10, 3.70it/s]\n 26%|██▌ | 13/50 [00:03<00:09, 3.71it/s]\n 28%|██▊ | 14/50 [00:03<00:09, 3.71it/s]\n 30%|███ | 15/50 [00:04<00:09, 3.71it/s]\n 32%|███▏ | 16/50 [00:04<00:09, 3.70it/s]\n 34%|███▍ | 17/50 [00:04<00:08, 3.70it/s]\n 36%|███▌ | 18/50 [00:04<00:08, 3.70it/s]\n 38%|███▊ | 19/50 [00:05<00:08, 3.70it/s]\n 40%|████ | 20/50 [00:05<00:08, 3.70it/s]\n 42%|████▏ | 21/50 [00:05<00:07, 3.70it/s]\n 44%|████▍ | 22/50 [00:05<00:07, 3.70it/s]\n 46%|████▌ | 23/50 [00:06<00:07, 3.70it/s]\n 48%|████▊ | 24/50 [00:06<00:07, 3.70it/s]\n 50%|█████ | 25/50 [00:06<00:06, 3.70it/s]\n 52%|█████▏ | 26/50 [00:07<00:06, 3.70it/s]\n 54%|█████▍ | 27/50 [00:07<00:06, 3.70it/s]\n 56%|█████▌ | 28/50 [00:07<00:05, 3.70it/s]\n 58%|█████▊ | 29/50 [00:07<00:05, 3.70it/s]\n 60%|██████ | 30/50 [00:08<00:05, 3.70it/s]\n 62%|██████▏ | 31/50 [00:08<00:05, 3.70it/s]\n 64%|██████▍ | 32/50 [00:08<00:04, 3.70it/s]\n 66%|██████▌ | 33/50 [00:08<00:04, 3.70it/s]\n 68%|██████▊ | 34/50 [00:09<00:04, 3.70it/s]\n 70%|███████ | 35/50 [00:09<00:04, 3.69it/s]\n 72%|███████▏ | 36/50 [00:09<00:03, 3.69it/s]\n 74%|███████▍ | 37/50 [00:10<00:03, 3.69it/s]\n 76%|███████▌ | 38/50 [00:10<00:03, 3.69it/s]\n 78%|███████▊ | 39/50 [00:10<00:02, 3.69it/s]\n 80%|████████ | 40/50 [00:10<00:02, 3.69it/s]\n 82%|████████▏ | 41/50 [00:11<00:02, 3.69it/s]\n 84%|████████▍ | 42/50 [00:11<00:02, 3.69it/s]\n 86%|████████▌ | 43/50 [00:11<00:01, 3.69it/s]\n 88%|████████▊ | 44/50 [00:11<00:01, 3.69it/s]\n 90%|█████████ | 45/50 [00:12<00:01, 3.69it/s]\n 92%|█████████▏| 46/50 [00:12<00:01, 3.68it/s]\n 94%|█████████▍| 47/50 [00:12<00:00, 3.68it/s]\n 96%|█████████▌| 48/50 [00:13<00:00, 3.68it/s]\n 98%|█████████▊| 49/50 [00:13<00:00, 3.68it/s]\n100%|██████████| 50/50 [00:13<00:00, 3.68it/s]\n100%|██████████| 50/50 [00:13<00:00, 3.69it/s]",
"metrics": {
"predict_time": 25.235308,
"total_time": 115.748209
},
"output": [
"https://replicate.delivery/pbxt/u3l5edtwjTziNio7SouYF3SEoyT3lfVwXSYSAQDrV6pc2b2RA/out-0.png"
],
"started_at": "2023-11-08T22:28:20.204770Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/75pamcdbkupkj2mueyccx6auqe",
"cancel": "https://api.replicate.com/v1/predictions/75pamcdbkupkj2mueyccx6auqe/cancel"
},
"version": "9b5a0c77cd4f6bdb53a2c3d05b4774df02876d21dd7d37f13f518c03e996945b"
}
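Every prediction has an id and can be fetched again later. A minimal sketch with the Python client, using the id from the example output above:
import replicate

# Re-fetch the prediction shown above by its id.
prediction = replicate.predictions.get("75pamcdbkupkj2mueyccx6auqe")
print(prediction.status)   # "succeeded"
print(prediction.output)   # ["https://replicate.delivery/.../out-0.png"]
print(prediction.metrics)  # {"predict_time": 25.235308, ...}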