zylim0702/sdxl-lora-customize-training:a4eefb9b
Input
Run this model in Node.js with one line of code.
First, install Replicate's Node.js client library:
npm install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import Replicate from "replicate";
const replicate = new Replicate({
auth: process.env.REPLICATE_API_TOKEN,
});
Run zylim0702/sdxl-lora-customize-training using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
const output = await replicate.run(
"zylim0702/sdxl-lora-customize-training:a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495",
{
input: {
ti_lr: 0.0003,
is_lora: true,
lora_lr: 0.0001,
verbose: true,
lora_rank: 32,
resolution: 768,
lr_scheduler: "constant",
token_string: "TOK",
caption_prefix: "a photo of TOK, ",
lr_warmup_steps: 100,
max_train_steps: 1000,
num_train_epochs: 4000,
train_batch_size: 4,
unet_learning_rate: 0.000001,
checkpointing_steps: 999999,
clipseg_temperature: 1,
input_images_filetype: "infer",
crop_based_on_salience: true,
use_face_detection_instead: false
}
}
);
console.log(output);
To learn more, take a look at the guide on getting started with Node.js.
Install Replicate's Python client library:
pip install replicate
Then set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run zylim0702/sdxl-lora-customize-training using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
output = replicate.run(
"zylim0702/sdxl-lora-customize-training:a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495",
input={
"ti_lr": 0.0003,
"is_lora": True,
"lora_lr": 0.0001,
"verbose": True,
"lora_rank": 32,
"resolution": 768,
"lr_scheduler": "constant",
"token_string": "TOK",
"caption_prefix": "a photo of TOK, ",
"lr_warmup_steps": 100,
"max_train_steps": 1000,
"num_train_epochs": 4000,
"train_batch_size": 4,
"unet_learning_rate": 0.000001,
"checkpointing_steps": 999999,
"clipseg_temperature": 1,
"input_images_filetype": "infer",
"crop_based_on_salience": True,
"use_face_detection_instead": False
}
)
print(output)
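Training can take a while, so instead of blocking in replicate.run you can create the prediction and poll it yourself. A minimal sketch using the same client library (the 30-second polling interval and the trimmed-down input are illustrative choices, not values from the model's docs):
import time
import replicate

# Start the training job without waiting for it to finish.
prediction = replicate.predictions.create(
    version="a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495",
    input={
        "token_string": "TOK",
        "caption_prefix": "a photo of TOK, ",
        "max_train_steps": 1000,
        # ...pass the remaining inputs shown above
    },
)

# Poll until the prediction reaches a terminal state.
while prediction.status not in ("succeeded", "failed", "canceled"):
    time.sleep(30)
    prediction = replicate.predictions.get(prediction.id)

print(prediction.status)
print(prediction.output)  # see the model's output schema for the exact shape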
To learn more, take a look at the guide on getting started with Python.
Set the REPLICATE_API_TOKEN environment variable:
export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run zylim0702/sdxl-lora-customize-training using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495",
"input": {
"ti_lr": 0.0003,
"is_lora": true,
"lora_lr": 0.0001,
"verbose": true,
"lora_rank": 32,
"resolution": 768,
"lr_scheduler": "constant",
"token_string": "TOK",
"caption_prefix": "a photo of TOK, ",
"lr_warmup_steps": 100,
"max_train_steps": 1000,
"num_train_epochs": 4000,
"train_batch_size": 4,
"unet_learning_rate": 0.000001,
"checkpointing_steps": 999999,
"clipseg_temperature": 1,
"input_images_filetype": "infer",
"crop_based_on_salience": true,
"use_face_detection_instead": false
}
}' \
https://api.replicate.com/v1/predictions
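The Prefer: wait header only holds the connection open for a short time, so a long training run will usually still be in progress when the initial response returns. A minimal Python sketch of the same create-then-poll flow over the HTTP API, using the requests library (the 30-second interval and the trimmed input are assumptions for illustration):
import os
import time
import requests

API = "https://api.replicate.com/v1"
headers = {
    "Authorization": f"Bearer {os.environ['REPLICATE_API_TOKEN']}",
    "Content-Type": "application/json",
}

# Create the prediction (same body as the curl example above, trimmed here).
prediction = requests.post(
    f"{API}/predictions",
    headers=headers,
    json={
        "version": "a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495",
        "input": {"token_string": "TOK", "max_train_steps": 1000},
    },
).json()

# Poll the prediction by id until training finishes.
while prediction["status"] not in ("succeeded", "failed", "canceled"):
    time.sleep(30)
    prediction = requests.get(f"{API}/predictions/{prediction['id']}", headers=headers).json()

print(prediction["status"], prediction.get("output"))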
To learn more, take a look at Replicate’s HTTP API reference docs.
Install Cog with Homebrew:
brew install cog
If you don’t have Homebrew, there are other installation options available.
Pull and run zylim0702/sdxl-lora-customize-training using Cog (this will download the full model and run it in your local environment):
cog predict r8.im/zylim0702/sdxl-lora-customize-training@sha256:a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495 \
-i 'ti_lr=0.0003' \
-i 'is_lora=true' \
-i 'lora_lr=0.0001' \
-i 'verbose=true' \
-i 'lora_rank=32' \
-i 'resolution=768' \
-i 'lr_scheduler="constant"' \
-i 'token_string="TOK"' \
-i 'caption_prefix="a photo of TOK, "' \
-i 'lr_warmup_steps=100' \
-i 'max_train_steps=1000' \
-i 'num_train_epochs=4000' \
-i 'train_batch_size=4' \
-i 'unet_learning_rate=0.000001' \
-i 'checkpointing_steps=999999' \
-i 'clipseg_temperature=1' \
-i 'input_images_filetype="infer"' \
-i 'crop_based_on_salience=true' \
-i 'use_face_detection_instead=false'
To learn more, take a look at the Cog documentation.
Pull and run zylim0702/sdxl-lora-customize-training using Docker (this will download the full model and run it in your local environment):
docker run -d -p 5000:5000 --gpus=all r8.im/zylim0702/sdxl-lora-customize-training@sha256:a4eefb9bbc944eaefc56d67acba982ab73d85ec449de6606cf48b13f1483a495
curl -s -X POST \
-H "Content-Type: application/json" \
-d $'{
"input": {
"ti_lr": 0.0003,
"is_lora": true,
"lora_lr": 0.0001,
"verbose": true,
"lora_rank": 32,
"resolution": 768,
"lr_scheduler": "constant",
"token_string": "TOK",
"caption_prefix": "a photo of TOK, ",
"lr_warmup_steps": 100,
"max_train_steps": 1000,
"num_train_epochs": 4000,
"train_batch_size": 4,
"unet_learning_rate": 0.000001,
"checkpointing_steps": 999999,
"clipseg_temperature": 1,
"input_images_filetype": "infer",
"crop_based_on_salience": true,
"use_face_detection_instead": false
}
}' \
http://localhost:5000/predictions
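The container exposes Cog's standard HTTP server on port 5000, so you can also call it from code rather than curl. A minimal Python sketch using the requests library (note that this endpoint blocks until the prediction finishes; the inputs are trimmed for brevity):
import requests

# POST to the local Cog server; the call returns once training completes.
resp = requests.post(
    "http://localhost:5000/predictions",
    json={
        "input": {
            "token_string": "TOK",
            "caption_prefix": "a photo of TOK, ",
            "max_train_steps": 1000,
            # ...remaining inputs as in the curl example above
        }
    },
)
resp.raise_for_status()
print(resp.json()["output"])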
Running this model on Replicate costs approximately $0.49 per run. Alternatively, try out our featured models for free.