alexgenovese
/
train-sdxl-kohya
Kohya Training for XL models
Run alexgenovese/train-sdxl-kohya with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
pretrained_model_name_or_path |
string
|
stabilityai/stable-diffusion-xl-base-1.0
|
base model name or path
|
train_data_zip |
string
|
Upload image dataset in zip format using this naming convention: XX_token className.zip
|
|
network_weights |
string
|
Pretrained LoRA weights
|
|
output_name |
string
|
new_model_name
|
Model name
|
save_model_as |
string
(enum)
|
safetensors
Options: ckpt, pt, safetensors |
model save extension | ckpt, pt, safetensors
|
resolution |
string
|
1024
|
image resolution must be 'size' or 'width,height'.
|
batch_size |
integer
|
1
Min: 1 |
batch size
|
max_train_epoches |
integer
|
20
Min: 1 |
max train epochs
|
save_every_n_epochs |
integer
|
5
Min: 1 |
save every n epochs
|
train_unet_only |
boolean
|
False
|
train U-Net only
|
train_text_encoder_only |
boolean
|
False
|
train Text Encoder only
|
seed |
integer
|
98796
Min: 1 |
reproducible seed
|
noise_offset |
number
|
0
Max: 1 |
noise offset
|
keep_tokens |
integer
|
0
|
keep heading N tokens when shuffling caption tokens
|
learning_rate |
number
|
4
Min: 1 Max: 9 |
Learning rate. It means 0.0001 or 0.0009
|
unet_lr |
number
|
1
Min: 1 Max: 9 |
UNet learning rate. It means 0.0001 or 0.0009
|
text_encoder_lr |
number
|
1
Min: 1 Max: 9 |
Text Encoder learning rate. It means 0.0001 or 0.0009
|
lr_scheduler |
string
(enum)
|
cosine
Options: linear, cosine, cosine_with_restarts, polynomial, constant, constant_with_warmup |
linear, cosine, cosine_with_restarts, polynomial, constant, constant_with_warmup
|
lr_warmup_steps |
integer
|
0
|
warmup steps
|
lr_scheduler_num_cycles |
integer
|
1
Min: 1 |
cosine_with_restarts restart cycles
|
min_bucket_reso |
integer
|
256
Min: 1 |
arb min resolution
|
max_bucket_reso |
integer
|
1024
Min: 1 |
arb max resolution
|
persistent_data_loader_workers |
boolean
|
True
|
makes workers persistent, further reduces/eliminates the lag in between epochs. however it may increase memory usage
|
clip_skip |
integer
|
1
|
clip skip
|
optimizer_type |
string
(enum)
|
Lion
Options: adaFactor, AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, Prodigy |
adaFactor, AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, Prodigy
|
network_module |
string
(enum)
|
networks.lora
Options: networks.lora, networks.dylora, lycoris.kohya |
Network module
|
network_dim |
integer
|
32
Min: 1 |
network dimension
|
network_alpha |
integer
|
16
Min: 1 |
network alpha
|
{
"type": "object",
"title": "Input",
"required": [
"train_data_zip"
],
"properties": {
"seed": {
"type": "integer",
"title": "Seed",
"default": 98796,
"minimum": 1,
"x-order": 11,
      "description": "reproducible seed"
},
"unet_lr": {
"type": "number",
"title": "Unet Lr",
"default": 1,
"maximum": 9,
"minimum": 1,
"x-order": 15,
"description": "UNet learning rate. It means 0.0001 or 0.0009"
},
"clip_skip": {
"type": "integer",
"title": "Clip Skip",
"default": 1,
"minimum": 0,
"x-order": 23,
"description": "clip skip"
},
"batch_size": {
"type": "integer",
"title": "Batch Size",
"default": 1,
"minimum": 1,
"x-order": 6,
"description": "batch size"
},
"resolution": {
"type": "string",
"title": "Resolution",
"default": "1024",
"x-order": 5,
"description": "image resolution must be 'size' or 'width,height'."
},
"keep_tokens": {
"type": "integer",
"title": "Keep Tokens",
"default": 0,
"minimum": 0,
"x-order": 13,
"description": "keep heading N tokens when shuffling caption tokens"
},
"network_dim": {
"type": "integer",
"title": "Network Dim",
"default": 32,
"minimum": 1,
"x-order": 26,
"description": "network dimension"
},
"output_name": {
"type": "string",
"title": "Output Name",
"default": "new_model_name",
"x-order": 3,
"description": "Model name"
},
"lr_scheduler": {
"enum": [
"linear",
"cosine",
"cosine_with_restarts",
"polynomial",
"constant",
"constant_with_warmup"
],
"type": "string",
"title": "lr_scheduler",
      "description": "linear, cosine, cosine_with_restarts, polynomial, constant, constant_with_warmup",
"default": "cosine",
"x-order": 17
},
"noise_offset": {
"type": "number",
"title": "Noise Offset",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 12,
"description": "noise offset"
},
"learning_rate": {
"type": "number",
"title": "Learning Rate",
"default": 4,
"maximum": 9,
"minimum": 1,
"x-order": 14,
"description": "Learning rate. It means 0.0001 or 0.0009"
},
"network_alpha": {
"type": "integer",
"title": "Network Alpha",
"default": 16,
"minimum": 1,
"x-order": 27,
"description": "network alpha"
},
"save_model_as": {
"enum": [
"ckpt",
"pt",
"safetensors"
],
"type": "string",
"title": "save_model_as",
"description": "model save extension | ckpt, pt, safetensors",
"default": "safetensors",
"x-order": 4
},
"network_module": {
"enum": [
"networks.lora",
"networks.dylora",
"lycoris.kohya"
],
"type": "string",
"title": "network_module",
"description": "Network module",
"default": "networks.lora",
"x-order": 25
},
"optimizer_type": {
"enum": [
"adaFactor",
"AdamW",
"AdamW8bit",
"Lion",
"SGDNesterov",
"SGDNesterov8bit",
"DAdaptation",
"Prodigy"
],
"type": "string",
"title": "optimizer_type",
      "description": "adaFactor, AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, Prodigy",
"default": "Lion",
"x-order": 24
},
"train_data_zip": {
"type": "string",
"title": "Train Data Zip",
"format": "uri",
"x-order": 1,
"description": "Upload image dataset in zip format using this naming convention: XX_token className.zip"
},
"lr_warmup_steps": {
"type": "integer",
"title": "Lr Warmup Steps",
"default": 0,
"minimum": 0,
"x-order": 18,
"description": "warmup steps"
},
"max_bucket_reso": {
"type": "integer",
"title": "Max Bucket Reso",
"default": 1024,
"minimum": 1,
"x-order": 21,
"description": "arb max resolution"
},
"min_bucket_reso": {
"type": "integer",
"title": "Min Bucket Reso",
"default": 256,
"minimum": 1,
"x-order": 20,
"description": "arb min resolution"
},
"network_weights": {
"type": "string",
"title": "Network Weights",
"format": "uri",
"x-order": 2,
"description": "Pretrained LoRA weights"
},
"text_encoder_lr": {
"type": "number",
"title": "Text Encoder Lr",
"default": 1,
"maximum": 9,
"minimum": 1,
"x-order": 16,
"description": "Text Encoder learning rate. It means 0.0001 or 0.0009"
},
"train_unet_only": {
"type": "boolean",
"title": "Train Unet Only",
"default": false,
"x-order": 9,
"description": "train U-Net only"
},
"max_train_epoches": {
"type": "integer",
"title": "Max Train Epoches",
"default": 20,
"minimum": 1,
"x-order": 7,
      "description": "max train epochs"
},
"save_every_n_epochs": {
"type": "integer",
"title": "Save Every N Epochs",
"default": 5,
"minimum": 1,
"x-order": 8,
"description": "save every n epochs"
},
"lr_scheduler_num_cycles": {
"type": "integer",
"title": "Lr Scheduler Num Cycles",
"default": 1,
"minimum": 1,
"x-order": 19,
"description": "cosine_with_restarts restart cycles"
},
"train_text_encoder_only": {
"type": "boolean",
"title": "Train Text Encoder Only",
"default": false,
"x-order": 10,
"description": "train Text Encoder only"
},
"pretrained_model_name_or_path": {
"type": "string",
"title": "Pretrained Model Name Or Path",
"default": "stabilityai/stable-diffusion-xl-base-1.0",
"x-order": 0,
"description": "base model name or path"
},
"persistent_data_loader_workers": {
"type": "boolean",
"title": "Persistent Data Loader Workers",
"default": true,
"x-order": 22,
"description": "makes workers persistent, further reduces/eliminates the lag in between epochs. however it may increase memory usage"
}
}
}
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "string",
"title": "Output",
"format": "uri"
}