lightweight-ai/q_l_t
Run lightweight-ai/q_l_t with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
| Field | Type | Default value | Description |
|---|---|---|---|
| batch_size |
integer
|
1
|
Batch size
|
| optimizer |
string
|
adamw8bit
|
Optimizer
|
| content_or_style |
string
|
balanced
|
Content/style weighting
|
| train_dtype |
string
|
bf16
|
Training dtype
|
| arch |
string
|
qwen_image
|
Model arch key for ai-toolkit
|
| seed |
integer
|
42
|
Random seed (None=random)
|
| steps |
integer
|
3000
Min: 100 Max: 6000 |
Training steps
|
| device |
string
|
cuda
|
Device for ai-toolkit trainer
|
| dataset |
string
|
|
ZIP with images and optional .txt captions (same basenames)
|
| ema_use |
boolean
|
False
|
Use EMA
|
| subject |
string
|
Karina
|
Subject identifier to bind (e.g., a name)
|
| ema_decay |
number
|
0.99
Max: 0.9999 |
EMA decay
|
| lora_conv |
integer
|
16
Max: 256 |
LoRA conv rank
|
| base_model |
string
|
/home/freek/Qwen-Image
|
Base model path or HF ID
|
| save_every |
integer
|
250
Min: 1 Max: 10000 |
Checkpoint/sample every N steps
|
| caption_ext |
string
|
txt
|
Caption file extension
|
| lokr_factor |
integer
|
-1
|
LoKr factor (-1 = auto)
|
| lora_linear |
integer
|
32
Min: 1 Max: 256 |
LoRA linear rank
|
| pack_samples |
boolean
|
True
|
Include generated sample images in ZIP
|
| sample_every |
integer
|
250
Max: 10000 |
Sample every N steps
|
| sample_steps |
integer
|
25
Min: 1 Max: 200 |
Sampling steps
|
| sample_width |
integer
|
1024
Min: 64 Max: 2048 |
Sample width
|
| weight_decay |
number
|
0.0001
Max: 1 |
Weight decay
|
| learning_rate |
number
|
0.0002
Min: 0.00001 Max: 0.001 |
Learning rate
|
| sample_height |
integer
|
1024
Min: 64 Max: 2048 |
Sample height
|
| timestep_type |
string
|
sigmoid
|
Timestep shaping
|
| guidance_scale |
number
|
4
Max: 20 |
Guidance scale
|
| lokr_full_rank |
boolean
|
True
|
Use full-rank LoKr
|
| sqlite_db_path |
string
|
./aitk_db.db
|
SQLite DB path for trainer
|
| default_caption |
string
|
|
Default caption for images without .txt (overridden by subject if empty)
|
| lora_conv_alpha |
integer
|
16
Max: 1024 |
LoRA conv alpha
|
| noise_scheduler |
string
|
flowmatch
|
Noise scheduler
|
| pack_checkpoints |
boolean
|
True
|
Include all intermediate .safetensors in ZIP
|
| lora_linear_alpha |
integer
|
32
Min: 1 Max: 1024 |
LoRA linear alpha
|
| resolution_list_csv |
string
|
512,768,1024
|
Comma-separated short-side resolutions
|
| caption_dropout_rate |
number
|
0.05
Max: 1 |
Caption dropout rate
|
| gradient_accumulation |
integer
|
1
Min: 1 Max: 64 |
Gradient accumulation
|
| gradient_checkpointing |
boolean
|
True
|
Enable gradient checkpointing
|
| ignore_if_contains_csv |
string
|
|
Comma-separated module substrings to ignore for LoRA
|
{
"type": "object",
"title": "Input",
"required": [
"dataset"
],
"properties": {
"arch": {
"type": "string",
"title": "Arch",
"default": "qwen_image",
"description": "Model arch key for ai-toolkit"
},
"seed": {
"type": "integer",
"title": "Seed",
"default": 42,
"nullable": true,
"description": "Random seed (None=random)"
},
"steps": {
"type": "integer",
"title": "Steps",
"default": 3000,
"maximum": 6000,
"minimum": 100,
"description": "Training steps"
},
"device": {
"type": "string",
"title": "Device",
"default": "cuda",
"description": "Device for ai-toolkit trainer"
},
"dataset": {
"type": "string",
"title": "Dataset",
"format": "uri",
"description": "ZIP with images and optional .txt captions (same basenames)"
},
"ema_use": {
"type": "boolean",
"title": "Ema Use",
"default": false,
"description": "Use EMA"
},
"subject": {
"type": "string",
"title": "Subject",
"default": "Karina",
"description": "Subject identifier to bind (e.g., a name)"
},
"ema_decay": {
"type": "number",
"title": "Ema Decay",
"default": 0.99,
"maximum": 0.9999,
"minimum": 0,
"description": "EMA decay"
},
"lora_conv": {
"type": "integer",
"title": "Lora Conv",
"default": 16,
"maximum": 256,
"minimum": 0,
"description": "LoRA conv rank"
},
"optimizer": {
"enum": [
"adamw8bit",
"adamw",
"adam8bit",
"prodigy"
],
"type": "string",
"title": "optimizer",
"description": "Optimizer",
"default": "adamw8bit",
"x-order": 18
},
"base_model": {
"type": "string",
"title": "Base Model",
"default": "/home/freek/Qwen-Image",
"description": "Base model path or HF ID"
},
"batch_size": {
"enum": [
1,
2,
4
],
"type": "integer",
"title": "batch_size",
"description": "Batch size",
"default": 1,
"x-order": 3
},
"save_every": {
"type": "integer",
"title": "Save Every",
"default": 250,
"maximum": 10000,
"minimum": 1,
"description": "Checkpoint/sample every N steps"
},
"caption_ext": {
"type": "string",
"title": "Caption Ext",
"default": "txt",
"description": "Caption file extension"
},
"lokr_factor": {
"type": "integer",
"title": "Lokr Factor",
"default": -1,
"description": "LoKr factor (-1 = auto)"
},
"lora_linear": {
"type": "integer",
"title": "Lora Linear",
"default": 32,
"maximum": 256,
"minimum": 1,
"description": "LoRA linear rank"
},
"train_dtype": {
"enum": [
"fp32",
"fp16",
"bf16"
],
"type": "string",
"title": "train_dtype",
"description": "Training dtype",
"default": "bf16",
"x-order": 26
},
"pack_samples": {
"type": "boolean",
"title": "Pack Samples",
"default": true,
"description": "Include generated sample images in ZIP"
},
"sample_every": {
"type": "integer",
"title": "Sample Every",
"default": 250,
"maximum": 10000,
"minimum": 0,
"description": "Sample every N steps"
},
"sample_steps": {
"type": "integer",
"title": "Sample Steps",
"default": 25,
"maximum": 200,
"minimum": 1,
"description": "Sampling steps"
},
"sample_width": {
"type": "integer",
"title": "Sample Width",
"default": 1024,
"maximum": 2048,
"minimum": 64,
"description": "Sample width"
},
"weight_decay": {
"type": "number",
"title": "Weight Decay",
"default": 0.0001,
"maximum": 1,
"minimum": 0,
"description": "Weight decay"
},
"learning_rate": {
"type": "number",
"title": "Learning Rate",
"default": 0.0002,
"maximum": 0.001,
"minimum": 1e-05,
"description": "Learning rate"
},
"sample_height": {
"type": "integer",
"title": "Sample Height",
"default": 1024,
"maximum": 2048,
"minimum": 64,
"description": "Sample height"
},
"timestep_type": {
"type": "string",
"title": "Timestep Type",
"default": "sigmoid",
"description": "Timestep shaping"
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 4,
"maximum": 20,
"minimum": 0,
"description": "Guidance scale"
},
"lokr_full_rank": {
"type": "boolean",
"title": "Lokr Full Rank",
"default": true,
"description": "Use full-rank LoKr"
},
"sqlite_db_path": {
"type": "string",
"title": "Sqlite Db Path",
"default": "./aitk_db.db",
"description": "SQLite DB path for trainer"
},
"default_caption": {
"type": "string",
"title": "Default Caption",
"default": "",
"description": "Default caption for images without .txt (overridden by subject if empty)"
},
"lora_conv_alpha": {
"type": "integer",
"title": "Lora Conv Alpha",
"default": 16,
"maximum": 1024,
"minimum": 0,
"description": "LoRA conv alpha"
},
"noise_scheduler": {
"type": "string",
"title": "Noise Scheduler",
"default": "flowmatch",
"description": "Noise scheduler"
},
"content_or_style": {
"enum": [
"content",
"style",
"balanced"
],
"type": "string",
"title": "content_or_style",
"description": "Content/style weighting",
"default": "balanced",
"x-order": 21
},
"pack_checkpoints": {
"type": "boolean",
"title": "Pack Checkpoints",
"default": true,
"description": "Include all intermediate .safetensors in ZIP"
},
"lora_linear_alpha": {
"type": "integer",
"title": "Lora Linear Alpha",
"default": 32,
"maximum": 1024,
"minimum": 1,
"description": "LoRA linear alpha"
},
"resolution_list_csv": {
"type": "string",
"title": "Resolution List Csv",
"default": "512,768,1024",
"description": "Comma-separated short-side resolutions"
},
"caption_dropout_rate": {
"type": "number",
"title": "Caption Dropout Rate",
"default": 0.05,
"maximum": 1,
"minimum": 0,
"description": "Caption dropout rate"
},
"gradient_accumulation": {
"type": "integer",
"title": "Gradient Accumulation",
"default": 1,
"maximum": 64,
"minimum": 1,
"description": "Gradient accumulation"
},
"gradient_checkpointing": {
"type": "boolean",
"title": "Gradient Checkpointing",
"default": true,
"description": "Enable gradient checkpointing"
},
"ignore_if_contains_csv": {
"type": "string",
"title": "Ignore If Contains Csv",
"default": "",
"description": "Comma-separated module substrings to ignore for LoRA"
}
}
}
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "object",
"title": "Output",
"required": [
"weights"
],
"properties": {
"weights": {
"type": "string",
"title": "Weights",
"format": "uri"
}
}
}