lightweight-ai/q_l_t
Public
189 runs
Run lightweight-ai/q_l_t with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
| Field | Type | Default value | Description |
|---|---|---|---|
| rank | integer | 16 | None |
| img_dir | string | | 학습 데이터셋 (.zip 또는 .tar.gz 파일). |
| adam8bit | boolean | True | 8bit Adam optimizer 사용 |
| img_size | integer | 1024 | None |
| quantize | boolean | True | 모델을 퀀타이즈하여 VRAM/속도 최적화 |
| report_to | string | | wandb 등 로깅 백엔드 이름 (사용 안 하면 비워두기) |
| adam_beta1 | number | 0.9 | None |
| adam_beta2 | number | 0.999 | None |
| output_dir | string | ./output | 학습 결과(LoRA 체크포인트)가 저장될 디렉토리 |
| logging_dir | string | logs | None |
| num_workers | integer | 4 | None |
| adam_epsilon | number | 0.00000001 | None |
| caption_type | string | txt | None |
| lr_scheduler | string | constant | None |
| random_ratio | boolean | False | None |
| train_script | string | train_A.py | 실행할 학습 스크립트 파일명 (예: train_A.py, train_B.py). /src 기준 경로. |
| trigger_word | string | | txt 캡션이 없는 경우 자동 생성할 트리거 워드 |
| learning_rate | number | 0.0003 | 학습률 (4090 추천값: 3e-4) |
| max_grad_norm | number | 1 | None |
| num_processes | integer | 1 | accelerate 에 사용할 프로세스 수 (GPU 수) |
| lr_warmup_steps | integer | 20 | None |
| max_train_steps | integer | 1500 | None |
| mixed_precision | string | bf16 | mixed precision 모드 ("fp16", "bf16", "no") |
| train_batch_size | integer | 1 | None |
| adam_weight_decay | number | 0.01 | None |
| save_cache_on_disk | boolean | True | 전처리 캐시를 디스크에 저장 (재사용 가능) |
| caption_dropout_rate | number | 0.1 | None |
| tracker_project_name | string | lora_test | None |
| resume_from_checkpoint | string | latest | None |
| checkpoints_total_limit | integer | 1 | 최대 유지할 체크포인트 개수 |
| precompute_text_embeddings | boolean | True | 텍스트 임베딩을 미리 계산하여 속도/메모리 최적화 |
| gradient_accumulation_steps | integer | 1 | None |
| precompute_image_embeddings | boolean | True | 이미지 임베딩을 미리 계산하여 속도/메모리 최적화 |
| pretrained_model_name_or_path | string | Qwen/Qwen-Image | 베이스 모델 (HF 이름 또는 로컬 경로) |
{
"type": "object",
"title": "Input",
"required": [
"img_dir"
],
"properties": {
"rank": {
"type": "integer",
"title": "Rank",
"default": 16
},
"img_dir": {
"type": "string",
"title": "Img Dir",
"format": "uri",
"description": "\ud559\uc2b5 \ub370\uc774\ud130\uc14b (.zip \ub610\ub294 .tar.gz \ud30c\uc77c)."
},
"adam8bit": {
"type": "boolean",
"title": "Adam8Bit",
"default": true,
"description": "8bit Adam optimizer \uc0ac\uc6a9"
},
"img_size": {
"type": "integer",
"title": "Img Size",
"default": 1024
},
"quantize": {
"type": "boolean",
"title": "Quantize",
"default": true,
"description": "\ubaa8\ub378\uc744 \ud000\ud0c0\uc774\uc988\ud558\uc5ec VRAM/\uc18d\ub3c4 \ucd5c\uc801\ud654"
},
"report_to": {
"type": "string",
"title": "Report To",
"default": "",
"description": "wandb \ub4f1 \ub85c\uae45 \ubc31\uc5d4\ub4dc \uc774\ub984 (\uc0ac\uc6a9 \uc548 \ud558\uba74 \ube44\uc6cc\ub450\uae30)"
},
"adam_beta1": {
"type": "number",
"title": "Adam Beta1",
"default": 0.9
},
"adam_beta2": {
"type": "number",
"title": "Adam Beta2",
"default": 0.999
},
"output_dir": {
"type": "string",
"title": "Output Dir",
"default": "./output",
"description": "\ud559\uc2b5 \uacb0\uacfc(LoRA \uccb4\ud06c\ud3ec\uc778\ud2b8)\uac00 \uc800\uc7a5\ub420 \ub514\ub809\ud1a0\ub9ac"
},
"logging_dir": {
"type": "string",
"title": "Logging Dir",
"default": "logs"
},
"num_workers": {
"type": "integer",
"title": "Num Workers",
"default": 4
},
"adam_epsilon": {
"type": "number",
"title": "Adam Epsilon",
"default": 1e-08
},
"caption_type": {
"type": "string",
"title": "Caption Type",
"default": "txt"
},
"lr_scheduler": {
"type": "string",
"title": "Lr Scheduler",
"default": "constant"
},
"random_ratio": {
"type": "boolean",
"title": "Random Ratio",
"default": false
},
"train_script": {
"type": "string",
"title": "Train Script",
"default": "train_A.py",
"description": "\uc2e4\ud589\ud560 \ud559\uc2b5 \uc2a4\ud06c\ub9bd\ud2b8 \ud30c\uc77c\uba85 (\uc608: train_A.py, train_B.py). /src \uae30\uc900 \uacbd\ub85c."
},
"trigger_word": {
"type": "string",
"title": "Trigger Word",
"nullable": true,
"description": "txt \ucea1\uc158\uc774 \uc5c6\ub294 \uacbd\uc6b0 \uc790\ub3d9 \uc0dd\uc131\ud560 \ud2b8\ub9ac\uac70 \uc6cc\ub4dc"
},
"learning_rate": {
"type": "number",
"title": "Learning Rate",
"default": 0.0003,
"description": "\ud559\uc2b5\ub960 (4090 \ucd94\ucc9c\uac12: 3e-4)"
},
"max_grad_norm": {
"type": "number",
"title": "Max Grad Norm",
"default": 1
},
"num_processes": {
"type": "integer",
"title": "Num Processes",
"default": 1,
"description": "accelerate \uc5d0 \uc0ac\uc6a9\ud560 \ud504\ub85c\uc138\uc2a4 \uc218 (GPU \uc218)"
},
"lr_warmup_steps": {
"type": "integer",
"title": "Lr Warmup Steps",
"default": 20
},
"max_train_steps": {
"type": "integer",
"title": "Max Train Steps",
"default": 1500
},
"mixed_precision": {
"type": "string",
"title": "Mixed Precision",
"default": "bf16",
"description": "mixed precision \ubaa8\ub4dc (\"fp16\", \"bf16\", \"no\")"
},
"train_batch_size": {
"type": "integer",
"title": "Train Batch Size",
"default": 1
},
"adam_weight_decay": {
"type": "number",
"title": "Adam Weight Decay",
"default": 0.01
},
"save_cache_on_disk": {
"type": "boolean",
"title": "Save Cache On Disk",
"default": true,
"description": "\uc804\ucc98\ub9ac \uce90\uc2dc\ub97c \ub514\uc2a4\ud06c\uc5d0 \uc800\uc7a5 (\uc7ac\uc0ac\uc6a9 \uac00\ub2a5)"
},
"caption_dropout_rate": {
"type": "number",
"title": "Caption Dropout Rate",
"default": 0.1
},
"tracker_project_name": {
"type": "string",
"title": "Tracker Project Name",
"default": "lora_test"
},
"resume_from_checkpoint": {
"type": "string",
"title": "Resume From Checkpoint",
"default": "latest"
},
"checkpoints_total_limit": {
"type": "integer",
"title": "Checkpoints Total Limit",
"default": 1,
"description": "\ucd5c\ub300 \uc720\uc9c0\ud560 \uccb4\ud06c\ud3ec\uc778\ud2b8 \uac1c\uc218"
},
"precompute_text_embeddings": {
"type": "boolean",
"title": "Precompute Text Embeddings",
"default": true,
"description": "\ud14d\uc2a4\ud2b8 \uc784\ubca0\ub529\uc744 \ubbf8\ub9ac \uacc4\uc0b0\ud558\uc5ec \uc18d\ub3c4/\uba54\ubaa8\ub9ac \ucd5c\uc801\ud654"
},
"gradient_accumulation_steps": {
"type": "integer",
"title": "Gradient Accumulation Steps",
"default": 1
},
"precompute_image_embeddings": {
"type": "boolean",
"title": "Precompute Image Embeddings",
"default": true,
"description": "\uc774\ubbf8\uc9c0 \uc784\ubca0\ub529\uc744 \ubbf8\ub9ac \uacc4\uc0b0\ud558\uc5ec \uc18d\ub3c4/\uba54\ubaa8\ub9ac \ucd5c\uc801\ud654"
},
"pretrained_model_name_or_path": {
"type": "string",
"title": "Pretrained Model Name Or Path",
"default": "Qwen/Qwen-Image",
"description": "\ubca0\uc774\uc2a4 \ubaa8\ub378 (HF \uc774\ub984 \ub610\ub294 \ub85c\uceec \uacbd\ub85c)"
}
}
}
Output schema
The shape of the response you’ll get when you run this model with an API.
Schema
{
"type": "string",
"title": "Output",
"format": "uri"
}