zeke/dreambooth
A copy of replicate/dreambooth for testing
Public · 1 run
Run zeke/dreambooth with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
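For example, a training run could be started with Replicate's Python client roughly as below. This is a minimal sketch: the prompts and the ZIP URL are placeholders, running without an explicit version id assumes the client resolves the latest version, and `REPLICATE_API_TOKEN` must be set in your environment.

```python
import replicate

# Kick off a DreamBooth training run. Only the three required inputs are set here;
# every other field falls back to the defaults listed in the input schema below.
output = replicate.run(
    "zeke/dreambooth",  # pin a version with "zeke/dreambooth:<version>" for reproducible runs
    input={
        "instance_prompt": "a photo of sks dog",                 # placeholder identifier prompt
        "class_prompt": "a photo of a dog",                      # placeholder class prompt
        "instance_data": "https://example.com/dog-images.zip",   # placeholder ZIP of instance images
    },
)

# Per the output schema, the result is a single URL pointing to the trained weights.
print(output)
```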
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used. A minimal example input is shown after the table.
| Field | Type | Default value | Description |
|---|---|---|---|
| instance_prompt | string | | The prompt with identifier specifying the instance |
| class_prompt | string | | The prompt to specify images in the same class as the provided instance images. |
| instance_data | string | | A ZIP file containing the training data of instance images |
| class_data | string | | A ZIP file containing the training data of class images. Images will be generated if you do not provide them. |
| num_class_images | integer | 50 | Minimum number of class images for prior preservation loss. If class_data does not contain enough images, additional images will be sampled with class_prompt. |
| save_sample_prompt | string | | The prompt used to generate sample outputs to save. |
| save_sample_negative_prompt | string | | The negative prompt used to generate sample outputs to save. |
| n_save_sample | integer | 4 | The number of samples to save. |
| save_guidance_scale | number | 7.5 | CFG scale for saved samples. |
| save_infer_steps | integer | 50 | The number of inference steps for saved samples. |
| pad_tokens | boolean | False | Flag to pad tokens to length 77. |
| with_prior_preservation | boolean | True | Flag to add prior preservation loss. |
| prior_loss_weight | number | 1 | Weight of the prior preservation loss. |
| seed | integer | 1337 | A seed for reproducible training |
| resolution | integer | 512 | The resolution for input images. All images in the train/validation dataset will be resized to this resolution. |
| center_crop | boolean | False | Whether to center-crop images before resizing to the target resolution |
| train_text_encoder | boolean | True | Whether to train the text encoder |
| train_batch_size | integer | 1 | Batch size (per device) for the training dataloader. |
| sample_batch_size | integer | 4 | Batch size (per device) for sampling images. |
| num_train_epochs | integer | 1 | Number of training epochs. |
| max_train_steps | integer | 2000 | Total number of training steps to perform. If provided, overrides num_train_epochs. |
| gradient_accumulation_steps | integer | 1 | Number of update steps to accumulate before performing a backward/update pass. |
| gradient_checkpointing | boolean | False | Whether to use gradient checkpointing to save memory at the expense of a slower backward pass. |
| learning_rate | number | 0.000001 | Initial learning rate (after the potential warmup period) to use. |
| scale_lr | boolean | False | Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size. |
| lr_scheduler | string (enum) | constant | The scheduler type to use. Options: linear, cosine, cosine_with_restarts, polynomial, constant, constant_with_warmup |
| lr_warmup_steps | integer | 0 | Number of steps for the warmup in the lr scheduler. |
| use_8bit_adam | boolean | False | Whether to use 8-bit Adam from bitsandbytes. |
| adam_beta1 | number | 0.9 | The beta1 parameter for the Adam optimizer. |
| adam_beta2 | number | 0.999 | The beta2 parameter for the Adam optimizer. |
| adam_weight_decay | number | 0.01 | Weight decay for the Adam optimizer |
| adam_epsilon | number | 0.00000001 | Epsilon value for the Adam optimizer |
| max_grad_norm | number | 1 | Max gradient norm. |
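Put together from the table above, a minimal input might look like the following sketch. The prompts and ZIP URL are placeholders, and the optional overrides are illustrative values rather than recommendations.

```python
# Minimal input dict following the schema: the three required fields plus a few
# optional overrides. Anything omitted uses the default value from the table above.
training_input = {
    # required
    "instance_prompt": "a photo of sks dog",                 # placeholder identifier prompt
    "class_prompt": "a photo of a dog",                      # placeholder class prompt
    "instance_data": "https://example.com/dog-images.zip",   # placeholder ZIP of instance images
    # optional overrides (illustrative values)
    "max_train_steps": 1000,
    "learning_rate": 2e-06,
    "train_text_encoder": False,
    "seed": 42,
}
```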
Schema
```json
{
"type": "object",
"title": "Input",
"required": [
"instance_prompt",
"class_prompt",
"instance_data"
],
"properties": {
"seed": {
"type": "integer",
"title": "Seed",
"default": 1337,
"x-order": 13,
"description": "A seed for reproducible training"
},
"scale_lr": {
"type": "boolean",
"title": "Scale Lr",
"default": false,
"x-order": 24,
"description": "Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size."
},
"adam_beta1": {
"type": "number",
"title": "Adam Beta1",
"default": 0.9,
"x-order": 28,
"description": "The beta1 parameter for the Adam optimizer."
},
"adam_beta2": {
"type": "number",
"title": "Adam Beta2",
"default": 0.999,
"x-order": 29,
"description": "The beta2 parameter for the Adam optimizer."
},
"class_data": {
"type": "string",
"title": "Class Data",
"format": "uri",
"x-order": 3,
"description": "A ZIP file containing the training data of class images. Images will be generated if you do not provide."
},
"pad_tokens": {
"type": "boolean",
"title": "Pad Tokens",
"default": false,
"x-order": 10,
"description": "Flag to pad tokens to length 77."
},
"resolution": {
"type": "integer",
"title": "Resolution",
"default": 512,
"x-order": 14,
"description": "The resolution for input images. All the images in the train/validation dataset will be resized to this resolution."
},
"center_crop": {
"type": "boolean",
"title": "Center Crop",
"default": false,
"x-order": 15,
"description": "Whether to center crop images before resizing to resolution"
},
"adam_epsilon": {
"type": "number",
"title": "Adam Epsilon",
"default": 1e-08,
"x-order": 31,
"description": "Epsilon value for the Adam optimizer"
},
"class_prompt": {
"type": "string",
"title": "Class Prompt",
"x-order": 1,
"description": "The prompt to specify images in the same class as provided instance images."
},
"lr_scheduler": {
"enum": [
"linear",
"cosine",
"cosine_with_restarts",
"polynomial",
"constant",
"constant_with_warmup"
],
"type": "string",
"title": "lr_scheduler",
"description": "The scheduler type to use",
"default": "constant",
"x-order": 25
},
"instance_data": {
"type": "string",
"title": "Instance Data",
"format": "uri",
"x-order": 2,
"description": "A ZIP file containing the training data of instance images"
},
"learning_rate": {
"type": "number",
"title": "Learning Rate",
"default": 1e-06,
"x-order": 23,
"description": "Initial learning rate (after the potential warmup period) to use."
},
"max_grad_norm": {
"type": "number",
"title": "Max Grad Norm",
"default": 1,
"x-order": 32,
"description": "Max gradient norm."
},
"n_save_sample": {
"type": "integer",
"title": "N Save Sample",
"default": 4,
"x-order": 7,
"description": "The number of samples to save."
},
"use_8bit_adam": {
"type": "boolean",
"title": "Use 8Bit Adam",
"default": false,
"x-order": 27,
"description": "Whether or not to use 8-bit Adam from bitsandbytes."
},
"instance_prompt": {
"type": "string",
"title": "Instance Prompt",
"x-order": 0,
"description": "The prompt with identifier specifying the instance"
},
"lr_warmup_steps": {
"type": "integer",
"title": "Lr Warmup Steps",
"default": 0,
"x-order": 26,
"description": "Number of steps for the warmup in the lr scheduler."
},
"max_train_steps": {
"type": "integer",
"title": "Max Train Steps",
"default": 2000,
"x-order": 20,
"description": "Total number of training steps to perform. If provided, overrides num_train_epochs."
},
"num_class_images": {
"type": "integer",
"title": "Num Class Images",
"default": 50,
"x-order": 4,
"description": "Minimal class images for prior preservation loss. If not enough images are provided in class_data, additional images will be sampled with class_prompt."
},
"num_train_epochs": {
"type": "integer",
"title": "Num Train Epochs",
"default": 1,
"x-order": 19
},
"save_infer_steps": {
"type": "integer",
"title": "Save Infer Steps",
"default": 50,
"x-order": 9,
"description": "The number of inference steps for save sample."
},
"train_batch_size": {
"type": "integer",
"title": "Train Batch Size",
"default": 1,
"x-order": 17,
"description": "Batch size (per device) for the training dataloader."
},
"adam_weight_decay": {
"type": "number",
"title": "Adam Weight Decay",
"default": 0.01,
"x-order": 30,
"description": "Weight decay to use"
},
"prior_loss_weight": {
"type": "number",
"title": "Prior Loss Weight",
"default": 1,
"x-order": 12,
"description": "Weight of prior preservation loss."
},
"sample_batch_size": {
"type": "integer",
"title": "Sample Batch Size",
"default": 4,
"x-order": 18,
"description": "Batch size (per device) for sampling images."
},
"save_sample_prompt": {
"type": "string",
"title": "Save Sample Prompt",
"x-order": 5,
"description": "The prompt used to generate sample outputs to save."
},
"train_text_encoder": {
"type": "boolean",
"title": "Train Text Encoder",
"default": true,
"x-order": 16,
"description": "Whether to train the text encoder"
},
"save_guidance_scale": {
"type": "number",
"title": "Save Guidance Scale",
"default": 7.5,
"x-order": 8,
"description": "CFG for save sample."
},
"gradient_checkpointing": {
"type": "boolean",
"title": "Gradient Checkpointing",
"default": false,
"x-order": 22,
"description": "Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass."
},
"with_prior_preservation": {
"type": "boolean",
"title": "With Prior Preservation",
"default": true,
"x-order": 11,
"description": "Flag to add prior preservation loss."
},
"gradient_accumulation_steps": {
"type": "integer",
"title": "Gradient Accumulation Steps",
"default": 1,
"x-order": 21,
"description": "Number of updates steps to accumulate before performing a backward/update pass."
},
"save_sample_negative_prompt": {
"type": "string",
"title": "Save Sample Negative Prompt",
"x-order": 6,
"description": "The negative prompt used to generate sample outputs to save."
}
}
}
```
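Because this is a standard JSON Schema object, an input can be checked locally before submitting a run. Below is a sketch using the third-party jsonschema package; the file name and payload values are assumptions.

```python
import json

from jsonschema import ValidationError, validate

# Assumes the schema above has been saved locally as input_schema.json.
with open("input_schema.json") as f:
    input_schema = json.load(f)

payload = {
    "instance_prompt": "a photo of sks dog",
    "class_prompt": "a photo of a dog",
    "instance_data": "https://example.com/dog-images.zip",
    "max_train_steps": 1000,
}

try:
    validate(instance=payload, schema=input_schema)
    print("payload satisfies the input schema")
except ValidationError as err:
    print(f"invalid payload: {err.message}")
```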
Output schema
The shape of the response you’ll get when you run this model with an API.
Schema
```json
{
"type": "string",
"title": "Output",
"format": "uri"
}
```
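Since the output is a single URI string pointing at the trained weights, it can be downloaded directly once the run completes. A minimal sketch follows; the URL and file name are placeholders.

```python
import urllib.request

# `output` is the URL string returned when the training run finishes.
output = "https://replicate.delivery/.../weights.zip"  # placeholder URL
urllib.request.urlretrieve(output, "dreambooth-weights.zip")
```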