okaris
/
controlnet
ControlNet implementation with custom SD1.5 fine tuned models
Run okaris/controlnet with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
controlnet_model |
string
(enum)
|
Options: Canny, Depth, HED, Normal, MLSD, OpenPose, Scribble, Seg |
Type of ControlNet model to use
|
base_model |
string
|
Type of base model to use
|
|
image |
string
|
Input image
|
|
prompt |
string
|
Prompt for the model
|
|
num_samples |
string
(enum)
|
1
Options: 1, 4 |
Number of samples (higher values may OOM)
|
image_resolution |
string
(enum)
|
512
Options: 256, 512, 768 |
Image resolution to be generated
|
low_threshold |
integer
|
100
Min: 1 Max: 255 |
Canny low threshold (only applicable when model type is 'canny')
|
high_threshold |
integer
|
200
Min: 1 Max: 255 |
Canny high threshold (only applicable when model type is 'canny')
|
ddim_steps |
integer
|
20
|
Steps
|
scale |
number
|
9
Min: 0.1 Max: 30 |
Guidance Scale
|
seed |
integer
|
Seed
|
|
eta |
number
|
0
|
eta (DDIM)
|
a_prompt |
string
|
best quality, extremely detailed
|
Added Prompt
|
n_prompt |
string
|
longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality
|
Negative Prompt
|
detect_resolution |
integer
|
512
Min: 128 Max: 1024 |
Resolution for detection
|
bg_threshold |
number
|
0
Min: 0 Max: 1 |
Background Threshold (only applicable when model type is 'normal')
|
value_threshold |
number
|
0.1
Min: 0.01 Max: 2 |
Value Threshold (only applicable when model type is 'MLSD')
|
distance_threshold |
number
|
0.1
Min: 0.01 Max: 20 |
Distance Threshold (only applicable when model type is 'MLSD')
|
{
"type": "object",
"title": "Input",
"required": [
"image",
"prompt"
],
"properties": {
"eta": {
"type": "number",
"title": "Eta",
"default": 0,
"x-order": 11,
"description": "eta (DDIM)"
},
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 10,
"description": "Seed"
},
"image": {
"type": "string",
"title": "Image",
"format": "uri",
"x-order": 2,
"description": "Input image"
},
"scale": {
"type": "number",
"title": "Scale",
"default": 9,
"maximum": 30,
"minimum": 0.1,
"x-order": 9,
"description": "Guidance Scale"
},
"prompt": {
"type": "string",
"title": "Prompt",
"x-order": 3,
"description": "Prompt for the model"
},
"a_prompt": {
"type": "string",
"title": "A Prompt",
"default": "best quality, extremely detailed",
"x-order": 12,
"description": "Added Prompt"
},
"n_prompt": {
"type": "string",
"title": "N Prompt",
"default": "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
"x-order": 13,
"description": "Negative Prompt"
},
"base_model": {
"type": "string",
"title": "Base Model",
"x-order": 1,
"description": "Type of base model to use"
},
"ddim_steps": {
"type": "integer",
"title": "Ddim Steps",
"default": 20,
"x-order": 8,
"description": "Steps"
},
"num_samples": {
"enum": [
"1",
"4"
],
"type": "string",
"title": "num_samples",
"description": "Number of samples (higher values may OOM)",
"default": "1",
"x-order": 4
},
"bg_threshold": {
"type": "number",
"title": "Bg Threshold",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 15,
"description": "Background Threshold (only applicable when model type is 'normal')"
},
"low_threshold": {
"type": "integer",
"title": "Low Threshold",
"default": 100,
"maximum": 255,
"minimum": 1,
"x-order": 6,
"description": "Canny low threshold (only applicable when model type is 'canny')"
},
"high_threshold": {
"type": "integer",
"title": "High Threshold",
"default": 200,
"maximum": 255,
"minimum": 1,
"x-order": 7,
"description": "Canny high threshold (only applicable when model type is 'canny')"
},
"value_threshold": {
"type": "number",
"title": "Value Threshold",
"default": 0.1,
"maximum": 2,
"minimum": 0.01,
"x-order": 16,
"description": "Value Threshold (only applicable when model type is 'MLSD')"
},
"controlnet_model": {
"enum": [
"Canny",
"Depth",
"HED",
"Normal",
"MLSD",
"OpenPose",
"Scribble",
"Seg"
],
"type": "string",
"title": "controlnet_model",
"description": "Type of ControlNet model to use",
"x-order": 0
},
"image_resolution": {
"enum": [
"256",
"512",
"768"
],
"type": "string",
"title": "image_resolution",
"description": "Image resolution to be generated",
"default": "512",
"x-order": 5
},
"detect_resolution": {
"type": "integer",
"title": "Detect Resolution",
"default": 512,
"maximum": 1024,
"minimum": 128,
"x-order": 14,
"description": "Resolution for detection"
},
"distance_threshold": {
"type": "number",
"title": "Distance Threshold",
"default": 0.1,
"maximum": 20,
"minimum": 0.01,
"x-order": 17,
"description": "Distance Threshold (only applicable when model type is 'MLSD')"
}
}
}
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}