juergengunz/comfyui (Public, 30 runs)
Run juergengunz/comfyui with an API
Use one of our client libraries to get started quickly. Clicking on a library takes you to the Playground tab, where you can tweak the inputs, see the results, and copy the corresponding code to use in your own project.
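For example, a minimal call with the official Python client might look like the sketch below. This is a sketch, not copied from the page: it assumes the `replicate` package is installed, `REPLICATE_API_TOKEN` is set in your environment, and the prompt and image URL are placeholders.

```python
# Minimal sketch using the official Replicate Python client.
# Assumes: pip install replicate, and REPLICATE_API_TOKEN set in the environment.
import replicate

output = replicate.run(
    "juergengunz/comfyui",  # you may need to pin a specific version hash
    input={
        "prompt": "a cozy cabin in a snowy forest",        # placeholder prompt
        "control_image": "https://example.com/room.jpg",   # required field (placeholder URL)
        "control_type": "depth",
        "control_strength": 0.6,
    },
)
print(output)  # a list of image URLs (or file-like objects, depending on client version)
```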
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
prompt | string | | |
negative_prompt | string | | Things you do not want to see in your image |
guidance_scale | number | 3.5 (Max: 5) | Guidance scale |
steps | integer | 28 (Min: 1, Max: 50) | Number of steps |
control_type | string (enum) | depth (Options: canny, soft_edge, depth) | Type of control net |
control_strength | number | 0.5 (Max: 3) | Strength of control net. Different controls work better with different strengths: canny works best with 0.5, soft edge works best with 0.4, and depth works best between 0.5 and 0.75. If images are low quality, try reducing the strength and the guidance scale. |
control_image | string | | Image to use with control net |
image_to_image_strength | number | 0 (Max: 1) | Strength of image to image control. 0 means none of the control image is used; 1 means the control image is used as is. Try values between 0 and 0.25 for best results. |
depth_preprocessor | string (enum) | DepthAnything (Options: Midas, Zoe, DepthAnything, Zoe-DepthAnything) | Preprocessor to use with depth control net |
soft_edge_preprocessor | string (enum) | HED (Options: HED, TEED, PiDiNet) | Preprocessor to use with soft edge control net |
lora_url | string | | Optional LoRA model to use. Give a URL to a HuggingFace .safetensors file, a Replicate .tar file or a CivitAI download link. |
lora_strength | number | 1 (Min: -1, Max: 3) | Strength of LoRA model |
return_preprocessed_image | boolean | False | Return the preprocessed image used to control the generation process. Useful for debugging. |
output_format | string (enum) | webp (Options: webp, jpg, png) | Format of the output images |
output_quality | integer | 80 (Max: 100) | Quality of the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. |
seed | integer | | Set a seed for reproducibility. Random by default. |
num_outputs | integer | 1 (Min: 1, Max: 4) | Number of images to generate |
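As an illustration of the strength guidance above, a small helper can pick a sensible starting `control_strength` for each control type. The helper name and the 0.65 depth value are my own choices within the documented 0.5 to 0.75 range; only `control_image` is actually required by the model.

```python
# Recommended starting control_strength per control type, following the table above.
# This helper is a convenience sketch, not part of the model's API.
RECOMMENDED_CONTROL_STRENGTH = {
    "canny": 0.5,
    "soft_edge": 0.4,
    "depth": 0.65,  # depth works best between 0.5 and 0.75
}

def build_controlnet_input(prompt: str, control_image_url: str, control_type: str = "depth") -> dict:
    """Assemble an input dict using the recommended strength for the chosen control type."""
    return {
        "prompt": prompt,
        "control_image": control_image_url,  # the only required field
        "control_type": control_type,
        "control_strength": RECOMMENDED_CONTROL_STRENGTH[control_type],
    }
```

The raw JSON Schema for these inputs follows.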
{
"type": "object",
"title": "Input",
"required": [
"control_image"
],
"properties": {
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 15,
"description": "Set a seed for reproducibility. Random by default."
},
"steps": {
"type": "integer",
"title": "Steps",
"default": 28,
"maximum": 50,
"minimum": 1,
"x-order": 3,
"description": "Number of steps"
},
"prompt": {
"type": "string",
"title": "Prompt",
"default": "",
"x-order": 0
},
"lora_url": {
"type": "string",
"title": "Lora Url",
"default": "",
"x-order": 10,
"description": "Optional LoRA model to use. Give a URL to a HuggingFace .safetensors file, a Replicate .tar file or a CivitAI download link."
},
"num_outputs": {
"type": "integer",
"title": "Num Outputs",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 16,
"description": "Number of images to generate"
},
"control_type": {
"enum": [
"canny",
"soft_edge",
"depth"
],
"type": "string",
"title": "control_type",
"description": "Type of control net",
"default": "depth",
"x-order": 4
},
"control_image": {
"type": "string",
"title": "Control Image",
"format": "uri",
"x-order": 6,
"description": "Image to use with control net"
},
"lora_strength": {
"type": "number",
"title": "Lora Strength",
"default": 1,
"maximum": 3,
"minimum": -1,
"x-order": 11,
"description": "Strength of LoRA model"
},
"output_format": {
"enum": [
"webp",
"jpg",
"png"
],
"type": "string",
"title": "output_format",
"description": "Format of the output images",
"default": "webp",
"x-order": 13
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 3.5,
"maximum": 5,
"minimum": 0,
"x-order": 2,
"description": "Guidance scale"
},
"output_quality": {
"type": "integer",
"title": "Output Quality",
"default": 80,
"maximum": 100,
"minimum": 0,
"x-order": 14,
"description": "Quality of the output images, from 0 to 100. 100 is best quality, 0 is lowest quality."
},
"negative_prompt": {
"type": "string",
"title": "Negative Prompt",
"default": "",
"x-order": 1,
"description": "Things you do not want to see in your image"
},
"control_strength": {
"type": "number",
"title": "Control Strength",
"default": 0.5,
"maximum": 3,
"minimum": 0,
"x-order": 5,
"description": "Strength of control net. Different controls work better with different strengths. Canny works best with 0.5, soft edge works best with 0.4, and depth works best between 0.5 and 0.75. If images are low quality, try reducing the strength and try reducing the guidance scale."
},
"depth_preprocessor": {
"enum": [
"Midas",
"Zoe",
"DepthAnything",
"Zoe-DepthAnything"
],
"type": "string",
"title": "depth_preprocessor",
"description": "Preprocessor to use with depth control net",
"default": "DepthAnything",
"x-order": 8
},
"soft_edge_preprocessor": {
"enum": [
"HED",
"TEED",
"PiDiNet"
],
"type": "string",
"title": "soft_edge_preprocessor",
"description": "Preprocessor to use with soft edge control net",
"default": "HED",
"x-order": 9
},
"image_to_image_strength": {
"type": "number",
"title": "Image To Image Strength",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 7,
"description": "Strength of image to image control. 0 means none of the control image is used. 1 means the control image is returned used as is. Try values between 0 and 0.25 for best results."
},
"return_preprocessed_image": {
"type": "boolean",
"title": "Return Preprocessed Image",
"default": false,
"x-order": 12,
"description": "Return the preprocessed image used to control the generation process. Useful for debugging."
}
}
}
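Because this is a standard JSON Schema object, a payload can be validated locally before sending a request. The sketch below uses the third-party `jsonschema` package and assumes the schema above has been saved as `input_schema.json`; the file name and payload values are illustrative.

```python
# Sketch of client-side validation against the input schema above.
# Assumes: pip install jsonschema, and the schema saved locally as input_schema.json.
import json
from jsonschema import validate, ValidationError

with open("input_schema.json") as f:
    input_schema = json.load(f)

payload = {
    "prompt": "test render",
    "control_image": "https://example.com/ref.png",  # placeholder URL
}

try:
    validate(instance=payload, schema=input_schema)  # raises if e.g. control_image is missing
    print("payload is valid")
except ValidationError as err:
    print("invalid payload:", err.message)
```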
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}
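Since the output is an array of image URLs, a common follow-up is to download each one. The sketch below uses the `requests` package; the file naming is arbitrary and `output` is assumed to be the list returned by the run.

```python
# Sketch of saving the returned image URLs to disk.
# Assumes: pip install requests; `output` is the list of URI strings from the run.
import requests

def save_outputs(output: list[str], ext: str = "webp") -> None:
    """Download each returned URL and write it to output-<n>.<ext>."""
    for i, url in enumerate(output):
        resp = requests.get(url, timeout=60)
        resp.raise_for_status()
        with open(f"output-{i}.{ext}", "wb") as f:
            f.write(resp.content)
```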