edenartlab/comfyui-workflows
- Public
- 61.7K runs
Run edenartlab/comfyui-workflows with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
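For example, here is a minimal sketch using the Python client. It assumes `REPLICATE_API_TOKEN` is set in your environment, and it omits the model version hash, which you would normally pin:

```python
import replicate

# Run a txt2vid job. Any field omitted from `input` falls back to
# its default value (see the input schema below).
output = replicate.run(
    "edenartlab/comfyui-workflows",
    input={
        "mode": "txt2vid",
        "text_input": "a watercolor fox running through an autumn forest",
        "n_frames": 40,
    },
)
print(output)
```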
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
Field | Type | Default value | Description
---|---|---|---
mode | string | txt2vid | One of: txt2vid, img2vid, vid2vid, upscale, txt2img, inpaint, makeitrad
text_input | string | | Prompt
interpolation_texts | string | | Pipe-separated list of prompts (for txt2vid)
input_images | string | | Input image(s) for various endpoints. Loadable from file, URL, or base64 string (URLs separated by the pipe symbol)
style_images | string | | Input style image(s) (for IP_adapter) for various endpoints. Loadable from file, URL, or base64 string (URLs separated by the pipe symbol)
mask_images | string | | Input mask image(s) for various endpoints. Loadable from file, URL, or base64 string (URLs separated by the pipe symbol)
input_video_path | string | | For vid2vid. Load the source video from file, URL, or base64 string
steps | integer | 25 (Min: 10, Max: 40) | Steps
width | integer | 1280 (Min: 512, Max: 3840) | Width
height | integer | 1280 (Min: 512, Max: 3840) | Height
n_frames | integer | 40 (Min: 16, Max: 264) | Total number of frames (txt2vid, vid2vid, img2vid)
ip_adapter_weight | number | 0.65 (Min: 0, Max: 2) | Strength of the IP_adapter style
motion_scale | number | 1.1 (Min: 0, Max: 2) | Motion scale (AnimateDiff)
n_samples | integer | 1 (Min: 1, Max: 4) | Batch size
control_method | string (enum) | coarse (Options: coarse, fine) | Shape control method (coarse usually gives nicer results; fine is more precise to the input video)
controlnet_strength | number | 0.85 (Min: 0, Max: 1.5) | Strength of ControlNet guidance
denoise_strength | number | 1 (Min: 0, Max: 1) | How much denoising to apply (1.0 = start from full noise, 0.0 = return the input image)
blend_value | number | 0.5 (Min: 0, Max: 1) | Blend factor (weight of the first image vs. the second)
loop | boolean | False | Try to make a loopable video
guidance_scale | number | 7.5 (Min: 1, Max: 20) | Strength of text conditioning guidance
negative_prompt | string | nude, naked, text, watermark, low-quality, signature, padding, margins, white borders, padded border, moiré pattern, downsampling, aliasing, distorted, blurry, blur, jpeg artifacts, compression artifacts, poorly drawn, low-resolution, bad, grainy, error, bad-contrast | Negative prompt
seed | integer | | Sampling seed; leave empty for random
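A vid2vid call exercises the video-specific fields from the table. Another sketch under the same assumptions as above (the source video URL here is a placeholder):

```python
import replicate

# vid2vid: restyle an existing clip. Field names and ranges are taken
# from the input schema on this page.
output = replicate.run(
    "edenartlab/comfyui-workflows",
    input={
        "mode": "vid2vid",
        "text_input": "claymation style, stop-motion animation",
        "input_video_path": "https://example.com/source.mp4",  # placeholder URL
        "control_method": "coarse",
        "controlnet_strength": 0.85,
        "denoise_strength": 1.0,
    },
)
```

The full input schema as JSON Schema: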
{
"type": "object",
"title": "Input",
"properties": {
"loop": {
"type": "boolean",
"title": "Loop",
"default": false,
"x-order": 18,
"description": "Try to make a loopable video"
},
"mode": {
"type": "string",
"title": "Mode",
"default": "txt2vid",
"x-order": 0,
"description": "txt2vid, img2vid, vid2vid, upscale, txt2img, inpaint, makeitrad"
},
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 21,
"description": "Sampling seed, leave Empty for Random"
},
"steps": {
"type": "integer",
"title": "Steps",
"default": 25,
"maximum": 40,
"minimum": 10,
"x-order": 7,
"description": "Steps"
},
"width": {
"type": "integer",
"title": "Width",
"default": 1280,
"maximum": 3840,
"minimum": 512,
"x-order": 8,
"description": "Width"
},
"height": {
"type": "integer",
"title": "Height",
"default": 1280,
"maximum": 3840,
"minimum": 512,
"x-order": 9,
"description": "Height"
},
"n_frames": {
"type": "integer",
"title": "N Frames",
"default": 40,
"maximum": 264,
"minimum": 16,
"x-order": 10,
"description": "Total number of frames (txt2vid, vid2vid, img2vid)"
},
"n_samples": {
"type": "integer",
"title": "N Samples",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 13,
"description": "batch size"
},
"text_input": {
"type": "string",
"title": "Text Input",
"x-order": 1,
"description": "prompt"
},
"blend_value": {
"type": "number",
"title": "Blend Value",
"default": 0.5,
"maximum": 1,
"minimum": 0,
"x-order": 17,
"description": "Blend factor (weight of the first image vs the second)"
},
"mask_images": {
"type": "string",
"title": "Mask Images",
"x-order": 5,
"description": "Input mask image(s) for various endpoints. Load-able from file, url, or base64 string, (urls separated by pipe symbol)"
},
"input_images": {
"type": "string",
"title": "Input Images",
"x-order": 3,
"description": "Input image(s) for various endpoints. Load-able from file, url, or base64 string, (urls separated by pipe symbol)"
},
"motion_scale": {
"type": "number",
"title": "Motion Scale",
"default": 1.1,
"maximum": 2,
"minimum": 0,
"x-order": 12,
"description": "Motion scale (AnimateDiff)"
},
"style_images": {
"type": "string",
"title": "Style Images",
"x-order": 4,
"description": "Input style image(s) (for IP_adapter) for various endpoints. Load-able from file, url, or base64 string, (urls separated by pipe symbol)"
},
"control_method": {
"enum": [
"coarse",
"fine"
],
"type": "string",
"title": "control_method",
"description": "Shape Control method (coarse usually gives nicer results, fine is more precise to the input video)",
"default": "coarse",
"x-order": 14
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 7.5,
"maximum": 20,
"minimum": 1,
"x-order": 19,
"description": "Strength of text conditioning guidance"
},
"negative_prompt": {
"type": "string",
"title": "Negative Prompt",
"default": "nude, naked, text, watermark, low-quality, signature, padding, margins, white borders, padded border, moir\u00e9 pattern, downsampling, aliasing, distorted, blurry, blur, jpeg artifacts, compression artifacts, poorly drawn, low-resolution, bad, grainy, error, bad-contrast",
"x-order": 20,
"description": "Negative Prompt"
},
"denoise_strength": {
"type": "number",
"title": "Denoise Strength",
"default": 1,
"maximum": 1,
"minimum": 0,
"x-order": 16,
"description": "How much denoising to apply (1.0 = start from full noise, 0.0 = return input image)"
},
"input_video_path": {
"type": "string",
"title": "Input Video Path",
"x-order": 6,
"description": "For vid2vid. Load source video from file, url, or base64 string"
},
"ip_adapter_weight": {
"type": "number",
"title": "Ip Adapter Weight",
"default": 0.65,
"maximum": 2,
"minimum": 0,
"x-order": 11,
"description": "Strenght of the IP_adapter style"
},
"controlnet_strength": {
"type": "number",
"title": "Controlnet Strength",
"default": 0.85,
"maximum": 1.5,
"minimum": 0,
"x-order": 15,
"description": "Strength of controlnet guidance"
},
"interpolation_texts": {
"type": "string",
"title": "Interpolation Texts",
"x-order": 2,
"description": "| separated list of prompts for txt2vid)"
}
}
}
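Because this is standard JSON Schema, you can validate a payload locally before creating a prediction. A sketch using the third-party `jsonschema` package, assuming the schema above has been saved to `input_schema.json`:

```python
import json

from jsonschema import ValidationError, validate

with open("input_schema.json") as f:
    schema = json.load(f)

# `steps` deliberately exceeds its maximum of 40 to trigger an error.
payload = {"mode": "txt2vid", "text_input": "a lighthouse at dusk", "steps": 50}

try:
    validate(instance=payload, schema=schema)
except ValidationError as err:
    print(err.message)  # "50 is greater than the maximum of 40"
```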
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "object",
"title": "CogOutput",
"required": [
"files"
],
"properties": {
"name": {
"type": "string",
"title": "Name"
},
"files": {
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Files"
},
"isFinal": {
"type": "boolean",
"title": "Isfinal",
"default": false
},
"progress": {
"type": "number",
"title": "Progress"
},
"attributes": {
"type": "object",
"title": "Attributes"
},
"thumbnails": {
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Thumbnails"
}
}
},
"title": "Output",
"x-cog-array-type": "iterator"
}
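Each element of the array is a CogOutput: `files` and `thumbnails` are lists of URIs, `progress` reports completion, and `isFinal` marks the last item of the stream. A sketch that collects the final file URLs, assuming the client yields plain dicts shaped like CogOutput (depending on the client version, you may receive file-like objects instead):

```python
import replicate

outputs = replicate.run(
    "edenartlab/comfyui-workflows",
    input={"mode": "txt2img", "text_input": "a lighthouse at dusk"},
)

# The output is an iterator of CogOutput items; only the final
# item carries the finished files.
for item in outputs:
    if item.get("isFinal"):
        for url in item.get("files", []):
            print("result:", url)
```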