edenartlab/sdxl-pipelines
Public · 487.7K runs
Run edenartlab/sdxl-pipelines with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
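For example, a prediction can be created with the official `replicate` Python client. The sketch below is illustrative only: the version hash is a placeholder (copy the current one from the model page) and it assumes `REPLICATE_API_TOKEN` is set in your environment.

```python
# Minimal sketch using the official `replicate` Python client.
# Assumes REPLICATE_API_TOKEN is exported; the version hash is a placeholder.
import replicate

output = replicate.run(
    "edenartlab/sdxl-pipelines:<version-hash>",  # placeholder version
    input={
        "mode": "create",
        "text_input": "a watercolor painting of a lighthouse at dawn",
        "width": 1024,
        "height": 1024,
    },
)

# The model declares an iterator output (see the output schema below),
# so results arrive as a sequence of output objects rather than a single value.
for item in output:
    print(item)
```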
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
mode | string (enum) | create | Mode. Options: create, remix, upscale, blend, controlnet, interpolate, real2real, real2real_audio, interrogate, kojii/huemin |
stream | boolean | False | Yield individual results if True |
stream_every | integer | 1 (min: 1, max: 25) | For mode create, how many steps per update to stream (stream must be set to True) |
width | integer | 1024 (min: 512, max: 2048) | Width |
height | integer | 1024 (min: 512, max: 2048) | Height |
checkpoint | string (enum) | juggernaut_XL2 | Which Stable Diffusion checkpoint to use. Options: sdxl-v1.0, juggernaut_XL2 |
lora | string | | (optional) URL of LoRA finetuning |
lora_scale | number | 0.7 (min: 0, max: 1.5) | LoRA scale (how much of the LoRA finetuning to apply) |
sampler | string (enum) | euler | Which sampler to use. Options: ddim, ddpm, klms, euler, euler_ancestral, dpm, kdpm2, kdpm2_ancestral, pndm |
steps | integer | 35 (min: 10, max: 70) | Diffusion steps |
guidance_scale | number | 7.5 (min: 0, max: 20) | Strength of text conditioning guidance |
upscale_f | number | 1 (min: 1, max: 2) | Upscaling resolution |
init_image | string | | Load initial image from file, url, or base64 string |
init_image_strength | number | 0 (min: 0, max: 1) | Strength of initial image |
adopt_aspect_from_init_img | boolean | True | Adopt aspect ratio from init image |
controlnet_type | string (enum) | off | ControlNet type. Options: off, canny-edge, depth, luminance |
control_image | string | | Image for ControlNet guidance |
control_image_strength | number | 0 (min: 0, max: 1.5) | Strength of control image |
ip_image | string | | Load ip_adapter image from file, url, or base64 string |
ip_image_strength | number | 0.65 (min: 0, max: 1.25) | Strength of image conditioning from ip_adapter (vs. text conditioning from clip-interrogator or prompt); used in remix, upscale, blend, and real2real |
text_input | string | | Text input |
text_inputs_to_interpolate | string | | Text inputs to interpolate, separated by \| |
text_inputs_to_interpolate_weights | string | | Text input weights to interpolate, separated by \| |
uc_text | string | nude, naked, text, watermark, low-quality, signature, padding, margins, white borders, padded border, moiré pattern, downsampling, aliasing, distorted, blurry, blur, jpeg artifacts, compression artifacts, poorly drawn, low-resolution, bad, grainy, error, bad-contrast | Negative text input (mode==all) |
seed | integer | 13 (min: 0, max: 10000000000) | Random seed |
n_samples | integer | 1 (min: 1, max: 4) | Batch size |
n_frames | integer | 40 (min: 3, max: 1000) | Total number of frames for video modes |
interpolation_texts | string | | Interpolation texts for video modes |
interpolation_seeds | string | | Seeds for interpolated texts for video modes |
interpolation_init_images | string | | Interpolation init images (file paths or urls) for video modes |
interpolation_init_images_power | number | 2.5 (min: 0.5, max: 5) | Power for interpolation_init_images prompts for video modes |
interpolation_init_images_min_strength | number | 0.05 (min: 0, max: 1) | Minimum init image strength for interpolation_init_images prompts for video modes |
interpolation_init_images_max_strength | number | 0.95 (min: 0, max: 1) | Maximum init image strength for interpolation_init_images prompts for video modes |
audio_file | string | | An audio file to use for real2real_audio |
loop | boolean | True | Loop (mode==interpolate & real2real) |
smooth | boolean | True | Smooth (mode==interpolate & real2real) |
latent_blending_skip_f | string | 0.05\|0.6 | Fraction of the denoising trajectory to skip at the start and end of each interpolation phase; two floats separated by a pipe (\|) |
n_anchor_imgs | integer | 3 (min: 3, max: 6) | Number of anchor frames to render (including keyframes) before activating latent blending |
n_film | integer | 1 (min: 0, max: 3) | Number of times to smooth final frames with FILM (mode==interpolate) |
fps | integer | 12 (min: 1, max: 30) | Frames per second (mode==interpolate & real2real) |
use_lcm | boolean | False | Use LCM (mode==interpolate & real2real) |
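For the video modes, several of the string fields above encode lists as pipe-separated values. The payload below is an illustrative sketch, not an official example; in particular, using the pipe separator for `interpolation_texts` and `interpolation_seeds` is an assumption inferred from the other pipe-separated fields.

```python
# Illustrative interpolate-mode input (assumption: list-like values are
# passed as pipe-separated strings, as in latent_blending_skip_f above).
interpolate_input = {
    "mode": "interpolate",
    "interpolation_texts": "a foggy forest at dawn|the same forest at midnight",
    "interpolation_seeds": "13|42",        # one seed per interpolation text
    "n_frames": 40,                        # total frames in the rendered video
    "fps": 12,
    "loop": True,
    "smooth": True,
    "latent_blending_skip_f": "0.05|0.6",  # skip fractions at phase start/end
}
```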
```json
{
"type": "object",
"title": "Input",
"properties": {
"fps": {
"type": "integer",
"title": "Fps",
"default": 12,
"maximum": 30,
"minimum": 1,
"x-order": 39,
"description": "Frames per second (mode==interpolate & real2real)"
},
"loop": {
"type": "boolean",
"title": "Loop",
"default": true,
"x-order": 34,
"description": "Loops (mode==interpolate & real2real)"
},
"lora": {
"type": "string",
"title": "Lora",
"x-order": 6,
"description": "(optional) URL of Lora finetuning"
},
"mode": {
"enum": [
"create",
"remix",
"upscale",
"blend",
"controlnet",
"interpolate",
"real2real",
"real2real_audio",
"interrogate",
"kojii/huemin"
],
"type": "string",
"title": "mode",
"description": "Mode",
"default": "create",
"x-order": 0
},
"seed": {
"type": "integer",
"title": "Seed",
"default": 13,
"maximum": 10000000000,
"minimum": 0,
"x-order": 24,
"description": "random seed"
},
"steps": {
"type": "integer",
"title": "Steps",
"default": 35,
"maximum": 70,
"minimum": 10,
"x-order": 9,
"description": "Diffusion steps"
},
"width": {
"type": "integer",
"title": "Width",
"default": 1024,
"maximum": 2048,
"minimum": 512,
"x-order": 3,
"description": "Width"
},
"height": {
"type": "integer",
"title": "Height",
"default": 1024,
"maximum": 2048,
"minimum": 512,
"x-order": 4,
"description": "Height"
},
"n_film": {
"type": "integer",
"title": "N Film",
"default": 1,
"maximum": 3,
"minimum": 0,
"x-order": 38,
"description": "Number of times to smooth final frames with FILM (default is 0) (mode==interpolate)"
},
"smooth": {
"type": "boolean",
"title": "Smooth",
"default": true,
"x-order": 35,
"description": "Smooth (mode==interpolate & real2real)"
},
"stream": {
"type": "boolean",
"title": "Stream",
"default": false,
"x-order": 1,
"description": "yield individual results if True"
},
"sampler": {
"enum": [
"ddim",
"ddpm",
"klms",
"euler",
"euler_ancestral",
"dpm",
"kdpm2",
"kdpm2_ancestral",
"pndm"
],
"type": "string",
"title": "sampler",
"description": "Which sampler to use",
"default": "euler",
"x-order": 8
},
"uc_text": {
"type": "string",
"title": "Uc Text",
"default": "nude, naked, text, watermark, low-quality, signature, padding, margins, white borders, padded border, moir\u00e9 pattern, downsampling, aliasing, distorted, blurry, blur, jpeg artifacts, compression artifacts, poorly drawn, low-resolution, bad, grainy, error, bad-contrast",
"x-order": 23,
"description": "Negative text input (mode==all)"
},
"use_lcm": {
"type": "boolean",
"title": "Use Lcm",
"default": false,
"x-order": 40,
"description": "Smooth (mode==interpolate & real2real)"
},
"ip_image": {
"type": "string",
"title": "Ip Image",
"x-order": 18,
"description": "Load ip_adapter image from file, url, or base64 string"
},
"n_frames": {
"type": "integer",
"title": "N Frames",
"default": 40,
"maximum": 1000,
"minimum": 3,
"x-order": 26,
"description": "Total number of frames for video modes"
},
"n_samples": {
"type": "integer",
"title": "N Samples",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 25,
"description": "batch size"
},
"upscale_f": {
"type": "number",
"title": "Upscale F",
"default": 1,
"maximum": 2,
"minimum": 1,
"x-order": 11,
"description": "Upscaling resolution"
},
"audio_file": {
"type": "string",
"title": "Audio File",
"x-order": 33,
"description": "An audio file to use for real2real_audio"
},
"checkpoint": {
"enum": [
"sdxl-v1.0",
"juggernaut_XL2"
],
"type": "string",
"title": "checkpoint",
"description": "Which Stable Diffusion checkpoint to use",
"default": "juggernaut_XL2",
"x-order": 5
},
"init_image": {
"type": "string",
"title": "Init Image",
"x-order": 12,
"description": "Load initial image from file, url, or base64 string"
},
"lora_scale": {
"type": "number",
"title": "Lora Scale",
"default": 0.7,
"maximum": 1.5,
"minimum": 0,
"x-order": 7,
"description": "Lora scale (how much of the Lora finetuning to apply)"
},
"text_input": {
"type": "string",
"title": "Text Input",
"x-order": 20,
"description": "Text input"
},
"stream_every": {
"type": "integer",
"title": "Stream Every",
"default": 1,
"maximum": 25,
"minimum": 1,
"x-order": 2,
"description": "for mode create, how many steps per update to stream (steam must be set to True)"
},
"control_image": {
"type": "string",
"title": "Control Image",
"x-order": 16,
"description": "image for controlnet guidance"
},
"n_anchor_imgs": {
"type": "integer",
"title": "N Anchor Imgs",
"default": 3,
"maximum": 6,
"minimum": 3,
"x-order": 37,
"description": "Number of anchor frames to render (including keyframes) before activating latent blending"
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 7.5,
"maximum": 20,
"minimum": 0,
"x-order": 10,
"description": "Strength of text conditioning guidance"
},
"controlnet_type": {
"enum": [
"off",
"canny-edge",
"depth",
"luminance"
],
"type": "string",
"title": "controlnet_type",
"description": "Controlnet type",
"default": "off",
"x-order": 15
},
"ip_image_strength": {
"type": "number",
"title": "Ip Image Strength",
"default": 0.65,
"maximum": 1.25,
"minimum": 0,
"x-order": 19,
"description": "Strength of image conditioning from ip_adapter (vs txt conditioning from clip-interrogator or prompt) (used in remix, upscale, blend and real2real)"
},
"init_image_strength": {
"type": "number",
"title": "Init Image Strength",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 13,
"description": "Strength of initial image"
},
"interpolation_seeds": {
"type": "string",
"title": "Interpolation Seeds",
"x-order": 28,
"description": "Seeds for interpolated texts for video modes"
},
"interpolation_texts": {
"type": "string",
"title": "Interpolation Texts",
"x-order": 27,
"description": "Interpolation texts for video modes"
},
"control_image_strength": {
"type": "number",
"title": "Control Image Strength",
"default": 0,
"maximum": 1.5,
"minimum": 0,
"x-order": 17,
"description": "Strength of control image"
},
"latent_blending_skip_f": {
"type": "string",
"title": "Latent Blending Skip F",
"default": "0.05|0.6",
"x-order": 36,
"description": "What fraction of the denoising trajectory to skip at the start and end of each interpolation phase, two floats, separated by a pipe (|)"
},
"interpolation_init_images": {
"type": "string",
"title": "Interpolation Init Images",
"x-order": 29,
"description": "Interpolation init images, file paths or urls for video modes"
},
"adopt_aspect_from_init_img": {
"type": "boolean",
"title": "Adopt Aspect From Init Img",
"default": true,
"x-order": 14,
"description": "Adopt aspect ratio from init image"
},
"text_inputs_to_interpolate": {
"type": "string",
"title": "Text Inputs To Interpolate",
"x-order": 21,
"description": "Text inputs to interpolate, separated by |"
},
"interpolation_init_images_power": {
"type": "number",
"title": "Interpolation Init Images Power",
"default": 2.5,
"maximum": 5,
"minimum": 0.5,
"x-order": 30,
"description": "Power for interpolation_init_images prompts for video modes"
},
"text_inputs_to_interpolate_weights": {
"type": "string",
"title": "Text Inputs To Interpolate Weights",
"x-order": 22,
"description": "Text input weights to interpolate, separated by |"
},
"interpolation_init_images_max_strength": {
"type": "number",
"title": "Interpolation Init Images Max Strength",
"default": 0.95,
"maximum": 1,
"minimum": 0,
"x-order": 32,
"description": "Maximum init image strength for interpolation_init_images prompts for video modes"
},
"interpolation_init_images_min_strength": {
"type": "number",
"title": "Interpolation Init Images Min Strength",
"default": 0.05,
"maximum": 1,
"minimum": 0,
"x-order": 31,
"description": "Minimum init image strength for interpolation_init_images prompts for video modes"
}
}
}
```
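Since the block above is standard JSON Schema (plus Replicate's `x-order` extension keys, which validators ignore), an input payload can be sanity-checked locally before a prediction is submitted. Below is a sketch using the third-party `jsonschema` package; the `input_schema.json` filename is hypothetical.

```python
# Sketch: validate a payload against the input schema saved locally as
# "input_schema.json" (hypothetical filename). Requires `pip install jsonschema`.
import json

from jsonschema import ValidationError, validate

with open("input_schema.json") as f:
    input_schema = json.load(f)

payload = {"mode": "create", "text_input": "an isometric voxel city", "steps": 35}

try:
    validate(instance=payload, schema=input_schema)
    print("payload conforms to the input schema")
except ValidationError as err:
    print(f"invalid input: {err.message}")
```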
Output schema
The shape of the response you’ll get when you run this model with an API.
```json
{
"type": "array",
"items": {
"type": "object",
"title": "CogOutput",
"properties": {
"name": {
"type": "string",
"title": "Name"
},
"files": {
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Files",
"default": []
},
"isFinal": {
"type": "boolean",
"title": "Isfinal",
"default": false
},
"progress": {
"type": "number",
"title": "Progress"
},
"attributes": {
"type": "object",
"title": "Attributes"
},
"thumbnails": {
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Thumbnails",
"default": []
}
}
},
"title": "Output",
"x-cog-array-type": "iterator"
}
```
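Because the output is declared as an iterator of `CogOutput` objects, results can be consumed incrementally, which pairs naturally with the `stream` and `stream_every` inputs. The sketch below assumes each yielded item arrives as a dict matching the shape above and uses a placeholder version hash; adjust the field access if your client deserializes items differently.

```python
# Sketch: consume the iterator output while streaming intermediate updates.
# Assumes each yielded item is a dict shaped like CogOutput above.
import replicate

output = replicate.run(
    "edenartlab/sdxl-pipelines:<version-hash>",  # placeholder version
    input={
        "mode": "create",
        "text_input": "a brutalist cathedral overgrown with moss",
        "stream": True,
        "stream_every": 5,  # an update every 5 diffusion steps
    },
)

for item in output:
    if item.get("progress") is not None:
        print(f"progress: {item['progress']:.0%}")
    if item.get("isFinal"):
        print("final files:", item.get("files", []))
```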