
s-henmind/xz-ctr
Run s-henmind/xz-ctr with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
prompt |
string
|
Prompt for generated image
|
|
conditioning_scale |
number
|
0.5
Max: 1 |
ControlNet strength, depth works best at 0.2, canny works best at 0.4. Recommended range is 0.3-0.8
|
image |
string
|
The image to restyle
|
|
strength |
number
|
1
Max: 1 |
Img2Img strength
|
guidance_scale |
number
|
3.5
Max: 30 |
Guidance scale
|
keep_hair |
boolean
|
False
|
keep hair or not
|
use_canny |
boolean
|
False
|
use_canny
|
use_depth |
boolean
|
False
|
use_depth
|
use_pose |
boolean
|
True
|
use_pose
|
num_inference_steps |
integer
|
22
Min: 1 Max: 38 |
Number of inference steps
|
seed |
integer
|
Random seed. Set for reproducible generation
|
|
output_format |
string (enum: webp, jpg, png)
|
jpg
|
Format of the output images
|
output_quality |
integer
|
100
Max: 100 |
Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
|
style |
string
|
通用
|
style
|
lora_scale |
number
|
0.99
Max: 2 |
Scale for the LoRA weights
|
id_image |
string
|
id image
|
|
id_image_1 |
string
|
id image
|
|
id_image_2 |
string
|
id image
|
|
id_weight |
number
|
0.85
Max: 1 |
id weight
|
num_outputs |
integer
|
1
Min: 1 Max: 4 |
Number of images to output.
|
dilate_pixels |
integer
|
2
Max: 30 |
dilate pixels
|
repaint_steps |
integer
|
4
Max: 30 |
Number of repaint steps
|
{
"type": "object",
"title": "Input",
"required": [
"prompt"
],
"properties": {
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 10,
"description": "Random seed. Set for reproducible generation"
},
"image": {
"type": "string",
"title": "Image",
"format": "uri",
"x-order": 2,
"description": "The image to restyle"
},
"style": {
"type": "string",
"title": "Style",
"default": "\u901a\u7528",
"x-order": 13,
"description": "style"
},
"prompt": {
"type": "string",
"title": "Prompt",
"x-order": 0,
"description": "Prompt for generated image"
},
"id_image": {
"type": "string",
"title": "Id Image",
"format": "uri",
"x-order": 15,
"description": "id image"
},
"strength": {
"type": "number",
"title": "Strength",
"default": 1,
"maximum": 1,
"minimum": 0,
"x-order": 3,
"description": "Img2Img strength"
},
"use_pose": {
"type": "boolean",
"title": "Use Pose",
"default": true,
"x-order": 8,
"description": "use_pose"
},
"id_weight": {
"type": "number",
"title": "Id Weight",
"default": 0.85,
"maximum": 1,
"minimum": 0,
"x-order": 18,
"description": "id weight"
},
"keep_hair": {
"type": "boolean",
"title": "Keep Hair",
"default": false,
"x-order": 5,
"description": "keep hair or not"
},
"use_canny": {
"type": "boolean",
"title": "Use Canny",
"default": false,
"x-order": 6,
"description": "use_canny"
},
"use_depth": {
"type": "boolean",
"title": "Use Depth",
"default": false,
"x-order": 7,
"description": "use_depth"
},
"id_image_1": {
"type": "string",
"title": "Id Image 1",
"format": "uri",
"x-order": 16,
"description": "id image"
},
"id_image_2": {
"type": "string",
"title": "Id Image 2",
"format": "uri",
"x-order": 17,
"description": "id image"
},
"lora_scale": {
"type": "number",
"title": "Lora Scale",
"default": 0.99,
"maximum": 2,
"minimum": 0,
"x-order": 14,
"description": "Scale for the LoRA weights"
},
"num_outputs": {
"type": "integer",
"title": "Num Outputs",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 19,
"description": "Number of images to output."
},
"dilate_pixels": {
"type": "integer",
"title": "Dilate Pixels",
"default": 2,
"maximum": 30,
"minimum": 0,
"x-order": 20,
"description": "dilate pixels"
},
"output_format": {
"enum": [
"webp",
"jpg",
"png"
],
"type": "string",
"title": "output_format",
"description": "Format of the output images",
"default": "jpg",
"x-order": 11
},
"repaint_steps": {
"type": "integer",
"title": "Repaint Steps",
"default": 4,
"maximum": 30,
"minimum": 0,
"x-order": 21,
"description": "Number of repaint steps"
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 3.5,
"maximum": 30,
"minimum": 0,
"x-order": 4,
"description": "Guidance scale"
},
"output_quality": {
"type": "integer",
"title": "Output Quality",
"default": 100,
"maximum": 100,
"minimum": 0,
"x-order": 12,
"description": "Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs"
},
"conditioning_scale": {
"type": "number",
"title": "Conditioning Scale",
"default": 0.5,
"maximum": 1,
"minimum": 0,
"x-order": 1,
"description": "ControlNet strength, depth works best at 0.2, canny works best at 0.4. Recommended range is 0.3-0.8"
},
"num_inference_steps": {
"type": "integer",
"title": "Num Inference Steps",
"default": 22,
"maximum": 38,
"minimum": 1,
"x-order": 9,
"description": "Number of inference steps"
}
}
}
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}