prompthunt/cog-realvisxl2-lora-inference
- Public
- 274 runs
Run prompthunt/cog-realvisxl2-lora-inference with an API
Use one of our client libraries to get started quickly. From the Playground tab you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
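For example, a minimal call from Python might look like the sketch below. It assumes the `replicate` package is installed and `REPLICATE_API_TOKEN` is set in the environment; the version hash and the LoRA URL are placeholders you would replace with your own values, and only `lora_url` is required.

```python
# Minimal text-to-image sketch with the Replicate Python client.
# The version hash and lora_url below are placeholders, not real values.
import replicate

output = replicate.run(
    "prompthunt/cog-realvisxl2-lora-inference:<version-hash>",
    input={
        "lora_url": "https://example.com/trained-lora.tar",  # required
        "prompt": "A photo of TOK",
        "width": 768,
        "height": 1024,
    },
)
print(output)  # a list of image URLs (see the output schema below)
```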
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
| Field | Type | Default value | Description |
|---|---|---|---|
| lora_url | string | | Load Lora model |
| prompt | string | A photo of TOK | Input prompt |
| negative_prompt | string | plastic, blurry, grainy, [deformed \| disfigured], poorly drawn, [bad : wrong] anatomy, [extra \| missing \| floating \| disconnected] limb, (mutated hands and fingers), blurry | Input Negative Prompt |
| image | string | | Input image for img2img or inpaint mode |
| mask | string | | Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted. |
| pose_image | string | | Input pose image for controlnet mode |
| width | integer | 768 | Width of output image |
| height | integer | 1024 | Height of output image |
| num_outputs | integer | 1 (Min: 1, Max: 100) | Number of images to output. |
| scheduler | string (enum) | DPM++SDEKarras (Options: DDIM, DPMSolverMultistep, HeunDiscrete, KarrasDPM, K_EULER_ANCESTRAL, K_EULER, PNDM, DPM++SDEKarras) | scheduler |
| num_inference_steps | integer | 25 (Min: 1, Max: 500) | Number of denoising steps |
| guidance_scale | number | 3 (Min: 1, Max: 50) | Scale for classifier-free guidance |
| prompt_strength | number | 0.8 (Max: 1) | Prompt strength when using img2img / inpaint. 1.0 corresponds to full destruction of information in image |
| seed | integer | | Random seed. Leave blank to randomize the seed |
| refine | string (enum) | no_refiner (Options: no_refiner, expert_ensemble_refiner, base_image_refiner) | Which refine style to use |
| high_noise_frac | number | 0.8 (Max: 1) | For expert_ensemble_refiner, the fraction of noise to use |
| refine_steps | integer | | For base_image_refiner, the number of steps to refine, defaults to num_inference_steps |
| lora_scale | number | 0.8 (Max: 1) | LoRA additive scale. Only applicable on trained models. |
| mask_blur_amount | number | 8 | Amount of blur to apply to the mask. |
| face_padding | number | 1.5 | Amount of padding (as percentage) to add to the face bounding box. |
| face_resize_to | integer | 1024 | Resize the face bounding box to this size (in pixels). |
| inpaint_prompt | string | A photo of TOK | Input inpaint prompt |
| inpaint_negative_prompt | string | (worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth | Input inpaint negative prompt |
| inpaint_strength | number | 0.35 (Max: 1) | Prompt strength when using inpaint. 1.0 corresponds to full destruction of information in image |
| inpaint_num_inference_steps | integer | 25 (Min: 1, Max: 500) | Number of denoising steps for inpainting |
| inpaint_guidance_scale | number | 3 (Min: 1, Max: 50) | Scale for classifier-free guidance for inpainting |
| inpaint_lora_scale | number | 0.8 (Max: 1) | LoRA additive scale. Only applicable on trained models. |
| controlnet_conditioning_scale | number | 1 | Scale for guidance for controlnet |
| upscale_face_image | boolean | False | Upscales face image before inpainting |
| upscale_final_image | boolean | False | Upscales final image before returning |
| include_debug_output_images | boolean | True | Include debug output in the output |
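The field descriptions above suggest that the operating mode follows from which inputs you supply: text-to-image by default, img2img/inpaint when `image` (and optionally `mask`) is given, and controlnet when `pose_image` is given. A sketch of an inpaint-style call under that assumption, reusing the placeholder version hash and URLs from the earlier example:

```python
# Inpaint sketch: black mask areas are preserved, white areas are repainted.
# The version hash and asset URLs are placeholders.
import replicate

output = replicate.run(
    "prompthunt/cog-realvisxl2-lora-inference:<version-hash>",
    input={
        "lora_url": "https://example.com/trained-lora.tar",
        "image": "https://example.com/portrait.png",
        "mask": "https://example.com/portrait-mask.png",
        "prompt": "A photo of TOK",
        "prompt_strength": 0.8,  # 1.0 fully discards the source image
    },
)
```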
{
"type": "object",
"title": "Input",
"required": [
"lora_url"
],
"properties": {
"mask": {
"type": "string",
"title": "Mask",
"format": "uri",
"x-order": 4,
"description": "Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted."
},
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 13,
"description": "Random seed. Leave blank to randomize the seed"
},
"image": {
"type": "string",
"title": "Image",
"format": "uri",
"x-order": 3,
"description": "Input image for img2img or inpaint mode"
},
"width": {
"type": "integer",
"title": "Width",
"default": 768,
"x-order": 6,
"description": "Width of output image"
},
"height": {
"type": "integer",
"title": "Height",
"default": 1024,
"x-order": 7,
"description": "Height of output image"
},
"prompt": {
"type": "string",
"title": "Prompt",
"default": "A photo of TOK",
"x-order": 1,
"description": "Input prompt"
},
"refine": {
"enum": [
"no_refiner",
"expert_ensemble_refiner",
"base_image_refiner"
],
"type": "string",
"title": "refine",
"description": "Which refine style to use",
"default": "no_refiner",
"x-order": 14
},
"lora_url": {
"type": "string",
"title": "Lora Url",
"x-order": 0,
"description": "Load Lora model"
},
"scheduler": {
"enum": [
"DDIM",
"DPMSolverMultistep",
"HeunDiscrete",
"KarrasDPM",
"K_EULER_ANCESTRAL",
"K_EULER",
"PNDM",
"DPM++SDEKarras"
],
"type": "string",
"title": "scheduler",
"description": "scheduler",
"default": "DPM++SDEKarras",
"x-order": 9
},
"lora_scale": {
"type": "number",
"title": "Lora Scale",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 17,
"description": "LoRA additive scale. Only applicable on trained models."
},
"pose_image": {
"type": "string",
"title": "Pose Image",
"format": "uri",
"x-order": 5,
"description": "Input pose image for controlnet mode"
},
"num_outputs": {
"type": "integer",
"title": "Num Outputs",
"default": 1,
"maximum": 100,
"minimum": 1,
"x-order": 8,
"description": "Number of images to output."
},
"face_padding": {
"type": "number",
"title": "Face Padding",
"default": 1.5,
"x-order": 19,
"description": "Amount of padding (as percentage) to add to the face bounding box."
},
"refine_steps": {
"type": "integer",
"title": "Refine Steps",
"x-order": 16,
"description": "For base_image_refiner, the number of steps to refine, defaults to num_inference_steps"
},
"face_resize_to": {
"type": "integer",
"title": "Face Resize To",
"default": 1024,
"x-order": 20,
"description": "Resize the face bounding box to this size (in pixels)."
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 3,
"maximum": 50,
"minimum": 1,
"x-order": 11,
"description": "Scale for classifier-free guidance"
},
"inpaint_prompt": {
"type": "string",
"title": "Inpaint Prompt",
"default": "A photo of TOK",
"x-order": 21,
"description": "Input inpaint prompt"
},
"high_noise_frac": {
"type": "number",
"title": "High Noise Frac",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 15,
"description": "For expert_ensemble_refiner, the fraction of noise to use"
},
"negative_prompt": {
"type": "string",
"title": "Negative Prompt",
"default": "plastic, blurry, grainy, [deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry",
"x-order": 2,
"description": "Input Negative Prompt"
},
"prompt_strength": {
"type": "number",
"title": "Prompt Strength",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 12,
"description": "Prompt strength when using img2img / inpaint. 1.0 corresponds to full destruction of information in image"
},
"inpaint_strength": {
"type": "number",
"title": "Inpaint Strength",
"default": 0.35,
"maximum": 1,
"minimum": 0,
"x-order": 23,
"description": "Prompt strength when using inpaint. 1.0 corresponds to full destruction of information in image"
},
"mask_blur_amount": {
"type": "number",
"title": "Mask Blur Amount",
"default": 8,
"x-order": 18,
"description": "Amount of blur to apply to the mask."
},
"inpaint_lora_scale": {
"type": "number",
"title": "Inpaint Lora Scale",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 26,
"description": "LoRA additive scale. Only applicable on trained models."
},
"upscale_face_image": {
"type": "boolean",
"title": "Upscale Face Image",
"default": false,
"x-order": 28,
"description": "Upscales face image before inpainting"
},
"num_inference_steps": {
"type": "integer",
"title": "Num Inference Steps",
"default": 25,
"maximum": 500,
"minimum": 1,
"x-order": 10,
"description": "Number of denoising steps"
},
"upscale_final_image": {
"type": "boolean",
"title": "Upscale Final Image",
"default": false,
"x-order": 29,
"description": "Upscales final image before returning"
},
"inpaint_guidance_scale": {
"type": "number",
"title": "Inpaint Guidance Scale",
"default": 3,
"maximum": 50,
"minimum": 1,
"x-order": 25,
"description": "Scale for classifier-free guidance for inpainting"
},
"inpaint_negative_prompt": {
"type": "string",
"title": "Inpaint Negative Prompt",
"default": "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
"x-order": 22,
"description": "Input inpaint negative prompt"
},
"include_debug_output_images": {
"type": "boolean",
"title": "Include Debug Output Images",
"default": true,
"x-order": 30,
"description": "Include debug output in the output"
},
"inpaint_num_inference_steps": {
"type": "integer",
"title": "Inpaint Num Inference Steps",
"default": 25,
"maximum": 500,
"minimum": 1,
"x-order": 24,
"description": "Number of denoising steps for inpainting"
},
"controlnet_conditioning_scale": {
"type": "number",
"title": "Controlnet Conditioning Scale",
"default": 1,
"x-order": 27,
"description": "Scale for guidance for controlnet"
}
}
}
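If you want to sanity-check an input payload before sending it, the JSON above is standard JSON Schema and can be validated locally. A sketch with the `jsonschema` package, assuming the block has been saved as `input_schema.json` (that filename is an assumption, not part of the API):

```python
# Validate a candidate input dict against the schema above.
import json
from jsonschema import validate, ValidationError

with open("input_schema.json") as f:
    schema = json.load(f)

candidate = {
    "lora_url": "https://example.com/trained-lora.tar",  # placeholder URL
    "prompt": "A photo of TOK",
}

try:
    validate(instance=candidate, schema=schema)
    print("input is valid")
except ValidationError as err:
    print("invalid input:", err.message)
```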
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}
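Since the output is an array of image URIs, a common final step is to download each one. A minimal sketch (the URLs shown are placeholders for whatever the prediction actually returns):

```python
# Save each returned image URL to a local PNG file.
import urllib.request

output = [
    "https://example.com/output-0.png",  # placeholder
    "https://example.com/output-1.png",  # placeholder
]

for i, url in enumerate(output):
    urllib.request.urlretrieve(url, f"output-{i}.png")
```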