skytells-research/cog-lora-inference
T4 LoRA Inference with custom diffusers model selection
- Public
- 13 runs
- GitHub
Run skytells-research/cog-lora-inference with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
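For instance, with the Python client the call looks roughly like this (a minimal sketch: the `replicate` package reads `REPLICATE_API_TOKEN` from your environment, the LoRA URL below is a placeholder, and you may need to pin a specific model version):

```python
import replicate

# Minimal text-to-image call; field names match the input schema below.
# You may need to append ":<version-id>" to pin a specific model version.
output = replicate.run(
    "skytells-research/cog-lora-inference",
    input={
        "model": "SG161222/RealVisXL_V4.0",
        "lora_url": "https://example.com/your-lora-weights.tar",  # required; placeholder URL
        "prompt": "A photo of TOK",
        "width": 1024,
        "height": 1024,
        "num_inference_steps": 50,
        "guidance_scale": 7.5,
        "lora_scale": 0.6,
    },
)
print(output)  # a list of image URLs (see the output schema below)
```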
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
model | string | SG161222/RealVisXL_V4.0 | Diffusers model |
lora_url | string | | Load LoRA model (required) |
prompt | string | A photo of TOK | Input prompt |
negative_prompt | string | (worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), tooth, open mouth,bad hand, bad fingers, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, long neck, out of frame, extra fingers, mutated hands, monochrome, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, glitchy, bokeh, (((long neck))), ((flat chested)), ((((visible hand)))), ((((ugly)))), (((duplicate))), ((morbid)), ((mutilated)), [out of frame], extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))), out of frame, ugly, extra limbs, (bad anatomy), gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck))) red eyes, multiple subjects, extra heads | Input negative prompt |
image | string | | Input image for img2img or inpaint mode |
mask | string | | Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted. |
width | integer | 1024 | Width of output image |
height | integer | 1024 | Height of output image |
num_outputs | integer | 1 (min: 1, max: 4) | Number of images to output |
scheduler | string (enum) | DPMSolverMultistep. Options: DDIM, DPMSolverMultistep, HeunDiscrete, KarrasDPM, K_EULER_ANCESTRAL, K_EULER, PNDM | Scheduler |
num_inference_steps | integer | 50 (min: 1, max: 500) | Number of denoising steps |
guidance_scale | number | 7.5 (min: 1, max: 50) | Scale for classifier-free guidance |
prompt_strength | number | 0.8 (max: 1) | Prompt strength when using img2img / inpaint. 1.0 corresponds to full destruction of information in the input image. |
seed | integer | | Random seed. Leave blank to randomize the seed. |
refine | string (enum) | no_refiner. Options: no_refiner, expert_ensemble_refiner, base_image_refiner | Which refine style to use |
high_noise_frac | number | 0.8 (max: 1) | For expert_ensemble_refiner, the fraction of noise to use |
refine_steps | integer | | For base_image_refiner, the number of steps to refine; defaults to num_inference_steps |
apply_watermark | boolean | True | Applies a watermark to enable determining if an image is generated in downstream applications. If you have other provisions for generating or deploying images safely, you can use this to disable watermarking. |
lora_scale | number | 0.6 (max: 1) | LoRA additive scale. Only applicable on trained models. |
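When you pass an image (and optionally a mask), the model runs in img2img or inpaint mode and prompt_strength controls how far the result is pushed away from the input image. A minimal inpainting sketch with the Python client, again using placeholder URLs:

```python
import replicate

# Inpaint sketch: black mask areas are preserved, white areas are regenerated.
# All URLs here are placeholders; swap in your own image, mask, and LoRA weights.
output = replicate.run(
    "skytells-research/cog-lora-inference",
    input={
        "lora_url": "https://example.com/your-lora-weights.tar",
        "prompt": "A photo of TOK wearing a red jacket",
        "image": "https://example.com/input.png",
        "mask": "https://example.com/mask.png",
        "prompt_strength": 0.8,  # 1.0 would discard the input image entirely
        "num_inference_steps": 50,
    },
)
```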
{
"type": "object",
"title": "Input",
"required": [
"lora_url"
],
"properties": {
"mask": {
"type": "string",
"title": "Mask",
"format": "uri",
"x-order": 5,
"description": "Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted."
},
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 13,
"description": "Random seed. Leave blank to randomize the seed"
},
"image": {
"type": "string",
"title": "Image",
"format": "uri",
"x-order": 4,
"description": "Input image for img2img or inpaint mode"
},
"model": {
"type": "string",
"title": "Model",
"default": "SG161222/RealVisXL_V4.0",
"x-order": 0,
"description": "Diffusers Model"
},
"width": {
"type": "integer",
"title": "Width",
"default": 1024,
"x-order": 6,
"description": "Width of output image"
},
"height": {
"type": "integer",
"title": "Height",
"default": 1024,
"x-order": 7,
"description": "Height of output image"
},
"prompt": {
"type": "string",
"title": "Prompt",
"default": "A photo of TOK",
"x-order": 2,
"description": "Input prompt"
},
"refine": {
"enum": [
"no_refiner",
"expert_ensemble_refiner",
"base_image_refiner"
],
"type": "string",
"title": "refine",
"description": "Which refine style to use",
"default": "no_refiner",
"x-order": 14
},
"lora_url": {
"type": "string",
"title": "Lora Url",
"x-order": 1,
"description": "Load Lora model"
},
"scheduler": {
"enum": [
"DDIM",
"DPMSolverMultistep",
"HeunDiscrete",
"KarrasDPM",
"K_EULER_ANCESTRAL",
"K_EULER",
"PNDM"
],
"type": "string",
"title": "scheduler",
"description": "scheduler",
"default": "DPMSolverMultistep",
"x-order": 9
},
"lora_scale": {
"type": "number",
"title": "Lora Scale",
"default": 0.6,
"maximum": 1,
"minimum": 0,
"x-order": 18,
"description": "LoRA additive scale. Only applicable on trained models."
},
"num_outputs": {
"type": "integer",
"title": "Num Outputs",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 8,
"description": "Number of images to output."
},
"refine_steps": {
"type": "integer",
"title": "Refine Steps",
"x-order": 16,
"description": "For base_image_refiner, the number of steps to refine, defaults to num_inference_steps"
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 7.5,
"maximum": 50,
"minimum": 1,
"x-order": 11,
"description": "Scale for classifier-free guidance"
},
"apply_watermark": {
"type": "boolean",
"title": "Apply Watermark",
"default": true,
"x-order": 17,
"description": "Applies a watermark to enable determining if an image is generated in downstream applications. If you have other provisions for generating or deploying images safely, you can use this to disable watermarking."
},
"high_noise_frac": {
"type": "number",
"title": "High Noise Frac",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 15,
"description": "For expert_ensemble_refiner, the fraction of noise to use"
},
"negative_prompt": {
"type": "string",
"title": "Negative Prompt",
"default": "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), tooth, open mouth,bad hand, bad fingers, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, long neck, out of frame, extra fingers, mutated hands, monochrome, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, glitchy, bokeh, (((long neck))), ((flat chested)), ((((visible hand)))), ((((ugly)))), (((duplicate))), ((morbid)), ((mutilated)), [out of frame], extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))), out of frame, ugly, extra limbs, (bad anatomy), gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck))) red eyes, multiple subjects, extra heads",
"x-order": 3,
"description": "Input Negative Prompt"
},
"prompt_strength": {
"type": "number",
"title": "Prompt Strength",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 12,
"description": "Prompt strength when using img2img / inpaint. 1.0 corresponds to full destruction of information in image"
},
"num_inference_steps": {
"type": "integer",
"title": "Num Inference Steps",
"default": 50,
"maximum": 500,
"minimum": 1,
"x-order": 10,
"description": "Number of denoising steps"
}
}
}
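Because the input schema is standard JSON Schema, you can validate a payload locally before sending a request. A sketch using the third-party jsonschema package, assuming the object above has been saved as input_schema.json:

```python
import json
from jsonschema import validate, ValidationError

# The input schema shown above, saved locally as input_schema.json.
with open("input_schema.json") as f:
    input_schema = json.load(f)

payload = {
    "lora_url": "https://example.com/your-lora-weights.tar",  # the only required field
    "prompt": "A photo of TOK",
    "num_outputs": 2,
}

try:
    # Note: jsonschema does not check "format": "uri" unless a format checker is supplied.
    validate(instance=payload, schema=input_schema)
    print("payload is valid")
except ValidationError as err:
    print("invalid input:", err.message)
```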
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}
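Each element of the output array is a URL pointing at one generated image, so collecting results is just a matter of downloading each URI. A small sketch using only the Python standard library (the URL shown is a placeholder):

```python
import urllib.request

# `output` is the list of URL strings returned by a prediction (placeholder here).
output = [
    "https://example.com/prediction-output-0.png",
]

for i, url in enumerate(output):
    filename = f"output_{i}.png"
    urllib.request.urlretrieve(url, filename)  # download each generated image
    print("saved", filename)
```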