
s-henmind/redraw
Run s-henmind/redraw with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
prompt |
string
|
Prompt for generated image
|
|
conditioning_scale |
number
|
0
Max: 1 |
ControlNet strength, depth works best at 0.2, canny works best at 0.4. Recommended range is 0.3-0.8
|
strength |
number
|
0.8
Max: 1 |
Img2Img strength
|
guidance_scale |
number
|
3.5
Max: 30 |
Guidance scale
|
enable_hyper_flux_8_step |
boolean
|
False
|
Whether to use Hyper-FLUX.1-dev-8steps or not. If False, make sure to increase your number of inference steps
|
num_inference_steps |
integer
|
8
Min: 1 Max: 28 |
Number of inference steps
|
seed |
integer
|
Random seed. Set for reproducible generation
|
|
output_format |
string
|
jpg
|
Format of the output images
|
output_quality |
integer
|
100
Max: 100 |
Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs
|
lora_weights |
string
|
Huggingface path, or URL to the LoRA weights. Ex: alvdansen/frosting_lane_flux
|
|
lora_scale |
number
|
0.8
Max: 1 |
Scale for the LoRA weights
|
image |
string
|
id image
|
|
num_outputs |
integer
|
1
Min: 1 Max: 4 |
Number of images to output.
|
style |
string
|
style
|
{
"type": "object",
"title": "Input",
"required": [
"prompt"
],
"properties": {
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 6,
"description": "Random seed. Set for reproducible generation"
},
"image": {
"type": "string",
"title": "Image",
"format": "uri",
"x-order": 11,
"description": "id image"
},
"style": {
"type": "string",
"title": "Style",
"x-order": 13,
"description": "style"
},
"prompt": {
"type": "string",
"title": "Prompt",
"x-order": 0,
"description": "Prompt for generated image"
},
"strength": {
"type": "number",
"title": "Strength",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 2,
"description": "Img2Img strength"
},
"lora_scale": {
"type": "number",
"title": "Lora Scale",
"default": 0.8,
"maximum": 1,
"minimum": 0,
"x-order": 10,
"description": "Scale for the LoRA weights"
},
"num_outputs": {
"type": "integer",
"title": "Num Outputs",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 12,
"description": "Number of images to output."
},
"lora_weights": {
"type": "string",
"title": "Lora Weights",
"x-order": 9,
"description": "Huggingface path, or URL to the LoRA weights. Ex: alvdansen/frosting_lane_flux"
},
"output_format": {
"enum": [
"webp",
"jpg",
"png"
],
"type": "string",
"title": "output_format",
"description": "Format of the output images",
"default": "jpg",
"x-order": 7
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 3.5,
"maximum": 30,
"minimum": 0,
"x-order": 3,
"description": "Guidance scale"
},
"output_quality": {
"type": "integer",
"title": "Output Quality",
"default": 100,
"maximum": 100,
"minimum": 0,
"x-order": 8,
"description": "Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs"
},
"conditioning_scale": {
"type": "number",
"title": "Conditioning Scale",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 1,
"description": "ControlNet strength, depth works best at 0.2, canny works best at 0.4. Recommended range is 0.3-0.8"
},
"num_inference_steps": {
"type": "integer",
"title": "Num Inference Steps",
"default": 8,
"maximum": 28,
"minimum": 1,
"x-order": 5,
"description": "Number of inference steps"
},
"enable_hyper_flux_8_step": {
"type": "boolean",
"title": "Enable Hyper Flux 8 Step",
"default": false,
"x-order": 4,
"description": "Whether to use Hyper-FLUX.1-dev-8steps or not. If False, make sure to increase your number of inference steps"
}
}
}
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}