fofr/sdxl-lcm-video2video
- Public
- 146 runs
Run fofr/sdxl-lcm-video2video with an API
Use one of our client libraries to get started quickly. Each library links to the Playground tab, where you can tweak the inputs, see the results, and copy the corresponding code into your own project.
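As a quick illustration, here is a minimal sketch of calling this model with the Replicate Python client. The model identifier comes from this page; the input values and the `your-video.mp4` file are placeholder assumptions, and you may want to pin an explicit version string depending on your setup.

```python
# Minimal sketch: run fofr/sdxl-lcm-video2video via the Replicate Python client.
# Assumes REPLICATE_API_TOKEN is set in the environment and `pip install replicate`.
# The input values below are illustrative placeholders, not recommended settings.
import replicate

output = replicate.run(
    "fofr/sdxl-lcm-video2video",  # optionally pin a version: "fofr/sdxl-lcm-video2video:<version>"
    input={
        "video": open("your-video.mp4", "rb"),  # the only required field (see input schema below)
        "prompt": "An astronaut riding a rainbow unicorn",
        "fps": 8,
        "num_inference_steps": 4,
        "guidance_scale": 1.1,
        "prompt_strength": 0.5,
    },
)

# Per the output schema below, the result is an array of URI strings.
print(output)
```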
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
| Field | Type | Default value | Description |
|---|---|---|---|
| prompt | string | An astronaut riding a rainbow unicorn | Input prompt |
| negative_prompt | string | | Negative Prompt |
| video | string (uri) | | Video to split into frames |
| fps | integer | 8 (Min: 1) | Number of images per second of video, when not exporting all frames |
| extract_all_frames | boolean | False | Get every frame of the video. Ignores fps. Slow for large videos. |
| max_width | integer | 512 (Min: 1) | Maximum width of the video. Maintains aspect ratio. |
| num_inference_steps | integer | 4 (Min: 1, Max: 30) | Number of denoising steps |
| guidance_scale | number | 1.1 (Max: 5) | Scale for classifier-free guidance |
| prompt_strength | number | 0.5 (Max: 1) | Prompt strength. 1.0 corresponds to full destruction of information in image |
| seed | integer | | Random seed. Leave blank to randomize the seed |
| lora_scale | number | 0.6 (Max: 1) | LoRA additive scale. Only applicable on trained models. |
| lora_weights | string | | Replicate LoRA weights to use. Leave blank to use the default weights. |
| controlnet_1 | string (enum) | none | Controlnet. Options: none, edge_canny, illusion, depth_leres, depth_midas, soft_edge_pidi, soft_edge_hed, lineart, lineart_anime, openpose |
| controlnet_1_conditioning_scale | number | 0.75 (Max: 4) | How strong the controlnet conditioning is |
| controlnet_1_start | number | 0 (Max: 1) | When controlnet conditioning starts |
| controlnet_1_end | number | 1 (Max: 1) | When controlnet conditioning ends |
| controlnet_2 | string (enum) | none | Controlnet. Options: none, edge_canny, illusion, depth_leres, depth_midas, soft_edge_pidi, soft_edge_hed, lineart, lineart_anime, openpose |
| controlnet_2_conditioning_scale | number | 0.75 (Max: 4) | How strong the controlnet conditioning is |
| controlnet_2_start | number | 0 (Max: 1) | When controlnet conditioning starts |
| controlnet_2_end | number | 1 (Max: 1) | When controlnet conditioning ends |
| controlnet_3 | string (enum) | none | Controlnet. Options: none, edge_canny, illusion, depth_leres, depth_midas, soft_edge_pidi, soft_edge_hed, lineart, lineart_anime, openpose |
| controlnet_3_conditioning_scale | number | 0.75 (Max: 4) | How strong the controlnet conditioning is |
| controlnet_3_start | number | 0 (Max: 1) | When controlnet conditioning starts |
| controlnet_3_end | number | 1 (Max: 1) | When controlnet conditioning ends |
| return_frames | boolean | False | Return a tar file with all the frames alongside the video |
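The three `controlnet_*` groups are independent: each has its own type (the enum above), a conditioning scale (0 to 4), and a start/end value in [0, 1] describing when the conditioning starts and ends. A hedged sketch of an input payload that enables two of them; the specific values and the `your-video.mp4` file are arbitrary examples, not recommendations:

```python
# Illustrative input payload using the controlnet fields from the table above.
# Field names and ranges come from the schema; the values are example choices only.
controlnet_input = {
    "video": open("your-video.mp4", "rb"),      # required
    "prompt": "A watercolor painting of a city street",
    "controlnet_1": "soft_edge_hed",            # one of the enum options listed above
    "controlnet_1_conditioning_scale": 0.75,    # 0-4, how strong the conditioning is
    "controlnet_1_start": 0,                    # 0-1, when conditioning starts
    "controlnet_1_end": 1,                      # 0-1, when conditioning ends
    "controlnet_2": "depth_midas",
    "controlnet_2_conditioning_scale": 0.5,
    "controlnet_2_start": 0,
    "controlnet_2_end": 0.8,
    "controlnet_3": "none",                     # default: disabled
}
```

The same input schema, as JSON Schema: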
```json
{
"type": "object",
"title": "Input",
"required": [
"video"
],
"properties": {
"fps": {
"type": "integer",
"title": "Fps",
"default": 8,
"minimum": 1,
"x-order": 3,
"description": "Number of images per second of video, when not exporting all frames"
},
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 9,
"description": "Random seed. Leave blank to randomize the seed"
},
"video": {
"type": "string",
"title": "Video",
"format": "uri",
"x-order": 2,
"description": "Video to split into frames"
},
"prompt": {
"type": "string",
"title": "Prompt",
"default": "An astronaut riding a rainbow unicorn",
"x-order": 0,
"description": "Input prompt"
},
"max_width": {
"type": "integer",
"title": "Max Width",
"default": 512,
"minimum": 1,
"x-order": 5,
"description": "Maximum width of the video. Maintains aspect ratio."
},
"lora_scale": {
"type": "number",
"title": "Lora Scale",
"default": 0.6,
"maximum": 1,
"minimum": 0,
"x-order": 10,
"description": "LoRA additive scale. Only applicable on trained models."
},
"controlnet_1": {
"enum": [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose"
],
"type": "string",
"title": "controlnet_1",
"description": "Controlnet",
"default": "none",
"x-order": 12
},
"controlnet_2": {
"enum": [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose"
],
"type": "string",
"title": "controlnet_2",
"description": "Controlnet",
"default": "none",
"x-order": 16
},
"controlnet_3": {
"enum": [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose"
],
"type": "string",
"title": "controlnet_3",
"description": "Controlnet",
"default": "none",
"x-order": 20
},
"lora_weights": {
"type": "string",
"title": "Lora Weights",
"x-order": 11,
"description": "Replicate LoRA weights to use. Leave blank to use the default weights."
},
"return_frames": {
"type": "boolean",
"title": "Return Frames",
"default": false,
"x-order": 24,
"description": "Return a tar file with all the frames alongside the video"
},
"guidance_scale": {
"type": "number",
"title": "Guidance Scale",
"default": 1.1,
"maximum": 5,
"minimum": 0,
"x-order": 7,
"description": "Scale for classifier-free guidance"
},
"negative_prompt": {
"type": "string",
"title": "Negative Prompt",
"default": "",
"x-order": 1,
"description": "Negative Prompt"
},
"prompt_strength": {
"type": "number",
"title": "Prompt Strength",
"default": 0.5,
"maximum": 1,
"minimum": 0,
"x-order": 8,
"description": "Prompt strength. 1.0 corresponds to full destruction of information in image"
},
"controlnet_1_end": {
"type": "number",
"title": "Controlnet 1 End",
"default": 1,
"maximum": 1,
"minimum": 0,
"x-order": 15,
"description": "When controlnet conditioning ends"
},
"controlnet_2_end": {
"type": "number",
"title": "Controlnet 2 End",
"default": 1,
"maximum": 1,
"minimum": 0,
"x-order": 19,
"description": "When controlnet conditioning ends"
},
"controlnet_3_end": {
"type": "number",
"title": "Controlnet 3 End",
"default": 1,
"maximum": 1,
"minimum": 0,
"x-order": 23,
"description": "When controlnet conditioning ends"
},
"controlnet_1_start": {
"type": "number",
"title": "Controlnet 1 Start",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 14,
"description": "When controlnet conditioning starts"
},
"controlnet_2_start": {
"type": "number",
"title": "Controlnet 2 Start",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 18,
"description": "When controlnet conditioning starts"
},
"controlnet_3_start": {
"type": "number",
"title": "Controlnet 3 Start",
"default": 0,
"maximum": 1,
"minimum": 0,
"x-order": 22,
"description": "When controlnet conditioning starts"
},
"extract_all_frames": {
"type": "boolean",
"title": "Extract All Frames",
"default": false,
"x-order": 4,
"description": "Get every frame of the video. Ignores fps. Slow for large videos."
},
"num_inference_steps": {
"type": "integer",
"title": "Num Inference Steps",
"default": 4,
"maximum": 30,
"minimum": 1,
"x-order": 6,
"description": "Number of denoising steps"
},
"controlnet_1_conditioning_scale": {
"type": "number",
"title": "Controlnet 1 Conditioning Scale",
"default": 0.75,
"maximum": 4,
"minimum": 0,
"x-order": 13,
"description": "How strong the controlnet conditioning is"
},
"controlnet_2_conditioning_scale": {
"type": "number",
"title": "Controlnet 2 Conditioning Scale",
"default": 0.75,
"maximum": 4,
"minimum": 0,
"x-order": 17,
"description": "How strong the controlnet conditioning is"
},
"controlnet_3_conditioning_scale": {
"type": "number",
"title": "Controlnet 3 Conditioning Scale",
"default": 0.75,
"maximum": 4,
"minimum": 0,
"x-order": 21,
"description": "How strong the controlnet conditioning is"
}
}
}
```
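Because the block above is standard JSON Schema, you can validate a candidate input locally before sending a request. A minimal sketch, assuming the schema has been saved to `input_schema.json` and the `jsonschema` package is installed (both are assumptions, not part of this page):

```python
# Sketch: validate a candidate input against the JSON Schema above.
# Assumes the schema was saved to input_schema.json and `pip install jsonschema`.
import json

from jsonschema import ValidationError, validate

with open("input_schema.json") as f:
    schema = json.load(f)

candidate = {
    "video": "https://example.com/clip.mp4",  # required; a URI string
    "prompt": "An astronaut riding a rainbow unicorn",
    "fps": 8,
}

try:
    validate(instance=candidate, schema=schema)
    print("input is valid")
except ValidationError as err:
    print(f"invalid input: {err.message}")
```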
Output schema
The shape of the response you’ll get when you run this model with an API.
```json
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}
```
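Since the output is an array of URI strings, a common follow-up is to download each returned file. A short sketch using only the standard library; the `output` list here is a placeholder standing in for the value returned by the `replicate.run` call shown earlier:

```python
# Sketch: download the files referenced by the output URIs.
# `output` stands in for the list of URI strings this model returns.
import os
import urllib.request

output = [
    "https://example.com/out/video.mp4",  # placeholder URI for illustration
]

for uri in output:
    filename = os.path.basename(uri.split("?")[0]) or "output.bin"
    urllib.request.urlretrieve(uri, filename)
    print(f"saved {filename}")
```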