
phxdev1/multi-lora-wan
Run phxdev1/multi-lora-wan with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
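For example, a minimal call with the Python client might look like the sketch below. It assumes the `replicate` package is installed and `REPLICATE_API_TOKEN` is set in your environment; the prompt and input values are placeholders, and you may need to pin a specific model version in the identifier.

```python
# Minimal sketch using the Replicate Python client (pip install replicate).
# Assumes REPLICATE_API_TOKEN is set; you may need to append a version hash,
# e.g. "phxdev1/multi-lora-wan:<version>".
import replicate

output = replicate.run(
    "phxdev1/multi-lora-wan",
    input={
        "prompt": "a red fox running through snowy woods, cinematic lighting",
        "model": "14b",
        "resolution": "480p",
        "frames": 81,
    },
)

# Per the output schema below, the result is an array of URIs
# pointing to the generated video file(s).
print(output)
```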
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
Field | Type | Default value | Description |
---|---|---|---|
prompt | string | | Text prompt for video generation |
negative_prompt | string | | Things you do not want to see in your video |
image | string | | Image to use as a starting frame for image-to-video generation |
aspect_ratio | string | 16:9 | The aspect ratio of the video: 16:9, 9:16, or 1:1 |
frames | integer | 81 | The number of frames to generate (1 to 5 seconds) |
model | string | 14b | The model to use. 1.3b is faster, but 14b is better quality. A LoRA works with either 1.3b or 14b, depending on the version it was trained on. |
resolution | string | 480p | The resolution of the video. 720p is not supported for 1.3b. |
lora_url | string | | Optional: the URL of a LoRA to use (for single-LoRA compatibility) |
lora_strength_model | number | 1 | Strength of the LoRA applied to the model. 0.0 is no LoRA (for single-LoRA compatibility). |
lora_strength_clip | number | 1 | Strength of the LoRA applied to the CLIP model. 0.0 is no LoRA (for single-LoRA compatibility). |
loras | string | | JSON string of LoRAs to apply. Format: "[{'url': 'lora1.safetensors', 'strength_model': 1.0, 'strength_clip': 1.0, 'enabled': true}]". See the example below the table. |
enable_lora_memory_management | boolean | False | Use progressive LoRA chaining (reduces VRAM) vs. all-at-once chaining (faster but more VRAM) |
fast_mode | string | Balanced | Speed up generation with different levels of acceleration. V2.1 mode uses LCM sampling for maximum speed. See the V2.1 example after the schema below. |
sample_steps | integer | 30 (min: 1, max: 60) | Number of generation steps. Fewer steps means faster generation, at the expense of output quality. 30 steps is sufficient for most prompts. |
sample_guide_scale | number | 5 (min: 0, max: 10) | Higher guide scale makes prompt adherence better, but can reduce variation |
sample_shift | number | 8 (min: 0, max: 10) | Sample shift factor |
seed | integer | | Set a seed for reproducibility. Random by default. |
interpolation_multiplier | integer | 1 (min: 1, max: 4) | Frame interpolation multiplier for smoother video (V2.1 feature). 1 = no interpolation, 2 = double frames, etc. |
output_fps | number | 16 (min: 8, max: 60) | Target output FPS for interpolated video (V2.1 feature) |
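Because the `loras` field is a JSON string rather than a list, it is easiest to build it with `json.dumps`. Here is a hedged sketch: the LoRA URLs are placeholders, and it is an assumption that the endpoint accepts strict double-quoted JSON in addition to the single-quoted form shown in the format string.

```python
# Hypothetical sketch of building the `loras` input as a JSON string.
# The URLs below are placeholders, not real LoRA files.
import json

loras = json.dumps([
    {
        "url": "https://example.com/style-lora.safetensors",   # placeholder URL
        "strength_model": 1.0,
        "strength_clip": 1.0,
        "enabled": True,
    },
    {
        "url": "https://example.com/motion-lora.safetensors",  # placeholder URL
        "strength_model": 0.6,
        "strength_clip": 0.6,
        "enabled": True,
    },
])

input_payload = {
    "prompt": "a timelapse of clouds over a mountain ridge",
    "loras": loras,                          # JSON string, not a Python list
    "enable_lora_memory_management": True,   # progressive chaining to reduce VRAM
}
```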
{
"type": "object",
"title": "Input",
"required": [
"prompt"
],
"properties": {
"seed": {
"type": "integer",
"title": "Seed",
"x-order": 16,
"description": "Set a seed for reproducibility. Random by default."
},
"image": {
"type": "string",
"title": "Image",
"format": "uri",
"x-order": 2,
"description": "Image to use as a starting frame for image to video generation."
},
"loras": {
"type": "string",
"title": "Loras",
"x-order": 10,
"description": "JSON string of LoRAs to apply. Format: \"[{'url': 'lora1.safetensors', 'strength_model': 1.0, 'strength_clip': 1.0, 'enabled': true}]\""
},
"model": {
"enum": [
"1.3b",
"14b"
],
"type": "string",
"title": "model",
"description": "The model to use. 1.3b is faster, but 14b is better quality. A LORA either works with 1.3b or 14b, depending on the version it was trained on.",
"default": "14b",
"x-order": 5
},
"frames": {
"enum": [
17,
33,
49,
65,
81
],
"type": "integer",
"title": "frames",
"description": "The number of frames to generate (1 to 5 seconds)",
"default": 81,
"x-order": 4
},
"prompt": {
"type": "string",
"title": "Prompt",
"x-order": 0,
"description": "Text prompt for video generation"
},
"lora_url": {
"type": "string",
"title": "Lora Url",
"x-order": 7,
"description": "Optional: The URL of a LORA to use (for single LoRA compatibility)"
},
"fast_mode": {
"enum": [
"Off",
"Balanced",
"Fast",
"V2.1"
],
"type": "string",
"title": "fast_mode",
"description": "Speed up generation with different levels of acceleration. V2.1 mode uses LCM sampling for maximum speed.",
"default": "Balanced",
"x-order": 12
},
"output_fps": {
"type": "number",
"title": "Output Fps",
"default": 16,
"maximum": 60,
"minimum": 8,
"x-order": 18,
"description": "Target output FPS for interpolated video (V2.1 feature)"
},
"resolution": {
"enum": [
"480p",
"720p"
],
"type": "string",
"title": "resolution",
"description": "The resolution of the video. 720p is not supported for 1.3b.",
"default": "480p",
"x-order": 6
},
"aspect_ratio": {
"enum": [
"16:9",
"9:16",
"1:1"
],
"type": "string",
"title": "aspect_ratio",
"description": "The aspect ratio of the video. 16:9, 9:16, 1:1, etc.",
"default": "16:9",
"x-order": 3
},
"sample_shift": {
"type": "number",
"title": "Sample Shift",
"default": 8,
"maximum": 10,
"minimum": 0,
"x-order": 15,
"description": "Sample shift factor"
},
"sample_steps": {
"type": "integer",
"title": "Sample Steps",
"default": 30,
"maximum": 60,
"minimum": 1,
"x-order": 13,
"description": "Number of generation steps. Fewer steps means faster generation, at the expensive of output quality. 30 steps is sufficient for most prompts"
},
"negative_prompt": {
"type": "string",
"title": "Negative Prompt",
"default": "",
"x-order": 1,
"description": "Things you do not want to see in your video"
},
"lora_strength_clip": {
"type": "number",
"title": "Lora Strength Clip",
"default": 1,
"x-order": 9,
"description": "Strength of the LORA applied to the CLIP model. 0.0 is no LORA (for single LoRA compatibility)."
},
"sample_guide_scale": {
"type": "number",
"title": "Sample Guide Scale",
"default": 5,
"maximum": 10,
"minimum": 0,
"x-order": 14,
"description": "Higher guide scale makes prompt adherence better, but can reduce variation"
},
"lora_strength_model": {
"type": "number",
"title": "Lora Strength Model",
"default": 1,
"x-order": 8,
"description": "Strength of the LORA applied to the model. 0.0 is no LORA (for single LoRA compatibility)."
},
"interpolation_multiplier": {
"type": "integer",
"title": "Interpolation Multiplier",
"default": 1,
"maximum": 4,
"minimum": 1,
"x-order": 17,
"description": "Frame interpolation multiplier for smoother video (V2.1 feature). 1 = no interpolation, 2 = double frames, etc."
},
"enable_lora_memory_management": {
"type": "boolean",
"title": "Enable Lora Memory Management",
"default": false,
"x-order": 11,
"description": "Use progressive LoRA chaining (reduces VRAM) vs all-at-once chaining (faster but more VRAM)"
}
}
}
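The V2.1-related fields work together: setting `fast_mode` to `V2.1` switches to LCM sampling, while `interpolation_multiplier` and `output_fps` control frame interpolation of the result. The sketch below shows one way to combine them; the exact relationship between the multiplier, the final frame count, and the output duration is inferred from the field descriptions above rather than confirmed by the schema, and the chosen values are illustrative only.

```python
# Hypothetical input exercising the V2.1 acceleration and interpolation options.
# Assumes interpolation roughly multiplies the generated frames (81 * 2) before
# they are written out at `output_fps`; this is inferred from the field
# descriptions, not confirmed by the schema.
input_payload = {
    "prompt": "waves rolling onto a black-sand beach at dusk",
    "model": "1.3b",                  # faster model; note 720p is not supported here
    "resolution": "480p",
    "frames": 81,
    "fast_mode": "V2.1",              # LCM sampling for maximum speed
    "sample_steps": 8,                # fewer steps, leaning on LCM-style sampling
    "interpolation_multiplier": 2,    # double the frames for smoother motion
    "output_fps": 32,
}
```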
Output schema
The shape of the response you’ll get when you run this model with an API.
{
"type": "array",
"items": {
"type": "string",
"format": "uri"
},
"title": "Output"
}
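Because the output is an array of URIs, downloading the result can be as simple as the sketch below. It assumes the `output` list returned by the earlier `replicate.run` call, and the `.mp4` extension is an assumption about the container format.

```python
# Sketch of consuming the output: a list of URI strings per the schema above.
# Depending on the client version, items may be plain strings or file objects
# that stringify to URLs; str() covers both cases.
import urllib.request

for i, item in enumerate(output):   # `output` from the earlier replicate.run call
    url = str(item)
    urllib.request.urlretrieve(url, f"video_{i}.mp4")
    print(f"saved video_{i}.mp4 from {url}")
```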