victor-upmeet/whisperx-full
Public · 1.3K runs
Run victor-upmeet/whisperx-full with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
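For example, a minimal transcription call with the Python client might look like the sketch below. The audio URL is a placeholder, `REPLICATE_API_TOKEN` must be set in your environment, and in production you would typically pin an explicit version hash rather than relying on the latest version:

```python
import replicate

# replicate.run() resolves "owner/name" to the model's latest public
# version; pin "owner/name:<version-hash>" for reproducible runs.
output = replicate.run(
    "victor-upmeet/whisperx-full",
    input={
        "audio_file": "https://example.com/interview.mp3",  # placeholder URL
        "language": "en",      # omit to auto-detect the language
        "align_output": True,  # word-level timestamps
    },
)
print(output["detected_language"])
```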
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field its default value will be used.
| Field | Type | Default value | Description |
|---|---|---|---|
| audio_file | string | | Audio file |
| language | string | | ISO code of the language spoken in the audio; specify None to perform language detection |
| initial_prompt | string | | Optional text to provide as a prompt for the first window |
| batch_size | integer | 64 | Parallelization of input audio transcription |
| temperature | number | 0 | Temperature to use for sampling |
| vad_onset | number | 0.5 | VAD onset |
| vad_offset | number | 0.363 | VAD offset |
| align_output | boolean | False | Aligns whisper output to get accurate word-level timestamps |
| diarization | boolean | False | Assign speaker ID labels |
| huggingface_access_token | string | | To enable diarization, enter your HuggingFace token (read). You need to accept the user agreement for the models specified in the README. |
| min_speakers | integer | | Minimum number of speakers if diarization is activated (leave blank if unknown) |
| max_speakers | integer | | Maximum number of speakers if diarization is activated (leave blank if unknown) |
| debug | boolean | False | Print out compute/inference times and memory usage information |
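Diarization shows how several of these fields interact: enabling it requires a read-scoped HuggingFace token, and the speaker-count bounds are optional hints. A sketch with placeholder token and file values:

```python
import replicate

output = replicate.run(
    "victor-upmeet/whisperx-full",
    input={
        "audio_file": "https://example.com/meeting.wav",  # placeholder URL
        "diarization": True,
        "huggingface_access_token": "hf_...",  # your read-scoped token
        "min_speakers": 2,   # optional hint; omit if unknown
        "max_speakers": 4,   # optional hint; omit if unknown
        "align_output": True,
    },
)
```

The full JSON Schema for these inputs follows.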
{
"type": "object",
"title": "Input",
"required": [
"audio_file"
],
"properties": {
"debug": {
"type": "boolean",
"title": "Debug",
"default": false,
"x-order": 12,
"description": "Print out compute/inference times and memory usage information"
},
"language": {
"type": "string",
"title": "Language",
"x-order": 1,
"description": "ISO code of the language spoken in the audio, specify None to perform language detection"
},
"vad_onset": {
"type": "number",
"title": "Vad Onset",
"default": 0.5,
"x-order": 5,
"description": "VAD onset"
},
"audio_file": {
"type": "string",
"title": "Audio File",
"format": "uri",
"x-order": 0,
"description": "Audio file"
},
"batch_size": {
"type": "integer",
"title": "Batch Size",
"default": 64,
"x-order": 3,
"description": "Parallelization of input audio transcription"
},
"vad_offset": {
"type": "number",
"title": "Vad Offset",
"default": 0.363,
"x-order": 6,
"description": "VAD offset"
},
"diarization": {
"type": "boolean",
"title": "Diarization",
"default": false,
"x-order": 8,
"description": "Assign speaker ID labels"
},
"temperature": {
"type": "number",
"title": "Temperature",
"default": 0,
"x-order": 4,
"description": "Temperature to use for sampling"
},
"align_output": {
"type": "boolean",
"title": "Align Output",
"default": false,
"x-order": 7,
"description": "Aligns whisper output to get accurate word-level timestamps"
},
"max_speakers": {
"type": "integer",
"title": "Max Speakers",
"x-order": 11,
"description": "Maximum number of speakers if diarization is activated (leave blank if unknown)"
},
"min_speakers": {
"type": "integer",
"title": "Min Speakers",
"x-order": 10,
"description": "Minimum number of speakers if diarization is activated (leave blank if unknown)"
},
"initial_prompt": {
"type": "string",
"title": "Initial Prompt",
"x-order": 2,
"description": "Optional text to provide as a prompt for the first window"
},
"huggingface_access_token": {
"type": "string",
"title": "Huggingface Access Token",
"x-order": 9,
"description": "To enable diarization, please enter your HuggingFace token (read). You need to accept the user agreement for the models specified in the README."
}
}
}
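Because this is standard JSON Schema, a payload can be validated client-side before a prediction is created. A minimal sketch using the jsonschema package, assuming the schema above has been saved locally as input_schema.json:

```python
import json

from jsonschema import ValidationError, validate

with open("input_schema.json") as f:  # the schema above, saved locally
    schema = json.load(f)

payload = {
    "audio_file": "https://example.com/clip.mp3",  # placeholder URL
    "batch_size": 64,
}

try:
    validate(instance=payload, schema=schema)  # checks required fields and types
    print("payload is valid")
except ValidationError as err:
    print(f"invalid input: {err.message}")
```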
Output schema
The shape of the response you’ll get when you run this model with an API.
Schema
{
"type": "object",
"title": "ModelOutput",
"required": [
"detected_language"
],
"properties": {
"segments": {
"title": "Segments"
},
"detected_language": {
"type": "string",
"title": "Detected Language"
}
}
}
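Note that the schema leaves segments untyped. In WhisperX output, each segment conventionally carries start, end, and text fields (plus word-level timing when align_output is enabled), so treat the segment shape in this sketch as an assumption to verify against a real response:

```python
import replicate

output = replicate.run(
    "victor-upmeet/whisperx-full",
    input={"audio_file": "https://example.com/clip.mp3"},  # placeholder URL
)

print("detected language:", output["detected_language"])

# The {start, end, text} segment fields below are assumed from typical
# WhisperX output; the schema itself does not specify them.
for seg in output.get("segments") or []:
    print(f"[{seg['start']:.2f}s - {seg['end']:.2f}s] {seg['text']}")
```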