victor-upmeet/whisperx-a100-80gb (Public · 7.4K runs)
Run victor-upmeet/whisperx-a100-80gb with an API
Use one of our client libraries to get started quickly. Clicking on a library will take you to the Playground tab where you can tweak different inputs, see the results, and copy the corresponding code to use in your own project.
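For example, a minimal sketch with the Python client. The audio URL is a placeholder, and you may need to pin a specific model version by appending a `:<version>` hash to the model identifier:

```python
# Minimal sketch using the Replicate Python client (pip install replicate).
# Assumes REPLICATE_API_TOKEN is set in the environment; the audio URL is a
# placeholder, not a real file.
import replicate

output = replicate.run(
    "victor-upmeet/whisperx-a100-80gb",
    input={
        "audio_file": "https://example.com/interview.mp3",  # placeholder URL
        "align_output": True,  # word-level timestamps
    },
)

print(output["detected_language"])
```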
Input schema
The fields you can use to run this model with an API. If you don't give a value for a field, its default value will be used.
| Field | Type | Default value | Description |
|---|---|---|---|
| audio_file | string | | Audio file |
| language | string | | ISO code of the language spoken in the audio, specify None to perform language detection |
| language_detection_min_prob | number | 0 | If language is not specified, then the language will be detected recursively on different parts of the file until it reaches the given probability |
| language_detection_max_tries | integer | 5 | If language is not specified, then the language will be detected following the logic of language_detection_min_prob parameter, but will stop after the given max retries. If max retries is reached, the most probable language is kept. |
| initial_prompt | string | | Optional text to provide as a prompt for the first window |
| batch_size | integer | 64 | Parallelization of input audio transcription |
| temperature | number | 0 | Temperature to use for sampling |
| vad_onset | number | 0.5 | VAD onset |
| vad_offset | number | 0.363 | VAD offset |
| align_output | boolean | False | Aligns whisper output to get accurate word-level timestamps |
| diarization | boolean | False | Assign speaker ID labels |
| huggingface_access_token | string | | To enable diarization, please enter your HuggingFace token (read). You need to accept the user agreement for the models specified in the README. |
| min_speakers | integer | | Minimum number of speakers if diarization is activated (leave blank if unknown) |
| max_speakers | integer | | Maximum number of speakers if diarization is activated (leave blank if unknown) |
| debug | boolean | False | Print out compute/inference times and memory usage information |
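To show how the diarization-related fields fit together, here is a hypothetical input payload. The audio URL is a placeholder and HF_TOKEN is an assumed environment variable holding a read-scope HuggingFace token:

```python
import os

# Hypothetical input payload enabling alignment and diarization.
# Diarization needs a read-scope HuggingFace token, and you must have accepted
# the user agreements for the models listed in the README; min_speakers and
# max_speakers can simply be omitted when unknown.
diarization_input = {
    "audio_file": "https://example.com/panel-discussion.wav",  # placeholder
    "language": "en",           # skip language detection
    "align_output": True,       # word-level timestamps
    "diarization": True,
    "huggingface_access_token": os.environ["HF_TOKEN"],  # assumed env var
    "min_speakers": 2,
    "max_speakers": 4,
}
```

The raw JSON schema for these fields follows.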
{
"type": "object",
"title": "Input",
"required": [
"audio_file"
],
"properties": {
"debug": {
"type": "boolean",
"title": "Debug",
"default": false,
"x-order": 14,
"description": "Print out compute/inference times and memory usage information"
},
"language": {
"type": "string",
"title": "Language",
"x-order": 1,
"description": "ISO code of the language spoken in the audio, specify None to perform language detection"
},
"vad_onset": {
"type": "number",
"title": "Vad Onset",
"default": 0.5,
"x-order": 7,
"description": "VAD onset"
},
"audio_file": {
"type": "string",
"title": "Audio File",
"format": "uri",
"x-order": 0,
"description": "Audio file"
},
"batch_size": {
"type": "integer",
"title": "Batch Size",
"default": 64,
"x-order": 5,
"description": "Parallelization of input audio transcription"
},
"vad_offset": {
"type": "number",
"title": "Vad Offset",
"default": 0.363,
"x-order": 8,
"description": "VAD offset"
},
"diarization": {
"type": "boolean",
"title": "Diarization",
"default": false,
"x-order": 10,
"description": "Assign speaker ID labels"
},
"temperature": {
"type": "number",
"title": "Temperature",
"default": 0,
"x-order": 6,
"description": "Temperature to use for sampling"
},
"align_output": {
"type": "boolean",
"title": "Align Output",
"default": false,
"x-order": 9,
"description": "Aligns whisper output to get accurate word-level timestamps"
},
"max_speakers": {
"type": "integer",
"title": "Max Speakers",
"x-order": 13,
"description": "Maximum number of speakers if diarization is activated (leave blank if unknown)"
},
"min_speakers": {
"type": "integer",
"title": "Min Speakers",
"x-order": 12,
"description": "Minimum number of speakers if diarization is activated (leave blank if unknown)"
},
"initial_prompt": {
"type": "string",
"title": "Initial Prompt",
"x-order": 4,
"description": "Optional text to provide as a prompt for the first window"
},
"huggingface_access_token": {
"type": "string",
"title": "Huggingface Access Token",
"x-order": 11,
"description": "To enable diarization, please enter your HuggingFace token (read). You need to accept the user agreement for the models specified in the README."
},
"language_detection_min_prob": {
"type": "number",
"title": "Language Detection Min Prob",
"default": 0,
"x-order": 2,
"description": "If language is not specified, then the language will be detected recursively on different parts of the file until it reaches the given probability"
},
"language_detection_max_tries": {
"type": "integer",
"title": "Language Detection Max Tries",
"default": 5,
"x-order": 3,
"description": "If language is not specified, then the language will be detected following the logic of language_detection_min_prob parameter, but will stop after the given max retries. If max retries is reached, the most probable language is kept."
}
}
}
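Because this is plain JSON Schema (the x-order keys are extensions that validators ignore), you can check a payload locally before calling the API. A sketch using the third-party jsonschema package, with a trimmed copy of the schema above:

```python
# Sketch: validate a payload locally before sending it to the API.
# Requires the third-party "jsonschema" package (pip install jsonschema).
from jsonschema import ValidationError, validate

# Trimmed copy of the input schema above, just enough for this check.
input_schema = {
    "type": "object",
    "required": ["audio_file"],
    "properties": {
        "audio_file": {"type": "string", "format": "uri"},
        "batch_size": {"type": "integer"},
        "diarization": {"type": "boolean"},
    },
}

payload = {"audio_file": "https://example.com/clip.mp3", "batch_size": "64"}  # wrong type on purpose

try:
    validate(instance=payload, schema=input_schema)
    print("payload is valid")
except ValidationError as err:
    print(f"invalid input: {err.message}")  # flags batch_size as not an integer
```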
Output schema
The shape of the response you’ll get when you run this model with an API.
Schema
{
"type": "object",
"title": "Output",
"required": [
"detected_language"
],
"properties": {
"segments": {
"title": "Segments"
},
"detected_language": {
"type": "string",
"title": "Detected Language"
}
}
}
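A short sketch of reading the response. Only detected_language is typed and required; segments is left untyped, so the per-segment keys used below (start, end, text) are assumptions based on typical WhisperX output, not guarantees of this schema:

```python
# Sketch of consuming a prediction result. "detected_language" is the only
# typed/required field; the per-segment keys below are assumptions, since
# "segments" is untyped in the output schema.
def summarize(output: dict) -> None:
    print(f"detected language: {output['detected_language']}")
    for seg in output.get("segments") or []:
        print(f"[{seg.get('start')} - {seg.get('end')}] {seg.get('text', '')}")

summarize({
    "detected_language": "en",
    "segments": [{"start": 0.0, "end": 2.5, "text": "Hello world."}],
})
```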