Mirror of https://github.com/m-bain/whisperX.git
Synced 2025-07-01 18:17:27 -04:00

Compare commits: 64 commits, v3.3.1 ... b93e9b6f57

Commits (SHA1):
b93e9b6f57
844736e4e4
220fec9aea
1631c3040f
d700b56c9c
b343241253
6fe0a8784a
5012650d0f
108bd0c400
b2d50a027b
36d552cad3
7d36b832f9
d2a493e910
f5b40b5366
ac0c8bd79a
cd59f21d1a
0aed874589
f10dbf6ab1
a7564c2ad6
e7712f496e
8e53866704
3205436d58
8c58c54635
0d9807adc5
4db839018c
f8d11df727
d2f0e53f71
7489ebf876
90256cc481
b41ebd4871
63bc1903c1
272714e07d
44e8bf5bb6
7b3c9ce629
36d2622e27
8bfa12193b
acbeba6057
fca563a782
2117909bf6
de0d8fe313
355f8e06f7
86e2b3ee74
70c639cdb5
235536e28d
12604a48ea
ffbc73664c
289eadfc76
22a93f2932
1027367b79
5e54b872a9
6be02cccfa
2f93e029c7
024bc8481b
f286e7f3de
73e644559d
1ec527375a
6695426a85
7a98456321
aaddb83aa5
c288f4812a
4ebfb078c5
65b2332e13
69281f3a29
79eb8fa53d
.github/workflows/build-and-release.yml (vendored) — 23 changed lines

@@ -11,25 +11,24 @@ jobs:

- name: Checkout
  uses: actions/checkout@v4

- name: Set up Python
  uses: actions/setup-python@v5
- name: Install uv
  uses: astral-sh/setup-uv@v5
  with:
    version: "0.5.14"
    python-version: "3.9"

- name: Install dependencies
  run: |
    python -m pip install build
- name: Check if lockfile is up to date
  run: uv lock --check

- name: Build wheels
  run: python -m build --wheel
- name: Build package
  run: uv build

- name: Release to Github
  uses: softprops/action-gh-release@v2
  with:
    files: dist/*
    files: dist/*.whl

- name: Publish package to PyPi
  uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
  with:
    user: __token__
    password: ${{ secrets.PYPI_API_TOKEN }}
  run: uv publish
  env:
    UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
.github/workflows/python-compatibility.yml (vendored) — 18 changed lines

@@ -5,7 +5,7 @@ on:

  branches: [main]
pull_request:
  branches: [main]
workflow_dispatch: # Allows manual triggering from GitHub UI
workflow_dispatch: # Allows manual triggering from GitHub UI

jobs:
  test:

@@ -17,16 +17,18 @@ jobs:

steps:
- uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
  uses: actions/setup-python@v5
- name: Install uv
  uses: astral-sh/setup-uv@v5
  with:
    version: "0.5.14"
    python-version: ${{ matrix.python-version }}

- name: Install package
  run: |
    python -m pip install --upgrade pip
    pip install .
- name: Check if lockfile is up to date
  run: uv lock --check

- name: Install the project
  run: uv sync --all-extras

- name: Test import
  run: |
    python -c "import whisperx; print('Successfully imported whisperx')"
    uv run python -c "import whisperx; print('Successfully imported whisperx')"
.github/workflows/tmp.yml (vendored, deleted) — 35 changed lines

@@ -1,35 +0,0 @@

name: Python Compatibility Test (PyPi)

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  workflow_dispatch: # Allows manual triggering from GitHub UI

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install package
        run: |
          pip install whisperx

      - name: Print packages
        run: |
          pip list

      - name: Test import
        run: |
          python -c "import whisperx; print('Successfully imported whisperx')"
README.md — 152 changed lines

@@ -22,16 +22,12 @@

</a>
</p>

<img width="1216" align="center" alt="whisperx-arch" src="https://raw.githubusercontent.com/m-bain/whisperX/refs/heads/main/figures/pipeline.png">

<!-- <p align="left">Whisper-Based Automatic Speech Recognition (ASR) with improved timestamp accuracy + quality via forced phoneme alignment and voice-activity based batching for fast inference.</p> -->

<!-- <h2 align="left", id="what-is-it">What is it 🔎</h2> -->

This repository provides fast automatic speech recognition (70x realtime with large-v2) with word-level timestamps and speaker diarization.

- ⚡️ Batched inference for 70x realtime transcription using whisper large-v2

@@ -40,8 +36,6 @@ This repository provides fast automatic speech recognition (70x realtime with la

- 👯♂️ Multispeaker ASR using speaker diarization from [pyannote-audio](https://github.com/pyannote/pyannote-audio) (speaker ID labels)
- 🗣️ VAD preprocessing, reduces hallucination & batching with no WER degradation

**Whisper** is an ASR model [developed by OpenAI](https://github.com/openai/whisper), trained on a large dataset of diverse audio. Whilst it produces highly accurate transcriptions, the corresponding timestamps are at the utterance level, not per word, and can be inaccurate by several seconds. OpenAI's whisper does not natively support batching.

**Phoneme-Based ASR** A suite of models finetuned to recognise the smallest unit of speech distinguishing one word from another, e.g. the element p in "tap". A popular example model is [wav2vec2.0](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self).
@@ -54,85 +48,90 @@ This repository provides fast automatic speech recognition (70x realtime with la

<h2 align="left", id="highlights">New🚨</h2>

- 1st place at [Ego4d transcription challenge](https://eval.ai/web/challenges/challenge-page/1637/leaderboard/3931/WER) 🏆
- 1st place at [Ego4d transcription challenge](https://eval.ai/web/challenges/challenge-page/1637/leaderboard/3931/WER) 🏆
- _WhisperX_ accepted at INTERSPEECH 2023
- v3 transcript segment-per-sentence: using nltk sent_tokenize for better subtitling & better diarization
- v3 released, 70x speed-up open-sourced. Using batched whisper with [faster-whisper](https://github.com/guillaumekln/faster-whisper) backend!
- v2 released: code cleanup, imports whisper library; VAD filtering is now turned on by default, as in the paper.
- Paper drop🎓👨🏫! Please see our [ArXiv preprint](https://arxiv.org/abs/2303.00747) for benchmarking and details of WhisperX. We also introduce more efficient batch inference resulting in large-v2 with *60-70x REAL TIME speed.
- Paper drop🎓👨🏫! Please see our [ArXiv preprint](https://arxiv.org/abs/2303.00747) for benchmarking and details of WhisperX. We also introduce more efficient batch inference resulting in large-v2 with \*60-70x REAL TIME speed.

<h2 align="left" id="setup">Setup ⚙️</h2>
Tested for PyTorch 2.0, Python 3.10 (use other versions at your own risk!)

GPU execution requires the NVIDIA libraries cuBLAS 11.x and cuDNN 8.x to be installed on the system. Please refer to the [CTranslate2 documentation](https://opennmt.net/CTranslate2/installation.html).
### 1. Simple Installation (Recommended)

### 1. Create Python3.10 environment

`conda create --name whisperx python=3.10`

`conda activate whisperx`

### 2. Install PyTorch, e.g. for Linux and Windows CUDA11.8:

`conda install pytorch==2.0.0 torchaudio==2.0.0 pytorch-cuda=11.8 -c pytorch -c nvidia`

See other methods [here.](https://pytorch.org/get-started/previous-versions/#v200)

### 3. Install WhisperX

You have several installation options:

#### Option A: Stable Release (recommended)
Install the latest stable version from PyPI:
The easiest way to install WhisperX is through PyPi:

```bash
pip install whisperx
```

#### Option B: Development Version
Install the latest development version directly from GitHub (may be unstable):
Or if using [uvx](https://docs.astral.sh/uv/guides/tools/#running-tools):

```bash
pip install git+https://github.com/m-bain/whisperx.git
uvx whisperx
```

If already installed, update to the most recent commit:
### 2. Advanced Installation Options

These installation methods are for developers or users with specific needs. If you're not sure, stick with the simple installation above.

#### Option A: Install from GitHub

To install directly from the GitHub repository:

```bash
pip install git+https://github.com/m-bain/whisperx.git --upgrade
uvx git+https://github.com/m-bain/whisperX.git
```

#### Option C: Development Mode
If you wish to modify the package, clone and install in editable mode:
#### Option B: Developer Installation

If you want to modify the code or contribute to the project:

```bash
git clone https://github.com/m-bain/whisperX.git
cd whisperX
pip install -e .
uv sync --all-extras --dev
```

> **Note**: The development version may contain experimental features and bugs. Use the stable PyPI release for production environments.

You may also need to install ffmpeg, rust, etc. Follow the OpenAI instructions here: https://github.com/openai/whisper#setup.

### Common Issues & Troubleshooting 🔧

#### libcudnn Dependencies (GPU Users)

If you're using WhisperX with GPU support and encounter errors like:

- `Could not load library libcudnn_ops_infer.so.8`
- `Unable to load any of {libcudnn_cnn.so.9.1.0, libcudnn_cnn.so.9.1, libcudnn_cnn.so.9, libcudnn_cnn.so}`
- `libcudnn_ops_infer.so.8: cannot open shared object file: No such file or directory`

This means your system is missing the CUDA Deep Neural Network library (cuDNN). This library is needed for GPU acceleration but isn't always installed by default.

**Install cuDNN (example for apt-based systems):**

```bash
sudo apt update
sudo apt install libcudnn8 libcudnn8-dev -y
```
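After installing the library, a quick way to confirm that PyTorch can actually see both the GPU and a cuDNN build is the following sketch (standard PyTorch calls, not WhisperX-specific):

```python
import torch

print("CUDA available:", torch.cuda.is_available())
print("cuDNN version:", torch.backends.cudnn.version())  # None means cuDNN was not found
```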
### Speaker Diarization

To **enable Speaker Diarization**, pass your Hugging Face access token (with read permission), which you can generate [here](https://huggingface.co/settings/tokens), via the `--hf_token` argument, and accept the user agreement for the following models: [Segmentation](https://huggingface.co/pyannote/segmentation-3.0) and [Speaker-Diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1) (if you choose to use Speaker-Diarization 2.x, follow the requirements [here](https://huggingface.co/pyannote/speaker-diarization) instead).

> **Note**<br>
> As of Oct 11, 2023, there is a known issue regarding slow performance with pyannote/Speaker-Diarization-3.0 in whisperX. It is due to dependency conflicts between faster-whisper and pyannote-audio 3.0.0. Please see [this issue](https://github.com/m-bain/whisperX/issues/499) for more details and potential workarounds.

<h2 align="left" id="example">Usage 💬 (command line)</h2>

### English

Run whisper on an example segment (using default params, whisper small). Add `--highlight_words True` to visualise word timings in the .srt file.

    whisperx examples/sample01.wav
    whisperx path/to/audio.wav

Result using *WhisperX* with forced alignment to wav2vec2.0 large:
Result using _WhisperX_ with forced alignment to wav2vec2.0 large:

https://user-images.githubusercontent.com/36994049/208253969-7e35fe2a-7541-434a-ae91-8e919540555d.mp4
@@ -140,37 +139,34 @@ Compare this to original whisper out the box, where many transcriptions are out

https://user-images.githubusercontent.com/36994049/207743923-b4f0d537-29ae-4be2-b404-bb941db73652.mov

For increased timestamp accuracy, at the cost of higher GPU memory, use bigger models (a bigger alignment model was not found to be that helpful, see the paper), e.g.

    whisperx examples/sample01.wav --model large-v2 --align_model WAV2VEC2_ASR_LARGE_LV60K_960H --batch_size 4
    whisperx path/to/audio.wav --model large-v2 --align_model WAV2VEC2_ASR_LARGE_LV60K_960H --batch_size 4

To label the transcript with speaker IDs (set the number of speakers if known, e.g. `--min_speakers 2` `--max_speakers 2`):

    whisperx examples/sample01.wav --model large-v2 --diarize --highlight_words True
    whisperx path/to/audio.wav --model large-v2 --diarize --highlight_words True

To run on CPU instead of GPU (and for running on Mac OS X):

    whisperx examples/sample01.wav --compute_type int8
    whisperx path/to/audio.wav --compute_type int8

### Other languages

The phoneme ASR alignment model is *language-specific*; for tested languages these models are [automatically picked from torchaudio pipelines or huggingface](https://github.com/m-bain/whisperX/blob/e909f2f766b23b2000f2d95df41f9b844ac53e49/whisperx/transcribe.py#L22).
The phoneme ASR alignment model is _language-specific_; for tested languages these models are [automatically picked from torchaudio pipelines or huggingface](https://github.com/m-bain/whisperX/blob/f2da2f858e99e4211fe4f64b5f2938b007827e17/whisperx/alignment.py#L24-L58).
Just pass in the `--language` code, and use the whisper `--model large`.

Currently default models are provided for `{en, fr, de, es, it, ja, zh, nl, uk, pt}`. If the detected language is not in this list, you need to find a phoneme-based ASR model from the [huggingface model hub](https://huggingface.co/models) and test it on your data.

Currently default models are provided for `{en, fr, de, es, it}` via torchaudio pipelines and for many other languages via Hugging Face. Please find the list of currently supported languages under `DEFAULT_ALIGN_MODELS_HF` in [alignment.py](https://github.com/m-bain/whisperX/blob/main/whisperx/alignment.py). If the detected language is not in this list, you need to find a phoneme-based ASR model from the [huggingface model hub](https://huggingface.co/models) and test it on your data.

#### E.g. German

    whisperx --model large-v2 --language de examples/sample_de_01.wav

    whisperx --model large-v2 --language de path/to/audio.wav

https://user-images.githubusercontent.com/36994049/208298811-e36002ba-3698-4731-97d4-0aebd07e0eb3.mov

See more examples in other languages [here](EXAMPLES.md).

## Python usage 🐍
## Python usage 🐍

```python
import whisperx
@@ -193,7 +189,7 @@ result = model.transcribe(audio, batch_size=batch_size)

print(result["segments"]) # before alignment

# delete model if low on GPU resources
# import gc; gc.collect(); torch.cuda.empty_cache(); del model
# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model

# 2. Align whisper output
model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)

@@ -202,10 +198,10 @@ result = whisperx.align(result["segments"], model_a, metadata, audio, device, re

print(result["segments"]) # after alignment

# delete model if low on GPU resources
# import gc; gc.collect(); torch.cuda.empty_cache(); del model_a
# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model_a

# 3. Assign speaker labels
diarize_model = whisperx.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)
diarize_model = whisperx.diarize.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)

# add min/max number of speakers if known
diarize_segments = diarize_model(audio)
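Stitched together, the pipeline shown across these hunks looks roughly like the following sketch (assembled from the README's own snippets; it assumes a CUDA machine, an installed `whisperx`, and a Hugging Face token with the gated pyannote model agreements accepted):

```python
import gc

import torch
import whisperx

YOUR_HF_TOKEN = "hf_..."   # Hugging Face read token (placeholder)
device = "cuda"
batch_size = 16            # reduce if low on GPU memory
compute_type = "float16"   # or "int8" on CPU / low-memory GPUs

# 1. Transcribe with the batched Whisper backend
model = whisperx.load_model("large-v2", device, compute_type=compute_type)
audio = whisperx.load_audio("path/to/audio.wav")
result = model.transcribe(audio, batch_size=batch_size)

# 2. Align the output to word level
model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
result = whisperx.align(result["segments"], model_a, metadata, audio, device,
                        return_char_alignments=False)

# free GPU memory before diarization if needed
del model_a; gc.collect(); torch.cuda.empty_cache()

# 3. Assign speaker labels
diarize_model = whisperx.diarize.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)
diarize_segments = diarize_model(audio, min_speakers=1, max_speakers=2)
result = whisperx.assign_word_speakers(diarize_segments, result)
print(result["segments"])  # segments now carry "speaker" fields
```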
@@ -229,14 +225,16 @@ If you don't have access to your own GPUs, use the links above to try out Whispe

For specific details on the batching and alignment, the effect of VAD, as well as the chosen alignment model, see the preprint [paper](https://www.robots.ox.ac.uk/~vgg/publications/2023/Bain23/bain23.pdf).

To reduce GPU memory requirements, try any of the following (2. & 3. can affect quality; a Python-API equivalent is sketched after this list):

1. reduce batch size, e.g. `--batch_size 4`
2. use a smaller ASR model `--model base`
3. use a lighter compute type `--compute_type int8`
2. use a smaller ASR model `--model base`
3. use a lighter compute type `--compute_type int8`
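The same three knobs through the Python API, as a hedged sketch (parameter names taken from the usage example above):

```python
import whisperx

model = whisperx.load_model("base", "cuda", compute_type="int8")  # smaller model + lighter compute type
audio = whisperx.load_audio("path/to/audio.wav")
result = model.transcribe(audio, batch_size=4)                    # smaller batch size
```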
Transcription differences from openai's whisper:

1. Transcription without timestamps. To enable single-pass batching, whisper inference is performed with `--without_timestamps True`, which ensures one forward pass per sample in the batch. However, this can cause discrepancies with the default whisper output.
2. VAD-based segment transcription, unlike the buffered transcription of openai's. In the WhisperX paper we show this reduces WER, and enables accurate batched inference.
3. `--condition_on_prev_text` is set to `False` by default (reduces hallucination)
3. `--condition_on_prev_text` is set to `False` by default (reduces hallucination)

<h2 align="left" id="limitations">Limitations ⚠️</h2>
@@ -245,7 +243,6 @@ Transcription differences from openai's whisper:

- Diarization is far from perfect
- A language-specific wav2vec2 model is needed

<h2 align="left" id="contribute">Contribute 🧑🏫</h2>

If you are multilingual, a major way you can contribute to this project is to find phoneme models on huggingface (or train your own) and test them on speech for the target language. If the results look good, send a pull request and some examples showing its success.

@@ -254,43 +251,40 @@ Bug finding and pull requests are also highly appreciated to keep this project g

<h2 align="left" id="coming-soon">TODO 🗓</h2>

* [x] Multilingual init
- [x] Multilingual init

* [x] Automatic align model selection based on language detection
- [x] Automatic align model selection based on language detection

* [x] Python usage
- [x] Python usage

* [x] Incorporating speaker diarization
- [x] Incorporating speaker diarization

* [x] Model flush, for low gpu mem resources
- [x] Model flush, for low gpu mem resources

* [x] Faster-whisper backend
- [x] Faster-whisper backend

* [x] Add max-line etc. see (openai's whisper utils.py)
- [x] Add max-line etc. see (openai's whisper utils.py)

* [x] Sentence-level segments (nltk toolbox)
- [x] Sentence-level segments (nltk toolbox)

* [x] Improve alignment logic
- [x] Improve alignment logic

* [ ] update examples with diarization and word highlighting
- [ ] update examples with diarization and word highlighting

* [ ] Subtitle .ass output <- bring this back (removed in v3)
- [ ] Subtitle .ass output <- bring this back (removed in v3)

* [ ] Add benchmarking code (TEDLIUM for spd/WER & word segmentation)
- [ ] Add benchmarking code (TEDLIUM for spd/WER & word segmentation)

* [ ] Allow silero-vad as alternative VAD option

* [ ] Improve diarization (word level). *Harder than first thought...*
- [x] Allow silero-vad as alternative VAD option

- [ ] Improve diarization (word level). _Harder than first thought..._

<h2 align="left" id="contact">Contact/Support 📇</h2>

Contact maxhbain@gmail.com for queries.

<a href="https://www.buymeacoffee.com/maxhbain" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/default-orange.png" alt="Buy Me A Coffee" height="41" width="174"></a>

<h2 align="left" id="acks">Acknowledgements 🙏</h2>

This work, and my PhD, is supported by the [VGG (Visual Geometry Group)](https://www.robots.ox.ac.uk/~vgg/) and the University of Oxford.

@@ -299,8 +293,10 @@ Of course, this is builds on [openAI's whisper](https://github.com/openai/whispe

Borrows important alignment code from the [PyTorch tutorial on forced alignment](https://pytorch.org/tutorials/intermediate/forced_alignment_with_torchaudio_tutorial.html)
And uses the wonderful pyannote VAD / Diarization https://github.com/pyannote/pyannote-audio

Valuable VAD & Diarization Models from:

Valuable VAD & Diarization Models from [pyannote audio](https://github.com/pyannote/pyannote-audio)
- [pyannote audio](https://github.com/pyannote/pyannote-audio)
- [silero vad](https://github.com/snakers4/silero-vad)

Great backend from [faster-whisper](https://github.com/guillaumekln/faster-whisper) and [CTranslate2](https://github.com/OpenNMT/CTranslate2)
pyproject.toml (new file) — 36 lines

@@ -0,0 +1,36 @@

[project]
urls = { repository = "https://github.com/m-bain/whisperx" }
authors = [{ name = "Max Bain" }]
name = "whisperx"
version = "3.4.0"
description = "Time-Accurate Automatic Speech Recognition using Whisper."
readme = "README.md"
requires-python = ">=3.9, <3.13"
license = { text = "BSD-2-Clause" }

dependencies = [
    "ctranslate2<4.5.0",
    "faster-whisper>=1.1.1",
    "nltk>=3.9.1",
    "numpy>=2.0.2",
    "onnxruntime>=1.19",
    "pandas>=2.2.3",
    "pyannote-audio>=3.3.2",
    "torch>=2.5.1",
    "torchaudio>=2.5.1",
    "transformers>=4.48.0",
]

[project.scripts]
whisperx = "whisperx.__main__:cli"

[build-system]
requires = ["setuptools"]

[tool.setuptools]
include-package-data = true

[tool.setuptools.packages.find]
where = ["."]
include = ["whisperx*"]
requirements.txt (deleted)

@@ -1,8 +0,0 @@

torch>=2
torchaudio>=2
faster-whisper==1.1.0
ctranslate2<4.5.0
transformers
pandas
setuptools>=65
nltk
setup.py (deleted) — 33 lines

@@ -1,33 +0,0 @@

import os

import pkg_resources
from setuptools import find_packages, setup

with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="whisperx",
    py_modules=["whisperx"],
    version="3.3.1",
    description="Time-Accurate Automatic Speech Recognition using Whisper.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">=3.9, <3.13",
    author="Max Bain",
    url="https://github.com/m-bain/whisperx",
    license="BSD-2-Clause",
    packages=find_packages(exclude=["tests*"]),
    install_requires=[
        str(r)
        for r in pkg_resources.parse_requirements(
            open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
        )
    ]
    + [f"pyannote.audio==3.3.2"],
    entry_points={
        "console_scripts": ["whisperx=whisperx.transcribe:cli"],
    },
    include_package_data=True,
    extras_require={"dev": ["pytest"]},
)
@@ -1,6 +1,5 @@

import math
from .conjunctions import get_conjunctions, get_comma
from typing import TextIO
from whisperx.conjunctions import get_conjunctions, get_comma

def normal_round(n):
    if n - math.floor(n) < 0.5:
@@ -1,4 +1,31 @@

from .transcribe import load_model
from .alignment import load_align_model, align
from .audio import load_audio
from .diarize import assign_word_speakers, DiarizationPipeline
import importlib


def _lazy_import(name):
    module = importlib.import_module(f"whisperx.{name}")
    return module


def load_align_model(*args, **kwargs):
    alignment = _lazy_import("alignment")
    return alignment.load_align_model(*args, **kwargs)


def align(*args, **kwargs):
    alignment = _lazy_import("alignment")
    return alignment.align(*args, **kwargs)


def load_model(*args, **kwargs):
    asr = _lazy_import("asr")
    return asr.load_model(*args, **kwargs)


def load_audio(*args, **kwargs):
    audio = _lazy_import("audio")
    return audio.load_audio(*args, **kwargs)


def assign_word_speakers(*args, **kwargs):
    diarize = _lazy_import("diarize")
    return diarize.assign_word_speakers(*args, **kwargs)
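This hunk (apparently the package `__init__`) swaps eager submodule imports for thin wrappers that import the heavy submodules only on first call, so `import whisperx` stays cheap. A minimal standalone sketch of the same idea, using `json` as a stand-in for a heavy dependency:

```python
import importlib


def _lazy_import(name: str):
    # The heavy module is imported only when a wrapper is first called;
    # importlib caches it in sys.modules afterwards.
    return importlib.import_module(name)


def dumps(*args, **kwargs):
    heavy = _lazy_import("json")  # stand-in for torch / ctranslate2 / pyannote
    return heavy.dumps(*args, **kwargs)


print(dumps({"lazy": True}))
```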
@@ -1,4 +1,89 @@

from .transcribe import cli
import argparse
import importlib.metadata
import platform

import torch

from whisperx.utils import (LANGUAGES, TO_LANGUAGE_CODE, optional_float,
                            optional_int, str2bool)


cli()
def cli():
    # fmt: off
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
    parser.add_argument("--model", default="small", help="name of the Whisper model to use")
    parser.add_argument("--model_cache_only", type=str2bool, default=False, help="If True, will not attempt to download models, instead using cached models from --model_dir")
    parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
    parser.add_argument("--device_index", default=0, type=int, help="device index to use for FasterWhisper inference")
    parser.add_argument("--batch_size", default=8, type=int, help="the preferred batch size for inference")
    parser.add_argument("--compute_type", default="float16", type=str, choices=["float16", "float32", "int8"], help="compute type for computation")

    parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
    parser.add_argument("--output_format", "-f", type=str, default="all", choices=["all", "srt", "vtt", "txt", "tsv", "json", "aud"], help="format of the output file; if not specified, all available formats will be produced")
    parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")

    parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
    parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]), help="language spoken in the audio, specify None to perform language detection")

    # alignment params
    parser.add_argument("--align_model", default=None, help="Name of phoneme-level ASR model to do alignment")
    parser.add_argument("--interpolate_method", default="nearest", choices=["nearest", "linear", "ignore"], help="For word .srt, method to assign timestamps to non-aligned words, or merge them into neighbouring.")
    parser.add_argument("--no_align", action='store_true', help="Do not perform phoneme alignment")
    parser.add_argument("--return_char_alignments", action='store_true', help="Return character-level alignments in the output json file")

    # vad params
    parser.add_argument("--vad_method", type=str, default="pyannote", choices=["pyannote", "silero"], help="VAD method to be used")
    parser.add_argument("--vad_onset", type=float, default=0.500, help="Onset threshold for VAD (see pyannote.audio), reduce this if speech is not being detected")
    parser.add_argument("--vad_offset", type=float, default=0.363, help="Offset threshold for VAD (see pyannote.audio), reduce this if speech is not being detected.")
    parser.add_argument("--chunk_size", type=int, default=30, help="Chunk size for merging VAD segments. Default is 30, reduce this if the chunk is too long.")

    # diarization params
    parser.add_argument("--diarize", action="store_true", help="Apply diarization to assign speaker labels to each segment/word")
    parser.add_argument("--min_speakers", default=None, type=int, help="Minimum number of speakers in audio file")
    parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers in audio file")
    parser.add_argument("--diarize_model", default="pyannote/speaker-diarization-3.1", type=str, help="Name of the speaker diarization model to use")
    parser.add_argument("--speaker_embeddings", action="store_true", help="Include speaker embeddings in JSON output (only works with --diarize)")

    parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
    parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
    parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
    parser.add_argument("--patience", type=float, default=1.0, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
    parser.add_argument("--length_penalty", type=float, default=1.0, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")

    parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
    parser.add_argument("--suppress_numerals", action="store_true", help="whether to suppress numeric symbols and currency symbols during sampling, since wav2vec2 cannot align them correctly")

    parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
    parser.add_argument("--condition_on_previous_text", type=str2bool, default=False, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
    parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")

    parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
    parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
    parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
    parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")

    parser.add_argument("--max_line_width", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")
    parser.add_argument("--max_line_count", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of lines in a segment")
    parser.add_argument("--highlight_words", type=str2bool, default=False, help="(not possible with --no_align) underline each word as it is spoken in srt and vtt")
    parser.add_argument("--segment_resolution", type=str, default="sentence", choices=["sentence", "chunk"], help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")

    parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supersedes MKL_NUM_THREADS/OMP_NUM_THREADS")

    parser.add_argument("--hf_token", type=str, default=None, help="Hugging Face Access Token to access PyAnnote gated models")

    parser.add_argument("--print_progress", type=str2bool, default=False, help="if True, progress will be printed in transcribe() and align() methods.")
    parser.add_argument("--version", "-V", action="version", version=f"%(prog)s {importlib.metadata.version('whisperx')}", help="Show whisperx version information and exit")
    parser.add_argument("--python-version", "-P", action="version", version=f"Python {platform.python_version()} ({platform.python_implementation()})", help="Show python version information and exit")
    # fmt: on

    args = parser.parse_args().__dict__

    from whisperx.transcribe import transcribe_task

    transcribe_task(args, parser)


if __name__ == "__main__":
    cli()
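Because pyproject.toml points the `whisperx` console script at `whisperx.__main__:cli`, the same entry point can also be driven from Python by faking `sys.argv` — a sketch, assuming the package is installed and an audio file exists at the given path:

```python
import sys

from whisperx.__main__ import cli

# Equivalent to: whisperx path/to/audio.wav --model small --compute_type int8 --output_dir out
sys.argv = [
    "whisperx",
    "path/to/audio.wav",
    "--model", "small",
    "--compute_type", "int8",
    "--output_dir", "out",
]
cli()
```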
@@ -1,7 +1,9 @@

""""
"""
Forced Alignment with Whisper
C. Max Bain
"""

import math

from dataclasses import dataclass
from typing import Iterable, Optional, Union, List

@@ -11,10 +13,15 @@ import torch

import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

from .audio import SAMPLE_RATE, load_audio
from .utils import interpolate_nans
from .types import AlignedTranscriptionResult, SingleSegment, SingleAlignedSegment, SingleWordSegment
import nltk
from whisperx.audio import SAMPLE_RATE, load_audio
from whisperx.utils import interpolate_nans
from whisperx.types import (
    AlignedTranscriptionResult,
    SingleSegment,
    SingleAlignedSegment,
    SingleWordSegment,
    SegmentData,
)
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters

PUNKT_ABBREVIATIONS = ['dr', 'vs', 'mr', 'mrs', 'prof']

@@ -62,6 +69,8 @@ DEFAULT_ALIGN_MODELS_HF = {

"eu": "stefan-it/wav2vec2-large-xlsr-53-basque",
"gl": "ifrz/wav2vec2-large-xlsr-galician",
"ka": "xsway/wav2vec2-large-xlsr-georgian",
"lv": "jimregan/wav2vec2-large-xlsr-latvian-cv",
"tl": "Khalsuu/filipino-wav2vec2-l-xls-r-300m-official",
}
@@ -131,6 +140,8 @@ def align(

# 1. Preprocess to keep only characters in dictionary
total_segments = len(transcript)
# Store temporary processing values
segment_data: dict[int, SegmentData] = {}
for sdx, segment in enumerate(transcript):
    # strip spaces at beginning / end, but keep track of the amount.
    if print_progress:

@@ -163,10 +174,17 @@ def align(

elif char_ in model_dictionary.keys():
    clean_char.append(char_)
    clean_cdx.append(cdx)
else:
    # add placeholder
    clean_char.append('*')
    clean_cdx.append(cdx)

clean_wdx = []
for wdx, wrd in enumerate(per_word):
    if any([c in model_dictionary.keys() for c in wrd]):
    if any([c in model_dictionary.keys() for c in wrd.lower()]):
        clean_wdx.append(wdx)
    else:
        # index for placeholder
        clean_wdx.append(wdx)

@@ -175,10 +193,12 @@ def align(

sentence_splitter = PunktSentenceTokenizer(punkt_param)
sentence_spans = list(sentence_splitter.span_tokenize(text))

segment["clean_char"] = clean_char
segment["clean_cdx"] = clean_cdx
segment["clean_wdx"] = clean_wdx
segment["sentence_spans"] = sentence_spans
segment_data[sdx] = {
    "clean_char": clean_char,
    "clean_cdx": clean_cdx,
    "clean_wdx": clean_wdx,
    "sentence_spans": sentence_spans
}

aligned_segments: List[SingleAlignedSegment] = []
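The hunks above move the per-segment scratch values out of the segment dicts and into `segment_data`, keyed by segment index. The `SegmentData` type itself is imported from `whisperx.types` and is not shown in this diff; a plausible sketch, under the assumption that it is simply a `TypedDict` with the four keys written here:

```python
from typing import List, Tuple, TypedDict


class SegmentData(TypedDict):
    clean_char: List[str]                  # characters kept after the model-dictionary filter (or '*' placeholders)
    clean_cdx: List[int]                   # original character indices of the kept characters
    clean_wdx: List[int]                   # word indices kept for word-level alignment
    sentence_spans: List[Tuple[int, int]]  # (start, end) character spans from the Punkt sentence splitter
```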
@@ -194,13 +214,14 @@ def align(

"end": t2,
"text": text,
"words": [],
"chars": None,
}

if return_char_alignments:
    aligned_seg["chars"] = []

# check we can align
if len(segment["clean_char"]) == 0:
if len(segment_data[sdx]["clean_char"]) == 0:
    print(f'Failed to align segment ("{segment["text"]}"): no characters in this segment found in model dictionary, resorting to original...')
    aligned_segments.append(aligned_seg)
    continue

@@ -210,8 +231,8 @@

aligned_segments.append(aligned_seg)
continue

text_clean = "".join(segment["clean_char"])
tokens = [model_dictionary[c] for c in text_clean]
text_clean = "".join(segment_data[sdx]["clean_char"])
tokens = [model_dictionary.get(c, -1) for c in text_clean]

f1 = int(t1 * SAMPLE_RATE)
f2 = int(t2 * SAMPLE_RATE)

@@ -244,7 +265,8 @@

blank_id = code

trellis = get_trellis(emission, tokens, blank_id)
path = backtrack(trellis, emission, tokens, blank_id)
# path = backtrack(trellis, emission, tokens, blank_id)
path = backtrack_beam(trellis, emission, tokens, blank_id, beam_width=2)

if path is None:
    print(f'Failed to align segment ("{segment["text"]}"): backtrack failed, resorting to original...')

@@ -253,7 +275,7 @@

char_segments = merge_repeats(path, text_clean)

duration = t2 -t1
duration = t2 - t1
ratio = duration * waveform_segment.size(0) / (trellis.size(0) - 1)

# assign timestamps to aligned characters

@@ -261,8 +283,8 @@

word_idx = 0
for cdx, char in enumerate(text):
    start, end, score = None, None, None
    if cdx in segment["clean_cdx"]:
        char_seg = char_segments[segment["clean_cdx"].index(cdx)]
    if cdx in segment_data[sdx]["clean_cdx"]:
        char_seg = char_segments[segment_data[sdx]["clean_cdx"].index(cdx)]
        start = round(char_seg.start * ratio + t1, 3)
        end = round(char_seg.end * ratio + t1, 3)
        score = round(char_seg.score, 3)

@@ -288,9 +310,9 @@

aligned_subsegments = []
# assign sentence_idx to each character index
char_segments_arr["sentence-idx"] = None
for sdx, (sstart, send) in enumerate(segment["sentence_spans"]):
for sdx2, (sstart, send) in enumerate(segment_data[sdx]["sentence_spans"]):
    curr_chars = char_segments_arr.loc[(char_segments_arr.index >= sstart) & (char_segments_arr.index <= send)]
    char_segments_arr.loc[(char_segments_arr.index >= sstart) & (char_segments_arr.index <= send), "sentence-idx"] = sdx
    char_segments_arr.loc[(char_segments_arr.index >= sstart) & (char_segments_arr.index <= send), "sentence-idx"] = sdx2

    sentence_text = text[sstart:send]
    sentence_start = curr_chars["start"].min()
@@ -360,70 +382,203 @@ def align(

"""
source: https://pytorch.org/tutorials/intermediate/forced_alignment_with_torchaudio_tutorial.html
"""


def get_trellis(emission, tokens, blank_id=0):
    num_frame = emission.size(0)
    num_tokens = len(tokens)

    # Trellis has extra dimensions for both time axis and tokens.
    # The extra dim for tokens represents <SoS> (start-of-sentence)
    # The extra dim for time axis is for simplification of the code.
    trellis = torch.empty((num_frame + 1, num_tokens + 1))
    trellis[0, 0] = 0
    trellis[1:, 0] = torch.cumsum(emission[:, 0], 0)
    trellis[0, -num_tokens:] = -float("inf")
    trellis[-num_tokens:, 0] = float("inf")
    trellis = torch.zeros((num_frame, num_tokens))
    trellis[1:, 0] = torch.cumsum(emission[1:, blank_id], 0)
    trellis[0, 1:] = -float("inf")
    trellis[-num_tokens + 1:, 0] = float("inf")

    for t in range(num_frame):
    for t in range(num_frame - 1):
        trellis[t + 1, 1:] = torch.maximum(
            # Score for staying at the same token
            trellis[t, 1:] + emission[t, blank_id],
            # Score for changing to the next token
            trellis[t, :-1] + emission[t, tokens],
            # trellis[t, :-1] + emission[t, tokens[1:]],
            trellis[t, :-1] + get_wildcard_emission(emission[t], tokens[1:], blank_id),
        )
    return trellis
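For intuition, here is a self-contained toy version of the stay-or-advance recurrence that the trellis encodes (a simplified sketch: it ignores the repository's wildcard handling and exact indexing conventions, and just scores "emit blank and stay" versus "advance to the next token" at every frame):

```python
import torch


def toy_trellis(emission: torch.Tensor, tokens: list, blank_id: int = 0) -> torch.Tensor:
    """trellis[t, j] = best log-score of having consumed j tokens after t frames."""
    T, J = emission.size(0), len(tokens)
    trellis = torch.full((T + 1, J + 1), float("-inf"))
    trellis[0, 0] = 0.0
    for t in range(T):
        for j in range(J + 1):
            stay = trellis[t, j] + emission[t, blank_id]
            advance = trellis[t, j - 1] + emission[t, tokens[j - 1]] if j > 0 else torch.tensor(float("-inf"))
            trellis[t + 1, j] = torch.maximum(stay, advance)
    return trellis


emission = torch.log_softmax(torch.randn(6, 5), dim=-1)  # 6 frames, 5 symbols, blank at index 0
print(toy_trellis(emission, tokens=[2, 3]))
```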
def get_wildcard_emission(frame_emission, tokens, blank_id):
    """Processing token emission scores containing wildcards (vectorized version)

    Args:
        frame_emission: Emission probability vector for the current frame
        tokens: List of token indices
        blank_id: ID of the blank token

    Returns:
        tensor: Maximum probability score for each token position
    """
    assert 0 <= blank_id < len(frame_emission)

    # Convert tokens to a tensor if they are not already
    tokens = torch.tensor(tokens) if not isinstance(tokens, torch.Tensor) else tokens

    # Create a mask to identify wildcard positions
    wildcard_mask = (tokens == -1)

    # Get scores for non-wildcard positions
    regular_scores = frame_emission[tokens.clamp(min=0)]  # clamp to avoid -1 index

    # Create a mask and compute the maximum value without modifying frame_emission
    max_valid_score = frame_emission.clone()  # Create a copy
    max_valid_score[blank_id] = float('-inf')  # Modify the copy to exclude the blank token
    max_valid_score = max_valid_score.max()

    # Use where operation to combine results
    result = torch.where(wildcard_mask, max_valid_score, regular_scores)

    return result
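A quick toy call showing what the wildcard handling does: real token ids index the frame's emission directly, while the `-1` placeholder (the `'*'` character) receives the best non-blank score of that frame. The numbers are made up; the function is the one added in this diff:

```python
import torch

from whisperx.alignment import get_wildcard_emission  # added in this diff

frame = torch.log(torch.tensor([0.10, 0.20, 0.60, 0.10]))  # one frame, blank at index 0
scores = get_wildcard_emission(frame, [2, -1, 3], 0)
print(scores)  # [log 0.60, log 0.60 (best non-blank), log 0.10]
```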
@dataclass
class Point:
    token_index: int
    time_index: int
    score: float


def backtrack(trellis, emission, tokens, blank_id=0):
    # Note:
    # j and t are indices for trellis, which has extra dimensions
    # for time and tokens at the beginning.
    # When referring to time frame index `T` in trellis,
    # the corresponding index in emission is `T-1`.
    # Similarly, when referring to token index `J` in trellis,
    # the corresponding index in transcript is `J-1`.
    j = trellis.size(1) - 1
    t_start = torch.argmax(trellis[:, j]).item()
    t, j = trellis.size(0) - 1, trellis.size(1) - 1

    path = [Point(j, t, emission[t, blank_id].exp().item())]
    while j > 0:
        # Should not happen but just in case
        assert t > 0

    path = []
    for t in range(t_start, 0, -1):
        # 1. Figure out if the current position was stay or change
        # Note (again):
        # `emission[J-1]` is the emission at time frame `J` of trellis dimension.
        # Score for token staying the same from time frame J-1 to T.
        stayed = trellis[t - 1, j] + emission[t - 1, blank_id]
        # Score for token changing from C-1 at T-1 to J at T.
        changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]]
        # Frame-wise score of stay vs change
        p_stay = emission[t - 1, blank_id]
        # p_change = emission[t - 1, tokens[j]]
        p_change = get_wildcard_emission(emission[t - 1], [tokens[j]], blank_id)[0]

        # 2. Store the path with frame-wise probability.
        prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item()
        # Return token index and time index in non-trellis coordinate.
        path.append(Point(j - 1, t - 1, prob))
        # Context-aware score for stay vs change
        stayed = trellis[t - 1, j] + p_stay
        changed = trellis[t - 1, j - 1] + p_change

        # 3. Update the token
        # Update position
        t -= 1
        if changed > stayed:
            j -= 1
            if j == 0:
                break
    else:
        # failed
        return None

        # Store the path with frame-wise probability.
        prob = (p_change if changed > stayed else p_stay).exp().item()
        path.append(Point(j, t, prob))

    # Now j == 0, which means, it reached the SoS.
    # Fill up the rest for the sake of visualization
    while t > 0:
        prob = emission[t - 1, blank_id].exp().item()
        path.append(Point(j, t - 1, prob))
        t -= 1

    return path[::-1]


@dataclass
class Path:
    points: List[Point]
    score: float


@dataclass
class BeamState:
    """State in beam search."""
    token_index: int   # Current token position
    time_index: int    # Current time step
    score: float       # Cumulative score
    path: List[Point]  # Path history


def backtrack_beam(trellis, emission, tokens, blank_id=0, beam_width=5):
    """Standard CTC beam search backtracking implementation.

    Args:
        trellis (torch.Tensor): The trellis (or lattice) of shape (T, N), where T is the number of time steps
            and N is the number of tokens (including the blank token).
        emission (torch.Tensor): The emission probabilities of shape (T, N).
        tokens (List[int]): List of token indices (excluding the blank token).
        blank_id (int, optional): The ID of the blank token. Defaults to 0.
        beam_width (int, optional): The number of top paths to keep during beam search. Defaults to 5.

    Returns:
        List[Point]: the best path
    """
    T, J = trellis.size(0) - 1, trellis.size(1) - 1

    init_state = BeamState(
        token_index=J,
        time_index=T,
        score=trellis[T, J],
        path=[Point(J, T, emission[T, blank_id].exp().item())]
    )

    beams = [init_state]

    while beams and beams[0].token_index > 0:
        next_beams = []

        for beam in beams:
            t, j = beam.time_index, beam.token_index

            if t <= 0:
                continue

            p_stay = emission[t - 1, blank_id]
            p_change = get_wildcard_emission(emission[t - 1], [tokens[j]], blank_id)[0]

            stay_score = trellis[t - 1, j]
            change_score = trellis[t - 1, j - 1] if j > 0 else float('-inf')

            # Stay
            if not math.isinf(stay_score):
                new_path = beam.path.copy()
                new_path.append(Point(j, t - 1, p_stay.exp().item()))
                next_beams.append(BeamState(
                    token_index=j,
                    time_index=t - 1,
                    score=stay_score,
                    path=new_path
                ))

            # Change
            if j > 0 and not math.isinf(change_score):
                new_path = beam.path.copy()
                new_path.append(Point(j - 1, t - 1, p_change.exp().item()))
                next_beams.append(BeamState(
                    token_index=j - 1,
                    time_index=t - 1,
                    score=change_score,
                    path=new_path
                ))

        # sort by score
        beams = sorted(next_beams, key=lambda x: x.score, reverse=True)[:beam_width]

        if not beams:
            break

    if not beams:
        return None

    best_beam = beams[0]
    t = best_beam.time_index
    j = best_beam.token_index
    while t > 0:
        prob = emission[t - 1, blank_id].exp().item()
        best_beam.path.append(Point(j, t - 1, prob))
        t -= 1

    return best_beam.path[::-1]


# Merge the labels
@dataclass
class Segment:
@@ -1,6 +1,5 @@

import os
import warnings
from typing import List, NamedTuple, Optional, Union
from typing import List, Optional, Union
from dataclasses import replace

import ctranslate2

@@ -12,9 +11,9 @@ from faster_whisper.transcribe import TranscriptionOptions, get_ctranslate2_stor

from transformers import Pipeline
from transformers.pipelines.pt_utils import PipelineIterator

from .audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
from .types import SingleSegment, TranscriptionResult
from .vad import VoiceActivitySegmentation, load_vad_model, merge_chunks
from whisperx.audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
from whisperx.types import SingleSegment, TranscriptionResult
from whisperx.vads import Vad, Silero, Pyannote


def find_numeral_symbol_tokens(tokenizer):

@@ -52,6 +51,7 @@ class WhisperModel(faster_whisper.WhisperModel):

previous_tokens,
without_timestamps=options.without_timestamps,
prefix=options.prefix,
hotwords=options.hotwords
)

encoder_output = self.encode(features)

@@ -106,7 +106,7 @@ class FasterWhisperPipeline(Pipeline):

def __init__(
    self,
    model: WhisperModel,
    vad: VoiceActivitySegmentation,
    vad,
    vad_params: dict,
    options: TranscriptionOptions,
    tokenizer: Optional[Tokenizer] = None,

@@ -208,7 +208,16 @@ class FasterWhisperPipeline(Pipeline):

# print(f2-f1)
yield {'inputs': audio[f1:f2]}

vad_segments = self.vad_model({"waveform": torch.from_numpy(audio).unsqueeze(0), "sample_rate": SAMPLE_RATE})
# Pre-process audio and merge chunks as defined by the respective VAD child class
# In case vad_model is manually assigned (see 'load_model') follow the functionality of pyannote toolkit
if issubclass(type(self.vad_model), Vad):
    waveform = self.vad_model.preprocess_audio(audio)
    merge_chunks = self.vad_model.merge_chunks
else:
    waveform = Pyannote.preprocess_audio(audio)
    merge_chunks = Pyannote.merge_chunks

vad_segments = self.vad_model({"waveform": waveform, "sample_rate": SAMPLE_RATE})
vad_segments = merge_chunks(
    vad_segments,
    chunk_size,

@@ -296,7 +305,8 @@ def load_model(

compute_type="float16",
asr_options: Optional[dict] = None,
language: Optional[str] = None,
vad_model: Optional[VoiceActivitySegmentation] = None,
vad_model: Optional[Vad] = None,
vad_method: Optional[str] = "pyannote",
vad_options: Optional[dict] = None,
model: Optional[WhisperModel] = None,
task="transcribe",

@@ -309,6 +319,7 @@

whisper_arch - The name of the Whisper model to load.
device - The device to load the model on.
compute_type - The compute type to use for the model.
vad_method - The vad method to use. vad_model has higher priority if it is not None.
options - A dictionary of options to use for the model.
language - The language of the model. (use English for now)
model - The WhisperModel instance to use.

@@ -374,6 +385,7 @@

default_asr_options = TranscriptionOptions(**default_asr_options)

default_vad_options = {
    "chunk_size": 30,  # needed by silero since binarization happens before merge_chunks
    "vad_onset": 0.500,
    "vad_offset": 0.363
}

@@ -381,10 +393,17 @@

if vad_options is not None:
    default_vad_options.update(vad_options)

# Note: manually assigned vad_model has higher priority than vad_method!
if vad_model is not None:
    print("Use manually assigned vad_model. vad_method is ignored.")
    vad_model = vad_model
else:
    vad_model = load_vad_model(torch.device(device), use_auth_token=None, **default_vad_options)
    if vad_method == "silero":
        vad_model = Silero(**default_vad_options)
    elif vad_method == "pyannote":
        vad_model = Pyannote(torch.device(device), use_auth_token=None, **default_vad_options)
    else:
        raise ValueError(f"Invalid vad_method: {vad_method}")

return FasterWhisperPipeline(
    model=model,
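With the `vad_method` plumbing added above, selecting the Silero VAD from the public API would look roughly like this sketch (parameter and option names are taken from this hunk's `load_model` signature and `default_vad_options`; defaults shown for illustration):

```python
import whisperx

model = whisperx.load_model(
    "large-v2",
    "cuda",
    compute_type="float16",
    vad_method="silero",   # or "pyannote" (the default)
    vad_options={"vad_onset": 0.500, "vad_offset": 0.363, "chunk_size": 30},
)
audio = whisperx.load_audio("path/to/audio.wav")
result = model.transcribe(audio, batch_size=8)
```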
@@ -7,7 +7,7 @@ import numpy as np

import torch
import torch.nn.functional as F

from .utils import exact_div
from whisperx.utils import exact_div

# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
@@ -4,20 +4,21 @@ from pyannote.audio import Pipeline
from typing import Optional, Union
import torch

from .audio import load_audio, SAMPLE_RATE
from .types import TranscriptionResult, AlignedTranscriptionResult
from whisperx.audio import load_audio, SAMPLE_RATE
from whisperx.types import TranscriptionResult, AlignedTranscriptionResult


class DiarizationPipeline:
    def __init__(
        self,
        model_name="pyannote/speaker-diarization-3.1",
        model_name=None,
        use_auth_token=None,
        device: Optional[Union[str, torch.device]] = "cpu",
    ):
        if isinstance(device, str):
            device = torch.device(device)
        self.model = Pipeline.from_pretrained(model_name, use_auth_token=use_auth_token).to(device)
        model_config = model_name or "pyannote/speaker-diarization-3.1"
        self.model = Pipeline.from_pretrained(model_config, use_auth_token=use_auth_token).to(device)

    def __call__(
        self,
@@ -25,25 +26,81 @@ class DiarizationPipeline:
        num_speakers: Optional[int] = None,
        min_speakers: Optional[int] = None,
        max_speakers: Optional[int] = None,
    ):
        return_embeddings: bool = False,
    ) -> Union[tuple[pd.DataFrame, Optional[dict[str, list[float]]]], pd.DataFrame]:
        """
        Perform speaker diarization on audio.

        Args:
            audio: Path to audio file or audio array
            num_speakers: Exact number of speakers (if known)
            min_speakers: Minimum number of speakers to detect
            max_speakers: Maximum number of speakers to detect
            return_embeddings: Whether to return speaker embeddings

        Returns:
            If return_embeddings is True:
                Tuple of (diarization dataframe, speaker embeddings dictionary)
            Otherwise:
                Just the diarization dataframe
        """
        if isinstance(audio, str):
            audio = load_audio(audio)
        audio_data = {
            'waveform': torch.from_numpy(audio[None, :]),
            'sample_rate': SAMPLE_RATE
        }
        segments = self.model(audio_data, num_speakers = num_speakers, min_speakers=min_speakers, max_speakers=max_speakers)
        diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])

        if return_embeddings:
            diarization, embeddings = self.model(
                audio_data,
                num_speakers=num_speakers,
                min_speakers=min_speakers,
                max_speakers=max_speakers,
                return_embeddings=True,
            )
        else:
            diarization = self.model(
                audio_data,
                num_speakers=num_speakers,
                min_speakers=min_speakers,
                max_speakers=max_speakers,
            )
            embeddings = None

        diarize_df = pd.DataFrame(diarization.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])
        diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)
        diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)
        return diarize_df

        if return_embeddings and embeddings is not None:
            speaker_embeddings = {speaker: embeddings[s].tolist() for s, speaker in enumerate(diarization.labels())}
            return diarize_df, speaker_embeddings

        # For backwards compatibility
        if return_embeddings:
            return diarize_df, None
        else:
            return diarize_df
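A hedged sketch of calling the updated DiarizationPipeline; the token and file name are placeholders, and the return shapes follow the docstring above.

from whisperx.audio import load_audio
from whisperx.diarize import DiarizationPipeline

audio = load_audio("meeting.wav")  # placeholder input
diarize_model = DiarizationPipeline(use_auth_token="hf_xxx", device="cuda")

# Default: a single dataframe with segment/label/speaker/start/end columns.
diarize_df = diarize_model(audio, min_speakers=2, max_speakers=4)

# Opt-in: also get per-speaker embedding vectors keyed by speaker label.
diarize_df, speaker_embeddings = diarize_model(audio, return_embeddings=True)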
def assign_word_speakers(
    diarize_df: pd.DataFrame,
    transcript_result: Union[AlignedTranscriptionResult, TranscriptionResult],
    fill_nearest=False,
) -> dict:
    speaker_embeddings: Optional[dict[str, list[float]]] = None,
    fill_nearest: bool = False,
) -> Union[AlignedTranscriptionResult, TranscriptionResult]:
    """
    Assign speakers to words and segments in the transcript.

    Args:
        diarize_df: Diarization dataframe from DiarizationPipeline
        transcript_result: Transcription result to augment with speaker labels
        speaker_embeddings: Optional dictionary mapping speaker IDs to embedding vectors
        fill_nearest: If True, assign speakers even when there's no direct time overlap

    Returns:
        Updated transcript_result with speaker assignments and optionally embeddings
    """
    transcript_segments = transcript_result["segments"]
    for seg in transcript_segments:
        # assign speaker to segment (if any)
@@ -75,11 +132,15 @@ def assign_word_speakers(
                    speaker = dia_tmp.groupby("speaker")["intersection"].sum().sort_values(ascending=False).index[0]
                    word["speaker"] = speaker

    # Add speaker embeddings to the result if provided
    if speaker_embeddings is not None:
        transcript_result["speaker_embeddings"] = speaker_embeddings

    return transcript_result


class Segment:
    def __init__(self, start, end, speaker=None):
    def __init__(self, start:int, end:int, speaker:Optional[str]=None):
        self.start = start
        self.end = end
        self.speaker = speaker
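A short follow-on sketch showing how the new speaker_embeddings argument flows into the transcript; it mirrors the CLI usage further down in this diff, and result is assumed to come from model.transcribe / align.

from whisperx.diarize import assign_word_speakers

result = assign_word_speakers(diarize_df, result, speaker_embeddings)
# Segments and words now carry a "speaker" key; when embeddings are passed,
# result["speaker_embeddings"] maps each speaker label to its embedding vector.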
@@ -6,89 +6,27 @@ import warnings
import numpy as np
import torch

from .alignment import align, load_align_model
from .asr import load_model
from .audio import load_audio
from .diarize import DiarizationPipeline, assign_word_speakers
from .types import AlignedTranscriptionResult, TranscriptionResult
from .utils import (
    LANGUAGES,
    TO_LANGUAGE_CODE,
    get_writer,
    optional_float,
    optional_int,
    str2bool,
)
from whisperx.alignment import align, load_align_model
from whisperx.asr import load_model
from whisperx.audio import load_audio
from whisperx.diarize import DiarizationPipeline, assign_word_speakers
from whisperx.types import AlignedTranscriptionResult, TranscriptionResult
from whisperx.utils import LANGUAGES, TO_LANGUAGE_CODE, get_writer


def cli():
def transcribe_task(args: dict, parser: argparse.ArgumentParser):
    """Transcription task to be called from CLI.

    Args:
        args: Dictionary of command-line arguments.
        parser: argparse.ArgumentParser object.
    """
    # fmt: off
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
    parser.add_argument("--model", default="small", help="name of the Whisper model to use")
    parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
    parser.add_argument("--device_index", default=0, type=int, help="device index to use for FasterWhisper inference")
    parser.add_argument("--batch_size", default=8, type=int, help="the preferred batch size for inference")
    parser.add_argument("--compute_type", default="float16", type=str, choices=["float16", "float32", "int8"], help="compute type for computation")

    parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
    parser.add_argument("--output_format", "-f", type=str, default="all", choices=["all", "srt", "vtt", "txt", "tsv", "json", "aud"], help="format of the output file; if not specified, all available formats will be produced")
    parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")

    parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
    parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]), help="language spoken in the audio, specify None to perform language detection")

    # alignment params
    parser.add_argument("--align_model", default=None, help="Name of phoneme-level ASR model to do alignment")
    parser.add_argument("--interpolate_method", default="nearest", choices=["nearest", "linear", "ignore"], help="For word .srt, method to assign timestamps to non-aligned words, or merge them into neighbouring.")
    parser.add_argument("--no_align", action='store_true', help="Do not perform phoneme alignment")
    parser.add_argument("--return_char_alignments", action='store_true', help="Return character-level alignments in the output json file")

    # vad params
    parser.add_argument("--vad_onset", type=float, default=0.500, help="Onset threshold for VAD (see pyannote.audio), reduce this if speech is not being detected")
    parser.add_argument("--vad_offset", type=float, default=0.363, help="Offset threshold for VAD (see pyannote.audio), reduce this if speech is not being detected.")
    parser.add_argument("--chunk_size", type=int, default=30, help="Chunk size for merging VAD segments. Default is 30, reduce this if the chunk is too long.")

    # diarization params
    parser.add_argument("--diarize", action="store_true", help="Apply diarization to assign speaker labels to each segment/word")
    parser.add_argument("--min_speakers", default=None, type=int, help="Minimum number of speakers in audio file")
    parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers in audio file")

    parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
    parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
    parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
    parser.add_argument("--patience", type=float, default=1.0, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
    parser.add_argument("--length_penalty", type=float, default=1.0, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")

    parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
    parser.add_argument("--suppress_numerals", action="store_true", help="whether to suppress numeric symbols and currency symbols during sampling, since wav2vec2 cannot align them correctly")

    parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
    parser.add_argument("--condition_on_previous_text", type=str2bool, default=False, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
    parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")

    parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
    parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
    parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
    parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")

    parser.add_argument("--max_line_width", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")
    parser.add_argument("--max_line_count", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of lines in a segment")
    parser.add_argument("--highlight_words", type=str2bool, default=False, help="(not possible with --no_align) underline each word as it is spoken in srt and vtt")
    parser.add_argument("--segment_resolution", type=str, default="sentence", choices=["sentence", "chunk"], help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")

    parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supersedes MKL_NUM_THREADS/OMP_NUM_THREADS")

    parser.add_argument("--hf_token", type=str, default=None, help="Hugging Face Access Token to access PyAnnote gated models")

    parser.add_argument("--print_progress", type=str2bool, default=False, help="if True, progress will be printed in transcribe() and align() methods.")
    # fmt: on

    args = parser.parse_args().__dict__
    model_name: str = args.pop("model")
    batch_size: int = args.pop("batch_size")
    model_dir: str = args.pop("model_dir")
    model_cache_only: bool = args.pop("model_cache_only")
    output_dir: str = args.pop("output_dir")
    output_format: str = args.pop("output_format")
    device: str = args.pop("device")
@@ -110,6 +48,7 @@ def cli():
    return_char_alignments: bool = args.pop("return_char_alignments")

    hf_token: str = args.pop("hf_token")
    vad_method: str = args.pop("vad_method")
    vad_onset: float = args.pop("vad_onset")
    vad_offset: float = args.pop("vad_offset")

@@ -118,7 +57,12 @@ def cli():
    diarize: bool = args.pop("diarize")
    min_speakers: int = args.pop("min_speakers")
    max_speakers: int = args.pop("max_speakers")
    diarize_model_name: str = args.pop("diarize_model")
    print_progress: bool = args.pop("print_progress")
    return_speaker_embeddings: bool = args.pop("speaker_embeddings")

    if return_speaker_embeddings and not diarize:
        warnings.warn("--speaker_embeddings has no effect without --diarize")

    if args["language"] is not None:
        args["language"] = args["language"].lower()
@@ -134,7 +78,9 @@ def cli():
                f"{model_name} is an English-only model but received '{args['language']}'; using English instead."
            )
            args["language"] = "en"
    align_language = args["language"] if args["language"] is not None else "en"  # default to loading english if not specified
    align_language = (
        args["language"] if args["language"] is not None else "en"
    )  # default to loading english if not specified

    temperature = args.pop("temperature")
    if (increment := args.pop("temperature_increment_on_fallback")) is not None:
@@ -175,7 +121,24 @@ def cli():
    results = []
    tmp_results = []
    # model = load_model(model_name, device=device, download_root=model_dir)
    model = load_model(model_name, device=device, device_index=device_index, download_root=model_dir, compute_type=compute_type, language=args['language'], asr_options=asr_options, vad_options={"vad_onset": vad_onset, "vad_offset": vad_offset}, task=task, threads=faster_whisper_threads)
    model = load_model(
        model_name,
        device=device,
        device_index=device_index,
        download_root=model_dir,
        compute_type=compute_type,
        language=args["language"],
        asr_options=asr_options,
        vad_method=vad_method,
        vad_options={
            "chunk_size": chunk_size,
            "vad_onset": vad_onset,
            "vad_offset": vad_offset,
        },
        task=task,
        local_files_only=model_cache_only,
        threads=faster_whisper_threads,
    )

    for audio_path in args.pop("audio"):
        audio = load_audio(audio_path)
@@ -199,7 +162,9 @@ def cli():
    if not no_align:
        tmp_results = results
        results = []
        align_model, align_metadata = load_align_model(align_language, device, model_name=align_model)
        align_model, align_metadata = load_align_model(
            align_language, device, model_name=align_model
        )
        for result, audio_path in tmp_results:
            # >> Align
            if len(tmp_results) > 1:
@@ -211,8 +176,12 @@ def cli():
            if align_model is not None and len(result["segments"]) > 0:
                if result.get("language", "en") != align_metadata["language"]:
                    # load new language
                    print(f"New language found ({result['language']})! Previous was ({align_metadata['language']}), loading new alignment model for new language...")
                    align_model, align_metadata = load_align_model(result["language"], device)
                    print(
                        f"New language found ({result['language']})! Previous was ({align_metadata['language']}), loading new alignment model for new language..."
                    )
                    align_model, align_metadata = load_align_model(
                        result["language"], device
                    )
            print(">>Performing alignment...")
            result: AlignedTranscriptionResult = align(
                result["segments"],
@@ -235,19 +204,24 @@ def cli():
    # >> Diarize
    if diarize:
        if hf_token is None:
            print("Warning, no --hf_token used, needs to be saved in environment variable, otherwise will throw error loading diarization model...")
            print(
                "Warning, no --hf_token used, needs to be saved in environment variable, otherwise will throw error loading diarization model..."
            )
        tmp_results = results
        print(">>Performing diarization...")
        print(">>Using model:", diarize_model_name)
        results = []
        diarize_model = DiarizationPipeline(use_auth_token=hf_token, device=device)
        diarize_model = DiarizationPipeline(model_name=diarize_model_name, use_auth_token=hf_token, device=device)
        for result, input_audio_path in tmp_results:
            diarize_segments = diarize_model(input_audio_path, min_speakers=min_speakers, max_speakers=max_speakers)
            result = assign_word_speakers(diarize_segments, result)
            diarize_segments, speaker_embeddings = diarize_model(
                input_audio_path,
                min_speakers=min_speakers,
                max_speakers=max_speakers,
                return_embeddings=return_speaker_embeddings
            )
            result = assign_word_speakers(diarize_segments, result, speaker_embeddings)
            results.append((result, input_audio_path))
    # >> Write
    for result, audio_path in results:
        result["language"] = align_language
        writer(result, audio_path, writer_args)

if __name__ == "__main__":
    cli()
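For readers tracing the reworked CLI above, a hedged sketch of the same flow through the Python API; the imports are the module-level ones introduced in this diff, while the exact align() keyword set is abbreviated and the file name and token are placeholders.

from whisperx.alignment import align, load_align_model
from whisperx.asr import load_model
from whisperx.audio import load_audio
from whisperx.diarize import DiarizationPipeline, assign_word_speakers

device = "cuda"
audio = load_audio("example.wav")

model = load_model("small", device=device, compute_type="float16",
                   vad_method="pyannote",
                   vad_options={"chunk_size": 30, "vad_onset": 0.500, "vad_offset": 0.363})
result = model.transcribe(audio, batch_size=8)

align_model, align_metadata = load_align_model(result["language"], device)
result = align(result["segments"], align_model, align_metadata, audio, device)

diarize_model = DiarizationPipeline(use_auth_token="hf_xxx", device=device)
diarize_df, embeddings = diarize_model(audio, return_embeddings=True)
result = assign_word_speakers(diarize_df, result, embeddings)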
@@ -1,4 +1,4 @@
from typing import TypedDict, Optional, List
from typing import TypedDict, Optional, List, Tuple


class SingleWordSegment(TypedDict):
@@ -30,6 +30,17 @@ class SingleSegment(TypedDict):
    text: str


class SegmentData(TypedDict):
    """
    Temporary processing data used during alignment.
    Contains cleaned and preprocessed data for each segment.
    """
    clean_char: List[str]  # Cleaned characters that exist in model dictionary
    clean_cdx: List[int]  # Original indices of cleaned characters
    clean_wdx: List[int]  # Indices of words containing valid characters
    sentence_spans: List[Tuple[int, int]]  # Start and end indices of sentences


class SingleAlignedSegment(TypedDict):
    """
    A single segment (up to multiple sentences) of a speech with word alignment.
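A minimal illustration of the new SegmentData structure; the values are made up, and in practice whisperx/alignment.py (not shown in this hunk) is what fills it per segment.

segment_data: SegmentData = {
    "clean_char": ["h", "e", "l", "l", "o"],
    "clean_cdx": [0, 1, 2, 3, 4],
    "clean_wdx": [0],
    "sentence_spans": [(0, 5)],
}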
@@ -241,7 +241,7 @@ class SubtitlesWriter(ResultWriter):
        line_count = 1
        # the next subtitle to yield (a list of word timings with whitespace)
        subtitle: list[dict] = []
        times = []
        times: list[tuple] = []
        last = result["segments"][0]["start"]
        for segment in result["segments"]:
            for i, original_timing in enumerate(segment["words"]):
3
whisperx/vads/__init__.py
Normal file
@@ -0,0 +1,3 @@
from whisperx.vads.pyannote import Pyannote as Pyannote
from whisperx.vads.silero import Silero as Silero
from whisperx.vads.vad import Vad as Vad
@@ -1,32 +1,29 @@
import hashlib
import os
import urllib
from typing import Callable, Optional, Text, Union
from typing import Callable, Text, Union
from typing import Optional

import numpy as np
import pandas as pd
import torch
from pyannote.audio import Model
from pyannote.audio.core.io import AudioFile
from pyannote.audio.pipelines import VoiceActivityDetection
from pyannote.audio.pipelines.utils import PipelineModel
from pyannote.core import Annotation, Segment, SlidingWindowFeature
from tqdm import tqdm
from pyannote.core import Annotation, SlidingWindowFeature
from pyannote.core import Segment

from .diarize import Segment as SegmentX
from whisperx.diarize import Segment as SegmentX
from whisperx.vads.vad import Vad

# deprecated
VAD_SEGMENTATION_URL = "https://whisperx.s3.eu-west-2.amazonaws.com/model_weights/segmentation/0b5b3216d60a2d32fc086b47ea8c67589aaeb26b7e07fcbe620d6d0b83e209ea/pytorch_model.bin"

def load_vad_model(device, vad_onset=0.500, vad_offset=0.363, use_auth_token=None, model_fp=None):
    model_dir = torch.hub._get_torch_home()

    vad_dir = os.path.dirname(os.path.abspath(__file__))
    main_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    os.makedirs(model_dir, exist_ok = True)
    if model_fp is None:
        # Dynamically resolve the path to the model file
        model_fp = os.path.join(vad_dir, "assets", "pytorch_model.bin")
        model_fp = os.path.join(main_dir, "assets", "pytorch_model.bin")
        model_fp = os.path.abspath(model_fp)  # Ensure the path is absolute
    else:
        model_fp = os.path.abspath(model_fp)  # Ensure any provided path is absolute
@@ -39,10 +36,6 @@ def load_vad_model(device, vad_onset=0.500, vad_offset=0.363, use_auth_token=Non
        raise RuntimeError(f"{model_fp} exists and is not a regular file")

    model_bytes = open(model_fp, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != VAD_SEGMENTATION_URL.split('/')[-2]:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    vad_model = Model.from_pretrained(model_fp, use_auth_token=use_auth_token)
    hyperparameters = {"onset": vad_onset,
@@ -88,14 +81,14 @@ class Binarize:
    """

    def __init__(
            self,
            onset: float = 0.5,
            offset: Optional[float] = None,
            min_duration_on: float = 0.0,
            min_duration_off: float = 0.0,
            pad_onset: float = 0.0,
            pad_offset: float = 0.0,
            max_duration: float = float('inf')
        self,
        onset: float = 0.5,
        offset: Optional[float] = None,
        min_duration_on: float = 0.0,
        min_duration_off: float = 0.0,
        pad_onset: float = 0.0,
        pad_offset: float = 0.0,
        max_duration: float = float('inf')
    ):

        super().__init__()
@@ -151,8 +144,8 @@ class Binarize:
                        region = Segment(start - self.pad_onset, min_score_t + self.pad_offset)
                        active[region, k] = label
                        start = curr_timestamps[min_score_div_idx]
                        curr_scores = curr_scores[min_score_div_idx+1:]
                        curr_timestamps = curr_timestamps[min_score_div_idx+1:]
                        curr_scores = curr_scores[min_score_div_idx + 1:]
                        curr_timestamps = curr_timestamps[min_score_div_idx + 1:]
                    # switching from active to inactive
                    elif y < self.offset:
                        region = Segment(start - self.pad_onset, t + self.pad_offset)
@@ -193,11 +186,11 @@ class Binarize:

class VoiceActivitySegmentation(VoiceActivityDetection):
    def __init__(
            self,
            segmentation: PipelineModel = "pyannote/segmentation",
            fscore: bool = False,
            use_auth_token: Union[Text, None] = None,
            **inference_kwargs,
        self,
        segmentation: PipelineModel = "pyannote/segmentation",
        fscore: bool = False,
        use_auth_token: Union[Text, None] = None,
        **inference_kwargs,
    ):

        super().__init__(segmentation=segmentation, fscore=fscore, use_auth_token=use_auth_token, **inference_kwargs)
@@ -236,72 +229,35 @@ class VoiceActivitySegmentation(VoiceActivityDetection):
        return segmentations


def merge_vad(vad_arr, pad_onset=0.0, pad_offset=0.0, min_duration_off=0.0, min_duration_on=0.0):
class Pyannote(Vad):

    active = Annotation()
    for k, vad_t in enumerate(vad_arr):
        region = Segment(vad_t[0] - pad_onset, vad_t[1] + pad_offset)
        active[region, k] = 1
    def __init__(self, device, use_auth_token=None, model_fp=None, **kwargs):
        print(">>Performing voice activity detection using Pyannote...")
        super().__init__(kwargs['vad_onset'])
        self.vad_pipeline = load_vad_model(device, use_auth_token=use_auth_token, model_fp=model_fp)

    def __call__(self, audio: AudioFile, **kwargs):
        return self.vad_pipeline(audio)

    if pad_offset > 0.0 or pad_onset > 0.0 or min_duration_off > 0.0:
        active = active.support(collar=min_duration_off)
    @staticmethod
    def preprocess_audio(audio):
        return torch.from_numpy(audio).unsqueeze(0)

    # remove tracks shorter than min_duration_on
    if min_duration_on > 0:
        for segment, track in list(active.itertracks()):
            if segment.duration < min_duration_on:
                del active[segment, track]
    @staticmethod
    def merge_chunks(segments,
                     chunk_size,
                     onset: float = 0.5,
                     offset: Optional[float] = None,
                     ):
        assert chunk_size > 0
        binarize = Binarize(max_duration=chunk_size, onset=onset, offset=offset)
        segments = binarize(segments)
        segments_list = []
        for speech_turn in segments.get_timeline():
            segments_list.append(SegmentX(speech_turn.start, speech_turn.end, "UNKNOWN"))

    active = active.for_json()
    active_segs = pd.DataFrame([x['segment'] for x in active['content']])
    return active_segs

def merge_chunks(
    segments,
    chunk_size,
    onset: float = 0.5,
    offset: Optional[float] = None,
):
    """
    Merge operation described in paper
    """
    curr_end = 0
    merged_segments = []
    seg_idxs = []
    speaker_idxs = []

    assert chunk_size > 0
    binarize = Binarize(max_duration=chunk_size, onset=onset, offset=offset)
    segments = binarize(segments)
    segments_list = []
    for speech_turn in segments.get_timeline():
        segments_list.append(SegmentX(speech_turn.start, speech_turn.end, "UNKNOWN"))

    if len(segments_list) == 0:
        print("No active speech found in audio")
        return []
    # assert segments_list, "segments_list is empty."
    # Make sur the starting point is the start of the segment.
    curr_start = segments_list[0].start

    for seg in segments_list:
        if seg.end - curr_start > chunk_size and curr_end-curr_start > 0:
            merged_segments.append({
                "start": curr_start,
                "end": curr_end,
                "segments": seg_idxs,
            })
            curr_start = seg.start
            seg_idxs = []
            speaker_idxs = []
        curr_end = seg.end
        seg_idxs.append((seg.start, seg.end))
        speaker_idxs.append(seg.speaker)
    # add final
    merged_segments.append({
        "start": curr_start,
        "end": curr_end,
        "segments": seg_idxs,
    })
    return merged_segments
        if len(segments_list) == 0:
            print("No active speech found in audio")
            return []
        assert segments_list, "segments_list is empty."
        return Vad.merge_chunks(segments_list, chunk_size, onset, offset)
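A hedged sketch of driving the Pyannote wrapper directly; the thresholds mirror the defaults visible in this diff, the input file is a placeholder, and the waveform/sample_rate dict follows the convention used by the pipelines here.

import torch
from whisperx.audio import load_audio, SAMPLE_RATE
from whisperx.vads import Pyannote

vad = Pyannote(torch.device("cpu"), use_auth_token=None, vad_onset=0.500, vad_offset=0.363)
audio = load_audio("speech.wav")             # float32 mono at 16 kHz
waveform = Pyannote.preprocess_audio(audio)  # torch tensor of shape (1, n_samples)
segments = vad({"waveform": waveform, "sample_rate": SAMPLE_RATE})
chunks = Pyannote.merge_chunks(segments, chunk_size=30, onset=0.500)
# chunks: [{"start": ..., "end": ..., "segments": [(s, e), ...]}, ...]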
66
whisperx/vads/silero.py
Normal file
@@ -0,0 +1,66 @@
from io import IOBase
from pathlib import Path
from typing import Mapping, Text
from typing import Optional
from typing import Union

import torch

from whisperx.diarize import Segment as SegmentX
from whisperx.vads.vad import Vad

AudioFile = Union[Text, Path, IOBase, Mapping]


class Silero(Vad):
    # check again default values
    def __init__(self, **kwargs):
        print(">>Performing voice activity detection using Silero...")
        super().__init__(kwargs['vad_onset'])

        self.vad_onset = kwargs['vad_onset']
        self.chunk_size = kwargs['chunk_size']
        self.vad_pipeline, vad_utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                                                      model='silero_vad',
                                                      force_reload=False,
                                                      onnx=False,
                                                      trust_repo=True)
        (self.get_speech_timestamps, _, self.read_audio, _, _) = vad_utils

    def __call__(self, audio: AudioFile, **kwargs):
        """use silero to get segments of speech"""
        # Only accept 16000 Hz for now.
        # Note: Silero models support both 8000 and 16000 Hz. Although other values are not directly supported,
        # multiples of 16000 (e.g. 32000 or 48000) are cast to 16000 inside of the JIT model!
        sample_rate = audio["sample_rate"]
        if sample_rate != 16000:
            raise ValueError("Only 16000Hz sample rate is allowed")

        timestamps = self.get_speech_timestamps(audio["waveform"],
                                                model=self.vad_pipeline,
                                                sampling_rate=sample_rate,
                                                max_speech_duration_s=self.chunk_size,
                                                threshold=self.vad_onset
                                                # min_silence_duration_ms = self.min_duration_off/1000
                                                # min_speech_duration_ms = self.min_duration_on/1000
                                                # ...
                                                # See silero documentation for full option list
                                                )
        return [SegmentX(i['start'] / sample_rate, i['end'] / sample_rate, "UNKNOWN") for i in timestamps]

    @staticmethod
    def preprocess_audio(audio):
        return audio

    @staticmethod
    def merge_chunks(segments_list,
                     chunk_size,
                     onset: float = 0.5,
                     offset: Optional[float] = None,
                     ):
        assert chunk_size > 0
        if len(segments_list) == 0:
            print("No active speech found in audio")
            return []
        assert segments_list, "segments_list is empty."
        return Vad.merge_chunks(segments_list, chunk_size, onset, offset)
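The Silero path can be sketched the same way (hedged: chunk_size and vad_onset are required keyword arguments per __init__ above, and the raw numpy waveform from load_audio is assumed to be accepted by silero's get_speech_timestamps).

from whisperx.audio import load_audio, SAMPLE_RATE
from whisperx.vads import Silero

vad = Silero(vad_onset=0.500, chunk_size=30)
audio = load_audio("speech.wav")           # placeholder file
waveform = Silero.preprocess_audio(audio)  # returned unchanged
segments = vad({"waveform": waveform, "sample_rate": SAMPLE_RATE})
chunks = Silero.merge_chunks(segments, chunk_size=30, onset=0.500)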
74
whisperx/vads/vad.py
Normal file
@@ -0,0 +1,74 @@
from typing import Optional

import pandas as pd
from pyannote.core import Annotation, Segment


class Vad:
    def __init__(self, vad_onset):
        if not (0 < vad_onset < 1):
            raise ValueError(
                "vad_onset is a decimal value between 0 and 1."
            )

    @staticmethod
    def preprocess_audio(audio):
        pass

    # keep merge_chunks as static so it can be also used by manually assigned vad_model (see 'load_model')
    @staticmethod
    def merge_chunks(segments,
                     chunk_size,
                     onset: float,
                     offset: Optional[float]):
        """
        Merge operation described in paper
        """
        curr_end = 0
        merged_segments = []
        seg_idxs: list[tuple] = []
        speaker_idxs: list[Optional[str]] = []

        curr_start = segments[0].start
        for seg in segments:
            if seg.end - curr_start > chunk_size and curr_end - curr_start > 0:
                merged_segments.append({
                    "start": curr_start,
                    "end": curr_end,
                    "segments": seg_idxs,
                })
                curr_start = seg.start
                seg_idxs = []
                speaker_idxs = []
            curr_end = seg.end
            seg_idxs.append((seg.start, seg.end))
            speaker_idxs.append(seg.speaker)
        # add final
        merged_segments.append({
            "start": curr_start,
            "end": curr_end,
            "segments": seg_idxs,
        })

        return merged_segments

    # Unused function
    @staticmethod
    def merge_vad(vad_arr, pad_onset=0.0, pad_offset=0.0, min_duration_off=0.0, min_duration_on=0.0):
        active = Annotation()
        for k, vad_t in enumerate(vad_arr):
            region = Segment(vad_t[0] - pad_onset, vad_t[1] + pad_offset)
            active[region, k] = 1

        if pad_offset > 0.0 or pad_onset > 0.0 or min_duration_off > 0.0:
            active = active.support(collar=min_duration_off)

        # remove tracks shorter than min_duration_on
        if min_duration_on > 0:
            for segment, track in list(active.itertracks()):
                if segment.duration < min_duration_on:
                    del active[segment, track]

        active = active.for_json()
        active_segs = pd.DataFrame([x['segment'] for x in active['content']])
        return active_segs
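A small worked example of the shared Vad.merge_chunks helper, with made-up segment times chosen to show the chunking behaviour at chunk_size=30.

from whisperx.diarize import Segment as SegmentX
from whisperx.vads import Vad

segs = [SegmentX(0.0, 12.0, "UNKNOWN"), SegmentX(14.0, 35.0, "UNKNOWN")]
chunks = Vad.merge_chunks(segs, chunk_size=30, onset=0.5, offset=None)
# The second segment would push the running chunk past 30 s, so a new chunk starts:
# [{'start': 0.0, 'end': 12.0, 'segments': [(0.0, 12.0)]},
#  {'start': 14.0, 'end': 35.0, 'segments': [(14.0, 35.0)]}]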