mirror of https://github.com/m-bain/whisperX.git
synced 2025-07-01 18:17:27 -04:00
Compare commits: afbf00018c...399010fd12
4 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 399010fd12 | |
| | d3dcb1175f | |
| | 4f99f1f67c | |
| | d700b56c9c | |
README.md (23 changed lines)
````diff
@@ -97,25 +97,6 @@ uv sync --all-extras --dev
 
 You may also need to install ffmpeg, rust etc. Follow openAI instructions here https://github.com/openai/whisper#setup.
 
-### Common Issues & Troubleshooting 🔧
-
-#### libcudnn Dependencies (GPU Users)
-
-If you're using WhisperX with GPU support and encounter errors like:
-
-- `Could not load library libcudnn_ops_infer.so.8`
-- `Unable to load any of {libcudnn_cnn.so.9.1.0, libcudnn_cnn.so.9.1, libcudnn_cnn.so.9, libcudnn_cnn.so}`
-- `libcudnn_ops_infer.so.8: cannot open shared object file: No such file or directory`
-
-This means your system is missing the CUDA Deep Neural Network library (cuDNN). This library is needed for GPU acceleration but isn't always installed by default.
-
-**Install cuDNN (example for apt based systems):**
-
-```bash
-sudo apt update
-sudo apt install libcudnn8 libcudnn8-dev -y
-```
-
````
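For the cuDNN troubleshooting steps shown in the hunk above, it can help to confirm that the libraries are actually visible after installation. A minimal check, assuming a Linux system with PyTorch already installed (these commands are illustrative and not part of the original README):

```bash
# List the cuDNN shared libraries known to the dynamic loader
ldconfig -p | grep libcudnn

# Ask PyTorch which cuDNN version it can see (prints None if cuDNN is unavailable)
python -c "import torch; print(torch.backends.cudnn.version())"
```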
```diff
 ### Speaker Diarization
 
 To **enable Speaker Diarization**, include your Hugging Face access token (read) that you can generate from [Here](https://huggingface.co/settings/tokens) after the `--hf_token` argument and accept the user agreement for the following models: [Segmentation](https://huggingface.co/pyannote/segmentation-3.0) and [Speaker-Diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1) (if you choose to use Speaker-Diarization 2.x, follow requirements [here](https://huggingface.co/pyannote/speaker-diarization) instead.)
```
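As a quick usage sketch of the paragraph above (the audio file name and token are placeholders, and `--diarize` is the WhisperX CLI flag that turns this feature on), a diarized transcription from the command line might look like:

```bash
# Illustrative invocation: transcribe audio.wav and attach speaker labels.
# Replace YOUR_HF_TOKEN with the Hugging Face access token described above.
whisperx audio.wav --diarize --hf_token YOUR_HF_TOKEN
```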
```diff
@@ -189,7 +170,7 @@ result = model.transcribe(audio, batch_size=batch_size)
 print(result["segments"]) # before alignment
 
 # delete model if low on GPU resources
-# import gc; gc.collect(); torch.cuda.empty_cache(); del model
+# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model
 
 # 2. Align whisper output
 model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
```
```diff
@@ -198,7 +179,7 @@ result = whisperx.align(result["segments"], model_a, metadata, audio, device, re
 print(result["segments"]) # after alignment
 
 # delete model if low on GPU resources
-# import gc; gc.collect(); torch.cuda.empty_cache(); del model_a
+# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model_a
 
 # 3. Assign speaker labels
 diarize_model = whisperx.diarize.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)
```
pyproject.toml

```diff
@@ -13,11 +13,11 @@ dependencies = [
     "faster-whisper>=1.1.1",
     "nltk>=3.9.1",
     "numpy>=2.0.2",
-    "onnxruntime>=1.19",
+    "onnxruntime>=1.19,<1.20.0",
     "pandas>=2.2.3",
     "pyannote-audio>=3.3.2",
-    "torch>=2.5.1",
-    "torchaudio>=2.5.1",
+    "torch<2.4.0",
+    "torchaudio",
     "transformers>=4.48.0",
 ]
```
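Because these pins change the resolved package set, the environment needs to be re-synced after the edit. A minimal sketch using the `uv sync --all-extras --dev` command that already appears as context at the top of the README hunk above:

```bash
# Re-resolve and reinstall the project environment after editing pyproject.toml
uv sync --all-extras --dev
```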