7 Commits

Author SHA1 Message Date
036b5b0717 Merge c89b4f898f into d700b56c9c 2025-06-13 15:33:03 +02:00
d700b56c9c docs: add missing torch import to Python usage example in README 2025-06-08 03:34:49 -06:00
b343241253 feat: add diarize_model arg to CLI (#1101) 2025-05-31 13:32:31 +02:00
6fe0a8784a docs: add troubleshooting section for libcudnn dependencies in README 2025-05-31 05:20:06 -06:00
c89b4f898f fix: incorrect type annotation in get_writer return value
The audio_path attribute that the __call__ method of the ResultWriter class takes is a str, not TextIO
2025-05-13 02:45:33 +02:00
5012650d0f chore: update lockfile 2025-05-03 16:25:43 +02:00
108bd0c400 chore: add lockfile check step to CI workflows 2025-05-03 16:25:43 +02:00
8 changed files with 37 additions and 8 deletions

View File

@@ -17,6 +17,9 @@ jobs:
version: "0.5.14"
python-version: "3.9"
- name: Check if lockfile is up to date
run: uv lock --check
- name: Build package
run: uv build

View File

@@ -23,6 +23,9 @@ jobs:
version: "0.5.14"
python-version: ${{ matrix.python-version }}
- name: Check if lockfile is up to date
run: uv lock --check
- name: Install the project
run: uv sync --all-extras

View File

@@ -97,6 +97,25 @@ uv sync --all-extras --dev
You may also need to install ffmpeg, rust, etc. Follow the OpenAI setup instructions here: https://github.com/openai/whisper#setup.
### Common Issues & Troubleshooting 🔧
#### libcudnn Dependencies (GPU Users)
If you're using WhisperX with GPU support and encounter errors like:
- `Could not load library libcudnn_ops_infer.so.8`
- `Unable to load any of {libcudnn_cnn.so.9.1.0, libcudnn_cnn.so.9.1, libcudnn_cnn.so.9, libcudnn_cnn.so}`
- `libcudnn_ops_infer.so.8: cannot open shared object file: No such file or directory`
This means your system is missing the CUDA Deep Neural Network library (cuDNN). This library is needed for GPU acceleration but isn't always installed by default.
**Install cuDNN (example for apt-based systems):**
```bash
sudo apt update
sudo apt install libcudnn8 libcudnn8-dev -y
```
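After installing, a quick check from Python can confirm that the runtime actually sees cuDNN. This is a minimal sketch, assuming a CUDA-enabled PyTorch build is installed:
```python
# Minimal sanity check (assumes a CUDA-enabled PyTorch build).
import torch

print("CUDA available:", torch.cuda.is_available())
print("cuDNN available:", torch.backends.cudnn.is_available())
print("cuDNN version:", torch.backends.cudnn.version())  # None if cuDNN cannot be loaded
```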
### Speaker Diarization
To **enable Speaker Diarization**, pass a Hugging Face access token (with read permissions), which you can generate [here](https://huggingface.co/settings/tokens), via the `--hf_token` argument, and accept the user agreement for the following models: [Segmentation](https://huggingface.co/pyannote/segmentation-3.0) and [Speaker-Diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1) (if you choose to use Speaker-Diarization 2.x, follow the requirements [here](https://huggingface.co/pyannote/speaker-diarization) instead).
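As a companion to the `--diarize_model` flag added in this PR, a minimal Python sketch of selecting the diarization model explicitly (assumes a valid Hugging Face read token and that the model's user agreement has been accepted):
```python
import whisperx

# model_name is optional; when omitted (None), the pipeline falls back to
# pyannote/speaker-diarization-3.1, matching the new --diarize_model CLI default.
diarize_model = whisperx.diarize.DiarizationPipeline(
    model_name="pyannote/speaker-diarization-3.1",
    use_auth_token="YOUR_HF_TOKEN",  # placeholder: your Hugging Face read token
    device="cpu",
)
```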
@@ -170,7 +189,7 @@ result = model.transcribe(audio, batch_size=batch_size)
print(result["segments"]) # before alignment
# delete model if low on GPU resources
# import gc; gc.collect(); torch.cuda.empty_cache(); del model
# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model
# 2. Align whisper output
model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
@@ -179,7 +198,7 @@ result = whisperx.align(result["segments"], model_a, metadata, audio, device, re
print(result["segments"]) # after alignment
# delete model if low on GPU resources
# import gc; gc.collect(); torch.cuda.empty_cache(); del model_a
# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model_a
# 3. Assign speaker labels
diarize_model = whisperx.diarize.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)

uv.lock generated
View File

@@ -2787,7 +2787,7 @@ wheels = [
[[package]]
name = "whisperx"
version = "3.3.3"
version = "3.3.4"
source = { editable = "." }
dependencies = [
{ name = "ctranslate2" },

View File

@@ -43,6 +43,7 @@ def cli():
parser.add_argument("--diarize", action="store_true", help="Apply diarization to assign speaker labels to each segment/word")
parser.add_argument("--min_speakers", default=None, type=int, help="Minimum number of speakers to in audio file")
parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers to in audio file")
parser.add_argument("--diarize_model", default="pyannote/speaker-diarization-3.1", type=str, help="Name of the speaker diarization model to use")
parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")

View File

@@ -11,13 +11,14 @@ from whisperx.types import TranscriptionResult, AlignedTranscriptionResult
class DiarizationPipeline:
def __init__(
self,
model_name="pyannote/speaker-diarization-3.1",
model_name=None,
use_auth_token=None,
device: Optional[Union[str, torch.device]] = "cpu",
):
if isinstance(device, str):
device = torch.device(device)
self.model = Pipeline.from_pretrained(model_name, use_auth_token=use_auth_token).to(device)
model_config = model_name or "pyannote/speaker-diarization-3.1"
self.model = Pipeline.from_pretrained(model_config, use_auth_token=use_auth_token).to(device)
def __call__(
self,

View File

@@ -57,6 +57,7 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
diarize: bool = args.pop("diarize")
min_speakers: int = args.pop("min_speakers")
max_speakers: int = args.pop("max_speakers")
diarize_model_name: str = args.pop("diarize_model")
print_progress: bool = args.pop("print_progress")
if args["language"] is not None:
@@ -204,8 +205,9 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
)
tmp_results = results
print(">>Performing diarization...")
print(">>Using model:", diarize_model_name)
results = []
diarize_model = DiarizationPipeline(use_auth_token=hf_token, device=device)
diarize_model = DiarizationPipeline(model_name=diarize_model_name, use_auth_token=hf_token, device=device)
for result, input_audio_path in tmp_results:
diarize_segments = diarize_model(
input_audio_path, min_speakers=min_speakers, max_speakers=max_speakers

View File

@@ -410,7 +410,7 @@ class WriteJSON(ResultWriter):
def get_writer(
output_format: str, output_dir: str
) -> Callable[[dict, TextIO, dict], None]:
) -> Callable[[dict, str, dict], None]:
writers = {
"txt": WriteTXT,
"vtt": WriteVTT,
@@ -425,7 +425,7 @@ def get_writer(
if output_format == "all":
all_writers = [writer(output_dir) for writer in writers.values()]
def write_all(result: dict, file: TextIO, options: dict):
def write_all(result: dict, file: str, options: dict):
for writer in all_writers:
writer(result, file, options)
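
For context on the annotation fix, a minimal usage sketch, assuming the file changed here is `whisperx/utils.py` and using a toy result dict:
```python
from whisperx.utils import get_writer

# get_writer returns a callable taking (result, audio_path, options); the second
# argument is the input audio path as a str, not an open TextIO handle, which is
# what the corrected annotation reflects.
writer = get_writer("txt", output_dir=".")

toy_result = {"segments": [{"start": 0.0, "end": 1.0, "text": "hello"}], "language": "en"}
writer(toy_result, "audio.wav", {})  # writes ./audio.txt; the txt writer ignores options
```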