Mirror of https://github.com/m-bain/whisperX.git

Compare commits: v3.3.4...429658d4cc (12 commits)
Commits in this range: 429658d4cc, e0833da5dc, ffedc5cdf0, b93e9b6f57, 844736e4e4, 220fec9aea,
1631c3040f, d700b56c9c, b343241253, 6fe0a8784a, 5012650d0f, 108bd0c400

.github/workflows/build-and-release.yml (3 changed lines)
@@ -17,6 +17,9 @@ jobs:
           version: "0.5.14"
           python-version: "3.9"
 
+      - name: Check if lockfile is up to date
+        run: uv lock --check
+
       - name: Build package
         run: uv build
 

.github/workflows/python-compatibility.yml (3 changed lines)
@@ -23,6 +23,9 @@ jobs:
           version: "0.5.14"
           python-version: ${{ matrix.python-version }}
 
+      - name: Check if lockfile is up to date
+        run: uv lock --check
+
       - name: Install the project
         run: uv sync --all-extras
 
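
Both workflows gain the same lockfile guard. As a hedged local sketch (assuming uv 0.5.x is installed), the new CI step can be reproduced before pushing:

```bash
# uv lock --check exits non-zero when uv.lock no longer matches pyproject.toml
uv lock --check
# the workflows then continue with "uv build" or "uv sync --all-extras"
uv build
```
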

README.md (23 changed lines)
@@ -97,6 +97,25 @@ uv sync --all-extras --dev
 
 You may also need to install ffmpeg, rust etc. Follow openAI instructions here https://github.com/openai/whisper#setup.
 
+### Common Issues & Troubleshooting 🔧
+
+#### libcudnn Dependencies (GPU Users)
+
+If you're using WhisperX with GPU support and encounter errors like:
+
+- `Could not load library libcudnn_ops_infer.so.8`
+- `Unable to load any of {libcudnn_cnn.so.9.1.0, libcudnn_cnn.so.9.1, libcudnn_cnn.so.9, libcudnn_cnn.so}`
+- `libcudnn_ops_infer.so.8: cannot open shared object file: No such file or directory`
+
+This means your system is missing the CUDA Deep Neural Network library (cuDNN). This library is needed for GPU acceleration but isn't always installed by default.
+
+**Install cuDNN (example for apt based systems):**
+
+```bash
+sudo apt update
+sudo apt install libcudnn8 libcudnn8-dev -y
+```
+
 ### Speaker Diarization
 
 To **enable Speaker Diarization**, include your Hugging Face access token (read) that you can generate from [Here](https://huggingface.co/settings/tokens) after the `--hf_token` argument and accept the user agreement for the following models: [Segmentation](https://huggingface.co/pyannote/segmentation-3.0) and [Speaker-Diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1) (if you choose to use Speaker-Diarization 2.x, follow requirements [here](https://huggingface.co/pyannote/speaker-diarization) instead.)
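
To make that README paragraph concrete, a minimal invocation sketch using the existing `--diarize` and `--hf_token` flags (file name and token value are placeholders) is:

```bash
whisperx audio.wav --diarize --hf_token YOUR_HF_TOKEN
```
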

@@ -170,7 +189,7 @@ result = model.transcribe(audio, batch_size=batch_size)
 print(result["segments"]) # before alignment
 
 # delete model if low on GPU resources
-# import gc; gc.collect(); torch.cuda.empty_cache(); del model
+# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model
 
 # 2. Align whisper output
 model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)

@@ -179,7 +198,7 @@ result = whisperx.align(result["segments"], model_a, metadata, audio, device, re
 print(result["segments"]) # after alignment
 
 # delete model if low on GPU resources
-# import gc; gc.collect(); torch.cuda.empty_cache(); del model_a
+# import gc; import torch; gc.collect(); torch.cuda.empty_cache(); del model_a
 
 # 3. Assign speaker labels
 diarize_model = whisperx.diarize.DiarizationPipeline(use_auth_token=YOUR_HF_TOKEN, device=device)

pyproject.toml
@@ -2,7 +2,7 @@
 urls = { repository = "https://github.com/m-bain/whisperx" }
 authors = [{ name = "Max Bain" }]
 name = "whisperx"
-version = "3.3.4"
+version = "3.4.2"
 description = "Time-Accurate Automatic Speech Recognition using Whisper."
 readme = "README.md"
 requires-python = ">=3.9, <3.13"

@@ -43,6 +43,8 @@ def cli():
     parser.add_argument("--diarize", action="store_true", help="Apply diarization to assign speaker labels to each segment/word")
     parser.add_argument("--min_speakers", default=None, type=int, help="Minimum number of speakers to in audio file")
     parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers to in audio file")
+    parser.add_argument("--diarize_model", default="pyannote/speaker-diarization-3.1", type=str, help="Name of the speaker diarization model to use")
+    parser.add_argument("--speaker_embeddings", action="store_true", help="Include speaker embeddings in JSON output (only works with --diarize)")
 
     parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
     parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
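
The two added flags plug into the existing diarization options. A hedged end-to-end CLI sketch (file name and token are placeholders) might look like:

```bash
whisperx audio.wav --diarize --hf_token YOUR_HF_TOKEN \
    --diarize_model pyannote/speaker-diarization-3.1 \
    --min_speakers 1 --max_speakers 4 \
    --speaker_embeddings
```

Per the help text, `--speaker_embeddings` only affects the JSON output and only has an effect when `--diarize` is set.
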

@@ -424,7 +424,7 @@ def get_wildcard_emission(frame_emission, tokens, blank_id):
     wildcard_mask = (tokens == -1)
 
     # Get scores for non-wildcard positions
-    regular_scores = frame_emission[tokens.clamp(min=0)] # clamp to avoid -1 index
+    regular_scores = frame_emission[tokens.clamp(min=0).long()] # clamp to avoid -1 index
 
     # Create a mask and compute the maximum value without modifying frame_emission
     max_valid_score = frame_emission.clone() # Create a copy
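
The only change here is the `.long()` cast. A minimal self-contained sketch of why it matters, assuming `tokens` can arrive as a floating-point tensor (which the `-1` wildcard handling suggests):

```python
import torch

frame_emission = torch.randn(10)           # emission scores for one frame
tokens = torch.tensor([3.0, -1.0, 7.0])    # float dtype; -1 marks a wildcard

# frame_emission[tokens.clamp(min=0)] would raise an IndexError here,
# because PyTorch only accepts integer (or bool) tensors as indices.
regular_scores = frame_emission[tokens.clamp(min=0).long()]
print(regular_scores.shape)  # torch.Size([3])
```
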

whisperx/diarize.py
@@ -11,13 +11,14 @@ from whisperx.types import TranscriptionResult, AlignedTranscriptionResult
 class DiarizationPipeline:
     def __init__(
         self,
-        model_name="pyannote/speaker-diarization-3.1",
+        model_name=None,
         use_auth_token=None,
         device: Optional[Union[str, torch.device]] = "cpu",
     ):
         if isinstance(device, str):
             device = torch.device(device)
-        self.model = Pipeline.from_pretrained(model_name, use_auth_token=use_auth_token).to(device)
+        model_config = model_name or "pyannote/speaker-diarization-3.1"
+        self.model = Pipeline.from_pretrained(model_config, use_auth_token=use_auth_token).to(device)
 
     def __call__(
         self,

@@ -25,25 +26,81 @@ class DiarizationPipeline:
         num_speakers: Optional[int] = None,
         min_speakers: Optional[int] = None,
         max_speakers: Optional[int] = None,
-    ):
+        return_embeddings: bool = False,
+    ) -> Union[tuple[pd.DataFrame, Optional[dict[str, list[float]]]], pd.DataFrame]:
+        """
+        Perform speaker diarization on audio.
+
+        Args:
+            audio: Path to audio file or audio array
+            num_speakers: Exact number of speakers (if known)
+            min_speakers: Minimum number of speakers to detect
+            max_speakers: Maximum number of speakers to detect
+            return_embeddings: Whether to return speaker embeddings
+
+        Returns:
+            If return_embeddings is True:
+                Tuple of (diarization dataframe, speaker embeddings dictionary)
+            Otherwise:
+                Just the diarization dataframe
+        """
         if isinstance(audio, str):
             audio = load_audio(audio)
         audio_data = {
             'waveform': torch.from_numpy(audio[None, :]),
             'sample_rate': SAMPLE_RATE
         }
-        segments = self.model(audio_data, num_speakers = num_speakers, min_speakers=min_speakers, max_speakers=max_speakers)
-        diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])
+
+        if return_embeddings:
+            diarization, embeddings = self.model(
+                audio_data,
+                num_speakers=num_speakers,
+                min_speakers=min_speakers,
+                max_speakers=max_speakers,
+                return_embeddings=True,
+            )
+        else:
+            diarization = self.model(
+                audio_data,
+                num_speakers=num_speakers,
+                min_speakers=min_speakers,
+                max_speakers=max_speakers,
+            )
+            embeddings = None
+
+        diarize_df = pd.DataFrame(diarization.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])
         diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)
         diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)
-        return diarize_df
+
+        if return_embeddings and embeddings is not None:
+            speaker_embeddings = {speaker: embeddings[s].tolist() for s, speaker in enumerate(diarization.labels())}
+            return diarize_df, speaker_embeddings
+
+        # For backwards compatibility
+        if return_embeddings:
+            return diarize_df, None
+        else:
+            return diarize_df
 
 
 def assign_word_speakers(
     diarize_df: pd.DataFrame,
     transcript_result: Union[AlignedTranscriptionResult, TranscriptionResult],
-    fill_nearest=False,
-) -> dict:
+    speaker_embeddings: Optional[dict[str, list[float]]] = None,
+    fill_nearest: bool = False,
+) -> Union[AlignedTranscriptionResult, TranscriptionResult]:
+    """
+    Assign speakers to words and segments in the transcript.
+
+    Args:
+        diarize_df: Diarization dataframe from DiarizationPipeline
+        transcript_result: Transcription result to augment with speaker labels
+        speaker_embeddings: Optional dictionary mapping speaker IDs to embedding vectors
+        fill_nearest: If True, assign speakers even when there's no direct time overlap
+
+    Returns:
+        Updated transcript_result with speaker assignments and optionally embeddings
+    """
     transcript_segments = transcript_result["segments"]
     for seg in transcript_segments:
         # assign speaker to segment (if any)

@@ -74,8 +131,12 @@ def assign_word_speakers(
                     # sum over speakers
                     speaker = dia_tmp.groupby("speaker")["intersection"].sum().sort_values(ascending=False).index[0]
                     word["speaker"] = speaker
 
-    return transcript_result
+    # Add speaker embeddings to the result if provided
+    if speaker_embeddings is not None:
+        transcript_result["speaker_embeddings"] = speaker_embeddings
+
+    return transcript_result
 
 
 class Segment:
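
Taken together, the new diarize.py API can be exercised as in the following sketch. This is a hedged usage example based only on the signatures shown in these hunks: the audio path and token are placeholders, and `result` stands for the aligned transcription dict produced by the README's transcribe/align steps above.

```python
import whisperx

device = "cuda"
audio = whisperx.load_audio("audio.wav")

# model_name=None now falls back to "pyannote/speaker-diarization-3.1"
diarize_model = whisperx.diarize.DiarizationPipeline(use_auth_token="YOUR_HF_TOKEN", device=device)

# With return_embeddings=True the pipeline returns (dataframe, {speaker: embedding})
diarize_df, speaker_embeddings = diarize_model(
    audio, min_speakers=1, max_speakers=4, return_embeddings=True
)

# result: the aligned transcription from the earlier whisperx.align(...) step.
# Passing the embeddings through stores them under result["speaker_embeddings"].
result = whisperx.diarize.assign_word_speakers(diarize_df, result, speaker_embeddings)
```
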

@@ -57,7 +57,12 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
     diarize: bool = args.pop("diarize")
     min_speakers: int = args.pop("min_speakers")
     max_speakers: int = args.pop("max_speakers")
+    diarize_model_name: str = args.pop("diarize_model")
     print_progress: bool = args.pop("print_progress")
+    return_speaker_embeddings: bool = args.pop("speaker_embeddings")
+
+    if return_speaker_embeddings and not diarize:
+        warnings.warn("--speaker_embeddings has no effect without --diarize")
 
     if args["language"] is not None:
         args["language"] = args["language"].lower()

@@ -204,13 +209,24 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
             )
         tmp_results = results
         print(">>Performing diarization...")
+        print(">>Using model:", diarize_model_name)
         results = []
-        diarize_model = DiarizationPipeline(use_auth_token=hf_token, device=device)
+        diarize_model = DiarizationPipeline(model_name=diarize_model_name, use_auth_token=hf_token, device=device)
         for result, input_audio_path in tmp_results:
-            diarize_segments = diarize_model(
-                input_audio_path, min_speakers=min_speakers, max_speakers=max_speakers
+            diarize_result = diarize_model(
+                input_audio_path,
+                min_speakers=min_speakers,
+                max_speakers=max_speakers,
+                return_embeddings=return_speaker_embeddings
             )
-            result = assign_word_speakers(diarize_segments, result)
+
+            if return_speaker_embeddings:
+                diarize_segments, speaker_embeddings = diarize_result
+            else:
+                diarize_segments = diarize_result
+                speaker_embeddings = None
+
+            result = assign_word_speakers(diarize_segments, result, speaker_embeddings)
             results.append((result, input_audio_path))
     # >> Write
     for result, audio_path in results: