1 Commit

Author: CHEN, CHUN <jim60105@gmail.com>
SHA1: 3dfe6c6ea0
Date: 2025-06-14 00:23:19 +08:00

docs: document Docker image usage for WhisperX in README

- Add a new section to the README describing how to use pre-built Docker images for WhisperX with example commands.
- Provide a link to the Docker image repository for available tags.

Signed-off-by: CHEN, CHUN <jim60105@gmail.com>

# Conflicts:
#	README.md
8 changed files with 1592 additions and 1636 deletions

View File

@@ -97,6 +97,18 @@ uv sync --all-extras --dev
You may also need to install ffmpeg, rust etc. Follow openAI instructions here https://github.com/openai/whisper#setup.
### 3. Docker Images
Execute pre-built WhisperX container images:
```bash
docker run --gpus all -it -v ".:/app" ghcr.io/jim60105/whisperx:base-en -- --output_format srt audio.mp3
docker run --gpus all -it -v ".:/app" ghcr.io/jim60105/whisperx:large-v3-ja -- --output_format srt audio.mp3
docker run --gpus all -it -v ".:/app" ghcr.io/jim60105/whisperx:no_model -- --model tiny --language en --output_format srt audio.mp3
```
Review the tag lists in this repository: [jim60105/docker-whisperX](https://github.com/jim60105/docker-whisperX)
### Common Issues & Troubleshooting 🔧
#### libcudnn Dependencies (GPU Users)
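For readers comparing the container invocation with direct library use, here is a minimal sketch of roughly what the `--model tiny --language en` example above does via the Python API; it assumes the standard whisperx calls described in the upstream README (whisperx.load_model, whisperx.load_audio, model.transcribe) and is not part of this commit.

```python
import whisperx

# Rough equivalent of the `--model tiny --language en` container example above.
# Device, compute_type and batch_size values are illustrative assumptions.
device = "cuda"
model = whisperx.load_model("tiny", device, compute_type="float16", language="en")

audio = whisperx.load_audio("audio.mp3")
result = model.transcribe(audio, batch_size=16)
print(result["segments"])  # segment-level transcript; the CLI then writes the SRT output
```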

View File

@@ -2,7 +2,7 @@
urls = { repository = "https://github.com/m-bain/whisperx" }
authors = [{ name = "Max Bain" }]
name = "whisperx"
-version = "3.4.2"
+version = "3.3.4"
description = "Time-Accurate Automatic Speech Recognition using Whisper."
readme = "README.md"
requires-python = ">=3.9, <3.13"

uv.lock (generated): 3095 changed lines

File diff suppressed because it is too large.

View File

@@ -44,7 +44,6 @@ def cli():
parser.add_argument("--min_speakers", default=None, type=int, help="Minimum number of speakers to in audio file")
parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers to in audio file")
parser.add_argument("--diarize_model", default="pyannote/speaker-diarization-3.1", type=str, help="Name of the speaker diarization model to use")
-parser.add_argument("--speaker_embeddings", action="store_true", help="Include speaker embeddings in JSON output (only works with --diarize)")
parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")

View File

@@ -424,7 +424,7 @@ def get_wildcard_emission(frame_emission, tokens, blank_id):
wildcard_mask = (tokens == -1)
# Get scores for non-wildcard positions
-regular_scores = frame_emission[tokens.clamp(min=0).long()] # clamp to avoid -1 index
+regular_scores = frame_emission[tokens.clamp(min=0)] # clamp to avoid -1 index
# Create a mask and compute the maximum value without modifying frame_emission
max_valid_score = frame_emission.clone() # Create a copy
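A hedged toy illustration of the clamp-and-mask idiom visible in this hunk; the values are invented, and the real get_wildcard_emission also handles blank_id and derives the wildcard score from a masked clone rather than the plain max used here.

```python
import torch

# One frame of per-token scores and a token sequence where -1 marks a wildcard.
frame_emission = torch.tensor([0.1, 0.7, 0.2, 0.9])
tokens = torch.tensor([2, -1, 3])

wildcard_mask = tokens == -1
# Clamp -1 to 0 so the index is valid; wildcard entries are overwritten below.
regular_scores = frame_emission[tokens.clamp(min=0)]
# Simplification: let a wildcard match the best-scoring token in the frame.
scores = torch.where(wildcard_mask, frame_emission.max(), regular_scores)
print(scores)  # tensor([0.2000, 0.9000, 0.9000])
```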

View File

@@ -26,81 +26,25 @@ class DiarizationPipeline:
num_speakers: Optional[int] = None,
min_speakers: Optional[int] = None,
max_speakers: Optional[int] = None,
-return_embeddings: bool = False,
-) -> Union[tuple[pd.DataFrame, Optional[dict[str, list[float]]]], pd.DataFrame]:
-"""
-Perform speaker diarization on audio.
-Args:
-    audio: Path to audio file or audio array
-    num_speakers: Exact number of speakers (if known)
-    min_speakers: Minimum number of speakers to detect
-    max_speakers: Maximum number of speakers to detect
-    return_embeddings: Whether to return speaker embeddings
-Returns:
-    If return_embeddings is True:
-        Tuple of (diarization dataframe, speaker embeddings dictionary)
-    Otherwise:
-        Just the diarization dataframe
-"""
+):
if isinstance(audio, str):
    audio = load_audio(audio)
audio_data = {
    'waveform': torch.from_numpy(audio[None, :]),
    'sample_rate': SAMPLE_RATE
}
-if return_embeddings:
-    diarization, embeddings = self.model(
-        audio_data,
-        num_speakers=num_speakers,
-        min_speakers=min_speakers,
-        max_speakers=max_speakers,
-        return_embeddings=True,
-    )
-else:
-    diarization = self.model(
-        audio_data,
-        num_speakers=num_speakers,
-        min_speakers=min_speakers,
-        max_speakers=max_speakers,
-    )
-    embeddings = None
-diarize_df = pd.DataFrame(diarization.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])
+segments = self.model(audio_data, num_speakers = num_speakers, min_speakers=min_speakers, max_speakers=max_speakers)
+diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])
diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)
diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)
-if return_embeddings and embeddings is not None:
-    speaker_embeddings = {speaker: embeddings[s].tolist() for s, speaker in enumerate(diarization.labels())}
-    return diarize_df, speaker_embeddings
-# For backwards compatibility
-if return_embeddings:
-    return diarize_df, None
-else:
-    return diarize_df
+return diarize_df
def assign_word_speakers(
diarize_df: pd.DataFrame,
transcript_result: Union[AlignedTranscriptionResult, TranscriptionResult],
-speaker_embeddings: Optional[dict[str, list[float]]] = None,
-fill_nearest: bool = False,
-) -> Union[AlignedTranscriptionResult, TranscriptionResult]:
-"""
-Assign speakers to words and segments in the transcript.
-Args:
-    diarize_df: Diarization dataframe from DiarizationPipeline
-    transcript_result: Transcription result to augment with speaker labels
-    speaker_embeddings: Optional dictionary mapping speaker IDs to embedding vectors
-    fill_nearest: If True, assign speakers even when there's no direct time overlap
-Returns:
-    Updated transcript_result with speaker assignments and optionally embeddings
-"""
+fill_nearest=False,
+) -> dict:
transcript_segments = transcript_result["segments"]
for seg in transcript_segments:
# assign speaker to segment (if any)
@@ -132,10 +76,6 @@ def assign_word_speakers
speaker = dia_tmp.groupby("speaker")["intersection"].sum().sort_values(ascending=False).index[0]
word["speaker"] = speaker
-# Add speaker embeddings to the result if provided
-if speaker_embeddings is not None:
-    transcript_result["speaker_embeddings"] = speaker_embeddings
return transcript_result
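To show the call pattern after this simplification, a minimal sketch mirroring the transcribe.py hunk further down; the token, device, and speaker counts are placeholders, and `result` stands in for an already transcribed and aligned output.

```python
import whisperx
from whisperx.diarize import DiarizationPipeline, assign_word_speakers

diarize_model = DiarizationPipeline(use_auth_token="hf_xxx", device="cuda")  # placeholder token

audio = whisperx.load_audio("audio.mp3")
result = {"segments": []}  # stand-in for a real transcription/alignment result

# After this change the pipeline returns only the diarization DataFrame;
# there is no (DataFrame, embeddings) tuple variant any more.
diarize_df = diarize_model(audio, min_speakers=2, max_speakers=4)
result = assign_word_speakers(diarize_df, result)
```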

View File

@@ -59,10 +59,6 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
max_speakers: int = args.pop("max_speakers")
diarize_model_name: str = args.pop("diarize_model")
print_progress: bool = args.pop("print_progress")
-return_speaker_embeddings: bool = args.pop("speaker_embeddings")
-if return_speaker_embeddings and not diarize:
-    warnings.warn("--speaker_embeddings has no effect without --diarize")
if args["language"] is not None:
    args["language"] = args["language"].lower()
@@ -213,20 +209,10 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
results = []
diarize_model = DiarizationPipeline(model_name=diarize_model_name, use_auth_token=hf_token, device=device)
for result, input_audio_path in tmp_results:
-    diarize_result = diarize_model(
-        input_audio_path,
-        min_speakers=min_speakers,
-        max_speakers=max_speakers,
-        return_embeddings=return_speaker_embeddings
-    )
-    if return_speaker_embeddings:
-        diarize_segments, speaker_embeddings = diarize_result
-    else:
-        diarize_segments = diarize_result
-        speaker_embeddings = None
-    result = assign_word_speakers(diarize_segments, result, speaker_embeddings)
+    diarize_segments = diarize_model(
+        input_audio_path, min_speakers=min_speakers, max_speakers=max_speakers
+    )
+    result = assign_word_speakers(diarize_segments, result)
    results.append((result, input_audio_path))
# >> Write
for result, audio_path in results:

View File

@@ -52,3 +52,23 @@ class Vad:
return merged_segments

# Unused function
@staticmethod
def merge_vad(vad_arr, pad_onset=0.0, pad_offset=0.0, min_duration_off=0.0, min_duration_on=0.0):
    active = Annotation()
    for k, vad_t in enumerate(vad_arr):
        region = Segment(vad_t[0] - pad_onset, vad_t[1] + pad_offset)
        active[region, k] = 1

    if pad_offset > 0.0 or pad_onset > 0.0 or min_duration_off > 0.0:
        active = active.support(collar=min_duration_off)

    # remove tracks shorter than min_duration_on
    if min_duration_on > 0:
        for segment, track in list(active.itertracks()):
            if segment.duration < min_duration_on:
                del active[segment, track]

    active = active.for_json()
    active_segs = pd.DataFrame([x['segment'] for x in active['content']])
    return active_segs
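A hedged usage sketch for the newly added (and explicitly unused) helper; the import path is an assumption for illustration, not taken from the diff.

```python
import numpy as np
from whisperx.vads.vad import Vad  # assumed module path for the Vad class in this diff

# (start, end) voice-activity spans in seconds
vad_arr = np.array([[0.50, 1.20], [1.25, 2.00], [5.00, 5.04]])

segs = Vad.merge_vad(
    vad_arr,
    pad_onset=0.05, pad_offset=0.05,  # widen each span slightly
    min_duration_off=0.10,            # close gaps shorter than 100 ms
    min_duration_on=0.10,             # drop spans shorter than 100 ms
)
print(segs)  # DataFrame of merged spans with 'start' and 'end' columns
```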