3 Commits

Author SHA1 Message Date
429658d4cc chore: bump version to 3.4.2 2025-06-27 07:18:39 +00:00
e0833da5dc Fix: Ensure integer tensor indexing in get_wildcard_emission() 2025-06-27 09:17:44 +02:00
ffedc5cdf0 fix: speaker embedding bug (#1178)
* fix: improve handling of speaker embeddings in transcribe_task

* chore: bump version to 3.4.1
2025-06-25 13:55:20 +02:00
4 changed files with 11 additions and 4 deletions

View File

@@ -2,7 +2,7 @@
urls = { repository = "https://github.com/m-bain/whisperx" }
authors = [{ name = "Max Bain" }]
name = "whisperx"
version = "3.4.0"
version = "3.4.2"
description = "Time-Accurate Automatic Speech Recognition using Whisper."
readme = "README.md"
requires-python = ">=3.9, <3.13"

2
uv.lock generated
View File

@@ -2788,7 +2788,7 @@ wheels = [
[[package]]
name = "whisperx"
version = "3.4.0"
version = "3.4.2"
source = { editable = "." }
dependencies = [
{ name = "ctranslate2" },

View File

@@ -424,7 +424,7 @@ def get_wildcard_emission(frame_emission, tokens, blank_id):
wildcard_mask = (tokens == -1)
# Get scores for non-wildcard positions
regular_scores = frame_emission[tokens.clamp(min=0)] # clamp to avoid -1 index
regular_scores = frame_emission[tokens.clamp(min=0).long()] # clamp to avoid -1 index
# Create a mask and compute the maximum value without modifying frame_emission
max_valid_score = frame_emission.clone() # Create a copy

View File

@@ -213,12 +213,19 @@ def transcribe_task(args: dict, parser: argparse.ArgumentParser):
results = []
diarize_model = DiarizationPipeline(model_name=diarize_model_name, use_auth_token=hf_token, device=device)
for result, input_audio_path in tmp_results:
diarize_segments, speaker_embeddings = diarize_model(
diarize_result = diarize_model(
input_audio_path,
min_speakers=min_speakers,
max_speakers=max_speakers,
return_embeddings=return_speaker_embeddings
)
if return_speaker_embeddings:
diarize_segments, speaker_embeddings = diarize_result
else:
diarize_segments = diarize_result
speaker_embeddings = None
result = assign_word_speakers(diarize_segments, result, speaker_embeddings)
results.append((result, input_audio_path))
# >> Write