# whisperX/whisperx/asr.py
import os
from typing import List, Optional, Union
from dataclasses import replace
import ctranslate2
import faster_whisper
import numpy as np
import torch
from faster_whisper.tokenizer import Tokenizer
from faster_whisper.transcribe import TranscriptionOptions, get_ctranslate2_storage
from transformers import Pipeline
from transformers.pipelines.pt_utils import PipelineIterator
from .audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
from .types import SingleSegment, TranscriptionResult
from .vads import Vad, Silero, Pyannote
def find_numeral_symbol_tokens(tokenizer):
"""
    Finds tokens that represent numerals and symbols.
"""
numeral_symbol_tokens = []
for i in range(tokenizer.eot):
token = tokenizer.decode([i]).removeprefix(" ")
has_numeral_symbol = any(c in "0123456789%" for c in token)
if has_numeral_symbol:
numeral_symbol_tokens.append(i)
return numeral_symbol_tokens
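
# Usage sketch (illustrative): the ids returned here are typically merged into the
# transcription options' suppress_tokens, as transcribe() does below when
# suppress_numerals is enabled, e.g.:
#     numeral_tokens = find_numeral_symbol_tokens(tokenizer)
#     options = replace(options, suppress_tokens=list(set(options.suppress_tokens + numeral_tokens)))
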
class WhisperModel(faster_whisper.WhisperModel):
"""
Wrapper around faster-whisper's WhisperModel to enable batched inference.
Currently, it only supports non-timestamp mode and a fixed prompt for all samples in a batch.
"""
def generate_segment_batched(
self,
features: np.ndarray,
tokenizer: Tokenizer,
options: TranscriptionOptions,
encoder_output=None,
):
"""
Generates transcription for a batch of audio segments.
Args:
features: The input audio features.
tokenizer: The tokenizer used to decode the generated tokens.
options: Transcription options.
encoder_output: Output from the encoder model.
Returns:
            The decoded transcription texts, one string per segment in the batch.
"""
batch_size = features.shape[0]
# Initialize tokens and prompt for the generation process.
all_tokens = []
prompt_reset_since = 0
# Check if an initial prompt is provided and handle it.
if options.initial_prompt is not None:
initial_prompt = " " + options.initial_prompt.strip()
initial_prompt_tokens = tokenizer.encode(initial_prompt)
all_tokens.extend(initial_prompt_tokens)
# Prepare the prompt for the current batch.
previous_tokens = all_tokens[prompt_reset_since:]
prompt = self.get_prompt(
tokenizer,
previous_tokens,
without_timestamps=options.without_timestamps,
prefix=options.prefix,
)
# Encode the features to obtain the encoder output.
encoder_output = self.encode(features)
# Determine the maximum initial timestamp index based on the options.
max_initial_timestamp_index = int(
round(options.max_initial_timestamp / self.time_precision)
)
# Generate the transcription result for the batch.
result = self.model.generate(
encoder_output,
[prompt] * batch_size,
beam_size=options.beam_size,
patience=options.patience,
length_penalty=options.length_penalty,
max_length=self.max_length,
suppress_blank=options.suppress_blank,
suppress_tokens=options.suppress_tokens,
)
# Extract the token sequences from the result.
tokens_batch = [x.sequences_ids[0] for x in result]
# Define an inner function to decode the tokens for each batch.
def decode_batch(tokens: List[List[int]]) -> str:
res = []
for tk in tokens:
res.append([token for token in tk if token < tokenizer.eot])
return tokenizer.tokenizer.decode_batch(res)
# Decode the tokens to get the transcription text.
text = decode_batch(tokens_batch)
return text
def encode(self, features: np.ndarray) -> ctranslate2.StorageView:
"""
Encodes the audio features using the CTranslate2 storage.
When the model is running on multiple GPUs, the encoder output should be moved
to the CPU since we don't know which GPU will handle the next job.
"""
# When the model is running on multiple GPUs, the encoder output should be moved to the CPU.
to_cpu = self.model.device == "cuda" and len(self.model.device_index) > 1
        # If a single segment was passed (2-D features), add a batch dimension so the input is 3-D.
if len(features.shape) == 2:
features = np.expand_dims(features, 0)
features = get_ctranslate2_storage(features)
# call the model
return self.model.encode(features, to_cpu=to_cpu)
def get_iterator(
self,
inputs,
num_workers: int,
batch_size: int,
preprocess_params: dict,
forward_params: dict,
postprocess_params: dict,
):
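        """
        Build the preprocess -> forward -> postprocess iterator chain used by the
        Hugging Face Pipeline machinery. Batches are formed by stacking each item's
        'inputs' tensor along a new batch dimension (see the `stack` collate function below).
        """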
dataset = PipelineIterator(inputs, self.preprocess, preprocess_params)
if "TOKENIZERS_PARALLELISM" not in os.environ:
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# TODO hack by collating feature_extractor and image_processor
def stack(items):
return {'inputs': torch.stack([x['inputs'] for x in items])}
dataloader = torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=stack)
model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size)
final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
return final_iterator
def transcribe(
self,
audio: Union[str, np.ndarray],
batch_size: Optional[int] = None,
num_workers=0,
language: Optional[str] = None,
task: Optional[str] = None,
chunk_size=30,
print_progress=False,
combined_progress=False,
verbose=False,
) -> TranscriptionResult:
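        """
        Transcribe an audio file or waveform.

        Args:
            audio: Path to an audio file, or a waveform (float32 array at SAMPLE_RATE).
            batch_size: Number of VAD segments decoded per batch; defaults to the
                batch size the pipeline was created with.
            num_workers: Number of DataLoader workers used to prepare batches.
            language: Language code to use; detected from the audio when neither this
                argument nor a preset tokenizer language is available.
            task: "transcribe" or "translate"; defaults to the tokenizer's task or "transcribe".
            chunk_size: Maximum duration, in seconds, of each merged VAD chunk.
            print_progress: If True, print percentage progress per decoded batch.
            combined_progress: If True, scale the printed progress to 0-50%, leaving the
                remaining half for a subsequent stage (e.g. alignment).
            verbose: If True, print each segment's text with its start/end timestamps.

        Returns:
            A TranscriptionResult dict with the transcribed "segments" and the "language" used.
        """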
if isinstance(audio, str):
audio = load_audio(audio)
def data(audio, segments):
for seg in segments:
f1 = int(seg['start'] * SAMPLE_RATE)
f2 = int(seg['end'] * SAMPLE_RATE)
# print(f2-f1)
yield {'inputs': audio[f1:f2]}
        # Pre-process audio and merge chunks as defined by the respective VAD child class.
        # If vad_model was assigned manually (see 'load_model'), fall back to the Pyannote toolkit behaviour.
if issubclass(type(self.vad_model), Vad):
waveform = self.vad_model.preprocess_audio(audio)
merge_chunks = self.vad_model.merge_chunks
else:
waveform = Pyannote.preprocess_audio(audio)
merge_chunks = Pyannote.merge_chunks
vad_segments = self.vad_model({"waveform": waveform, "sample_rate": SAMPLE_RATE})
vad_segments = merge_chunks(
vad_segments,
chunk_size,
onset=self._vad_params["vad_onset"],
offset=self._vad_params["vad_offset"],
)
if self.tokenizer is None:
language = language or self.detect_language(audio)
task = task or "transcribe"
self.tokenizer = Tokenizer(
self.model.hf_tokenizer,
self.model.model.is_multilingual,
task=task,
language=language,
)
else:
language = language or self.tokenizer.language_code
task = task or self.tokenizer.task
if task != self.tokenizer.task or language != self.tokenizer.language_code:
self.tokenizer = Tokenizer(
self.model.hf_tokenizer,
self.model.model.is_multilingual,
task=task,
language=language,
)
if self.suppress_numerals:
previous_suppress_tokens = self.options.suppress_tokens
numeral_symbol_tokens = find_numeral_symbol_tokens(self.tokenizer)
print(f"Suppressing numeral and symbol tokens")
2023-07-29 19:34:51 +02:00
new_suppressed_tokens = numeral_symbol_tokens + self.options.suppress_tokens
new_suppressed_tokens = list(set(new_suppressed_tokens))
self.options = replace(self.options, suppress_tokens=new_suppressed_tokens)
segments: List[SingleSegment] = []
batch_size = batch_size or self._batch_size
total_segments = len(vad_segments)
for idx, out in enumerate(self.__call__(data(audio, vad_segments), batch_size=batch_size, num_workers=num_workers)):
if print_progress:
base_progress = ((idx + 1) / total_segments) * 100
percent_complete = base_progress / 2 if combined_progress else base_progress
print(f"Progress: {percent_complete:.2f}%...")
text = out['text']
if batch_size in [0, 1, None]:
text = text[0]
if verbose:
print(f"Transcript: [{round(vad_segments[idx]['start'], 3)} --> {round(vad_segments[idx]['end'], 3)}] {text}")
segments.append(
{
"text": text,
"start": round(vad_segments[idx]['start'], 3),
"end": round(vad_segments[idx]['end'], 3)
}
)
# revert the tokenizer if multilingual inference is enabled
if self.preset_language is None:
self.tokenizer = None
# revert suppressed tokens if suppress_numerals is enabled
if self.suppress_numerals:
self.options = replace(self.options, suppress_tokens=previous_suppress_tokens)
return {"segments": segments, "language": language}
def detect_language(self, audio: np.ndarray) -> str:
if audio.shape[0] < N_SAMPLES:
print("Warning: audio is shorter than 30s, language detection may be inaccurate.")
model_n_mels = self.model.feat_kwargs.get("feature_size")
segment = log_mel_spectrogram(audio[: N_SAMPLES],
n_mels=model_n_mels if model_n_mels is not None else 80,
padding=0 if audio.shape[0] >= N_SAMPLES else N_SAMPLES - audio.shape[0])
encoder_output = self.model.encode(segment)
results = self.model.model.detect_language(encoder_output)
language_token, language_probability = results[0][0]
language = language_token[2:-2]
print(f"Detected language: {language} ({language_probability:.2f}) in first 30s of audio...")
return language
def load_model(
whisper_arch: str,
device: str,
device_index=0,
compute_type="float16",
asr_options: Optional[dict] = None,
language: Optional[str] = None,
    vad_model: Optional[Vad] = None,
vad_method: Optional[str] = "pyannote",
vad_options: Optional[dict] = None,
model: Optional[WhisperModel] = None,
task="transcribe",
download_root: Optional[str] = None,
local_files_only=False,
threads=4,
) -> FasterWhisperPipeline:
"""Load a Whisper model for inference.
Args:
whisper_arch - The name of the Whisper model to load.
device - The device to load the model on.
compute_type - The compute type to use for the model.
        vad_method - The vad method to use. vad_model has higher priority if it is not None.
        asr_options - A dictionary of options to use for the model.
language - The language of the model. (use English for now)
model - The WhisperModel instance to use.
download_root - The root directory to download the model to.
local_files_only - If `True`, avoid downloading the file and return the path to the local cached file if it exists.
        threads - The number of CPU threads to use per worker; this is multiplied by the number of workers.
Returns:
A Whisper pipeline.
"""
if whisper_arch.endswith(".en"):
language = "en"
model = model or WhisperModel(whisper_arch,
device=device,
device_index=device_index,
compute_type=compute_type,
download_root=download_root,
local_files_only=local_files_only,
cpu_threads=threads)
if language is not None:
tokenizer = Tokenizer(model.hf_tokenizer, model.model.is_multilingual, task=task, language=language)
else:
print("No language specified, language will be first be detected for each audio file (increases inference time).")
tokenizer = None
default_asr_options = {
"beam_size": 5,
"best_of": 5,
"patience": 1,
"length_penalty": 1,
"repetition_penalty": 1,
"no_repeat_ngram_size": 0,
"temperatures": [0.0, 0.2, 0.4, 0.6, 0.8, 1.0],
"compression_ratio_threshold": 2.4,
"log_prob_threshold": -1.0,
"no_speech_threshold": 0.6,
"condition_on_previous_text": False,
"prompt_reset_on_temperature": 0.5,
"initial_prompt": None,
"prefix": None,
"suppress_blank": True,
"suppress_tokens": [-1],
"without_timestamps": True,
"max_initial_timestamp": 0.0,
"word_timestamps": False,
"prepend_punctuations": "\"'“¿([{-",
"append_punctuations": "\"'.。,!?::”)]}、",
"multilingual": model.model.is_multilingual,
"suppress_numerals": False,
"max_new_tokens": None,
"clip_timestamps": None,
"hallucination_silence_threshold": None,
"hotwords": None,
}
if asr_options is not None:
default_asr_options.update(asr_options)
suppress_numerals = default_asr_options["suppress_numerals"]
del default_asr_options["suppress_numerals"]
default_asr_options = TranscriptionOptions(**default_asr_options)
default_vad_options = {
"chunk_size": 30, # needed by silero since binarization happens before merge_chunks
"vad_onset": 0.500,
"vad_offset": 0.363
}
if vad_options is not None:
default_vad_options.update(vad_options)
# Note: manually assigned vad_model has higher priority than vad_method!
    if vad_model is not None:
        print("Using manually assigned vad_model; vad_method is ignored.")
else:
if vad_method == "silero":
vad_model = Silero(**default_vad_options)
elif vad_method == "pyannote":
vad_model = Pyannote(torch.device(device), use_auth_token=None, **default_vad_options)
else:
raise ValueError(f"Invalid vad_method: {vad_method}")
return FasterWhisperPipeline(
model=model,
vad=vad_model,
options=default_asr_options,
tokenizer=tokenizer,
language=language,
suppress_numerals=suppress_numerals,
vad_params=default_vad_options,
)
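

# Minimal usage sketch (illustrative only, not part of the library API). It assumes a
# CUDA-capable GPU and a local audio file path passed on the command line; swap
# device/compute_type (e.g. device="cpu", compute_type="int8") to run without a GPU.
if __name__ == "__main__":
    import sys

    audio_path = sys.argv[1] if len(sys.argv) > 1 else "audio.wav"  # "audio.wav" is a hypothetical default
    pipeline = load_model("small", device="cuda", compute_type="float16")
    result = pipeline.transcribe(audio_path, batch_size=8, print_progress=True)
    print(f"Language: {result['language']}")
    for seg in result["segments"]:
        print(f"[{seg['start']:.2f} -> {seg['end']:.2f}] {seg['text']}")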