diff --git a/whisperx/asr.py b/whisperx/asr.py
index ba6220b..1ca12ce 100644
--- a/whisperx/asr.py
+++ b/whisperx/asr.py
@@ -181,6 +181,9 @@ class FasterWhisperPipeline(Pipeline):
 
     def preprocess(self, audio):
         audio = audio['inputs']
+        if isinstance(audio, np.ndarray):
+            audio = torch.from_numpy(audio)
+
         features = log_mel_spectrogram(audio, padding=N_SAMPLES - audio.shape[0])
         return {'inputs': features}
 
@@ -253,7 +256,7 @@ class FasterWhisperPipeline(Pipeline):
     def detect_language(self, audio: np.ndarray):
         if audio.shape[0] < N_SAMPLES:
             print("Warning: audio is shorter than 30s, language detection may be inaccurate.")
-        segment = log_mel_spectrogram(audio[: N_SAMPLES],
+        segment = log_mel_spectrogram(torch.from_numpy(audio[:N_SAMPLES]),
                                       padding=0 if audio.shape[0] >= N_SAMPLES else N_SAMPLES - audio.shape[0])
         encoder_output = self.model.encode(segment)
         results = self.model.model.detect_language(encoder_output)
diff --git a/whisperx/audio.py b/whisperx/audio.py
index 513ab7c..8ac0674 100644
--- a/whisperx/audio.py
+++ b/whisperx/audio.py
@@ -22,6 +22,12 @@ N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions has stride 2
 FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH)  # 10ms per audio frame
 TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN)  # 20ms per audio token
 
 
+with np.load(
+    os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
+) as f:
+    MEL_FILTERS = torch.from_numpy(f[f"mel_{80}"])
+
+
 def load_audio(file: str, sr: int = SAMPLE_RATE):
     """
@@ -79,27 +85,9 @@ def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
     return array
 
 
-@lru_cache(maxsize=None)
-def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
-    """
-    load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
-    Allows decoupling librosa dependency; saved using:
-
-        np.savez_compressed(
-            "mel_filters.npz",
-            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
-        )
-    """
-    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
-    with np.load(
-        os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
-    ) as f:
-        return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
-
-
+@torch.compile(fullgraph=True)
 def log_mel_spectrogram(
-    audio: Union[str, np.ndarray, torch.Tensor],
-    n_mels: int = N_MELS,
+    audio: torch.Tensor,
     padding: int = 0,
     device: Optional[Union[str, torch.device]] = None,
 ):
@@ -108,7 +96,7 @@ def log_mel_spectrogram(
 
     Parameters
     ----------
-    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
-        The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
+    audio: torch.Tensor, shape = (*)
+        A Tensor containing the audio waveform in 16 kHz
 
     n_mels: int
@@ -125,21 +113,19 @@ def log_mel_spectrogram(
     torch.Tensor, shape = (80, n_frames)
         A Tensor that contains the Mel spectrogram
     """
-    if not torch.is_tensor(audio):
-        if isinstance(audio, str):
-            audio = load_audio(audio)
-        audio = torch.from_numpy(audio)
+    global MEL_FILTERS
 
     if device is not None:
         audio = audio.to(device)
     if padding > 0:
         audio = F.pad(audio, (0, padding))
     window = torch.hann_window(N_FFT).to(audio.device)
-    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
-    magnitudes = stft[..., :-1].abs() ** 2
+    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=False)
+    # Square the real and imaginary components and sum them together, similar to torch.abs() on complex tensors
+    magnitudes = (stft[:, :-1, :] ** 2).sum(dim=-1)
 
-    filters = mel_filters(audio.device, n_mels)
-    mel_spec = filters @ magnitudes
+    MEL_FILTERS = MEL_FILTERS.to(audio.device)
+    mel_spec = MEL_FILTERS @ magnitudes
 
     log_spec = torch.clamp(mel_spec, min=1e-10).log10()
     log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
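
Usage note (not part of the diff): with the string and NumPy handling removed from `log_mel_spectrogram`, callers must now pass a `torch.Tensor`, which is why `preprocess` and `detect_language` convert up front. A minimal sketch of the new calling convention, assuming a placeholder `example.wav` and the existing `load_audio`/`N_SAMPLES` exports:

```python
import torch

from whisperx.audio import N_SAMPLES, load_audio, log_mel_spectrogram

# load_audio still returns a float32 NumPy array at 16 kHz; converting it to a
# torch.Tensor is now the caller's job ("example.wav" is a placeholder path).
audio = torch.from_numpy(load_audio("example.wav"))

# Pad up to 30 s of samples, mirroring FasterWhisperPipeline.preprocess.
features = log_mel_spectrogram(audio, padding=max(N_SAMPLES - audio.shape[0], 0))
print(features.shape)  # (80, n_frames)
```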
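The switch to `return_complex=False` trades the complex STFT tensor for a trailing real/imaginary axis, presumably because complex ops are not fully supported under `torch.compile(fullgraph=True)`. A standalone check (my sketch, not from the PR) showing that summing the squared components matches the old `abs() ** 2` path:

```python
import torch

N_FFT, HOP_LENGTH = 400, 160  # same constants as whisperx/audio.py
audio = torch.randn(16000)    # one second of dummy 16 kHz audio
window = torch.hann_window(N_FFT)

# Old path: complex output, magnitudes via abs() ** 2 (last frame dropped).
complex_stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
mag_old = complex_stft[..., :-1].abs() ** 2

# New path: real output of shape (freq, frames, 2); sum the squared components.
real_stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=False)
mag_new = (real_stft[:, :-1, :] ** 2).sum(dim=-1)

print(torch.allclose(mag_old, mag_new, atol=1e-6))  # True
```

One caveat worth noting: `torch.stft(..., return_complex=False)` is deprecated in recent PyTorch releases (it currently emits a warning), so this trade-off may need revisiting if the deprecation becomes an error.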
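For reference, the removed `mel_filters()` docstring recorded how the bundled filterbank asset was generated; the new module-level `MEL_FILTERS` is that same 80-bin matrix, just loaded once at import instead of per call through `lru_cache`. Regenerating the asset (sketch based on that docstring; requires librosa):

```python
import librosa
import numpy as np

# Same recipe as the removed mel_filters() docstring: an 80-bin mel filterbank
# for 16 kHz audio and a 400-point FFT, saved under the key "mel_80".
np.savez_compressed(
    "mel_filters.npz",
    mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
```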