mirror of https://github.com/m-bain/whisperX.git (synced 2025-07-01 18:17:27 -04:00)
refactor: update import statements to use explicit module paths across multiple files
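The pattern applied across every file in this diff is the same: intra-package relative imports are replaced with explicit absolute imports rooted at the whisperx package. A minimal before/after sketch, using a module name that appears in the hunks below:

# before: relative import; only resolves when the file is loaded as part of the package
from .audio import load_audio

# after: explicit module path; resolves anywhere the installed whisperx package is importable
from whisperx.audio import load_audio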
@@ -1,6 +1,5 @@
 import math
-from .conjunctions import get_conjunctions, get_comma
 from typing import TextIO
+from whisperx.conjunctions import get_conjunctions, get_comma

 def normal_round(n):
     if n - math.floor(n) < 0.5:
@@ -1,4 +1,7 @@
-from .alignment import load_align_model, align
-from .audio import load_audio
-from .diarize import assign_word_speakers, DiarizationPipeline
-from .asr import load_model
+from whisperx.alignment import load_align_model as load_align_model, align as align
+from whisperx.asr import load_model as load_model
+from whisperx.audio import load_audio as load_audio
+from whisperx.diarize import (
+    assign_word_speakers as assign_word_speakers,
+    DiarizationPipeline as DiarizationPipeline,
+)
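The `name as name` spelling in the new __init__.py is the explicit re-export idiom: type checkers such as mypy (under no-implicit-reexport / strict mode) and pyright treat `from module import X as X` as a deliberate part of the public API rather than an incidental import. A short usage sketch, assuming the package is installed; the audio path is a placeholder:

import whisperx

# the re-exported names remain available at the package top level
audio = whisperx.load_audio("example.wav")
print(whisperx.load_model, whisperx.align, whisperx.DiarizationPipeline)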
@@ -1,4 +1,4 @@
-from .transcribe import cli
+from whisperx.transcribe import cli


 cli()
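Because __main__.py now imports cli through the absolute whisperx.transcribe path, the entry point assumes whisperx is importable as an installed package; the usual way to reach this file is Python's -m switch (flags are whatever the CLI defines, shown here only as an illustration):

python -m whisperx --help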
@@ -13,9 +13,9 @@ import torch
 import torchaudio
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

-from .audio import SAMPLE_RATE, load_audio
-from .utils import interpolate_nans
-from .types import (
+from whisperx.audio import SAMPLE_RATE, load_audio
+from whisperx.utils import interpolate_nans
+from whisperx.types import (
     AlignedTranscriptionResult,
     SingleSegment,
     SingleAlignedSegment,
@@ -11,9 +11,10 @@ from faster_whisper.transcribe import TranscriptionOptions, get_ctranslate2_storage
 from transformers import Pipeline
 from transformers.pipelines.pt_utils import PipelineIterator

-from .audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
-from .types import SingleSegment, TranscriptionResult
-from .vads import Vad, Silero, Pyannote
+from whisperx.audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
+from whisperx.types import SingleSegment, TranscriptionResult
+from whisperx.vads import Vad, Silero, Pyannote


 def find_numeral_symbol_tokens(tokenizer):
     numeral_symbol_tokens = []
@@ -7,7 +7,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F

-from .utils import exact_div
+from whisperx.utils import exact_div

 # hard-coded audio hyperparameters
 SAMPLE_RATE = 16000
@@ -4,8 +4,8 @@ from pyannote.audio import Pipeline
 from typing import Optional, Union
 import torch

-from .audio import load_audio, SAMPLE_RATE
-from .types import TranscriptionResult, AlignedTranscriptionResult
+from whisperx.audio import load_audio, SAMPLE_RATE
+from whisperx.types import TranscriptionResult, AlignedTranscriptionResult


 class DiarizationPipeline:
@@ -6,12 +6,12 @@ import warnings
 import numpy as np
 import torch

-from .alignment import align, load_align_model
-from .asr import load_model
-from .audio import load_audio
-from .diarize import DiarizationPipeline, assign_word_speakers
-from .types import AlignedTranscriptionResult, TranscriptionResult
-from .utils import (
+from whisperx.alignment import align, load_align_model
+from whisperx.asr import load_model
+from whisperx.audio import load_audio
+from whisperx.diarize import DiarizationPipeline, assign_word_speakers
+from whisperx.types import AlignedTranscriptionResult, TranscriptionResult
+from whisperx.utils import (
     LANGUAGES,
     TO_LANGUAGE_CODE,
     get_writer,
@@ -1,3 +1,3 @@
-from whisperx.vads.pyannote import Pyannote
-from whisperx.vads.silero import Silero
-from whisperx.vads.vad import Vad
+from whisperx.vads.pyannote import Pyannote as Pyannote
+from whisperx.vads.silero import Silero as Silero
+from whisperx.vads.vad import Vad as Vad
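The same explicit re-export treatment is applied to the vads subpackage, which is what lets the asr.py hunk above pull in the VAD backends with a single absolute import:

# as used in asr.py earlier in this diff
from whisperx.vads import Vad, Silero, Pyannote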
@@ -1,6 +1,4 @@
 import hashlib
 import os
 import urllib
 from typing import Callable, Text, Union
 from typing import Optional

@@ -12,11 +10,11 @@ from pyannote.audio.pipelines import VoiceActivityDetection
 from pyannote.audio.pipelines.utils import PipelineModel
 from pyannote.core import Annotation, SlidingWindowFeature
 from pyannote.core import Segment
 from tqdm import tqdm

 from whisperx.diarize import Segment as SegmentX
 from whisperx.vads.vad import Vad


 def load_vad_model(device, vad_onset=0.500, vad_offset=0.363, use_auth_token=None, model_fp=None):
     model_dir = torch.hub._get_torch_home()