refactor: update import statements to use explicit module paths across multiple files

Barabazs
2025-03-25 16:13:55 +01:00
parent 8e53866704
commit e7712f496e
10 changed files with 29 additions and 28 deletions

View File

@@ -1,6 +1,5 @@
 import math
-from .conjunctions import get_conjunctions, get_comma
-from typing import TextIO
+from whisperx.conjunctions import get_conjunctions, get_comma
 def normal_round(n):
     if n - math.floor(n) < 0.5:
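
A note on the pattern this commit applies (the error text below is standard CPython behavior, not part of the diff): a package-relative import only resolves when the module is loaded as part of the whisperx package, while the explicit path also resolves when the file is run directly, as long as whisperx is importable. A minimal sketch:

# Relative form: only works inside the package context; running the file as a
# script fails with "ImportError: attempted relative import with no known parent package".
# from .conjunctions import get_conjunctions, get_comma

# Explicit form: resolves identically however the module is loaded, provided
# whisperx is installed or on sys.path.
from whisperx.conjunctions import get_conjunctions, get_comma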

View File

@@ -1,4 +1,7 @@
-from .alignment import load_align_model, align
-from .audio import load_audio
-from .diarize import assign_word_speakers, DiarizationPipeline
-from .asr import load_model
+from whisperx.alignment import load_align_model as load_align_model, align as align
+from whisperx.asr import load_model as load_model
+from whisperx.audio import load_audio as load_audio
+from whisperx.diarize import (
+    assign_word_speakers as assign_word_speakers,
+    DiarizationPipeline as DiarizationPipeline,
+)
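
The package root now re-exports its public API with redundant "name as name" aliases. A hedged note: this is the convention strict type checkers (for example mypy with implicit re-exports disabled) recognize as an intentional re-export rather than an incidental import. Downstream code is unaffected; a minimal usage sketch, assuming whisperx is installed (model size, device, and file name below are placeholders):

import whisperx

# The aliased re-exports keep the flat public API available from the package root.
model = whisperx.load_model("large-v2", device="cpu", compute_type="int8")
audio = whisperx.load_audio("audio.wav")  # placeholder path
result = model.transcribe(audio)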

View File

@@ -1,4 +1,4 @@
-from .transcribe import cli
+from whisperx.transcribe import cli
 cli()

View File

@@ -13,9 +13,9 @@ import torch
 import torchaudio
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-from .audio import SAMPLE_RATE, load_audio
-from .utils import interpolate_nans
-from .types import (
+from whisperx.audio import SAMPLE_RATE, load_audio
+from whisperx.utils import interpolate_nans
+from whisperx.types import (
     AlignedTranscriptionResult,
     SingleSegment,
     SingleAlignedSegment,

View File

@@ -11,9 +11,10 @@ from faster_whisper.transcribe import TranscriptionOptions, get_ctranslate2_stor
 from transformers import Pipeline
 from transformers.pipelines.pt_utils import PipelineIterator
-from .audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
-from .types import SingleSegment, TranscriptionResult
-from .vads import Vad, Silero, Pyannote
+from whisperx.audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
+from whisperx.types import SingleSegment, TranscriptionResult
+from whisperx.vads import Vad, Silero, Pyannote
 def find_numeral_symbol_tokens(tokenizer):
     numeral_symbol_tokens = []

View File

@@ -7,7 +7,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F
-from .utils import exact_div
+from whisperx.utils import exact_div
 # hard-coded audio hyperparameters
 SAMPLE_RATE = 16000

View File

@@ -4,8 +4,8 @@ from pyannote.audio import Pipeline
 from typing import Optional, Union
 import torch
-from .audio import load_audio, SAMPLE_RATE
-from .types import TranscriptionResult, AlignedTranscriptionResult
+from whisperx.audio import load_audio, SAMPLE_RATE
+from whisperx.types import TranscriptionResult, AlignedTranscriptionResult
 class DiarizationPipeline:

View File

@@ -6,12 +6,12 @@ import warnings
 import numpy as np
 import torch
-from .alignment import align, load_align_model
-from .asr import load_model
-from .audio import load_audio
-from .diarize import DiarizationPipeline, assign_word_speakers
-from .types import AlignedTranscriptionResult, TranscriptionResult
-from .utils import (
+from whisperx.alignment import align, load_align_model
+from whisperx.asr import load_model
+from whisperx.audio import load_audio
+from whisperx.diarize import DiarizationPipeline, assign_word_speakers
+from whisperx.types import AlignedTranscriptionResult, TranscriptionResult
+from whisperx.utils import (
     LANGUAGES,
     TO_LANGUAGE_CODE,
     get_writer,

View File

@@ -1,3 +1,3 @@
-from whisperx.vads.pyannote import Pyannote
-from whisperx.vads.silero import Silero
-from whisperx.vads.vad import Vad
+from whisperx.vads.pyannote import Pyannote as Pyannote
+from whisperx.vads.silero import Silero as Silero
+from whisperx.vads.vad import Vad as Vad

View File

@@ -1,6 +1,4 @@
-import hashlib
 import os
-import urllib
 from typing import Callable, Text, Union
 from typing import Optional
@@ -12,11 +10,11 @@ from pyannote.audio.pipelines import VoiceActivityDetection
 from pyannote.audio.pipelines.utils import PipelineModel
 from pyannote.core import Annotation, SlidingWindowFeature
 from pyannote.core import Segment
-from tqdm import tqdm
 from whisperx.diarize import Segment as SegmentX
 from whisperx.vads.vad import Vad
 def load_vad_model(device, vad_onset=0.500, vad_offset=0.363, use_auth_token=None, model_fp=None):
     model_dir = torch.hub._get_torch_home()
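
The two hunks above also drop imports (hashlib, urllib, tqdm), presumably because they are no longer used in this module. For reference, a minimal usage sketch of the load_vad_model signature shown in the context lines, assuming this file is whisperx/vads/pyannote.py (the module path is inferred, not stated in the diff) and the device string is a placeholder:

from whisperx.vads.pyannote import load_vad_model  # inferred module path

# Load the pyannote-based VAD with the default onset/offset thresholds.
vad_model = load_vad_model("cpu", vad_onset=0.500, vad_offset=0.363, use_auth_token=None)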