Added `download_root` parameter to `load_model` so the directory the Whisper model is downloaded to can be specified.

This commit is contained in:
prameshbajra
2023-05-27 11:38:54 +02:00
parent f1032bb40a
commit 5a47f458ac

View File

@ -13,8 +13,16 @@ from .audio import N_SAMPLES, SAMPLE_RATE, load_audio, log_mel_spectrogram
from .vad import load_vad_model, merge_chunks
from .types import TranscriptionResult, SingleSegment
def load_model(whisper_arch, device, device_index=0, compute_type="float16", asr_options=None, language=None,
vad_options=None, model=None, task="transcribe"):
def load_model(whisper_arch,
device,
device_index=0,
compute_type="float16",
asr_options=None,
language=None,
vad_options=None,
model=None,
task="transcribe",
download_root=None):
'''Load a Whisper model for inference.
Args:
whisper_arch: str - The name of the Whisper model to load.
@ -22,6 +30,7 @@ def load_model(whisper_arch, device, device_index=0, compute_type="float16", asr
compute_type: str - The compute type to use for the model.
options: dict - A dictionary of options to use for the model.
language: str - The language of the model. (use English for now)
download_root: Optional[str] - The root directory to download the model to.
Returns:
A Whisper pipeline.
'''
@ -29,7 +38,11 @@ def load_model(whisper_arch, device, device_index=0, compute_type="float16", asr
if whisper_arch.endswith(".en"):
language = "en"
model = WhisperModel(whisper_arch, device=device, device_index=device_index, compute_type=compute_type)
model = WhisperModel(whisper_arch,
device=device,
device_index=device_index,
compute_type=compute_type,
download_root=download_root)
if language is not None:
tokenizer = faster_whisper.tokenizer.Tokenizer(model.hf_tokenizer, model.model.is_multilingual, task=task, language=language)
else: