From 0efad26066f2db7be1f23ebbcad990210027e0a2 Mon Sep 17 00:00:00 2001
From: Max Bain
Date: Mon, 24 Apr 2023 21:26:44 +0100
Subject: [PATCH] pass compute_type

---
 whisperx/transcribe.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/whisperx/transcribe.py b/whisperx/transcribe.py
index 0c6b803..fd6cf52 100644
--- a/whisperx/transcribe.py
+++ b/whisperx/transcribe.py
@@ -24,7 +24,6 @@ def cli():
     parser.add_argument("--batch_size", default=8, type=int, help="device to use for PyTorch inference")
     parser.add_argument("--compute_type", default="float16", type=str, choices=["float16", "float32", "int8"], help="compute type for computation")
-
     parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
     parser.add_argument("--output_format", "-f", type=str, default="all", choices=["all", "srt", "vtt", "txt", "tsv", "json"], help="format of the output file; if not specified, all available formats will be produced")
     parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")
@@ -82,6 +81,8 @@ def cli():
     output_dir: str = args.pop("output_dir")
     output_format: str = args.pop("output_format")
     device: str = args.pop("device")
+    compute_type: str = args.pop("compute_type")
+    # model_flush: bool = args.pop("model_flush")
 
     os.makedirs(output_dir, exist_ok=True)
 
@@ -145,7 +146,7 @@ def cli():
     results = []
     tmp_results = []
     # model = load_model(model_name, device=device, download_root=model_dir)
-    model = load_model(model_name, device=device, language=args['language'], asr_options=asr_options, vad_options={"vad_onset": vad_onset, "vad_offset": vad_offset},)
+    model = load_model(model_name, device=device, compute_type=compute_type, language=args['language'], asr_options=asr_options, vad_options={"vad_onset": vad_onset, "vad_offset": vad_offset},)
     for audio_path in args.pop("audio"):
         audio = load_audio(audio_path)
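
Usage note (not part of the patch): after this change, the value passed to --compute_type is popped from the CLI args and forwarded to load_model instead of load_model falling back to its default. A minimal Python sketch of the equivalent call path, assuming whisperx exports load_model and load_audio as they are used in the diff above; the model name, audio file, and the transcribe call are illustrative and may differ by version:

    import whisperx

    compute_type = "float16"   # CLI choices above: "float16", "float32", "int8"
    device = "cuda"

    # load_model now receives the requested compute type explicitly
    model = whisperx.load_model("large-v2", device=device, compute_type=compute_type)

    audio = whisperx.load_audio("example.wav")            # hypothetical input file
    result = model.transcribe(audio, batch_size=8)        # batch_size mirrors the CLI default; assumed API

Lower-precision compute types ("float16", "int8") reduce memory use and speed up inference on supported hardware, while "float32" remains the safe fallback for CPUs or GPUs without half-precision support.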