mirror of
https://github.com/m-bain/whisperX.git
synced 2025-07-01 18:17:27 -04:00
Update README.md
Demonstrates use of the `download_root` argument to save the model to a local path.
This commit is contained in:
@@ -165,6 +165,10 @@ compute_type = "float16" # change to "int8" if low on GPU mem (may reduce accuracy)
 # 1. Transcribe with original whisper (batched)
 model = whisperx.load_model("large-v2", device, compute_type=compute_type)
 
+# save model to local path (optional)
+# model_dir = "/path/"
+# model = whisperx.load_model("large-v2", device, compute_type=compute_type, download_root=model_dir)
+
 audio = whisperx.load_audio(audio_file)
 result = model.transcribe(audio, batch_size=batch_size)
 print(result["segments"]) # before alignment
Reference in New Issue
Block a user