add .ass output
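align() now attaches word-level timestamps to each segment (segment['word-level']) instead of returning a separate word-level result, and cli() writes an .ass subtitle next to the other outputs; the new write_ass() in utils.py renders each word highlighted at its timestamp. A minimal sketch of driving the new output from Python follows; model loading is elided with placeholders, and the import paths assume the file layout shown in the diff below. Only transcribe(), align() and write_ass() are used as defined in this commit:

    # Sketch only: `...` placeholders stand for whatever model loading the
    # caller already does (a Whisper model plus a wav2vec2 alignment model
    # and its character dictionary).
    import torch
    from whisperx.transcribe import transcribe, align
    from whisperx.utils import write_ass

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = ...             # loaded Whisper model
    align_model = ...       # phoneme-level ASR (wav2vec2) model
    align_dictionary = ...  # character -> token id dict for the align model

    result = transcribe(model, "audio.mp3", temperature=(0.0,))
    result_aligned = align(result["segments"], align_model, align_dictionary,
                           "audio.mp3", device,
                           extend_duration=2, start_from_previous=True)

    with open("audio.mp3.ass", "w", encoding="utf-8") as f:
        write_ass(result_aligned["segments"], file=f)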
whisperx/transcribe.py
@@ -12,7 +12,7 @@ from .audio import SAMPLE_RATE, N_FRAMES, HOP_LENGTH, pad_or_trim, log_mel_spect
from .alignment import get_trellis, backtrack, merge_repeats, merge_words
from .decoding import DecodingOptions, DecodingResult
from .tokenizer import LANGUAGES, TO_LANGUAGE_CODE, get_tokenizer
from .utils import exact_div, format_timestamp, optional_int, optional_float, str2bool, write_txt, write_vtt, write_srt
from .utils import exact_div, format_timestamp, optional_int, optional_float, str2bool, write_txt, write_vtt, write_srt, write_ass

if TYPE_CHECKING:
    from .model import Whisper
@@ -269,7 +269,6 @@ def align(
    MAX_DURATION = audio.shape[1] / SAMPLE_RATE

    prev_t2 = 0
    word_level = []
    for idx, segment in enumerate(transcript):
        t1 = max(segment['start'] - extend_duration, 0)
        t2 = min(segment['end'] + extend_duration, MAX_DURATION)
@@ -287,10 +286,11 @@ def align(

        transcription = segment['text'].strip()
        t_words = transcription.split(' ')
        t_words_clean = [re.sub(r"[^a-zA-Z' ]", "", x) for x in t_words]
        t_words_clean = [''.join([w for w in word if w.upper() in model_dictionary.keys()]) for word in t_words]
        t_words_nonempty = [x for x in t_words_clean if x != ""]
        t_words_nonempty_idx = [x for x in range(len(t_words_clean)) if t_words_clean[x] != ""]

        segment['word-level'] = []

        if len(t_words_nonempty) > 0:
            transcription_cleaned = "|".join(t_words_nonempty).upper()
            tokens = [model_dictionary[c] for c in transcription_cleaned]
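The new cleaning step above keeps only characters the alignment model can actually emit, instead of the old hard-coded regex. A toy run, with a stand-in dictionary shaped like a wav2vec2 character-to-token map:

    # Toy illustration of the character-dictionary cleaning above.
    # model_dictionary is a stand-in for the align model's char -> token map.
    model_dictionary = {c: i for i, c in enumerate("|ABCDEFGHIJKLMNOPQRSTUVWXYZ'")}

    t_words = ["Hello,", "world", "in", "2022!"]
    t_words_clean = [''.join([w for w in word if w.upper() in model_dictionary.keys()])
                     for word in t_words]
    # -> ['Hello', 'world', 'in', '']; '2022!' has no alignable characters,
    # so it drops out of t_words_nonempty and is merged later by timestamp.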
@@ -315,27 +315,25 @@ def align(
            segment['end'] = t2_actual
            prev_t2 = segment['end']


            # merge words the aligner missed into the previous word,
            # or into the next word when it is the segment's first (x == 0)
            for x in range(len(t_local)):
                curr_word = t_words[x]
                curr_timestamp = t_local[x]
                if curr_timestamp is not None:
                    word_level.append({"text": curr_word, "start": curr_timestamp[0], "end": curr_timestamp[1]})
                    segment['word-level'].append({"text": curr_word, "start": curr_timestamp[0], "end": curr_timestamp[1]})
                else:
                    if x == 0:
                        t_words[x+1] = " ".join([curr_word, t_words[x+1]])
                    else:
                        word_level[-1]['text'] += ' ' + curr_word
                        segment['word-level'].append({"text": curr_word, "start": None, "end": None})

        else:
            # fall back to the original whisper timestamps
            # segment['start'] and segment['end'] are unchanged
            prev_t2 = 0
            word_level.append({"text": segment['text'], "start": segment['start'], "end": segment['end']})
            segment['word-level'].append({"text": segment['text'], "start": segment['start'], "end": segment['end']})

        print(f"[{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}] {segment['text']}")

    return {"segments": transcript}, {"segments": word_level}
    return {"segments": transcript}


def cli():
    from . import available_models
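The merge rule above glues any word the aligner could not time onto the previous timed word, or onto the next word when it is the first in the segment. A toy trace, using the old word_level list for brevity (the new segment['word-level'] gets the same treatment, keeping the untimed word as a None-timestamped entry):

    # Toy trace of the missing-word merge rule.
    t_words = ["in", "2022,", "prices", "rose"]           # '2022,' was not alignable
    t_local = [(0.0, 0.2), None, (0.5, 0.9), (0.9, 1.3)]  # per-word (start, end) or None

    word_level = []
    for x in range(len(t_local)):
        curr_word = t_words[x]
        curr_timestamp = t_local[x]
        if curr_timestamp is not None:
            word_level.append({"text": curr_word, "start": curr_timestamp[0], "end": curr_timestamp[1]})
        else:
            if x == 0:
                t_words[x + 1] = " ".join([curr_word, t_words[x + 1]])
            else:
                word_level[-1]['text'] += ' ' + curr_word

    # word_level[0] -> {'text': 'in 2022,', 'start': 0.0, 'end': 0.2}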
@@ -347,11 +345,9 @@ def cli():
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
    # alignment params
    parser.add_argument("--align_model", default="WAV2VEC2_ASR_LARGE_LV60K_960H", help="Name of phoneme-level ASR model to do alignment")
    parser.add_argument("--align_extend", default=1, type=float, help="Seconds before and after to extend the whisper segments for alignment")
    parser.add_argument("--align_extend", default=2, type=float, help="Seconds before and after to extend the whisper segments for alignment")
    parser.add_argument("--align_from_prev", default=True, type=bool, help="Whether to clip the alignment start time of current segment to the end time of the last aligned word of the previous segment")

    # parser.add_argument("--align_interpolate_missing", default=True, type=bool, help="Whether to interpolate the timestamp of words not tokenized by the align model, e.g. integers")

    parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
    parser.add_argument("--output_type", default="srt", choices=['all', 'srt', 'vtt', 'txt'], help="type of output file(s) to save")
@@ -417,7 +413,7 @@ def cli():

    for audio_path in args.pop("audio"):
        result = transcribe(model, audio_path, temperature=temperature, **args)
        result_aligned, result_aligned_word = align(result["segments"], align_model, align_dictionary, audio_path, device,
        result_aligned = align(result["segments"], align_model, align_dictionary, audio_path, device,
                                extend_duration=align_extend, start_from_previous=align_from_prev)
        audio_basename = os.path.basename(audio_path)
@@ -425,22 +421,20 @@ def cli():
        if output_type in ["txt", "all"]:
            with open(os.path.join(output_dir, audio_basename + ".txt"), "w", encoding="utf-8") as txt:
                write_txt(result_aligned["segments"], file=txt)
            with open(os.path.join(output_dir, audio_basename + ".word.txt"), "w", encoding="utf-8") as txt:
                write_txt(result_aligned_word["segments"], file=txt)

        # save VTT
        if output_type in ["vtt", "all"]:
            with open(os.path.join(output_dir, audio_basename + ".vtt"), "w", encoding="utf-8") as vtt:
                write_vtt(result_aligned["segments"], file=vtt)
            with open(os.path.join(output_dir, audio_basename + ".word.vtt"), "w", encoding="utf-8") as vtt:
                write_vtt(result_aligned_word["segments"], file=vtt)

        # save SRT
        if output_type in ["srt", "all"]:
            with open(os.path.join(output_dir, audio_basename + ".srt"), "w", encoding="utf-8") as srt:
                write_srt(result_aligned["segments"], file=srt)
            with open(os.path.join(output_dir, audio_basename + ".word.srt"), "w", encoding="utf-8") as srt:
                write_srt(result_aligned_word["segments"], file=srt)

        # save ASS
        with open(os.path.join(output_dir, audio_basename + ".ass"), "w", encoding="utf-8") as ass:
            write_ass(result_aligned["segments"], file=ass)


if __name__ == '__main__':
whisperx/utils.py
@@ -1,5 +1,5 @@
import zlib
from typing import Iterator, TextIO
from typing import Iterator, TextIO, Union, List


def exact_div(x, y):
@@ -86,3 +86,138 @@ def write_srt(transcript: Iterator[dict], file: TextIO):
            file=file,
            flush=True,
        )

def write_ass(transcript: Iterator[dict], file: TextIO,
              color: str = None, underline=True,
              prefmt: str = None, suffmt: str = None,
              font: str = None, font_size: int = 24,
              strip=True, **kwargs):
    """
    Credit: https://github.com/jianfch/stable-ts/blob/ff79549bd01f764427879f07ecd626c46a9a430a/stable_whisper/text_output.py
    Generate an Advanced SubStation Alpha (ASS) file from the results to
    display both phrase-level & word-level timestamps simultaneously by:
    - using segment-level timestamps to display phrases as usual
    - using word-level timestamps to change the format (e.g. color/underline) of a word in the displayed segment
    Note: the .ass file is used in the same way as srt, vtt, etc.

    Parameters
    ----------
    transcript: Iterator[dict]
        aligned segments from align(), each carrying 'word-level' timestamps
    file: TextIO
        open handle for the output file (e.g. caption.ass)
    color: str
        color code for a word at its corresponding timestamp
        <bbggrr> reverse-order hexadecimal RGB value (e.g. FF0000 is full-intensity blue; default: 00FF00, i.e. green)
    underline: bool
        whether to underline a word at its corresponding timestamp
    prefmt: str
        used to specify the format for word-level timestamps (must be used with 'suffmt' and overrides 'color' & 'underline')
        appears as such in the .ass file:
            Hi, {<prefmt>}how{<suffmt>} are you?
        reference [Appendix A: Style override codes] in http://www.tcax.org/docs/ass-specs.htm
    suffmt: str
        used to specify the format for word-level timestamps (must be used with 'prefmt' and overrides 'color' & 'underline')
        appears as such in the .ass file:
            Hi, {<prefmt>}how{<suffmt>} are you?
        reference [Appendix A: Style override codes] in http://www.tcax.org/docs/ass-specs.htm
    font: str
        word font (default: Arial)
    font_size: int
        word font size (default: 24)
    kwargs:
        used for format styles:
        'Name', 'Fontname', 'Fontsize', 'PrimaryColour', 'SecondaryColour', 'OutlineColour', 'BackColour', 'Bold',
        'Italic', 'Underline', 'StrikeOut', 'ScaleX', 'ScaleY', 'Spacing', 'Angle', 'BorderStyle', 'Outline',
        'Shadow', 'Alignment', 'MarginL', 'MarginR', 'MarginV', 'Encoding'

    """

    fmt_style_dict = {'Name': 'Default', 'Fontname': 'Arial', 'Fontsize': '48', 'PrimaryColour': '&Hffffff',
                      'SecondaryColour': '&Hffffff', 'OutlineColour': '&H0', 'BackColour': '&H0', 'Bold': '0',
                      'Italic': '0', 'Underline': '0', 'StrikeOut': '0', 'ScaleX': '100', 'ScaleY': '100',
                      'Spacing': '0', 'Angle': '0', 'BorderStyle': '1', 'Outline': '1', 'Shadow': '0',
                      'Alignment': '2', 'MarginL': '10', 'MarginR': '10', 'MarginV': '10', 'Encoding': '0'}

    for k, v in filter(lambda x: 'colour' in x[0].lower() and not str(x[1]).startswith('&H'), kwargs.items()):
        kwargs[k] = f'&H{kwargs[k]}'

    fmt_style_dict.update((k, v) for k, v in kwargs.items() if k in fmt_style_dict)

    if font:
        fmt_style_dict.update(Fontname=font)
    if font_size:
        fmt_style_dict.update(Fontsize=font_size)

    fmts = f'Format: {", ".join(map(str, fmt_style_dict.keys()))}'

    styles = f'Style: {",".join(map(str, fmt_style_dict.values()))}'

    ass_str = f'[Script Info]\nScriptType: v4.00+\nPlayResX: 384\nPlayResY: 288\nScaledBorderAndShadow: yes\n\n' \
              f'[V4+ Styles]\n{fmts}\n{styles}\n\n' \
              f'[Events]\nFormat: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n\n'

    if prefmt or suffmt:
        if suffmt:
            assert prefmt, 'prefmt must be used along with suffmt'
        else:
            suffmt = r'\r'
    else:
        if not color:
            color = 'HFF00'
        underline_code = r'\u1' if underline else ''

        prefmt = r'{\1c&' + f'{color.upper()}&{underline_code}' + '}'
        suffmt = r'{\r}'

    def secs_to_hhmmss(secs: Union[float, int]):
        mm, ss = divmod(secs, 60)
        hh, mm = divmod(mm, 60)
        return f'{hh:0>1.0f}:{mm:0>2.0f}:{ss:0>5.2f}'

    def dialogue(words: List[str], idx, start, end) -> str:
        text = ''.join(f' {prefmt}{word}{suffmt}'
                       # if not word.startswith(' ') or word == ' ' else
                       # f' {prefmt}{word.strip()}{suffmt}')
                       if curr_idx == idx else
                       f' {word}'
                       for curr_idx, word in enumerate(words))
        return f"Dialogue: 0,{secs_to_hhmmss(start)},{secs_to_hhmmss(end)}," \
               f"Default,,0,0,0,,{text.strip() if strip else text}"

    ass_arr = []

    for segment in transcript:
        curr_words = [wrd['text'] for wrd in segment['word-level']]
        prev = segment['word-level'][0]['start']
        for wdx, word in enumerate(segment['word-level']):
            if word['start'] is not None:

                # fill the gap between the previous word and this one (no word highlighted)
                if word['start'] > prev:
                    filler_ts = {
                        "words": curr_words,
                        "start": prev,
                        "end": word['start'],
                        "idx": -1
                    }
                    ass_arr.append(filler_ts)

                # highlight the current word
                f_word_ts = {
                    "words": curr_words,
                    "start": word['start'],
                    "end": word['end'],
                    "idx": wdx
                }
                ass_arr.append(f_word_ts)

                prev = word['end']

    ass_str += '\n'.join(map(lambda x: dialogue(**x), ass_arr))

    file.write(ass_str)
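For reference, with the defaults (color 'HFF00', i.e. green, and underline on) write_ass produces an events section like the following for a two-word segment "Hello world"; the timestamps here are illustrative:

    [Events]
    Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

    Dialogue: 0,0:00:00.50,0:00:00.90,Default,,0,0,0,,{\1c&HFF00&\u1}Hello{\r} world
    Dialogue: 0,0:00:00.90,0:00:01.30,Default,,0,0,0,,Hello {\1c&HFF00&\u1}world{\r}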