# -*- coding: utf-8 -*-
from contextlib import contextmanager
import logging
import io
import subprocess
import sys
from datetime import timedelta
from typing import cast, Callable, Dict, List, Optional, Union

import ffmpeg
import numpy as np
import tqdm

from ffsubsync.constants import *
from ffsubsync.ffmpeg_utils import ffmpeg_bin_path, subprocess_args
from ffsubsync.generic_subtitles import GenericSubtitle
from ffsubsync.sklearn_shim import TransformerMixin
from ffsubsync.sklearn_shim import Pipeline
from ffsubsync.subtitle_parser import make_subtitle_parser
from ffsubsync.subtitle_transformers import SubtitleScaler


logging.basicConfig(level=logging.INFO)
logger: logging.Logger = logging.getLogger(__name__)


def make_subtitle_speech_pipeline(
    fmt: str = "srt",
    encoding: str = DEFAULT_ENCODING,
    caching: bool = False,
    max_subtitle_seconds: int = DEFAULT_MAX_SUBTITLE_SECONDS,
    start_seconds: int = DEFAULT_START_SECONDS,
    scale_factor: Optional[float] = DEFAULT_SCALE_FACTOR,
    parser=None,
    **kwargs,
) -> Union[Pipeline, Callable[[float], Pipeline]]:
    if parser is None:
        parser = make_subtitle_parser(
            fmt,
            encoding=encoding,
            caching=caching,
            max_subtitle_seconds=max_subtitle_seconds,
            start_seconds=start_seconds,
            **kwargs,
        )
    assert parser.encoding == encoding
    assert parser.max_subtitle_seconds == max_subtitle_seconds
    assert parser.start_seconds == start_seconds

    def subpipe_maker(framerate_ratio):
        return Pipeline(
            [
                ("parse", parser),
                ("scale", SubtitleScaler(framerate_ratio)),
                (
                    "speech_extract",
                    SubtitleSpeechTransformer(
                        sample_rate=SAMPLE_RATE,
                        start_seconds=start_seconds,
                        framerate_ratio=framerate_ratio,
                    ),
                ),
            ]
        )

    if scale_factor is None:
        return subpipe_maker
    else:
        return subpipe_maker(scale_factor)


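# Usage sketch (not part of the documented API; "subs.srt" is a hypothetical
# path): with a concrete scale_factor the call returns a ready Pipeline, while
# scale_factor=None yields a factory parameterized by the framerate ratio.
#
#     pipe = cast(Pipeline, make_subtitle_speech_pipeline("srt"))
#     speech = pipe.fit("subs.srt").transform("subs.srt")  # speech array
#
#     make_pipe = make_subtitle_speech_pipeline("srt", scale_factor=None)
#     ntsc_pipe = make_pipe(25.0 / (24000.0 / 1001.0))  # framerate mismatch

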
def _make_auditok_detector(
    sample_rate: int, frame_rate: int, non_speech_label: float
) -> Callable[[bytes], np.ndarray]:
    try:
        from auditok import (
            BufferAudioSource,
            ADSFactory,
            AudioEnergyValidator,
            StreamTokenizer,
        )
    except ImportError as e:
        logger.error(
            """Error: auditok not installed!
        Consider installing it with `pip install auditok`. Note that auditok
        is GPLv3 licensed, which means that successfully importing it at
        runtime creates a derivative work that is GPLv3 licensed. For personal
        use this is fine, but note that any commercial use that relies on
        auditok must be open source as per the GPLv3!*
        *Not legal advice. Consult with a lawyer.
        """
        )
        raise e
    bytes_per_frame = 2
    frames_per_window = frame_rate // sample_rate
    validator = AudioEnergyValidator(sample_width=bytes_per_frame, energy_threshold=50)
    tokenizer = StreamTokenizer(
        validator=validator,
        min_length=0.2 * sample_rate,
        max_length=int(5 * sample_rate),
        max_continuous_silence=0.25 * sample_rate,
    )

    def _detect(asegment: bytes) -> np.ndarray:
        asource = BufferAudioSource(
            data_buffer=asegment,
            sampling_rate=frame_rate,
            sample_width=bytes_per_frame,
            channels=1,
        )
        ads = ADSFactory.ads(audio_source=asource, block_dur=1.0 / sample_rate)
        ads.open()
        tokens = tokenizer.tokenize(ads)
        length = (
            len(asegment) // bytes_per_frame + frames_per_window - 1
        ) // frames_per_window
        media_bstring = np.zeros(length + 1)
        for token in tokens:
            # +1 at each token start and (non_speech_label - 1) just past its
            # end make the running sum below 1.0 inside speech tokens and
            # non_speech_label after them
            media_bstring[token[1]] = 1.0
            media_bstring[token[2] + 1] = non_speech_label - 1.0
        return np.clip(np.cumsum(media_bstring)[:-1], 0.0, 1.0)

    return _detect


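# Contract sketch (hypothetical numbers): _detect consumes signed 16-bit mono
# PCM bytes sampled at frame_rate and emits roughly one label per
# 1 / sample_rate seconds: 1.0 inside speech tokens, non_speech_label after.
#
#     detect = _make_auditok_detector(
#         sample_rate=100, frame_rate=48000, non_speech_label=0.0
#     )
#     labels = detect(pcm_bytes)  # ~100 entries per second of audio

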
def _make_webrtcvad_detector(
    sample_rate: int, frame_rate: int, non_speech_label: float
) -> Callable[[bytes], np.ndarray]:
    import webrtcvad

    vad = webrtcvad.Vad()
    vad.set_mode(3)  # set non-speech pruning aggressiveness from 0 to 3
    window_duration = 1.0 / sample_rate  # duration in seconds
    frames_per_window = int(window_duration * frame_rate + 0.5)
    bytes_per_frame = 2

    def _detect(asegment: bytes) -> np.ndarray:
        media_bstring = []
        failures = 0
        for start in range(0, len(asegment) // bytes_per_frame, frames_per_window):
            stop = min(start + frames_per_window, len(asegment) // bytes_per_frame)
            try:
                is_speech = vad.is_speech(
                    asegment[start * bytes_per_frame : stop * bytes_per_frame],
                    sample_rate=frame_rate,
                )
            except Exception:
                is_speech = False
                failures += 1
            # webrtcvad has low recall on mode 3, so treat non-speech as "not sure"
            media_bstring.append(1.0 if is_speech else non_speech_label)
        return np.array(media_bstring)

    return _detect


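# Window-size arithmetic (sketch): webrtcvad accepts only 10/20/30 ms windows,
# so sample_rate=100 gives the 10 ms case: frames_per_window =
# int(48000 / 100 + 0.5) = 480 frames, i.e. 960 bytes per is_speech() call.
#
#     detect = _make_webrtcvad_detector(
#         sample_rate=100, frame_rate=48000, non_speech_label=0.5
#     )
#     labels = detect(pcm_bytes)  # 1.0 for speech windows, 0.5 otherwise

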
class ComputeSpeechFrameBoundariesMixin:
    def __init__(self) -> None:
        self.start_frame_: Optional[int] = None
        self.end_frame_: Optional[int] = None

    @property
    def num_frames(self) -> Optional[int]:
        if self.start_frame_ is None or self.end_frame_ is None:
            return None
        return self.end_frame_ - self.start_frame_

    def fit_boundaries(
        self, speech_frames: np.ndarray
    ) -> "ComputeSpeechFrameBoundariesMixin":
        nz = np.nonzero(speech_frames > 0.5)[0]
        if len(nz) > 0:
            self.start_frame_ = np.min(nz)
            self.end_frame_ = np.max(nz)
        return self


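# Toy example (sketch): the boundaries span the first through last frame whose
# speech score exceeds 0.5.
#
#     bounds = ComputeSpeechFrameBoundariesMixin()
#     bounds.fit_boundaries(np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0]))
#     bounds.start_frame_, bounds.end_frame_, bounds.num_frames  # 2, 5, 3

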
class VideoSpeechTransformer(TransformerMixin):
    def __init__(
        self,
        vad: str,
        sample_rate: int,
        frame_rate: int,
        non_speech_label: float,
        start_seconds: int = 0,
        ffmpeg_path: Optional[str] = None,
        ref_stream: Optional[str] = None,
        vlc_mode: bool = False,
        gui_mode: bool = False,
    ) -> None:
        super(VideoSpeechTransformer, self).__init__()
        self.vad: str = vad
        self.sample_rate: int = sample_rate
        self.frame_rate: int = frame_rate
        self._non_speech_label: float = non_speech_label
        self.start_seconds: int = start_seconds
        self.ffmpeg_path: Optional[str] = ffmpeg_path
        self.ref_stream: Optional[str] = ref_stream
        self.vlc_mode: bool = vlc_mode
        self.gui_mode: bool = gui_mode
        self.video_speech_results_: Optional[np.ndarray] = None

    def try_fit_using_embedded_subs(self, fname: str) -> None:
        embedded_subs = []
        embedded_subs_times = []
        if self.ref_stream is None:
            # check first 5; should cover 99% of movies
            streams_to_try: List[str] = list(map("0:s:{}".format, range(5)))
        else:
            streams_to_try = [self.ref_stream]
        for stream in streams_to_try:
            ffmpeg_args = [
                ffmpeg_bin_path(
                    "ffmpeg", self.gui_mode, ffmpeg_resources_path=self.ffmpeg_path
                )
            ]
            ffmpeg_args.extend(
                [
                    "-loglevel",
                    "fatal",
                    "-nostdin",
                    "-i",
                    fname,
                    "-map",
                    "{}".format(stream),
                    "-f",
                    "srt",
                    "-",
                ]
            )
            process = subprocess.Popen(
                ffmpeg_args, **subprocess_args(include_stdout=True)
            )
            output = io.BytesIO(process.communicate()[0])
            if process.returncode != 0:
                break
            pipe = cast(
                Pipeline,
                make_subtitle_speech_pipeline(start_seconds=self.start_seconds),
            ).fit(output)
            speech_step = pipe.steps[-1][1]
            embedded_subs.append(speech_step)
            embedded_subs_times.append(speech_step.max_time_)
        if len(embedded_subs) == 0:
            if self.ref_stream is None:
                error_msg = "Video file appears to lack subtitle stream"
            else:
                error_msg = "Stream {} not found".format(self.ref_stream)
            raise ValueError(error_msg)
        # use longest set of embedded subs
        subs_to_use = embedded_subs[int(np.argmax(embedded_subs_times))]
        self.video_speech_results_ = subs_to_use.subtitle_speech_results_

    def fit(self, fname: str, *_) -> "VideoSpeechTransformer":
        if "subs" in self.vad and (
            self.ref_stream is None or self.ref_stream.startswith("0:s:")
        ):
            try:
                logger.info("Checking video for subtitles stream...")
                self.try_fit_using_embedded_subs(fname)
                logger.info("...success!")
                return self
            except Exception as e:
                logger.info(e)
        try:
            total_duration = (
                float(
                    ffmpeg.probe(
                        fname,
                        cmd=ffmpeg_bin_path(
                            "ffprobe",
                            self.gui_mode,
                            ffmpeg_resources_path=self.ffmpeg_path,
                        ),
                    )["format"]["duration"]
                )
                - self.start_seconds
            )
        except Exception as e:
            logger.warning(e)
            total_duration = None
        if "webrtc" in self.vad:
            detector = _make_webrtcvad_detector(
                self.sample_rate, self.frame_rate, self._non_speech_label
            )
        elif "auditok" in self.vad:
            detector = _make_auditok_detector(
                self.sample_rate, self.frame_rate, self._non_speech_label
            )
        else:
            raise ValueError("unknown vad: %s" % self.vad)
        media_bstring = []
        ffmpeg_args = [
            ffmpeg_bin_path(
                "ffmpeg", self.gui_mode, ffmpeg_resources_path=self.ffmpeg_path
            )
        ]
        if self.start_seconds > 0:
            ffmpeg_args.extend(
                [
                    "-ss",
                    str(timedelta(seconds=self.start_seconds)),
                ]
            )
        ffmpeg_args.extend(["-loglevel", "fatal", "-nostdin", "-i", fname])
        if self.ref_stream is not None and self.ref_stream.startswith("0:a:"):
            ffmpeg_args.extend(["-map", self.ref_stream])
        ffmpeg_args.extend(
            [
                "-f",
                "s16le",
                "-ac",
                "1",
                "-acodec",
                "pcm_s16le",
                "-ar",
                str(self.frame_rate),
                "-",
            ]
        )
        process = subprocess.Popen(ffmpeg_args, **subprocess_args(include_stdout=True))
        bytes_per_frame = 2
        frames_per_window = bytes_per_frame * self.frame_rate // self.sample_rate
        windows_per_buffer = 10000
        simple_progress = 0.0

        @contextmanager
        def redirect_stderr(enter_result=None):
            yield enter_result

        tqdm_extra_args = {}
        should_print_redirected_stderr = self.gui_mode
        if self.gui_mode:
            try:
                from contextlib import redirect_stderr  # type: ignore

                tqdm_extra_args["file"] = sys.stdout
            except ImportError:
                should_print_redirected_stderr = False
        pbar_output = io.StringIO()
        with redirect_stderr(pbar_output):
            with tqdm.tqdm(
                total=total_duration, disable=self.vlc_mode, **tqdm_extra_args
            ) as pbar:
                while True:
                    in_bytes = process.stdout.read(
                        frames_per_window * windows_per_buffer
                    )
                    if not in_bytes:
                        break
                    newstuff = len(in_bytes) / float(bytes_per_frame) / self.frame_rate
                    if (
                        total_duration is not None
                        and simple_progress + newstuff > total_duration
                    ):
                        newstuff = total_duration - simple_progress
                    simple_progress += newstuff
                    pbar.update(newstuff)
                    if self.vlc_mode and total_duration is not None:
                        print("%d" % int(simple_progress * 100.0 / total_duration))
                        sys.stdout.flush()
                    if should_print_redirected_stderr:
                        assert self.gui_mode
                        # no need to flush since we pass -u to do unbuffered output for gui mode
                        print(pbar_output.read())
                    in_bytes = np.frombuffer(in_bytes, np.uint8)
                    media_bstring.append(detector(in_bytes))
        if len(media_bstring) == 0:
            raise ValueError(
                "Unable to detect speech. Perhaps try specifying a different stream / track, or a different vad."
            )
        self.video_speech_results_ = np.concatenate(media_bstring)
        return self

    def transform(self, *_) -> np.ndarray:
        return self.video_speech_results_


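# Usage sketch (hedged; "movie.mkv" is a hypothetical path, and
# "subs_then_webrtc" mirrors ffsubsync's default VAD setting): embedded
# subtitle streams are tried first, then the webrtcvad fallback on the audio.
#
#     vst = VideoSpeechTransformer(
#         vad="subs_then_webrtc",
#         sample_rate=SAMPLE_RATE,
#         frame_rate=48000,
#         non_speech_label=0.0,
#     )
#     speech = vst.fit("movie.mkv").transform()

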
_PAIRED_NESTER: Dict[str, str] = {
    "(": ")",
    "{": "}",
    "[": "]",
    # FIXME: False positive sometimes when there are html tags, e.g. <i> Hello? </i>
    # '<': '>',
}


# TODO: need way better metadata detector
def _is_metadata(content: str, is_beginning_or_end: bool) -> bool:
    content = content.strip()
    if len(content) == 0:
        return True
    if (
        content[0] in _PAIRED_NESTER.keys()
        and content[-1] == _PAIRED_NESTER[content[0]]
    ):
        return True
    if is_beginning_or_end:
        if "english" in content.lower():
            return True
        if " - " in content:
            return True
    return False


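# Behavior sketch: fully bracketed lines are treated as sound cues, and
# credit-like lines at the very beginning or end are dropped.
#
#     _is_metadata("(door slams)", False)        # True: wholly parenthesized
#     _is_metadata("Hello there!", False)        # False: ordinary dialogue
#     _is_metadata("subs by xyz - enjoy", True)  # True: " - " at start/end

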
class SubtitleSpeechTransformer(TransformerMixin, ComputeSpeechFrameBoundariesMixin):
    def __init__(
        self, sample_rate: int, start_seconds: int = 0, framerate_ratio: float = 1.0
    ) -> None:
        super(SubtitleSpeechTransformer, self).__init__()
        self.sample_rate: int = sample_rate
        self.start_seconds: int = start_seconds
        self.framerate_ratio: float = framerate_ratio
        self.subtitle_speech_results_: Optional[np.ndarray] = None
        self.max_time_: Optional[int] = None

    def fit(self, subs: List[GenericSubtitle], *_) -> "SubtitleSpeechTransformer":
        max_time = 0
        for sub in subs:
            max_time = max(max_time, sub.end.total_seconds())
        self.max_time_ = max_time - self.start_seconds
        samples = np.zeros(int(max_time * self.sample_rate) + 2, dtype=float)
        start_frame = float("inf")
        end_frame = 0
        for i, sub in enumerate(subs):
            if _is_metadata(sub.content, i == 0 or i + 1 == len(subs)):
                continue
            start = int(
                round(
                    (sub.start.total_seconds() - self.start_seconds) * self.sample_rate
                )
            )
            start_frame = min(start_frame, start)
            duration = sub.end.total_seconds() - sub.start.total_seconds()
            end = start + int(round(duration * self.sample_rate))
            end_frame = max(end_frame, end)
            samples[start:end] = min(1.0 / self.framerate_ratio, 1.0)
        self.subtitle_speech_results_ = samples
        self.fit_boundaries(self.subtitle_speech_results_)
        return self

    def transform(self, *_) -> np.ndarray:
        assert self.subtitle_speech_results_ is not None
        return self.subtitle_speech_results_


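# Usage sketch (hedged): subs is a list of GenericSubtitle objects, e.g. the
# output of the "parse"/"scale" steps in make_subtitle_speech_pipeline.
#
#     sst = SubtitleSpeechTransformer(sample_rate=SAMPLE_RATE)
#     onehot = sst.fit(subs).transform()  # 1.0 wherever a subtitle is shown

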
class DeserializeSpeechTransformer(TransformerMixin):
    def __init__(self, non_speech_label: float) -> None:
        super(DeserializeSpeechTransformer, self).__init__()
        self._non_speech_label: float = non_speech_label
        self.deserialized_speech_results_: Optional[np.ndarray] = None

    def fit(self, fname, *_) -> "DeserializeSpeechTransformer":
        speech = np.load(fname)
        if hasattr(speech, "files"):
            if "speech" in speech.files:
                speech = speech["speech"]
            else:
                raise ValueError(
                    'could not find "speech" array in '
                    "serialized file; only contains: %s" % speech.files
                )
        speech[speech < 1.0] = self._non_speech_label
        self.deserialized_speech_results_ = speech
        return self

    def transform(self, *_) -> np.ndarray:
        assert self.deserialized_speech_results_ is not None
        return self.deserialized_speech_results_


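# Usage sketch (hedged; "speech.npz" is a hypothetical path): deserializes a
# speech array saved via numpy, e.g. np.savez("speech.npz", speech=arr); a
# plain .npy file also works since np.load then returns the array directly.
#
#     dst = DeserializeSpeechTransformer(non_speech_label=0.0)
#     speech = dst.fit("speech.npz").transform()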