# coding=utf-8

from __future__ import absolute_import
import logging
import traceback

import re
import types

import chardet
import pysrt
import pysubs2
from bs4 import UnicodeDammit
from pysubs2 import SSAStyle
from pysubs2.formats.subrip import parse_tags, MAX_REPRESENTABLE_TIME
from pysubs2.time import ms_to_times
from subzero.modification import SubtitleModifications
from subzero.language import Language
from subliminal import Subtitle as Subtitle_
from subliminal.subtitle import Episode, Movie, sanitize_release_group, get_equivalent_release_groups
from subliminal_patch.utils import sanitize
from ftfy import fix_text
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
from six import text_type

BOMS = (
    (BOM_UTF8, "UTF-8"),
    (BOM_UTF32_BE, "UTF-32-BE"),
    (BOM_UTF32_LE, "UTF-32-LE"),
    (BOM_UTF16_BE, "UTF-16-BE"),
    (BOM_UTF16_LE, "UTF-16-LE"),
)

logger = logging.getLogger(__name__)


# options passed to ftfy.fix_text(): leave curly quotes and full-width characters untouched
ftfy_defaults = {
    "uncurl_quotes": False,
    "fix_character_width": False,
}


class Subtitle(Subtitle_):
    storage_path = None
    release_info = None
    matches = {}
    hash_verifiable = False
    hearing_impaired_verifiable = False
    mods = None
    plex_media_fps = None
    skip_wrong_fps = False
    wrong_fps = False
    wrong_series = False
    wrong_season_ep = False
    is_pack = False
    asked_for_release_group = None
    asked_for_episode = None
    uploader = None  # string - uploader username

    pack_data = None
    _guessed_encoding = None
    _is_valid = False
    use_original_format = False
    format = "srt"  # default format is srt

    def __init__(self, language, hearing_impaired=False, page_link=None, encoding=None, mods=None,
                 original_format=False):
        # set subtitle language to hi if it's hearing_impaired
        if hearing_impaired:
            language = Language.rebuild(language, hi=True)

        super(Subtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link,
                                       encoding=encoding)
        self.mods = mods
        self._is_valid = False
        self.use_original_format = original_format

    def __repr__(self):
        r_info = str(self.release_info or "").replace("\n", " | ").strip()
        return f"<{self.__class__.__name__}: {r_info} [{repr(self.language)}]>"

    @property
    def text(self):
        """Content as string

        If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding`

        """
        if not self.content:
            return

        if not isinstance(self.content, text_type):
            return self.content.decode(self.get_encoding(), errors='replace')

        return self.content

    @property
    def numeric_id(self):
        raise NotImplementedError

    def get_fps(self):
        """
        :return: frames per second or None if not supported
        :rtype: float
        """
        return None

    def make_picklable(self):
        """
        some subtitle instances might have unpicklable objects stored; clean them up here
        :return: self
        """
        return self

    def get_encoding(self):
        return self.guess_encoding()

    def set_encoding(self, encoding):
        ge = self.get_encoding()
        if encoding == ge:
            return

        unicontent = self.text
        logger.debug("Changing encoding: to %s, from %s", encoding, ge)
        self.content = unicontent.encode(encoding)
        self._guessed_encoding = encoding

    def normalize(self):
        """
        Set encoding to UTF-8 and normalize line endings
        :return:
        """
        self.set_encoding("utf-8")

        # normalize line endings
        self.content = self.content.replace(b"\r\n", b"\n").replace(b'\r', b'\n')

    def _check_bom(self, data):
        # return the name of every encoding whose BOM prefixes the data; more than one can match,
        # since BOM_UTF16_LE is itself a prefix of BOM_UTF32_LE
        return [encoding for bom, encoding in BOMS if data.startswith(bom)]

    def guess_encoding(self):
        """Guess encoding using the language, falling back on chardet.

        :return: the guessed encoding.
        :rtype: str

        """
        if self._guessed_encoding:
            return self._guessed_encoding

        if self.encoding:
            # check provider encoding and use it only if it is valid
            try:
                self.content.decode(self.encoding)
                self._guessed_encoding = self.encoding
                return self._guessed_encoding
            except Exception:
                # provider-specified encoding is invalid, fall back to guessing
                pass

        logger.info('Guessing encoding for language %s', self.language)

        encodings = ['utf-8']

        # check UTF BOMs
        bom_encodings = self._check_bom(self.content)
        if bom_encodings:
            encodings = list(set(enc.lower() for enc in bom_encodings + encodings))

        # add language-specific encodings
        # http://scratchpad.wikia.com/wiki/Character_Encoding_Recommendation_for_Languages

        if self.language.alpha3 == 'zho':
            encodings.extend(['cp936', 'gb2312', 'gbk', 'hz', 'iso2022_jp_2', 'cp950', 'big5hkscs', 'big5',
                              'gb18030', 'utf-16'])
        elif self.language.alpha3 == 'jpn':
            encodings.extend(['shift-jis', 'cp932', 'euc_jp', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
                              'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', ])
        elif self.language.alpha3 == 'tha':
            encodings.extend(['tis-620', 'cp874'])

        # Arabic/Farsi
        elif self.language.alpha3 in ('ara', 'fas', 'per'):
            encodings.extend(['windows-1256', 'utf-16', 'utf-16le', 'ascii', 'iso-8859-6'])
        elif self.language.alpha3 == 'heb':
            encodings.extend(['windows-1255', 'iso-8859-8'])
        elif self.language.alpha3 == 'tur':
            encodings.extend(['windows-1254', 'iso-8859-9', 'iso-8859-3'])

        # Greek
        elif self.language.alpha3 in ('grc', 'gre', 'ell'):
            encodings.extend(['windows-1253', 'cp1253', 'cp737', 'iso8859-7', 'cp875', 'cp869', 'iso2022_jp_2',
                              'mac_greek'])

        # Polish, Czech, Slovak, Hungarian, Slovene, Bosnian, Croatian, Serbian (Latin script),
        # Romanian and Albanian
        elif self.language.alpha3 in ('pol', 'cze', 'ces', 'slk', 'slo', 'slv', 'hun', 'bos', 'hbs', 'hrv', 'rsb',
                                      'ron', 'rum', 'sqi', 'alb'):

            encodings.extend(['windows-1250', 'iso-8859-2'])

            # Eastern European Group 1
            if self.language.alpha3 == "slv":
                encodings.append('iso-8859-4')

            # Albanian
            elif self.language.alpha3 in ("sqi", "alb"):
                encodings.extend(['windows-1252', 'iso-8859-15', 'iso-8859-1', 'iso-8859-9'])

        # Bulgarian, Serbian, Macedonian, Ukrainian and Russian
        elif self.language.alpha3 in ('bul', 'srp', 'mkd', 'mac', 'rus', 'ukr'):
            # Eastern European Group 2
            if self.language.alpha3 in ('bul', 'mkd', 'mac', 'rus', 'ukr'):
                encodings.extend(['windows-1251', 'iso-8859-5'])

            elif self.language.alpha3 == 'srp':
                if self.language.script == "Latn":
                    encodings.extend(['windows-1250', 'iso-8859-2'])
                elif self.language.script == "Cyrl":
                    encodings.extend(['windows-1251', 'iso-8859-5'])
                else:
                    encodings.extend(['windows-1250', 'windows-1251', 'iso-8859-2', 'iso-8859-5'])

        else:
            # Western European (windows-1252) / Northern European
            encodings.extend(['windows-1252', 'iso-8859-15', 'iso-8859-9', 'iso-8859-4', 'iso-8859-1'])

        # try to decode
        logger.debug('Trying encodings %r', encodings)
        for encoding in encodings:
            try:
                self.content.decode(encoding)

            except UnicodeDecodeError:
                pass
            else:
                logger.info('Guessed encoding %s', encoding)
                self._guessed_encoding = encoding
                return encoding

        logger.warning('Could not guess encoding from language')

        # fallback on chardet
        encoding = chardet.detect(self.content)['encoding']
        logger.info('Chardet found encoding %s', encoding)

        if not encoding:
            # fallback on bs4
            logger.info('Falling back to bs4 detection')
            a = UnicodeDammit(self.content)

            logger.info("bs4 detected encoding: %s", a.original_encoding)

            if a.original_encoding:
                self._guessed_encoding = a.original_encoding
                return a.original_encoding
            raise ValueError("Couldn't guess the proper encoding for %s" % self)

        self._guessed_encoding = encoding
        return encoding

    def is_valid(self):
        """Check if :attr:`text` is a valid SubRip format. Note that the original format will bypass the check.

        :return: whether or not the subtitle is valid.
        :rtype: bool

        """
        if self._is_valid:
            return True

        text = self.text
        if not text:
            return False

        # valid srt
        try:
            pysrt.from_string(text, error_handling=pysrt.ERROR_RAISE)
        except Exception:
            logger.error("PySRT-parsing failed, trying pysubs2")
        else:
            self._is_valid = True
            return True

        # something else, try to return srt
        try:
            logger.debug("Trying parsing with PySubs2")
            try:
                # in case of microdvd, try parsing the fps from the subtitle
                subs = pysubs2.SSAFile.from_string(text)
                if subs.format == "microdvd":
                    logger.info("Got FPS from MicroDVD subtitle: %s", subs.fps)
                else:
                    logger.info("Got format: %s", subs.format)
                    if self.use_original_format:
                        self.format = subs.format
                        self._is_valid = True
                        logger.debug("Using original format")
                        return True

            except pysubs2.UnknownFPSError:
                # if parsing failed, use frame rate from provider
                sub_fps = self.get_fps()
                if not isinstance(sub_fps, float) or sub_fps < 10.0:
                    # or use our media file's fps as a fallback
                    sub_fps = self.plex_media_fps
                    logger.info("No FPS info in subtitle. Using our own media FPS for the MicroDVD subtitle: %s",
                                self.plex_media_fps)
                subs = pysubs2.SSAFile.from_string(text, fps=sub_fps)

            unicontent = self.pysubs2_to_unicode(subs)
            self.content = unicontent.encode(self.get_encoding())
        except Exception:
            logger.exception("Couldn't convert subtitle %s to .srt format: %s", self, traceback.format_exc())
            return False

        self._is_valid = True
        return True

    @classmethod
    def pysubs2_to_unicode(cls, sub, format="srt"):
        """
        this is a modified version of pysubs2.SubripFormat.to_file with special handling for drawing tags in ASS
        :param sub:
        :param format:
        :return:
        """
        def ms_to_timestamp(ms, mssep=","):
            """Convert ms to 'HH:MM:SS,mmm'"""
            # XXX throw on overflow/underflow?
            if ms < 0: ms = 0
            if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME
            h, m, s, ms = ms_to_times(ms)
            return "%02d:%02d:%02d%s%03d" % (h, m, s, mssep, ms)

        def prepare_text(text, style):
            body = []
            for fragment, sty in parse_tags(text, style, sub.styles):
                fragment = fragment.replace(r"\h", u" ")
                fragment = fragment.replace(r"\n", u"\n")
                fragment = fragment.replace(r"\N", u"\n")
                if sty.drawing:
                    raise pysubs2.ContentNotUsable

                if format == "srt":
                    if sty.italic:
                        fragment = u"<i>%s</i>" % fragment
                    if sty.underline:
                        fragment = u"<u>%s</u>" % fragment
                    if sty.strikeout:
                        fragment = u"<s>%s</s>" % fragment
                elif format == "vtt":
                    if sty.bold:
                        fragment = u"<b>%s</b>" % fragment
                    if sty.italic:
                        fragment = u"<i>%s</i>" % fragment
                    if sty.underline:
                        fragment = u"<u>%s</u>" % fragment

                body.append(fragment)

            return re.sub(u"\n+", u"\n", u"".join(body).strip())

        visible_lines = (line for line in sub if not line.is_comment)

        out = []
        mssep = ","

        if format == "vtt":
            out.append("WEBVTT\n\n")
            mssep = "."

        for i, line in enumerate(visible_lines, 1):
            start = ms_to_timestamp(line.start, mssep=mssep)
            end = ms_to_timestamp(line.end, mssep=mssep)
            try:
                text = prepare_text(line.text, sub.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
            except pysubs2.ContentNotUsable:
                continue

            out.append(u"%d\n" % i)
            out.append(u"%s --> %s\n" % (start, end))
            out.append(u"%s%s" % (text, "\n\n"))

        return u"".join(out)

    def get_modified_content(self, format="srt", debug=False):
        """
        :return: string
        """
        if not self.mods:
            return fix_text(self.content.decode(encoding=self.get_encoding()), **ftfy_defaults).encode(
                encoding=self.get_encoding())

        submods = SubtitleModifications(debug=debug)
        if submods.load(content=self.text, language=self.language):
            logger.info("Applying mods: %s", self.mods)
            submods.modify(*self.mods)
            self.mods = submods.mods_used

            content = fix_text(self.pysubs2_to_unicode(submods.f, format=format), **ftfy_defaults)\
                .encode(encoding=self.get_encoding())
            submods.f = None
            del submods
            return content

        return None


class ModifiedSubtitle(Subtitle):
    id = None


MERGED_FORMATS = {
    "TV": ("HDTV", "SDTV", "AHDTV", "Ultra HDTV"),
    "Air": ("SATRip", "DVB", "PPV", "Digital TV"),
    "Disk-HD": ("HD-DVD", "Blu-ray", "Ultra HD Blu-ray"),
    "Disk-SD": ("DVD", "VHS"),
    "Web": ("Web",),
}

MERGED_FORMATS_REV = dict((v.lower(), k.lower()) for k in MERGED_FORMATS for v in MERGED_FORMATS[k])
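# For illustration: every specific source name maps to its merged group, lower-cased,
# e.g. MERGED_FORMATS_REV["hdtv"] == "tv" and MERGED_FORMATS_REV["blu-ray"] == "disk-hd".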


def _has_match(video, guess, key) -> bool:
    value = getattr(video, key)
    guess_value = guess.get(key)

    # To avoid extra debug calls
    if guess_value is None or value is None:
        return False

    if isinstance(guess_value, list):
        matched = any(value == item for item in guess_value)
    else:
        matched = value == guess_value

    logger.debug("%s matched? %s (%s -> %s)", key, matched, value, guess_value)

    return matched
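
# Example (values hypothetical): guessit may return a list for a key, in which case any element
# matching the video attribute counts, e.g.
#   _has_match(video, {"streaming_service": ["Amazon Prime", "Netflix"]}, "streaming_service")
# is True when video.streaming_service == "Netflix".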


def guess_matches(video, guess, partial=False):
    """Get matches between a `video` and a `guess`.

    If a guess is `partial`, the absence of information won't be counted as a match.

    Patch: add multiple release group and formats handling

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param guess: the guess.
    :type guess: dict
    :param bool partial: whether or not the guess is partial.
    :return: matches between the `video` and the `guess`.
    :rtype: set

    """
    matches = set()
    if isinstance(video, Episode):
        # series
        if video.series and 'title' in guess:
            titles = guess["title"]
            if not isinstance(titles, list):
                titles = [titles]

            for title in titles:
                if sanitize(title) in (sanitize(name) for name in [video.series] + video.alternative_series):
                    matches.add('series')

        # title
        if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title):
            matches.add('title')

        # season
        if video.season and 'season' in guess and guess['season'] == video.season:
            matches.add('season')

        # episode
        # Currently we only have single-ep support (guessit returns a multi-ep as a list with int values)
        # Most providers only support single-ep, so make sure it contains only 1 episode
        # In case of multi-ep, take the lowest episode (subtitles will normally be available on lowest episode number)
        if video.episode and 'episode' in guess:
            episode = episode_guess = guess['episode']
            if isinstance(episode_guess, list):
                try:
                    episode = min([int(x) for x in episode_guess])
                except (TypeError, ValueError):
                    pass
            if episode == video.episode:
                matches.add('episode')

        # year
        if video.year and 'year' in guess and guess['year'] == video.year:
            matches.add('year')

        # count "no year" as an information
        if not partial and video.original_series and 'year' not in guess:
            matches.add('year')

    elif isinstance(video, Movie):
        # year
        if video.year and 'year' in guess and guess['year'] == video.year:
            matches.add('year')
        # title
        if video.title and 'title' in guess and sanitize(guess['title']) in (
                sanitize(name) for name in [video.title] + video.alternative_titles):
            matches.add('title')

    # release_group
    if 'release_group' in guess:
        release_groups = guess["release_group"]
        if not isinstance(release_groups, list):
            release_groups = [release_groups]

        if video.release_group:
            for release_group in release_groups:
                if (sanitize_release_group(release_group) in
                        get_equivalent_release_groups(sanitize_release_group(video.release_group))):
                    matches.add('release_group')
                    break

    # source
    if 'source' in guess:
        formats = guess["source"]
        if not isinstance(formats, list):
            formats = [formats]

        if video.source:
            video_format = video.source.lower()
            _video_gen_format = MERGED_FORMATS_REV.get(video_format)
            matched = False
            for frmt in formats:
                _guess_gen_frmt = MERGED_FORMATS_REV.get(frmt.lower())
                # We don't want to match a singleton
                if _guess_gen_frmt is None:  # If the source is not in MERGED_FORMATS
                    _guess_gen_frmt = guess["source"]

                if _guess_gen_frmt == _video_gen_format:
                    matched = True
                    matches.add('source')
                    break

            logger.debug("Source match found? %s: %s -> %s", matched, video.source, formats)

        if "release_group" in matches and "source" not in matches:
            logger.info("Release group matched but source didn't. Removing release group match.")
            matches.remove("release_group")

    guess.update({"resolution": guess.get("screen_size")})

    # Solve match keys for potential lists
    for key in ("video_codec", "audio_codec", "edition", "streaming_service", "resolution"):
        if _has_match(video, guess, key):
            matches.add(key)

    for key in ("streaming_service", "edition", "other"):
        if _check_optional(video, guess, key):
            matches.add(key)

    return matches
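
# Rough usage sketch (release name and returned keys are hypothetical, for illustration only):
#
#   from guessit import guessit
#   guess = guessit("Show.Name.S01E02.720p.WEB.x264-GROUP", {"type": "episode"})
#   matches = guess_matches(video, guess)  # e.g. {"series", "season", "episode", "source", ...}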


def _check_optional(video, guess, key="edition"):
    guess_optional = guess.get(key)
    video_optional = getattr(video, key, None)

    if video_optional and guess_optional:
        return _has_match(video, guess, key)

    if not video_optional and not guess_optional:
        logger.debug("Both video and guess don't have %s. Returning True", key)
        return True

    logger.debug("One item doesn't have %s (%s -> %s). Returning False", key, guess_optional, video_optional)
    return False