bazarr/libs/subliminal_patch/providers/zimuku.py

430 lines
15 KiB
Python
Raw Normal View History

# -*- coding: utf-8 -*-
2019-09-17 02:04:27 +00:00
from __future__ import absolute_import
import io
import logging
import os
import zipfile
2020-03-25 06:06:06 +00:00
import re
import copy
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import rarfile
2021-02-15 12:29:22 +00:00
from babelfish import language_converters
from subzero.language import Language
from guessit import guessit
from requests import Session
from six import text_type
from random import randint
2021-02-15 12:29:22 +00:00
from subliminal.providers import ParserBeautifulSoup
from subliminal_patch.providers import Provider
2020-03-25 06:06:06 +00:00
from subliminal.subtitle import (
SUBTITLE_EXTENSIONS,
2021-02-15 12:29:22 +00:00
fix_line_ending
)
2021-02-15 12:29:22 +00:00
from subliminal_patch.subtitle import (
2020-03-25 06:06:06 +00:00
Subtitle,
2021-02-15 12:29:22 +00:00
guess_matches
2020-03-25 06:06:06 +00:00
)
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
from subliminal.video import Episode, Movie
# Module-level logger for this provider.
logger = logging.getLogger(__name__)

# Register the zimuku language converter and expose the set of languages it
# can translate; runs once at import time.
language_converters.register('zimuku = subliminal_patch.converters.zimuku:zimukuConverter')
supported_languages = list(language_converters['zimuku'].to_zimuku.keys())
class ZimukuSubtitle(Subtitle):
    """Zimuku Subtitle.

    Carries the subtitle page link, the release name used for matching,
    the session that listed it (cookies stay valid for the download), and
    the year inferred from the search result.
    """

    provider_name = "zimuku"

    def __init__(self, language, page_link, version, session, year):
        super(ZimukuSubtitle, self).__init__(language, page_link=page_link)
        self.version = version
        self.release_info = version
        self.hearing_impaired = False
        self.encoding = "utf-8"
        self.session = session
        self.year = year

    @property
    def id(self):
        # The subtitle detail page URL is unique per subtitle.
        return self.page_link

    def get_matches(self, video):
        """Return the set of properties of *video* matched by this subtitle."""
        matches = set()

        if video.year == self.year:
            matches.add('year')

        if isinstance(video, Episode):
            guess = guessit(self.version, {"type": "episode"})
            matches |= guess_matches(video, guess)
            # Treat year as matched when the video has no year but the
            # series, season and episode all line up.
            if not video.year and {'series', 'season', 'episode'} <= matches:
                matches.add('year')
        elif isinstance(video, Movie):
            matches |= guess_matches(video, guessit(self.version, {"type": "movie"}))

        return matches
class ZimukuProvider(Provider):
    """Zimuku Provider."""

    # Languages supported, as declared by the registered zimuku converter.
    languages = {Language(*l) for l in supported_languages}
    video_types = (Episode, Movie)
    # NOTE: executes at class-definition (import) time.
    logger.info(str(supported_languages))

    server_url = "http://zimuku.org"
    # Search endpoint; second placeholder is the anti-bot "vertoken".
    search_url = "/search?q={}&vertoken={}"
    download_url = "http://zimuku.org/"

    subtitle_class = ZimukuSubtitle

    def __init__(self):
        # The requests session is created lazily in initialize().
        self.session = None
def stringToHex(self, s):
    """Return *s* with every character replaced by the hex digits of its
    code point (no ``0x`` prefix), concatenated — e.g. ``"ab"`` -> ``"6162"``.

    Mirrors the JavaScript helper used by the site's anti-bot check.
    """
    return "".join(hex(ord(ch))[2:] for ch in s)

# Anti-bot verification token scraped from the search page; filled in by
# yunsuo_bypass() and appended to search URLs.
vertoken = ""
# Matches the JS redirect emitted by the "yunsuo" protection page.
location_re = re.compile(
    r'self\.location = "(.*)" \+ stringToHex\(screendate\)')
def yunsuo_bypass(self, url, *args, **kwargs):
    """GET *url*, transparently solving the site's "yunsuo" anti-bot screen.

    Retries until a page without the JS redirect challenge is returned.
    As a side effect, caches the search-form ``vertoken`` on self when the
    response is an HTML page that contains one.

    NOTE(review): control-flow indentation reconstructed from a
    whitespace-mangled source — confirm against upstream bazarr.
    """
    i = -1  # NOTE(review): loop counter is incremented but never read
    while True:
        i += 1
        r = self.session.get(url, *args, **kwargs)
        if(r.status_code == 404):
            # 404 is the challenge page: mimic the JS by storing the hex of
            # the current URL in the "srcurl" cookie, then hit the verify
            # URL with a fake screen size to obtain the security cookie.
            tr = self.location_re.findall(r.text)
            self.session.cookies.set("srcurl", self.stringToHex(r.url))
            if(tr):
                verify_resp = self.session.get(
                    self.server_url+tr[0]+self.stringToHex("1080,1920"), allow_redirects=False)
                if(verify_resp.status_code == 302 and self.session.cookies.get("security_session_verify") != None):
                    pass
            continue  # retry the original request with the new cookies
        if len(self.location_re.findall(r.text)) == 0:
            # Challenge cleared: opportunistically scrape the vertoken from
            # any HTML page before returning the response.
            if(r.headers.get("Content-Type") == "text/html; charset=utf-8"):
                v = ParserBeautifulSoup(
                    r.content.decode("utf-8", "ignore"), ["html.parser"]
                ).find(
                    "input", attrs={'name': 'vertoken'})
                if(v):
                    self.vertoken = v.get("value")
            return r
def initialize(self):
    """Start a fresh HTTP session with a randomly picked User-Agent."""
    self.session = Session()
    agent_index = randint(0, len(AGENT_LIST) - 1)
    self.session.headers["User-Agent"] = AGENT_LIST[agent_index]

def terminate(self):
    """Dispose of the HTTP session."""
    self.session.close()
2020-07-06 22:34:52 +00:00
def _parse_episode_page(self, link, year):
    """Parse one zimuku detail page and return its list of ZimukuSubtitle.

    *link* is the episode/movie page URL; *year* is propagated onto every
    subtitle created. Each table row yields one subtitle whose language is
    inferred from the flag icons in the "lang" cell.
    """
    r = self.yunsuo_bypass(link)
    bs_obj = ParserBeautifulSoup(
        r.content.decode("utf-8", "ignore"), ["html.parser"]
    )
    subs_body = bs_obj.find("div", class_="subs box clearfix").find("tbody")
    subs = []
    for sub in subs_body.find_all("tr"):
        a = sub.find("a")
        name = _extract_name(a.text)
        name = os.path.splitext(name)[
            0
        ]  # remove ext because it can be an archive type

        # Default to English; overridden by any recognised flag icon.
        language = Language("eng")
        for img in sub.find("td", class_="tac lang").find_all("img"):
            if (
                # NOTE(review): a single icon src containing both "china"
                # and "hongkong" is treated as dual simplified+traditional
                # — confirm the site still serves such a combined icon.
                "china" in img.attrs["src"]
                and "hongkong" in img.attrs["src"]
            ):
                language = Language("zho").add(Language('zho', 'TW', None))
                logger.debug("language:"+str(language))
            elif (
                "china" in img.attrs["src"]
                or "jollyroger" in img.attrs["src"]
            ):
                language = Language("zho")
            elif "hongkong" in img.attrs["src"]:
                language = Language('zho', 'TW', None)
                # NOTE(review): break level reconstructed from a mangled
                # source; here it stops scanning once traditional Chinese
                # is found — confirm against upstream.
                break

        sub_page_link = urljoin(self.server_url, a.attrs["href"])
        # Deep-copy the session so each subtitle keeps working cookies and
        # its own Referer even after self.session changes.
        backup_session = copy.deepcopy(self.session)
        backup_session.headers["Referer"] = link

        subs.append(
            self.subtitle_class(language, sub_page_link, name, backup_session, year)
        )

    return subs
def query(self, keyword, season=None, episode=None, year=None):
    """Search zimuku for *keyword* and return a list of ZimukuSubtitle.

    :param keyword: series or movie title to search for.
    :param season: when set, the search term gets ``.SNN`` appended and
        results are filtered to the matching "第X季" (season) pages.
    :param episode: accepted for interface compatibility; not used in the
        search itself (episode matching happens later in get_matches).
    :param year: appended to the search term for movies; also the fallback
        year attached to results.
    """
    if self.vertoken == "":
        # Prime the anti-bot cookies and scrape the search vertoken first.
        self.yunsuo_bypass(self.server_url + '/')

    params = keyword
    if season:
        params += ".S{season:02d}".format(season=season)
    elif year:
        params += " {:4d}".format(year)

    logger.debug("Searching subtitles %r", params)
    subtitles = []
    search_link = self.server_url + text_type(self.search_url).format(params, self.vertoken)

    r = self.yunsuo_bypass(search_link, timeout=30)
    r.raise_for_status()

    if not r.content:
        logger.debug("No data returned from provider")
        return []

    html = r.content.decode("utf-8", "ignore")
    # parse window location
    pattern = r"url\s*=\s*'([^']*)'\s*\+\s*url"
    parts = re.findall(pattern, html)
    redirect_url = search_link
    while parts:
        parts.reverse()
        redirect_url = urljoin(self.server_url, "".join(parts))
        # BUG FIX: the original called self.query_resp(), which is not
        # defined anywhere in this module and raised AttributeError as soon
        # as a JS redirect page was encountered; follow the redirect through
        # the anti-bot helper instead.
        r = self.yunsuo_bypass(redirect_url, timeout=30)
        html = r.content.decode("utf-8", "ignore")
        parts = re.findall(pattern, html)
    logger.debug("search url located: " + redirect_url)

    soup = ParserBeautifulSoup(
        r.content.decode("utf-8", "ignore"), ["lxml", "html.parser"]
    )

    # non-shooter result page
    if soup.find("div", {"class": "item"}):
        logger.debug("enter a non-shooter page")
        for item in soup.find_all("div", {"class": "item"}):
            title_a = item.find("p", class_="tt clearfix").find("a")
            subs_year = year
            if season:
                # episode year in zimuku is the season's year not show's year
                actual_subs_year = re.findall(r"\d{4}", title_a.text) or None
                if actual_subs_year:
                    subs_year = int(actual_subs_year[0]) - season + 1
                title = title_a.text
                # Compare the page's "第X季" marker with the Chinese numeral
                # of the requested season; skip non-matching seasons.
                season_cn1 = re.search("第(.*)季", title)
                if not season_cn1:
                    season_cn1 = ""
                else:
                    season_cn1 = season_cn1.group(1).strip()
                season_cn2 = num_to_cn(str(season))
                if season_cn1 != season_cn2:
                    continue
            episode_link = self.server_url + title_a.attrs["href"]
            new_subs = self._parse_episode_page(episode_link, subs_year)
            subtitles += new_subs

    # NOTE: shooter result pages are ignored due to the existence of zimuku provider
    return subtitles
def list_subtitles(self, video, languages):
    """Query every known title of *video* and keep subtitles whose
    language is in *languages*."""
    if isinstance(video, Episode):
        candidate_titles = [video.series] + video.alternative_series
    elif isinstance(video, Movie):
        candidate_titles = [video.title] + video.alternative_titles
    else:
        candidate_titles = []

    results = []
    for candidate in candidate_titles:
        if isinstance(video, Episode):
            found = self.query(
                candidate,
                season=video.season,
                episode=video.episode,
                year=video.year,
            )
        else:
            found = self.query(candidate, year=video.year)
        results.extend(s for s in found if s.language in languages)

    return results
def download_subtitle(self, subtitle):
    """Download *subtitle*, unpack it if archived, and set subtitle.content.

    Navigates from the subtitle detail page to the real download link,
    fetches it through the anti-bot helper, then extracts the best-scoring
    member from a rar/zip archive (or uses the payload directly when it is
    a plain subtitle file).
    """
    def _get_archive_dowload_link(yunsuopass, sub_page_link):
        # Follow detail page -> download page -> final file link.
        r = yunsuopass(sub_page_link)
        bs_obj = ParserBeautifulSoup(
            r.content.decode("utf-8", "ignore"), ["html.parser"]
        )
        down_page_link = bs_obj.find("a", {"id": "down1"}).attrs["href"]
        down_page_link = urljoin(sub_page_link, down_page_link)
        r = yunsuopass(down_page_link)
        bs_obj = ParserBeautifulSoup(
            r.content.decode("utf-8", "ignore"), ["html.parser"]
        )
        download_link = bs_obj.find("a", {"rel": "nofollow"})
        download_link = download_link.attrs["href"]
        download_link = urljoin(sub_page_link, download_link)
        return download_link

    # download the subtitle
    logger.info("Downloading subtitle %r", subtitle)
    # Reuse the session captured when the subtitle was listed, so its
    # anti-bot cookies are still valid for the download.
    self.session = subtitle.session
    download_link = _get_archive_dowload_link(self.yunsuo_bypass, subtitle.page_link)
    r = self.yunsuo_bypass(download_link, headers={'Referer': subtitle.page_link}, timeout=30)
    r.raise_for_status()

    try:
        # Content-Disposition carries the original file name (with extension).
        filename = r.headers["Content-Disposition"]
    except KeyError:
        logger.debug("Unable to parse subtitles filename. Dropping this subtitles.")
        return

    if not r.content:
        logger.debug("Unable to download subtitle. No data returned from provider")
        return

    archive_stream = io.BytesIO(r.content)
    archive = None
    if rarfile.is_rarfile(archive_stream):
        logger.debug("Identified rar archive")
        # Sanity check: the advertised file name should match the archive type.
        if ".rar" not in filename:
            logger.debug(
                ".rar should be in the downloaded file name: {}".format(filename)
            )
            return
        archive = rarfile.RarFile(archive_stream)
        subtitle_content = _get_subtitle_from_archive(archive)
    elif zipfile.is_zipfile(archive_stream):
        logger.debug("Identified zip archive")
        if ".zip" not in filename:
            logger.debug(
                ".zip should be in the downloaded file name: {}".format(filename)
            )
            return
        archive = zipfile.ZipFile(archive_stream)
        subtitle_content = _get_subtitle_from_archive(archive)
    else:
        # Not an archive: accept only when a known subtitle extension
        # appears in the advertised file name.
        is_sub = ""
        for sub_ext in SUBTITLE_EXTENSIONS:
            if sub_ext in filename:
                is_sub = sub_ext
                break
        if not is_sub:
            logger.debug(
                "unknown subtitle ext int downloaded file name: {}".format(filename)
            )
            return
        logger.debug("Identified {} file".format(is_sub))
        subtitle_content = r.content

    if subtitle_content:
        subtitle.content = fix_line_ending(subtitle_content)
    else:
        logger.debug("Could not extract subtitle from %r", archive)
def _get_subtitle_from_archive(archive):
    """Return the bytes of the best subtitle member of *archive*, or None.

    Hidden files and non-subtitle extensions are skipped; remaining members
    are scored, preferring ass/ssa/srt files and dual-language or
    simplified/traditional Chinese releases.
    """
    best_name, best_score = "", -1
    for member in archive.namelist():
        # discard hidden files
        if os.path.split(member)[-1].startswith("."):
            continue
        # discard non-subtitle files
        if not member.lower().endswith(SUBTITLE_EXTENSIONS):
            continue

        # prefer ass/ssa/srt subtitles with double languages or
        # simplified/traditional chinese
        score = 1 if ("ass" in member or "ssa" in member or "srt" in member) else 0
        if "简体" in member or "chs" in member or ".gb." in member:
            score += 2
        if "繁体" in member or "cht" in member or ".big5." in member:
            score += 2
        if "chs.eng" in member or "chs&eng" in member or "cht.eng" in member or "cht&eng" in member:
            score += 2
        if "中英" in member or "简英" in member or "繁英" in member or "双语" in member or "简体&英文" in member or "繁体&英文" in member:
            score += 4
        logger.debug("subtitle {}, score: {}".format(member, score))
        if score > best_score:
            best_score = score
            best_name = member
    return archive.read(best_name) if best_score != -1 else None
def _extract_name(name):
""" filter out Chinese characters from subtitle names """
name, suffix = os.path.splitext(name)
c_pattern = "[\u4e00-\u9fff]"
e_pattern = "[a-zA-Z]"
c_indices = [m.start(0) for m in re.finditer(c_pattern, name)]
e_indices = [m.start(0) for m in re.finditer(e_pattern, name)]
target, discard = e_indices, c_indices
if len(target) == 0:
return ""
first_target, last_target = target[0], target[-1]
first_discard = discard[0] if discard else -1
last_discard = discard[-1] if discard else -1
if last_discard < first_target:
new_name = name[first_target:]
elif last_target < first_discard:
new_name = name[:first_discard]
else:
# try to find maximum continous part
result, start, end = [0, 1], -1, 0
while end < len(name):
while end not in e_indices and end < len(name):
end += 1
if end == len(name):
break
start = end
while end not in c_indices and end < len(name):
end += 1
if end - start > result[1] - result[0]:
result = [start, end]
start = end
end += 1
new_name = name[result[0] : result[1]]
new_name = new_name.strip() + suffix
return new_name
def num_to_cn(number):
    """ convert numbers(1-99) to Chinese

    Examples: "1" -> "一", "10" -> "十", "12" -> "十二", "21" -> "二十一".
    Raises AssertionError when *number* is not a digit string in 1-99.
    """
    assert number.isdigit() and 1 <= int(number) <= 99
    trans_map = {n: c for n, c in zip(("123456789"), ("一二三四五六七八九"))}
    if len(number) == 1:
        return trans_map[number]
    # BUG FIX: the tens numeral "十" was missing, so "10" produced "" and
    # "12" produced "二"; the season filter in query() ("第十二季" etc.)
    # could therefore never match seasons >= 10.
    part1 = "十" if number[0] == "1" else trans_map[number[0]] + "十"
    part2 = trans_map[number[1]] if number[1] != "0" else ""
    return part1 + part2