bazarr/libs/subliminal_patch/providers/subdivx.py

# -*- coding: utf-8 -*-
import logging
import re
import time

from requests import Session
from subliminal import __short_version__
from subliminal.providers import ParserBeautifulSoup
from subliminal.video import Episode, Movie
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers import Provider
from subliminal_patch.providers.utils import get_archive_from_bytes
from subliminal_patch.providers.utils import get_subtitle_from_archive
from subliminal_patch.providers.utils import update_matches
from subliminal_patch.subtitle import Subtitle
from subzero.language import Language

_SERVER_URL = "https://www.subdivx.com"

# Regex substitutions applied, in order, by _clean_title
_CLEAN_TITLE_RES = [
    (r"subt[ií]tulos de", ""),
    (r"´|`", "'"),
    (r" {2,}", " "),
]

# Matches a parenthesized four-digit year, e.g. "(1993)"
_YEAR_RE = re.compile(r"(\(\d{4}\))")
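
# For reference, the capturing group makes _YEAR_RE keep the year token when
# splitting (this is relied on by _check_movie below):
#
#   _YEAR_RE.split("Carlito's Way (1993) aka Atrapado por su pasado")
#   -> ["Carlito's Way ", "(1993)", " aka Atrapado por su pasado"]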

logger = logging.getLogger(__name__)


class SubdivxSubtitle(Subtitle):
    provider_name = "subdivx"
    hash_verifiable = False

    def __init__(
        self, language, video, page_link, title, description, uploader, download_url
    ):
        super(SubdivxSubtitle, self).__init__(
            language, hearing_impaired=False, page_link=page_link
        )
        self.video = video
        self.title = title
        self.download_url = download_url
        self.description = description
        self.uploader = uploader

        self.release_info = self.title
        if self.description and self.description.strip():
            self.release_info += " | " + self.description

    @property
    def id(self):
        return self.page_link

    def get_matches(self, video):
        matches = set()

        # Episode
        if isinstance(video, Episode):
            # Already matched in the search query
            matches.update(["title", "series", "season", "episode", "year"])
        # Movie
        elif isinstance(video, Movie):
            # Already matched in the search query
            matches.update(["title", "year"])

        update_matches(matches, video, self.description)

        # Don't lowercase the description; otherwise the release-group check
        # matches a lot of false positives
        if video.release_group and video.release_group in self.description:
            matches.add("release_group")

        return matches


# Hard-coded "iduser_cookie" value sent with every request; subdivx.com
# appears to require it for searches to return results (an observation about
# the provider's behavior, not something the site documents)
_IDUSER_COOKIE = "VkZaRk9WQlJQVDA12809"


class SubdivxSubtitlesProvider(Provider):
    provider_name = "subdivx"
    hash_verifiable = False
    # Subdivx hosts both Latin American Spanish (mapped here to es-MX) and
    # Spain Spanish subtitles; see the flag check in _parse_subtitles_page
    languages = {Language("spa", "MX")} | {Language.fromalpha2("es")}
    video_types = (Episode, Movie)
    subtitle_class = SubdivxSubtitle
    multi_result_throttle = 2  # seconds to wait between result pages

    def __init__(self):
        self.session = None

    def initialize(self):
        self.session = Session()
        self.session.headers["User-Agent"] = f"Subliminal/{__short_version__}"
        self.session.cookies.update({"iduser_cookie": _IDUSER_COOKIE})

    def terminate(self):
        self.session.close()

    def query(self, video, languages):
        subtitles = []

        if isinstance(video, Episode):
            # Search by exact episode first, then by whole season
            for query in (
                f"{video.series} S{video.season:02}E{video.episode:02}",
                f"{video.series} S{video.season:02}",
            ):
                subtitles += self._handle_multi_page_search(query, video)

            # Fallback: search by series name only
            if not subtitles:
                subtitles += self._handle_multi_page_search(video.series, video)
        else:
            # Subdivx has problems searching foreign movies if the year is
            # appended. A proper solution would be filtering results with the
            # year in self._parse_subtitles_page.
            subtitles += self._handle_multi_page_search(video.title, video)

        return subtitles

    def _handle_multi_page_search(self, query, video, max_loops=3):
        params = {
            "buscar2": query,
            "accion": "5",
            "masdesc": "",
            "subtitulos": "1",
            "realiza_b": "1",
            "pg": 1,  # must be an int so it can be incremented below
        }
        logger.debug("Query: %s", query)

        loops = 1
        max_loops_not_met = True

        while max_loops_not_met:
            loops += 1
            max_loops_not_met = loops < max_loops

            page_subtitles = self._get_page_subtitles(params, video)

            logger.debug("Yielding %d subtitles", len(page_subtitles))
            yield from page_subtitles

            # Subdivx serves up to 100 results per page; fewer means this
            # was the last page
            if len(page_subtitles) < 100:
                break

            params["pg"] += 1  # search next page
            time.sleep(self.multi_result_throttle)
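    # Note on the loop above: with the default max_loops=3, at most two pages
    # are fetched. loops starts at 1, is incremented before each fetch, and
    # the while condition is recomputed from "loops < max_loops", so the
    # sequence is: loops=2 (fetch pg=1), loops=3 (fetch pg=2), stop.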

    def _get_page_subtitles(self, params, video):
        search_link = f"{_SERVER_URL}/index.php"
        response = self.session.get(
            search_link, params=params, allow_redirects=True, timeout=20
        )

        try:
            page_subtitles = self._parse_subtitles_page(video, response)
        except Exception as error:
            logger.error("Error parsing subtitles list: %s", error)
            return []

        return page_subtitles

    def list_subtitles(self, video, languages):
        return self.query(video, languages)

    def download_subtitle(self, subtitle):
        logger.info("Downloading subtitle %r", subtitle)

        # Download the zip/rar archive that contains the subtitle
        response = self.session.get(
            subtitle.download_url,
            headers={"Referer": subtitle.page_link},
            timeout=30,
        )
        response.raise_for_status()

        # TODO: add MustGetBlacklisted support

        archive = get_archive_from_bytes(response.content)
        if archive is None:
            raise APIThrottled("Unknown compressed format")

        episode = None
        if isinstance(subtitle.video, Episode):
            episode = subtitle.video.episode

        subtitle.content = get_subtitle_from_archive(archive, episode=episode)

    def _parse_subtitles_page(self, video, response):
        subtitles = []

        page_soup = ParserBeautifulSoup(
            response.content.decode("utf-8", "ignore"), ["lxml", "html.parser"]
        )
        title_soups = page_soup.find_all("div", {"id": "menu_detalle_buscador"})
        body_soups = page_soup.find_all("div", {"id": "buscador_detalle"})
        episode = isinstance(video, Episode)

        for title_soup, body_soup in zip(title_soups, body_soups):
            # Title
            title = _clean_title(title_soup.find("a").text)

            # Forced subtitles are not supported
            if title.lower().rstrip().endswith(("forzado", "forzados")):
                logger.debug("Skipping forced subtitles: %s", title)
                continue

            # Check the movie title (if the video is a movie)
            if not episode and not _check_movie(video, title):
                continue

            # Metadata
            datos = body_soup.find("div", {"id": "buscador_detalle_sub_datos"}).text

            # Ignore multi-disc and non-SubRip subtitles
            if not any(item in datos for item in ("Cds:</b> 1", "SubRip")):
                continue

            # The Spanish flag icon marks uploads from Spain
            spain = "/pais/7.gif" in datos
            language = Language.fromalpha2("es") if spain else Language("spa", "MX")

            # Description
            sub_details = body_soup.find("div", {"id": "buscador_detalle_sub"}).text
            description = sub_details.replace(",", " ")

            # Uploader
            uploader = body_soup.find("a", {"class": "link1"}).text

            download_url = _get_download_url(body_soup)
            page_link = title_soup.find("a")["href"]

            subtitle = self.subtitle_class(
                language, video, page_link, title, description, uploader, download_url
            )

            logger.debug("Found subtitle %r", subtitle)
            subtitles.append(subtitle)

        return subtitles


def _clean_title(title):
    """
    Normalize apostrophes and spaces to avoid matching problems
    (e.g. "Subtitulos de Carlito´s Way" -> "Carlito's Way")
    """
    for og, new in _CLEAN_TITLE_RES:
        title = re.sub(og, new, title, flags=re.IGNORECASE)

    return title
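
# For instance (input illustrative; the cleaned result is shown modulo
# surrounding whitespace):
#
#   _clean_title("Subtitulos de Carlito´s  Way")  ->  "Carlito's Way"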


def _get_download_url(data):
    # Return the first "bajar.php" link found in the result body, if any
    try:
        return [
            a_.get("href")
            for a_ in data.find_all("a")
            if "bajar.php" in a_.get("href", "n/a")
        ][0]
    except IndexError:
        return None


def _check_movie(video, title):
    # Reject results whose title doesn't contain the movie's year, then
    # compare the title (and the "aka" alternative title, when present)
    # against the video's known titles
    if str(video.year) not in title:
        return False

    aka_split = re.split("aka", title, flags=re.IGNORECASE)

    alt_title = None
    if len(aka_split) == 2:
        alt_title = aka_split[-1].strip()

    try:
        actual_movie_title = _YEAR_RE.split(title)[0].strip()
    except IndexError:
        return False

    all_titles = [
        v_title.lower() for v_title in [video.title, *video.alternative_titles]
    ]

    return (
        actual_movie_title.lower() in all_titles
        or (alt_title or "").lower() in all_titles
    )
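

# A minimal, manual smoke test for the provider (not part of the original
# module). It assumes a working subliminal/subliminal_patch environment and
# network access to subdivx.com; the Movie constructor arguments follow
# subliminal's Movie(name, title, ...) signature and the file name below is
# purely illustrative.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    # Hypothetical video; any movie title/year pair known to subdivx works
    movie = Movie("Carlitos.Way.1993.1080p.mkv", "Carlito's Way", year=1993)
    # _check_movie expects this attribute; plain subliminal Movies may not
    # set it, so define it explicitly for the sketch
    movie.alternative_titles = []

    provider = SubdivxSubtitlesProvider()
    provider.initialize()
    try:
        found = provider.list_subtitles(movie, provider.languages)
        for sub in found:
            print(sub.language, sub.page_link, sub.release_info)
    finally:
        provider.terminate()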