Merge development into master

github-actions[bot] authored on 2022-12-31 16:37:03 +00:00, committed by GitHub
commit 0c7e422297
29 changed files with 290 additions and 449 deletions


@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Execute
-        uses: benc-uk/workflow-dispatch@v1
+        uses: benc-uk/workflow-dispatch@v121
         with:
           workflow: "release_beta_to_dev"
           token: ${{ secrets.WF_GITHUB_TOKEN }}


@@ -35,6 +35,9 @@ class WebHooksPlex(Resource):
         args = self.post_request_parser.parse_args()
         json_webhook = args.get('payload')
         parsed_json_webhook = json.loads(json_webhook)
+        if 'Guid' not in parsed_json_webhook['Metadata']:
+            logging.debug('No GUID provided in Plex json payload. Probably a pre-roll video.')
+            return "No GUID found in JSON request body", 200
 
         event = parsed_json_webhook['event']
         if event not in ['media.play']:


@@ -78,7 +78,8 @@ defaults = {
         'wanted_search_frequency_movie': '3',
         'subzero_mods': '[]',
         'dont_notify_manual_actions': 'False',
-        'hi_extension': 'hi'
+        'hi_extension': 'hi',
+        'embedded_subtitles_parser': 'ffprobe'
     },
     'auth': {
         'type': 'None',
@@ -297,6 +298,11 @@ settings.general.base_url = base_url_slash_cleaner(uri=settings.general.base_url
 settings.sonarr.base_url = base_url_slash_cleaner(uri=settings.sonarr.base_url)
 settings.radarr.base_url = base_url_slash_cleaner(uri=settings.radarr.base_url)
 
+# fixing issue with improper page_size value
+if settings.general.page_size not in ['25', '50', '100', '250', '500', '1000']:
+    settings.general.page_size = defaults['general']['page_size']
+
+# save updated settings to file
 if os.path.exists(os.path.join(args.config_dir, 'config', 'config.ini')):
     with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
         settings.write(handle)


@@ -287,7 +287,8 @@ def provider_throttle(name, exception):
     logging.info("Throttling %s for %s, until %s, because of: %s. Exception info: %r", name,
                  throttle_description, throttle_until.strftime("%y/%m/%d %H:%M"), cls_name, exception.args[0]
                  if exception.args else None)
-    update_throttled_provider()
+
+    update_throttled_provider()
 
 
 def throttled_count(name):


@@ -10,8 +10,9 @@ import time
 import rarfile
 from dogpile.cache.region import register_backend as register_cache_backend
+from subliminal_patch.extensions import provider_registry
 
-from app.config import settings, configure_captcha_func
+from app.config import settings, configure_captcha_func, get_array_from
 from app.get_args import args
 from app.logger import configure_logging
 from utilities.binaries import get_binary, BinaryNotFound
@@ -193,6 +194,14 @@ with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini')
         settings.write(handle)
 
+# Remove deprecated providers from enabled providers in config.ini
+existing_providers = provider_registry.names()
+enabled_providers = get_array_from(settings.general.enabled_providers)
+settings.general.enabled_providers = str([x for x in enabled_providers if x in existing_providers])
+with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
+    settings.write(handle)
+
 
 def init_binaries():
     try:
         exe = get_binary("unar")
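
A note on the provider clean-up above: the enabled-provider list is stored in config.ini as a Python-list-style string, so it has to be parsed, filtered against the registry, and written back as a string. A minimal standalone sketch of that round trip, assuming get_array_from() behaves roughly like ast.literal_eval on the stored value (the sample values below are illustrative, not Bazarr's real objects):

import ast

# Illustrative stand-ins for the config value and the provider registry
enabled_providers_raw = "['opensubtitlescom', 'legendastv', 'subf2m']"
existing_providers = {'opensubtitlescom', 'subf2m'}  # 'legendastv' is no longer registered

# Assumption: get_array_from() parses the stored string back into a Python list
enabled_providers = ast.literal_eval(enabled_providers_raw)

# Keep only providers that still exist, then store the list back in the same string form
cleaned = str([x for x in enabled_providers if x in existing_providers])
print(cleaned)  # ['opensubtitlescom', 'subf2m']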


@@ -174,7 +174,10 @@ def check_missing_languages(path, media_type):
         .get_or_none()
 
     if not confirmed_missing_subs:
-        return None
+        reversed_path = path_mappings.path_replace_reverse(path) if media_type == 'series' else \
+            path_mappings.path_replace_reverse_movie(path)
+        logging.debug(f"BAZARR no media with this path have been found in database: {reversed_path}")
+        return []
 
     languages = []
     for language in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):


@@ -32,38 +32,45 @@ def refine_from_ffprobe(path, video):
         data = parse_video_metadata(file=path, file_size=file_id['file_size'],
                                     episode_file_id=file_id['episode_file_id'])
 
-    if not data['ffprobe']:
-        logging.debug("No FFprobe available in cache for this file: {}".format(path))
+    if 'ffprobe' not in data and 'mediainfo' not in data:
+        logging.debug("No cache available for this file: {}".format(path))
         return video
 
-    logging.debug('FFprobe found: %s', data['ffprobe'])
-
-    if 'video' not in data['ffprobe']:
-        logging.debug('BAZARR FFprobe was unable to find video tracks in the file!')
+    if data['ffprobe']:
+        logging.debug('FFprobe found: %s', data['ffprobe'])
+        parser_data = data['ffprobe']
+    elif data['mediainfo']:
+        logging.debug('Mediainfo found: %s', data['mediainfo'])
+        parser_data = data['mediainfo']
     else:
-        if 'resolution' in data['ffprobe']['video'][0]:
+        parser_data = {}
+
+    if 'video' not in parser_data:
+        logging.debug('BAZARR parser was unable to find video tracks in the file!')
+    else:
+        if 'resolution' in parser_data['video'][0]:
             if not video.resolution:
-                video.resolution = data['ffprobe']['video'][0]['resolution']
-        if 'codec' in data['ffprobe']['video'][0]:
+                video.resolution = parser_data['video'][0]['resolution']
+        if 'codec' in parser_data['video'][0]:
             if not video.video_codec:
-                video.video_codec = data['ffprobe']['video'][0]['codec']
-        if 'frame_rate' in data['ffprobe']['video'][0]:
+                video.video_codec = parser_data['video'][0]['codec']
+        if 'frame_rate' in parser_data['video'][0]:
             if not video.fps:
-                if isinstance(data['ffprobe']['video'][0]['frame_rate'], float):
-                    video.fps = data['ffprobe']['video'][0]['frame_rate']
+                if isinstance(parser_data['video'][0]['frame_rate'], float):
+                    video.fps = parser_data['video'][0]['frame_rate']
                 else:
                     try:
-                        video.fps = data['ffprobe']['video'][0]['frame_rate'].magnitude
+                        video.fps = parser_data['video'][0]['frame_rate'].magnitude
                     except AttributeError:
-                        video.fps = data['ffprobe']['video'][0]['frame_rate']
+                        video.fps = parser_data['video'][0]['frame_rate']
 
-    if 'audio' not in data['ffprobe']:
-        logging.debug('BAZARR FFprobe was unable to find audio tracks in the file!')
+    if 'audio' not in parser_data:
+        logging.debug('BAZARR parser was unable to find audio tracks in the file!')
     else:
-        if 'codec' in data['ffprobe']['audio'][0]:
+        if 'codec' in parser_data['audio'][0]:
             if not video.audio_codec:
-                video.audio_codec = data['ffprobe']['audio'][0]['codec']
+                video.audio_codec = parser_data['audio'][0]['codec']
 
-        for track in data['ffprobe']['audio']:
+        for track in parser_data['audio']:
             if 'language' in track:
                 video.audio_languages.add(track['language'].alpha3)


@@ -1,16 +1,14 @@
 # coding=utf-8
 
 import logging
-import os
 import pickle
 
-import enzyme
 from knowit.api import know
-from enzyme.exceptions import MalformedMKVError
 
 from languages.custom_lang import CustomLanguage
 from app.database import TableEpisodes, TableMovies
 from utilities.path_mappings import path_mappings
+from app.config import settings
 
 
 def _handle_alpha3(detected_language: dict):
@@ -46,20 +44,23 @@ def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=No
             codec = detected_language.get("format")  # or None
             subtitles_list.append([language, forced, hearing_impaired, codec])
 
-    elif data["enzyme"]:
-        for subtitle_track in data["enzyme"].subtitle_tracks:
-            hearing_impaired = (
-                subtitle_track.name and "sdh" in subtitle_track.name.lower()
-            )
-
-            subtitles_list.append(
-                [
-                    subtitle_track.language,
-                    subtitle_track.forced,
-                    hearing_impaired,
-                    subtitle_track.codec_id,
-                ]
-            )
+    elif 'mediainfo' in data and data["mediainfo"] and "subtitle" in data["mediainfo"]:
+        for detected_language in data["mediainfo"]["subtitle"]:
+            if "language" not in detected_language:
+                continue
+
+            # Avoid commentary subtitles
+            name = detected_language.get("name", "").lower()
+            if "commentary" in name:
+                logging.debug("Ignoring commentary subtitle: %s", name)
+                continue
+
+            language = _handle_alpha3(detected_language)
+
+            forced = detected_language.get("forced", False)
+            hearing_impaired = detected_language.get("hearing_impaired", False)
+            codec = detected_language.get("format")  # or None
+            subtitles_list.append([language, forced, hearing_impaired, codec])
 
     return subtitles_list
@@ -68,11 +69,13 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
     # Define default data keys value
     data = {
         "ffprobe": {},
-        "enzyme": {},
+        "mediainfo": {},
         "file_id": episode_file_id or movie_file_id,
         "file_size": file_size,
     }
 
+    embedded_subs_parser = settings.general.embedded_subtitles_parser
+
     if use_cache:
         # Get the actual cache value form database
         if episode_file_id:
@@ -95,31 +98,38 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
             except Exception:
                 pass
             else:
-                # Check if file size and file id matches and if so, we return the cached value
+                # Check if file size and file id matches and if so, we return the cached value if available for the
+                # desired parser
                 if cached_value['file_size'] == file_size and cached_value['file_id'] in [episode_file_id, movie_file_id]:
-                    return cached_value
+                    if embedded_subs_parser in cached_value and cached_value[embedded_subs_parser]:
+                        return cached_value
+                    else:
+                        # no valid cache
+                        pass
+                else:
+                    # cache mut be renewed
+                    pass
 
     # if not, we retrieve the metadata from the file
     from utilities.binaries import get_binary
-    ffprobe_path = get_binary("ffprobe")
+    ffprobe_path = mediainfo_path = None
+    if embedded_subs_parser == 'ffprobe':
+        ffprobe_path = get_binary("ffprobe")
+    elif embedded_subs_parser == 'mediainfo':
+        mediainfo_path = get_binary("mediainfo")
 
     # if we have ffprobe available
     if ffprobe_path:
         data["ffprobe"] = know(video_path=file, context={"provider": "ffmpeg", "ffmpeg": ffprobe_path})
-    # if not, we use enzyme for mkv files
+    # or if we have mediainfo available
+    elif mediainfo_path:
+        data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo", "mediainfo": mediainfo_path})
+    # else, we warn user of missing binary
     else:
-        if os.path.splitext(file)[1] == ".mkv":
-            with open(file, "rb") as f:
-                try:
-                    mkv = enzyme.MKV(f)
-                except MalformedMKVError:
-                    logging.error(
-                        "BAZARR cannot analyze this MKV with our built-in MKV parser, you should install "
-                        "ffmpeg/ffprobe: " + file
-                    )
-                else:
-                    data["enzyme"] = mkv
+        logging.error("BAZARR require ffmpeg/ffprobe or mediainfo, please install it and make sure to choose it in "
+                      "Settings-->Subtitles.")
+        return
 
     # we write to db the result and return the newly cached ffprobe dict
     if episode_file_id:


@@ -43,7 +43,7 @@
         "eslint": "^8.26.0",
         "eslint-config-react-app": "^7.0.1",
         "eslint-plugin-react-hooks": "^4.6.0",
-        "husky": "^8.0.0",
+        "husky": "^8.0.2",
         "jsdom": "^20.0.1",
         "lodash": "^4.17.0",
         "moment": "^2.29",
@@ -6307,9 +6307,9 @@
       }
     },
     "node_modules/husky": {
-      "version": "8.0.1",
-      "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.1.tgz",
-      "integrity": "sha512-xs7/chUH/CKdOCs7Zy0Aev9e/dKOMZf3K1Az1nar3tzlv0jfqnYtu235bstsWTmXOR0EfINrPa97yy4Lz6RiKw==",
+      "version": "8.0.2",
+      "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.2.tgz",
+      "integrity": "sha512-Tkv80jtvbnkK3mYWxPZePGFpQ/tT3HNSs/sasF9P2YfkMezDl3ON37YN6jUUI4eTg5LcyVynlb6r4eyvOmspvg==",
       "dev": true,
       "bin": {
         "husky": "lib/bin.js"
@@ -14406,9 +14406,9 @@
       "dev": true
     },
     "husky": {
-      "version": "8.0.1",
-      "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.1.tgz",
-      "integrity": "sha512-xs7/chUH/CKdOCs7Zy0Aev9e/dKOMZf3K1Az1nar3tzlv0jfqnYtu235bstsWTmXOR0EfINrPa97yy4Lz6RiKw==",
+      "version": "8.0.2",
+      "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.2.tgz",
+      "integrity": "sha512-Tkv80jtvbnkK3mYWxPZePGFpQ/tT3HNSs/sasF9P2YfkMezDl3ON37YN6jUUI4eTg5LcyVynlb6r4eyvOmspvg==",
       "dev": true
     },
     "iconv-lite": {


@@ -47,7 +47,7 @@
     "eslint": "^8.26.0",
     "eslint-config-react-app": "^7.0.1",
     "eslint-plugin-react-hooks": "^4.6.0",
-    "husky": "^8.0.0",
+    "husky": "^8.0.2",
    "jsdom": "^20.0.1",
    "lodash": "^4.17.0",
    "moment": "^2.29",


@@ -14,12 +14,16 @@ import {
 } from "@mantine/core";
 import { useForm } from "@mantine/form";
 import { isObject } from "lodash";
-import { FunctionComponent, useMemo } from "react";
+import { FunctionComponent, useCallback, useMemo } from "react";
 import { useMutation } from "react-query";
 import { Card } from "../components";
 import { notificationsKey } from "../keys";
 import { useSettingValue, useUpdateArray } from "../utilities/hooks";
 
+const notificationHook = (notifications: Settings.NotificationInfo[]) => {
+  return notifications.map((info) => JSON.stringify(info));
+};
+
 interface Props {
   selections: readonly Settings.NotificationInfo[];
   payload: Settings.NotificationInfo | null;
@@ -122,6 +126,13 @@ export const NotificationView: FunctionComponent = () => {
     "name"
   );
 
+  const updateWrapper = useCallback(
+    (info: Settings.NotificationInfo) => {
+      update(info, notificationHook);
+    },
+    [update]
+  );
+
   const modals = useModals();
 
   const elements = useMemo(() => {
@@ -135,12 +146,12 @@ export const NotificationView: FunctionComponent = () => {
           modals.openContextModal(NotificationModal, {
             payload,
             selections: notifications,
-            onComplete: update,
+            onComplete: updateWrapper,
           })
         }
       ></Card>
     ));
-  }, [modals, notifications, update]);
+  }, [modals, notifications, updateWrapper]);
 
   return (
     <SimpleGrid cols={3}>
@@ -151,7 +162,7 @@ export const NotificationView: FunctionComponent = () => {
           modals.openContextModal(NotificationModal, {
             payload: null,
             selections: notifications ?? [],
-            onComplete: update,
+            onComplete: updateWrapper,
           })
         }
       ></Card>


@@ -78,12 +78,13 @@ const SettingsSchedulerView: FunctionComponent = () => {
       </CollapseBox>
       <Check
-        label="Use cached ffprobe results"
+        label="Use cached embedded subtitles parser results"
         settingKey="settings-sonarr-use_ffprobe_cache"
       ></Check>
       <Message>
-        If disabled, Bazarr will use ffprobe to index video file properties on
-        each run. This will result in higher disk I/O.
+        If disabled, Bazarr will use the embedded subtitles parser to index
+        episodes file properties on each run. This will result in higher disk
+        I/O.
       </Message>
 
       <Selector
@@ -114,12 +115,12 @@ const SettingsSchedulerView: FunctionComponent = () => {
       </CollapseBox>
       <Check
-        label="Use cached ffprobe results"
+        label="Use cached embedded subtitles parser results"
         settingKey="settings-radarr-use_ffprobe_cache"
       ></Check>
      <Message>
-        If disabled, Bazarr will use ffprobe to index video file properties on
-        each run. This will result in higher disk I/O.
+        If disabled, Bazarr will use embedded subtitles parser to index movies
+        file properties on each run. This will result in higher disk I/O.
       </Message>
     </Section>
 
     <Section header="Search and Upgrade Subtitles">


@@ -20,6 +20,7 @@ import {
   adaptiveSearchingDeltaOption,
   antiCaptchaOption,
   colorOptions,
+  embeddedSubtitlesParserOption,
   folderOptions,
   hiExtensionOptions,
 } from "./options";
@@ -278,6 +279,14 @@ const SettingsSubtitlesView: FunctionComponent = () => {
           Hide embedded subtitles for languages that are not currently
           desired.
         </Message>
+        <Selector
+          settingKey="settings-general-embedded_subtitles_parser"
+          settingOptions={{
+            onSaved: (v) => (v === undefined ? "ffprobe" : v),
+          }}
+          options={embeddedSubtitlesParserOption}
+        ></Selector>
+        <Message>Embedded subtitles video parser</Message>
       </CollapseBox>
     </Section>
     <Section header="Post-Processing">


@@ -41,6 +41,18 @@ export const antiCaptchaOption: SelectorOption<string>[] = [
   },
 ];
 
+export const embeddedSubtitlesParserOption: SelectorOption<string>[] = [
+  {
+    label: "ffprobe (faster)",
+    value: "ffprobe",
+  },
+  {
+    label:
+      "mediainfo (slower but may give better results. Must be already installed)",
+    value: "mediainfo",
+  },
+];
+
 export const adaptiveSearchingDelayOption: SelectorOption<string>[] = [
   {
     label: "1 week",


@@ -57,7 +57,7 @@ export function useFormActions() {
 }
 
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-type HookType = (value: any) => unknown;
+export type HookType = (value: any) => unknown;
 
 export type FormKey = keyof FormValues;
 export type FormValues = {


@@ -1,7 +1,11 @@
 import { LOG } from "@/utilities/console";
 import { get, isNull, isUndefined, uniqBy } from "lodash";
 import { useCallback, useMemo, useRef } from "react";
-import { useFormActions, useStagedValues } from "../utilities/FormValues";
+import {
+  HookType,
+  useFormActions,
+  useStagedValues,
+} from "../utilities/FormValues";
 import { useSettings } from "../utilities/SettingsProvider";
 
 export interface BaseInput<T> {
@@ -94,9 +98,9 @@ export function useUpdateArray<T>(key: string, compare: keyof T) {
   }, [key, stagedValue]);
 
   return useCallback(
-    (v: T) => {
+    (v: T, hook?: HookType) => {
       const newArray = uniqBy([v, ...staged], compareRef.current);
-      setValue(newArray, key);
+      setValue(newArray, key, hook);
     },
     [staged, setValue, key]
   );


@@ -4,4 +4,4 @@
 from .container import FFprobeVideoContainer
 from .stream import FFprobeSubtitleStream
 
-__version__ = "0.2.5"
+__version__ = "0.2.6"


@@ -41,8 +41,7 @@ class FFprobeSubtitleStream:
         )
 
         self.disposition = FFprobeSubtitleDisposition(stream.get("disposition", {}))
-        if stream.get("tags") is not None:
-            self.disposition.update_from_tags(stream["tags"])
+        self.disposition.update_from_tags(stream.get("tags", {}) or {})
 
     def convert_args(self, convert_format, outfile):
         """


@@ -9,6 +9,7 @@ from urllib.parse import quote
 from urllib.parse import parse_qs
 
 from requests.exceptions import HTTPError
 import rarfile
+from bs4 import FeatureNotFound
 from guessit import guessit
 from requests.exceptions import RequestException
@@ -204,6 +205,10 @@ class LegendasdivxProvider(Provider):
                 raise IPAddressBlocked("LegendasDivx.pt :: Your IP is blocked on this server.")
             logger.error("Legendasdivx.pt :: HTTP Error %s", e)
             raise TooManyRequests("Legendasdivx.pt :: HTTP Error %s", e)
+        except FeatureNotFound:
+            logger.error("LegendasDivx.pt :: lxml Python module isn't installed. Make sure to install requirements.")
+            raise ConfigurationError("LegendasDivx.pt :: lxml Python module isn't installed. Make sure to install "
+                                     "requirements.")
         except Exception as e:
             logger.error("LegendasDivx.pt :: Uncaught error: %r", e)
             raise ServiceUnavailable("LegendasDivx.pt :: Uncaught error: %r", e)
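
For background on the new except clause: BeautifulSoup raises bs4.FeatureNotFound when the requested parser's backing library is missing, which is exactly what happens on a system without lxml. A small standalone sketch of that behaviour with a stdlib fallback (not Bazarr code):

from bs4 import BeautifulSoup, FeatureNotFound

html = "<html><body><p>legendas</p></body></html>"

try:
    # Raises FeatureNotFound when the lxml package is not installed
    soup = BeautifulSoup(html, "lxml")
except FeatureNotFound:
    # Fall back to the pure-Python parser bundled with the standard library
    soup = BeautifulSoup(html, "html.parser")

print(soup.p.text)  # legendas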


@@ -1,289 +0,0 @@
# coding=utf-8
from __future__ import absolute_import
import logging
import rarfile
import os
from subliminal.exceptions import ConfigurationError
from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \
LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guessit, sanitize, region, type_map, \
raise_for_status, json, SHOW_EXPIRATION_TIME, title_re, season_re, datetime, pytz, NO_VALUE, releases_key, \
SUBTITLE_EXTENSIONS, language_converters, ServiceUnavailable
from requests.exceptions import RequestException
from subliminal_patch.providers import reinitialize_on_error
from subliminal_patch.subtitle import guess_matches
from subzero.language import Language
logger = logging.getLogger(__name__)
class LegendasTVSubtitle(_LegendasTVSubtitle):
def __init__(self, language, type, title, year, imdb_id, season, archive, name):
super(LegendasTVSubtitle, self).__init__(language, type, title, year, imdb_id, season, archive, name)
self.archive.content = None
self.release_info = name.rstrip('.srt').split('/')[-1]
self.page_link = archive.link
def make_picklable(self):
self.archive.content = None
return self
def get_matches(self, video, hearing_impaired=False):
matches = set()
# episode
if isinstance(video, Episode) and self.type == 'episode':
# series
if video.series and (sanitize(self.title) in (
sanitize(name) for name in [video.series] + video.alternative_series)):
matches.add('series')
# year
if video.original_series and self.year is None or video.year and video.year == self.year:
matches.add('year')
# imdb_id
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('series_imdb_id')
# movie
elif isinstance(video, Movie) and self.type == 'movie':
# title
if video.title and (sanitize(self.title) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
matches.add('title')
# year
if video.year and self.year == video.year:
matches.add('year')
# imdb_id
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
# name
matches |= guess_matches(video, guessit(self.name, {'type': self.type}))
return matches
class LegendasTVProvider(_LegendasTVProvider):
languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()}
video_types = (Episode, Movie)
subtitle_class = LegendasTVSubtitle
def __init__(self, username=None, password=None, featured_only=False):
# Provider needs UNRAR installed. If not available raise ConfigurationError
try:
rarfile.tool_setup()
except rarfile.RarCannotExec:
raise ConfigurationError('RAR extraction tool not available')
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
self.featured_only = featured_only
@staticmethod
def is_valid_title(title, title_id, sanitized_title, season, year, imdb_id):
"""Check if is a valid title."""
if title["imdb_id"] and title["imdb_id"] == imdb_id:
logger.debug(u'Matched title "%s" as IMDB ID %s', sanitized_title, title["imdb_id"])
return True
if title["title2"] and sanitize(title['title2']) == sanitized_title:
logger.debug(u'Matched title "%s" as "%s"', sanitized_title, title["title2"])
return True
return _LegendasTVProvider.is_valid_title(title, title_id, sanitized_title, season, year)
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def search_titles(self, titles, season, title_year, imdb_id):
"""Search for titles matching the `title`.
For episodes, each season has it own title
:param str titles: the titles to search for.
:param int season: season of the title
:param int title_year: year of the title
:return: found titles.
:rtype: dict
"""
titles_found = {}
for title in titles:
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
if "dsc_nome_br" in source:
name2, ommit1, ommit2 = title_re.match(source['dsc_nome_br']).groups()
title['title2'] = name2
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, imdb_id):
logger.debug(u'Found title: %s', title)
titles_found[title_id] = title
logger.debug('Found %d titles', len(titles_found))
return titles_found
@reinitialize_on_error((RequestException, ServiceUnavailable), attempts=1)
def query(self, language, titles, season=None, episode=None, year=None, imdb_id=None):
# search for titles
titles_found = self.search_titles(titles, season, year, imdb_id)
subtitles = []
# iterate over titles
for title_id, t in titles_found.items():
# Skip episodes or movies if it's not what was requested
if (season and t['type'] == 'movie') or (not season and t['type'] == 'episode'):
continue
# Skip if season isn't matching
if season and season != t.get('season'):
continue
# Skip if season wasn't provided (not an episode) but one is returned by provider (wrong type)
if not season and t.get('season'):
continue
logger.info('Getting archives for title %d and language %d', title_id, language.legendastv)
archives = self.get_archives(title_id, language.legendastv, t['type'], season, episode)
if not archives:
logger.info('No archives found for title %d and language %d', title_id, language.legendastv)
# iterate over title's archives
for a in archives:
# Check if featured
if self.featured_only and a.featured == False:
logger.info('Subtitle is not featured, skipping')
continue
# compute an expiration time based on the archive timestamp
expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds()
# attempt to get the releases from the cache
cache_key = str(a.id + "|" + a.name)
releases = region.get(cache_key, expiration_time=expiration_time)
# the releases are not in cache or cache is expired
if releases == NO_VALUE:
logger.info('Releases not found in cache')
# download archive
self.download_archive(a)
# extract the releases
releases = []
for name in a.content.namelist():
# discard the legendastv file
if name.startswith('Legendas.tv'):
continue
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
releases.append(name)
# cache the releases
region.set(cache_key, releases)
# iterate over releases
for r in releases:
subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'),
t.get('season'), a, r)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
season = episode = None
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
season = video.season
episode = video.episode
imdb = video.series_imdb_id
else:
titles = [video.title] + video.alternative_titles
imdb = video.imdb_id
subtitles = [s for l in languages for s in
self.query(l, titles, season=season, episode=episode, year=video.year, imdb_id=imdb)]
if subtitles:
return subtitles
else:
return []
def download_subtitle(self, subtitle):
super(LegendasTVProvider, self).download_subtitle(subtitle)
subtitle.archive.content = None
def get_archives(self, title_id, language_code, title_type, season, episode):
return super(LegendasTVProvider, self).get_archives.original(self, title_id, language_code, title_type,
season, episode)


@@ -454,7 +454,17 @@ def checked(fn, raise_api_limit=False, validate_token=False, validate_json=False
             elif status_code == 403:
                 raise ProviderError("Bazarr API key seems to be in problem")
             elif status_code == 406:
-                raise DownloadLimitExceeded("Daily download limit reached")
+                try:
+                    json_response = response.json()
+                    download_count = json_response['requests']
+                    remaining_download = json_response['remaining']
+                    quota_reset_time = json_response['reset_time']
+                except JSONDecodeError:
+                    raise ProviderError('Invalid JSON returned by provider')
+                else:
+                    raise DownloadLimitExceeded(f"Daily download limit reached. {download_count} subtitles have been "
+                                                f"downloaded and {remaining_download} remaining subtitles can be "
+                                                f"downloaded. Quota will be reset in {quota_reset_time}.")
             elif status_code == 410:
                 raise ProviderError("Download as expired")
             elif status_code == 429:


@@ -70,7 +70,9 @@ class RegieLiveProvider(Provider):
 
     def initialize(self):
         self.session = Session()
-        self.url = 'http://api.regielive.ro/kodi/cauta.php'
+        #self.url = 'http://api.regielive.ro/kodi/cauta.php'
+        # this is a proxy API/scraper for subtitrari.regielive.ro used for subtitles search only
+        self.url = 'http://subtitles.24-7.ro/index.php'
         self.api = 'API-KODI-KINGUL'
         self.headers = {'RL-API': self.api}


@@ -147,6 +147,11 @@ class Subf2mProvider(Provider):
         for n in range(retry):
             req = self._session.get(url, stream=True)
 
+            if req.status_code == 403:
+                logger.debug("Access to this resource is forbidden: %s", url)
+                break
+
             # Sometimes subf2m will return a 503 code. This error usually disappears
             # retrying the query
             if req.status_code == 503:


@@ -1,28 +1,30 @@
 # coding=utf-8
 from __future__ import absolute_import
 
-import os
-import io
 import logging
 import re
 
-from zipfile import ZipFile, is_zipfile
-from rarfile import RarFile, is_rarfile
-from guessit import guessit
+from subliminal.providers import ParserBeautifulSoup
+from subliminal.video import Episode
+from subliminal.video import Movie
 from subliminal_patch.providers import Provider
 from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
-from subliminal_patch.subtitle import Subtitle, guess_matches
-from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming
-from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
-from subliminal.exceptions import ProviderError
-from subliminal.providers import ParserBeautifulSoup
-from subliminal.video import Episode, Movie
-from subliminal.subtitle import SUBTITLE_EXTENSIONS
+from subliminal_patch.providers.utils import get_archive_from_bytes
+from subliminal_patch.providers.utils import get_subtitle_from_archive
+from subliminal_patch.providers.utils import update_matches
+from subliminal_patch.subtitle import guess_matches
+from subliminal_patch.subtitle import Subtitle
+from subliminal_patch.utils import \
+    fix_inconsistent_naming as _fix_inconsistent_naming
+from subliminal_patch.utils import sanitize
 from subzero.language import Language
 
 # parsing regex definitions
 title_re = re.compile(r'(?P<title>(?:.+(?= [Aa][Kk][Aa] ))|.+)(?:(?:.+)(?P<altitle>(?<= [Aa][Kk][Aa] ).+))?')
+_SEASON_RE = re.compile(r"(s|(season|sezonul)\s)(?P<x>\d{1,2})", flags=re.IGNORECASE)
 
 
 def fix_inconsistent_naming(title):
     """Fix titles with inconsistent naming using dictionary and sanitize them.
@@ -48,7 +50,7 @@ class SubtitrarinoiSubtitle(Subtitle):
         super(SubtitrarinoiSubtitle, self).__init__(language)
         self.sid = sid
         self.title = title
-        self.imdb_id = imdb_id
+        self.imdb_id = (imdb_id or "").rstrip("/")
         self.download_link = download_link
         self.year = year
         self.download_count = download_count
@@ -87,8 +89,7 @@ class SubtitrarinoiSubtitle(Subtitle):
             if video.imdb_id and self.imdb_id == video.imdb_id:
                 matches.add('imdb_id')
 
-            # guess match others
-            matches |= guess_matches(video, guessit(self.comments, {"type": "movie"}))
+            update_matches(matches, video, self.comments)
 
         else:
             # title
@@ -100,16 +101,19 @@ class SubtitrarinoiSubtitle(Subtitle):
             if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
                 matches.add('imdb_id')
 
-            # season
-            if f"Sezonul {video.season}" in self.comments:
-                matches.add('season')
+            season = _SEASON_RE.search(self.comments)
+            if season is not None:
+                season = int(season.group("x"))
+                if season == video.season:
+                    matches.add('season')
+
+            logger.debug("Season matched? %s [%s -> %s]", "season" in matches, video.season, self.comments)
 
             # episode
             if {"imdb_id", "season"}.issubset(matches):
                 matches.add('episode')
 
-            # guess match others
-            matches |= guess_matches(video, guessit(self.comments, {"type": "episode"}))
+            update_matches(matches, video, self.comments)
 
         self.matches = matches
@@ -277,42 +281,5 @@ class SubtitrarinoiProvider(Provider, ProviderSubtitleArchiveMixin):
         r = self.session.get(subtitle.download_link, headers={'Referer': self.api_url}, timeout=10)
         r.raise_for_status()
 
-        # open the archive
-        archive_stream = io.BytesIO(r.content)
-        if is_rarfile(archive_stream):
-            logger.debug('Archive identified as rar')
-            archive = RarFile(archive_stream)
-        elif is_zipfile(archive_stream):
-            logger.debug('Archive identified as zip')
-            archive = ZipFile(archive_stream)
-        else:
-            subtitle.content = r.content
-            if subtitle.is_valid():
-                return
-            subtitle.content = None
-            raise ProviderError('Unidentified archive type')
-
-        if subtitle.is_episode:
-            subtitle.content = self._get_subtitle_from_archive(subtitle, archive)
-        else:
-            subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
-
-    @staticmethod
-    def _get_subtitle_from_archive(subtitle, archive):
-        for name in archive.namelist():
-            # discard hidden files
-            if os.path.split(name)[-1].startswith('.'):
-                continue
-
-            # discard non-subtitle files
-            if not name.lower().endswith(SUBTITLE_EXTENSIONS):
-                continue
-
-            _guess = guessit(name)
-            if subtitle.desired_episode == _guess['episode']:
-                return archive.read(name)
-
-        return None
-
-# vim: set expandtab ts=4 sw=4:
+        archive = get_archive_from_bytes(r.content)
+        subtitle.content = get_subtitle_from_archive(archive, episode=subtitle.desired_episode)
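
As a quick illustration of the season matching introduced above, the _SEASON_RE pattern (copied verbatim from this diff) accepts the "Sezonul N", "season N" and "SNN" spellings found in subtitrari-noi comments; a standalone sketch, not part of the provider:

import re

_SEASON_RE = re.compile(r"(s|(season|sezonul)\s)(?P<x>\d{1,2})", flags=re.IGNORECASE)

for comment in ("Sezonul 1 ep. 1-7 Sincronizari HDTV x264", "S01", "season 02", "version 01"):
    match = _SEASON_RE.search(comment)
    season = int(match.group("x")) if match else None
    print(f"{comment!r} -> {season}")

# Output: 'Sezonul 1 ep. 1-7 Sincronizari HDTV x264' -> 1, 'S01' -> 1, 'season 02' -> 2, 'version 01' -> None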


@@ -5,7 +5,7 @@ import logging
 import re
 import zipfile
 from random import randint
-from urllib.parse import urlparse, parse_qs, quote
+from urllib.parse import urljoin, urlparse, parse_qs, quote
 
 import rarfile
 from guessit import guessit
@@ -179,7 +179,7 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
         # If the response is a redirect and doesnt point to an error message page, then we are logged in
         if res.status_code == 302 and location_qs['msg_type'][0] == 'i':
-            if 'omezené' in location_qs['msg'][0]:
+            if 'omezené' in location_qs['msg'][0].lower():
                 raise AuthenticationError("V.I.P. account is required for this provider to work!")
             else:
                 logger.info("Titulky.com: Successfully logged in, caching cookies for future connections...")
@@ -203,35 +203,44 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
         cache.delete('titulky_user_agent')
 
         # If the response is a redirect and doesnt point to an error message page, then we are logged out
-        if res.status_code == 302 and location_qs['msg_type'][0] == 'i':
+        if res.is_redirect and location_qs['msg_type'][0] == 'i':
             return True
         else:
             raise AuthenticationError("Logout failed.")
 
     # GET request a page. This functions acts as a requests.session.get proxy handling expired cached cookies
     # and subsequent relogging and sending the original request again. If all went well, returns the response.
+    # Additionally handle allow_redirects by ourselves to follow redirects UNLESS they are redirecting to an
+    # error page. In such case we would like to know what has happend and act accordingly.
     def get_request(self, url, ref=server_url, allow_redirects=False, _recursion=0):
         # That's deep... recursion... Stop. We don't have infinite memmory. And don't want to
         # spam titulky's server either. So we have to just accept the defeat. Let it throw!
-        if _recursion >= 5:
-            raise AuthenticationError("Got into a loop and couldn't get authenticated!")
+        if _recursion >= 10:
+            raise AuthenticationError("Got into a redirect loop! Oops.")
 
         logger.debug(f"Titulky.com: Fetching url: {url}")
 
         res = self.session.get(
             url,
             timeout=self.timeout,
-            allow_redirects=allow_redirects,
+            allow_redirects=False,
             headers={'Referer': quote(ref) if ref else None})  # URL encode ref if it has value
 
-        # Check if we got redirected because login cookies expired.
-        # Note: microoptimization - don't bother parsing qs for non 302 responses.
-        if res.status_code == 302:
+        if res.is_redirect:
+            # Dont bother doing anything if we do not want to redirect. Just return the original response..
+            if allow_redirects is False:
+                return res
+
             location_qs = parse_qs(urlparse(res.headers['Location']).query)
-            if location_qs['msg_type'][0] == 'e' and "Přihlašte se" in location_qs['msg'][0]:
+            # If the msg_type query parameter does NOT equal to 'e' or is absent, follow the URL in the Location header.
+            if allow_redirects is True and ('msg_type' not in location_qs or ('msg_type' in location_qs and location_qs['msg_type'][0] != 'e')):
+                return self.get_request(urljoin(res.headers['Origin'] or self.server_url, res.headers['Location']), ref=url, allow_redirects=True, _recursion=(_recursion + 1))
+
+            # Check if we got redirected because login cookies expired.
+            if "přihlašte" in location_qs['msg'][0].lower():
                 logger.info(f"Titulky.com: Login cookies expired.")
                 self.login(True)
-                return self.get_request(url, ref=ref, _recursion=(_recursion + 1))
+                return self.get_request(url, ref=ref, allow_redirects=True, _recursion=(_recursion + 1))
 
         return res


@@ -135,11 +135,14 @@ class WizdomProvider(Provider):
         # search
         logger.debug('Using IMDB ID %r', imdb_id)
         url = 'https://{}/api/releases/{}'.format(self.server_url, imdb_id)
-        page_link = 'http://{}/#/{}/{}'.format(self.server_url, 'movies' if is_movie else 'series', imdb_id)
+        page_link = 'http://{}/{}/{}'.format(self.server_url, 'movies' if is_movie else 'series', imdb_id)
 
         # get the list of subtitles
         logger.debug('Getting the list of subtitles')
         r = self.session.get(url)
+        if r.status_code == 500:
+            logger.debug(f'No subtitles found for imdb id {imdb_id}')
+            return []
         r.raise_for_status()
         try:
             results = r.json()
@@ -199,7 +202,7 @@ class WizdomProvider(Provider):
     def download_subtitle(self, subtitle):
         # download
-        url = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle.subtitle_id)
+        url = 'http://{}/api/files/sub/{}'.format(self.server_url, subtitle.subtitle_id)
         r = self.session.get(url, headers={'Referer': subtitle.page_link}, timeout=10)
         r.raise_for_status()


@@ -29,7 +29,7 @@ class YifySubtitle(Subtitle):
         super(YifySubtitle, self).__init__(language)
         self.page_link = page_link
         self.hearing_impaired = hi
-        self.release_info = release
+        self.release_info = release.replace('\n', ', ')
         self.uploader = uploader
         self.rating = rating
@@ -116,8 +116,8 @@ class YifySubtitlesProvider(Provider):
             td = row.findAll('td')
             rating = int(td[0].text)
             sub_lang = td[1].text
-            release = re.sub(r'^subtitle ', '', td[2].text)
-            page_link = server_url + td[2].find('a').get('href')
+            release = re.sub(r'^\nsubtitle ', '', td[2].text)
+            page_link = td[2].find('a').get('href')
             hi = True if td[3].find('span', {'class': 'hi-subtitle'}) else False
             uploader = td[4].text


@@ -7,7 +7,6 @@ attrs==22.1.0
 charamel==1.0.0
 deep-translator==1.9.1
 dogpile.cache==1.1.8
-enzyme==0.4.1
 fese==0.1.2
 ffsubsync==0.4.20
 flask-cors==3.0.10
@@ -110,6 +109,7 @@ cloudscraper==1.2.58
 #deathbycaptcha # unknown version, only found on gist
 decorator==5.1.1
 dnspython==2.2.1
+enzyme==0.4.1
 ftfy==6.1.1
 html5lib==1.1
 Js2Py==0.74


@@ -0,0 +1,54 @@
import pytest

from subliminal_patch.providers.subtitrarinoi import SubtitrarinoiProvider
from subliminal_patch.providers.subtitrarinoi import SubtitrarinoiSubtitle
from subzero.language import Language

romanian = Language("ron")


def test_list_subtitles(episodes):
    episode = episodes["breaking_bad_s01e01"]
    with SubtitrarinoiProvider() as provider:
        assert provider.list_subtitles(episode, [romanian])


@pytest.fixture
def subtitrari_subtitle():
    yield SubtitrarinoiSubtitle(
        romanian,
        "https://www.subtitrari-noi.ro/7493-subtitrari noi.ro\ ",
        3,
        "Sezonul 1 ep. 1-7 Sincronizari si pentru variantele HDTV x264 (Sincro atty)",
        "Breaking Bad",
        "tt0903747/",
        "Alice",
        "https://www.subtitrari-noi.ro/index.php?page=movie_details&act=1&id=7493",
        2008,
        4230,
        True,
        1,
    )


@pytest.mark.parametrize("comment", ["season 01", "Sezonul 1 ep. 1-7", "S01"])
def test_subtitle_get_matches_episode(subtitrari_subtitle, episodes, comment):
    episode = episodes["breaking_bad_s01e01"]
    episode.episode = 1
    subtitrari_subtitle.comments = comment
    assert {"season", "episode", "series", "imdb_id"}.issubset(
        subtitrari_subtitle.get_matches(episode)
    )


@pytest.mark.parametrize("comment", ["season 02", "Sezonul 2 ep. 1-7", "version 01"])
def test_subtitle_get_matches_episode_false(subtitrari_subtitle, episodes, comment):
    episode = episodes["breaking_bad_s01e01"]
    episode.episode = 1
    subtitrari_subtitle.comments = comment
    assert not {"season", "episode"}.issubset(subtitrari_subtitle.get_matches(episode))


def test_provider_download_subtitle(subtitrari_subtitle):
    with SubtitrarinoiProvider() as provider:
        provider.download_subtitle(subtitrari_subtitle)
        assert subtitrari_subtitle.is_valid()