From e6cab17a2942360cac30784775db2f4fd16afaa6 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Mon, 5 Dec 2022 09:58:22 -0500
Subject: [PATCH 01/25] Fixed an edge case where no media could be found in db
 with a specific path. #2003

---
 bazarr/subtitles/download.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/bazarr/subtitles/download.py b/bazarr/subtitles/download.py
index f5a4b1edd..bc871db22 100644
--- a/bazarr/subtitles/download.py
+++ b/bazarr/subtitles/download.py
@@ -174,7 +174,10 @@ def check_missing_languages(path, media_type):
         .get_or_none()
 
     if not confirmed_missing_subs:
-        return None
+        reversed_path = path_mappings.path_replace_reverse(path) if media_type == 'series' else \
+            path_mappings.path_replace_reverse_movie(path)
+        logging.debug(f"BAZARR no media with this path have been found in database: {reversed_path}")
+        return []
 
     languages = []
     for language in ast.literal_eval(confirmed_missing_subs['missing_subtitles']):

From cc42e7bc5c8011a544bb5968b67b56347d0cb224 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Wed, 7 Dec 2022 06:38:43 -0500
Subject: [PATCH 02/25] Fixed improper page_size value that prevented the
 series and movies pages from loading. #1995

---
 bazarr/app/config.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/bazarr/app/config.py b/bazarr/app/config.py
index ce4d0e3a5..faee369e1 100644
--- a/bazarr/app/config.py
+++ b/bazarr/app/config.py
@@ -297,6 +297,11 @@ settings.general.base_url = base_url_slash_cleaner(uri=settings.general.base_url
 settings.sonarr.base_url = base_url_slash_cleaner(uri=settings.sonarr.base_url)
 settings.radarr.base_url = base_url_slash_cleaner(uri=settings.radarr.base_url)
 
+# fixing issue with improper page_size value
+if settings.general.page_size not in ['25', '50', '100', '250', '500', '1000']:
+    settings.general.page_size = defaults['general']['page_size']
+
+# save updated settings to file
 if os.path.exists(os.path.join(args.config_dir, 'config', 'config.ini')):
     with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
         settings.write(handle)

From 6a17a7fecd9130a9f8ad477e14293ad002973ea1 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Sat, 10 Dec 2022 07:31:47 -0500
Subject: [PATCH 03/25] no log: fixed throttled providers count update of UI

---
 bazarr/app/get_providers.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/bazarr/app/get_providers.py b/bazarr/app/get_providers.py
index 9535f3b94..72842dc36 100644
--- a/bazarr/app/get_providers.py
+++ b/bazarr/app/get_providers.py
@@ -287,7 +287,8 @@ def provider_throttle(name, exception):
     logging.info("Throttling %s for %s, until %s, because of: %s. Exception info: %r", name,
                  throttle_description, throttle_until.strftime("%y/%m/%d %H:%M"), cls_name,
                  exception.args[0] if exception.args else None)
-    update_throttled_provider()
+
+    update_throttled_provider()
 
 
 def throttled_count(name):

From 979301dee01241028ca1c5e9e30f7cf6fb6cf451 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Mon, 12 Dec 2022 06:33:31 -0500
Subject: [PATCH 04/25] Updated opensubtitles.com to give more information to
 the download limit reached exception.
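A minimal, self-contained sketch of the 406 handling this commit adds, for reference. The 'requests', 'remaining' and 'reset_time' field names are taken from the diff below; the stub response, its payload values and the bare exception classes are illustrative stand-ins, not the provider's real objects.

    from json import JSONDecodeError


    class ProviderError(Exception):
        pass


    class DownloadLimitExceeded(Exception):
        pass


    def raise_download_limit_reached(response):
        # Mirror the new 406 branch: surface the quota details when the
        # provider returns them, fall back to a generic error otherwise.
        try:
            json_response = response.json()
            download_count = json_response['requests']
            remaining_download = json_response['remaining']
            quota_reset_time = json_response['reset_time']
        except JSONDecodeError:
            raise ProviderError('Invalid JSON returned by provider')
        else:
            raise DownloadLimitExceeded(f"Daily download limit reached. {download_count} subtitles have been "
                                        f"downloaded and {remaining_download} remaining subtitles can be "
                                        f"downloaded. Quota will be reset in {quota_reset_time}.")


    class FakeResponse:
        @staticmethod
        def json():
            # illustrative payload only; real values come from the provider
            return {'requests': 20, 'remaining': 0, 'reset_time': '16 hours'}


    try:
        raise_download_limit_reached(FakeResponse())
    except DownloadLimitExceeded as e:
        print(e)  # "Daily download limit reached. 20 subtitles have been downloaded and 0 remaining..."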
--- libs/subliminal_patch/providers/opensubtitlescom.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/libs/subliminal_patch/providers/opensubtitlescom.py b/libs/subliminal_patch/providers/opensubtitlescom.py index ca8d90193..532998c0d 100644 --- a/libs/subliminal_patch/providers/opensubtitlescom.py +++ b/libs/subliminal_patch/providers/opensubtitlescom.py @@ -454,7 +454,17 @@ def checked(fn, raise_api_limit=False, validate_token=False, validate_json=False elif status_code == 403: raise ProviderError("Bazarr API key seems to be in problem") elif status_code == 406: - raise DownloadLimitExceeded("Daily download limit reached") + try: + json_response = response.json() + download_count = json_response['requests'] + remaining_download = json_response['remaining'] + quota_reset_time = json_response['reset_time'] + except JSONDecodeError: + raise ProviderError('Invalid JSON returned by provider') + else: + raise DownloadLimitExceeded(f"Daily download limit reached. {download_count} subtitles have been " + f"downloaded and {remaining_download} remaining subtitles can be " + f"downloaded. Quota will be reset in {quota_reset_time}.") elif status_code == 410: raise ProviderError("Download as expired") elif status_code == 429: From f3f276985e0912214051466372fdf0261359ba53 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 01:47:44 +0800 Subject: [PATCH 05/25] no log: Bump benc-uk/workflow-dispatch from 1 to 121 (#1991) Bumps [benc-uk/workflow-dispatch](https://github.com/benc-uk/workflow-dispatch) from 1 to 121. - [Release notes](https://github.com/benc-uk/workflow-dispatch/releases) - [Commits](https://github.com/benc-uk/workflow-dispatch/compare/v1...v121) --- updated-dependencies: - dependency-name: benc-uk/workflow-dispatch dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/schedule.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/schedule.yaml b/.github/workflows/schedule.yaml index 968853834..69eddf032 100644 --- a/.github/workflows/schedule.yaml +++ b/.github/workflows/schedule.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Execute - uses: benc-uk/workflow-dispatch@v1 + uses: benc-uk/workflow-dispatch@v121 with: workflow: "release_beta_to_dev" token: ${{ secrets.WF_GITHUB_TOKEN }} From eeb4b326959adf4803ba83e647a64e992b0408bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 01:48:11 +0800 Subject: [PATCH 06/25] no log: Bump axios from 0.27.2 to 1.2.1 in /frontend (#2004) Bumps [axios](https://github.com/axios/axios) from 0.27.2 to 1.2.1. - [Release notes](https://github.com/axios/axios/releases) - [Changelog](https://github.com/axios/axios/blob/v1.x/CHANGELOG.md) - [Commits](https://github.com/axios/axios/compare/v0.27.2...v1.2.1) --- updated-dependencies: - dependency-name: axios dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- frontend/package-lock.json | 34 +++++++++++++++++++++++----------- frontend/package.json | 2 +- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 10563e019..d0ef62aa3 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -15,7 +15,7 @@ "@mantine/hooks": "^5.6.0", "@mantine/modals": "^5.6.0", "@mantine/notifications": "^5.6.0", - "axios": "^0.27.2", + "axios": "^1.2.1", "react": "^17.0.2", "react-dom": "^17.0.2", "react-query": "^3.39.2", @@ -3852,12 +3852,13 @@ } }, "node_modules/axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.2.1.tgz", + "integrity": "sha512-I88cFiGu9ryt/tfVEi4kX2SITsvDddTajXTOFmt2uK1ZVA8LytjtdeyefdQWEf5PU8w+4SSJDoYnggflB5tW4A==", "dependencies": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" } }, "node_modules/axobject-query": { @@ -8221,6 +8222,11 @@ "react-is": "^16.13.1" } }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, "node_modules/psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", @@ -12636,12 +12642,13 @@ "dev": true }, "axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.2.1.tgz", + "integrity": "sha512-I88cFiGu9ryt/tfVEi4kX2SITsvDddTajXTOFmt2uK1ZVA8LytjtdeyefdQWEf5PU8w+4SSJDoYnggflB5tW4A==", "requires": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" } }, "axobject-query": { @@ -15792,6 +15799,11 @@ "react-is": "^16.13.1" } }, + "proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, "psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", diff --git a/frontend/package.json b/frontend/package.json index 1595e1189..b8a227cd1 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -19,7 +19,7 @@ "@mantine/modals": "^5.6.0", "@mantine/notifications": "^5.6.0", "@mantine/dropzone": "^5.6.0", - "axios": "^0.27.2", + "axios": "^1.2.1", "react": "^17.0.2", "react-dom": "^17.0.2", "react-query": "^3.39.2", From 1180cb702d1076bbc3992d8c01f17f7c1cda27b9 Mon Sep 17 00:00:00 2001 From: LASER-Yi Date: Tue, 13 Dec 2022 02:10:55 +0800 Subject: [PATCH 07/25] no log: Revert "Bump axios from 0.27.2 to 1.2.1 in /frontend (#2004)" This reverts commit eeb4b326959adf4803ba83e647a64e992b0408bb. 
--- frontend/package-lock.json | 34 +++++++++++----------------------- frontend/package.json | 2 +- 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index d0ef62aa3..10563e019 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -15,7 +15,7 @@ "@mantine/hooks": "^5.6.0", "@mantine/modals": "^5.6.0", "@mantine/notifications": "^5.6.0", - "axios": "^1.2.1", + "axios": "^0.27.2", "react": "^17.0.2", "react-dom": "^17.0.2", "react-query": "^3.39.2", @@ -3852,13 +3852,12 @@ } }, "node_modules/axios": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.2.1.tgz", - "integrity": "sha512-I88cFiGu9ryt/tfVEi4kX2SITsvDddTajXTOFmt2uK1ZVA8LytjtdeyefdQWEf5PU8w+4SSJDoYnggflB5tW4A==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", + "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", "dependencies": { - "follow-redirects": "^1.15.0", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" + "follow-redirects": "^1.14.9", + "form-data": "^4.0.0" } }, "node_modules/axobject-query": { @@ -8222,11 +8221,6 @@ "react-is": "^16.13.1" } }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" - }, "node_modules/psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", @@ -12642,13 +12636,12 @@ "dev": true }, "axios": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.2.1.tgz", - "integrity": "sha512-I88cFiGu9ryt/tfVEi4kX2SITsvDddTajXTOFmt2uK1ZVA8LytjtdeyefdQWEf5PU8w+4SSJDoYnggflB5tW4A==", + "version": "0.27.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", + "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", "requires": { - "follow-redirects": "^1.15.0", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" + "follow-redirects": "^1.14.9", + "form-data": "^4.0.0" } }, "axobject-query": { @@ -15799,11 +15792,6 @@ "react-is": "^16.13.1" } }, - "proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" - }, "psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", diff --git a/frontend/package.json b/frontend/package.json index b8a227cd1..1595e1189 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -19,7 +19,7 @@ "@mantine/modals": "^5.6.0", "@mantine/notifications": "^5.6.0", "@mantine/dropzone": "^5.6.0", - "axios": "^1.2.1", + "axios": "^0.27.2", "react": "^17.0.2", "react-dom": "^17.0.2", "react-query": "^3.39.2", From 06f0fe9972c2e6d06a54d2acee94e5f03521811e Mon Sep 17 00:00:00 2001 From: LASER-Yi Date: Tue, 13 Dec 2022 02:12:26 +0800 Subject: [PATCH 08/25] Fix Notification settings not saving after removing Discord #2005 --- .../Settings/Notifications/components.tsx | 19 +++++++++++++++---- .../pages/Settings/utilities/FormValues.ts | 2 +- .../src/pages/Settings/utilities/hooks.ts | 10 +++++++--- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/frontend/src/pages/Settings/Notifications/components.tsx 
b/frontend/src/pages/Settings/Notifications/components.tsx index ba18ca658..8ad33ff46 100644 --- a/frontend/src/pages/Settings/Notifications/components.tsx +++ b/frontend/src/pages/Settings/Notifications/components.tsx @@ -14,12 +14,16 @@ import { } from "@mantine/core"; import { useForm } from "@mantine/form"; import { isObject } from "lodash"; -import { FunctionComponent, useMemo } from "react"; +import { FunctionComponent, useCallback, useMemo } from "react"; import { useMutation } from "react-query"; import { Card } from "../components"; import { notificationsKey } from "../keys"; import { useSettingValue, useUpdateArray } from "../utilities/hooks"; +const notificationHook = (notifications: Settings.NotificationInfo[]) => { + return notifications.map((info) => JSON.stringify(info)); +}; + interface Props { selections: readonly Settings.NotificationInfo[]; payload: Settings.NotificationInfo | null; @@ -122,6 +126,13 @@ export const NotificationView: FunctionComponent = () => { "name" ); + const updateWrapper = useCallback( + (info: Settings.NotificationInfo) => { + update(info, notificationHook); + }, + [update] + ); + const modals = useModals(); const elements = useMemo(() => { @@ -135,12 +146,12 @@ export const NotificationView: FunctionComponent = () => { modals.openContextModal(NotificationModal, { payload, selections: notifications, - onComplete: update, + onComplete: updateWrapper, }) } > )); - }, [modals, notifications, update]); + }, [modals, notifications, updateWrapper]); return ( @@ -151,7 +162,7 @@ export const NotificationView: FunctionComponent = () => { modals.openContextModal(NotificationModal, { payload: null, selections: notifications ?? [], - onComplete: update, + onComplete: updateWrapper, }) } > diff --git a/frontend/src/pages/Settings/utilities/FormValues.ts b/frontend/src/pages/Settings/utilities/FormValues.ts index 63dc28f97..2fbe54b93 100644 --- a/frontend/src/pages/Settings/utilities/FormValues.ts +++ b/frontend/src/pages/Settings/utilities/FormValues.ts @@ -57,7 +57,7 @@ export function useFormActions() { } // eslint-disable-next-line @typescript-eslint/no-explicit-any -type HookType = (value: any) => unknown; +export type HookType = (value: any) => unknown; export type FormKey = keyof FormValues; export type FormValues = { diff --git a/frontend/src/pages/Settings/utilities/hooks.ts b/frontend/src/pages/Settings/utilities/hooks.ts index 4da28959d..b5e3e1f61 100644 --- a/frontend/src/pages/Settings/utilities/hooks.ts +++ b/frontend/src/pages/Settings/utilities/hooks.ts @@ -1,7 +1,11 @@ import { LOG } from "@/utilities/console"; import { get, isNull, isUndefined, uniqBy } from "lodash"; import { useCallback, useMemo, useRef } from "react"; -import { useFormActions, useStagedValues } from "../utilities/FormValues"; +import { + HookType, + useFormActions, + useStagedValues, +} from "../utilities/FormValues"; import { useSettings } from "../utilities/SettingsProvider"; export interface BaseInput { @@ -94,9 +98,9 @@ export function useUpdateArray(key: string, compare: keyof T) { }, [key, stagedValue]); return useCallback( - (v: T) => { + (v: T, hook?: HookType) => { const newArray = uniqBy([v, ...staged], compareRef.current); - setValue(newArray, key); + setValue(newArray, key, hook); }, [staged, setValue, key] ); From 609c5d7847878487858684c965731def377fd487 Mon Sep 17 00:00:00 2001 From: morpheus65535 Date: Tue, 13 Dec 2022 07:21:30 -0500 Subject: [PATCH 09/25] Fixed wizdom provider when imdb id isn't available. 
#2001 --- libs/subliminal_patch/providers/wizdom.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libs/subliminal_patch/providers/wizdom.py b/libs/subliminal_patch/providers/wizdom.py index 9b75589db..ed5482ad9 100644 --- a/libs/subliminal_patch/providers/wizdom.py +++ b/libs/subliminal_patch/providers/wizdom.py @@ -140,6 +140,9 @@ class WizdomProvider(Provider): # get the list of subtitles logger.debug('Getting the list of subtitles') r = self.session.get(url) + if r.status_code == 500: + logger.debug(f'No subtitles found for imdb id {imdb_id}') + return [] r.raise_for_status() try: results = r.json() From 993168e901e47b5932bd0abfc784909ac14a3199 Mon Sep 17 00:00:00 2001 From: morpheus65535 Date: Tue, 13 Dec 2022 09:06:06 -0500 Subject: [PATCH 10/25] Added logging to legendasdivx provider when lxml isn't installed properly. #2009 --- libs/subliminal_patch/providers/legendasdivx.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/libs/subliminal_patch/providers/legendasdivx.py b/libs/subliminal_patch/providers/legendasdivx.py index 8a83d9b60..46a52ffd1 100644 --- a/libs/subliminal_patch/providers/legendasdivx.py +++ b/libs/subliminal_patch/providers/legendasdivx.py @@ -9,6 +9,7 @@ from urllib.parse import quote from urllib.parse import parse_qs from requests.exceptions import HTTPError import rarfile +from bs4 import FeatureNotFound from guessit import guessit from requests.exceptions import RequestException @@ -204,6 +205,10 @@ class LegendasdivxProvider(Provider): raise IPAddressBlocked("LegendasDivx.pt :: Your IP is blocked on this server.") logger.error("Legendasdivx.pt :: HTTP Error %s", e) raise TooManyRequests("Legendasdivx.pt :: HTTP Error %s", e) + except FeatureNotFound: + logger.error("LegendasDivx.pt :: lxml Python module isn't installed. Make sure to install requirements.") + raise ConfigurationError("LegendasDivx.pt :: lxml Python module isn't installed. 
Make sure to install "
+                                     "requirements.")
         except Exception as e:
             logger.error("LegendasDivx.pt :: Uncaught error: %r", e)
             raise ServiceUnavailable("LegendasDivx.pt :: Uncaught error: %r", e)

From e6d089381285ab3e945971dad7899e17062062a6 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Tue, 13 Dec 2022 21:14:32 -0500
Subject: [PATCH 11/25] Added routine to remove deprecated providers from
 enabled providers in config.ini

---
 bazarr/app/get_providers.py | 8 ++++++++
 bazarr/init.py | 5 +++++
 2 files changed, 13 insertions(+)

diff --git a/bazarr/app/get_providers.py b/bazarr/app/get_providers.py
index 72842dc36..ba41a0ea2 100644
--- a/bazarr/app/get_providers.py
+++ b/bazarr/app/get_providers.py
@@ -108,6 +108,14 @@ PROVIDERS_FORCED_OFF = ["addic7ed", "tvsubtitles", "legendasdivx", "legendastv",
 throttle_count = {}
 
 
+def clean_enabled_providers():
+    existing_providers = provider_registry.names()
+    enabled_providers = get_array_from(settings.general.enabled_providers)
+    settings.general.enabled_providers = str([x for x in enabled_providers if x in existing_providers])
+    with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
+        settings.write(handle)
+
+
 def provider_pool():
     if settings.general.getboolean('multithreading'):
         return subliminal_patch.core.SZAsyncProviderPool

diff --git a/bazarr/init.py b/bazarr/init.py
index 64f0ba45e..b9110dfb9 100644
--- a/bazarr/init.py
+++ b/bazarr/init.py
@@ -14,6 +14,7 @@ from dogpile.cache.region import register_backend as register_cache_backend
 from app.config import settings, configure_captcha_func
 from app.get_args import args
 from app.logger import configure_logging
+from app.get_providers import clean_enabled_providers
 from utilities.binaries import get_binary, BinaryNotFound
 from utilities.path_mappings import path_mappings
 from utilities.backup import restore_from_backup
@@ -193,6 +194,10 @@ with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini')
         settings.write(handle)
 
+# Remove deprecated providers from enabled providers in config.ini
+clean_enabled_providers()
+
+
 def init_binaries():
     try:
         exe = get_binary("unar")

From 383d906749656491aedf333fda6fa8f50983c2bd Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Tue, 13 Dec 2022 21:16:00 -0500
Subject: [PATCH 12/25] Removed Legendastv provider since the website has been
 shut down.
#2012 --- libs/subliminal_patch/providers/legendastv.py | 289 ------------------ 1 file changed, 289 deletions(-) delete mode 100644 libs/subliminal_patch/providers/legendastv.py diff --git a/libs/subliminal_patch/providers/legendastv.py b/libs/subliminal_patch/providers/legendastv.py deleted file mode 100644 index 3c3476b53..000000000 --- a/libs/subliminal_patch/providers/legendastv.py +++ /dev/null @@ -1,289 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import -import logging -import rarfile -import os -from subliminal.exceptions import ConfigurationError - -from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \ - LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guessit, sanitize, region, type_map, \ - raise_for_status, json, SHOW_EXPIRATION_TIME, title_re, season_re, datetime, pytz, NO_VALUE, releases_key, \ - SUBTITLE_EXTENSIONS, language_converters, ServiceUnavailable - -from requests.exceptions import RequestException -from subliminal_patch.providers import reinitialize_on_error -from subliminal_patch.subtitle import guess_matches -from subzero.language import Language - -logger = logging.getLogger(__name__) - - -class LegendasTVSubtitle(_LegendasTVSubtitle): - def __init__(self, language, type, title, year, imdb_id, season, archive, name): - super(LegendasTVSubtitle, self).__init__(language, type, title, year, imdb_id, season, archive, name) - self.archive.content = None - self.release_info = name.rstrip('.srt').split('/')[-1] - self.page_link = archive.link - - def make_picklable(self): - self.archive.content = None - return self - - def get_matches(self, video, hearing_impaired=False): - matches = set() - - # episode - if isinstance(video, Episode) and self.type == 'episode': - # series - if video.series and (sanitize(self.title) in ( - sanitize(name) for name in [video.series] + video.alternative_series)): - matches.add('series') - - # year - if video.original_series and self.year is None or video.year and video.year == self.year: - matches.add('year') - - # imdb_id - if video.series_imdb_id and self.imdb_id == video.series_imdb_id: - matches.add('series_imdb_id') - - # movie - elif isinstance(video, Movie) and self.type == 'movie': - # title - if video.title and (sanitize(self.title) in ( - sanitize(name) for name in [video.title] + video.alternative_titles)): - matches.add('title') - - # year - if video.year and self.year == video.year: - matches.add('year') - - # imdb_id - if video.imdb_id and self.imdb_id == video.imdb_id: - matches.add('imdb_id') - - # name - matches |= guess_matches(video, guessit(self.name, {'type': self.type})) - - return matches - - -class LegendasTVProvider(_LegendasTVProvider): - languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()} - video_types = (Episode, Movie) - subtitle_class = LegendasTVSubtitle - - def __init__(self, username=None, password=None, featured_only=False): - - # Provider needs UNRAR installed. 
If not available raise ConfigurationError - try: - rarfile.tool_setup() - except rarfile.RarCannotExec: - raise ConfigurationError('RAR extraction tool not available') - - if any((username, password)) and not all((username, password)): - raise ConfigurationError('Username and password must be specified') - - self.username = username - self.password = password - self.logged_in = False - self.session = None - self.featured_only = featured_only - - @staticmethod - def is_valid_title(title, title_id, sanitized_title, season, year, imdb_id): - """Check if is a valid title.""" - if title["imdb_id"] and title["imdb_id"] == imdb_id: - logger.debug(u'Matched title "%s" as IMDB ID %s', sanitized_title, title["imdb_id"]) - return True - - if title["title2"] and sanitize(title['title2']) == sanitized_title: - logger.debug(u'Matched title "%s" as "%s"', sanitized_title, title["title2"]) - return True - - return _LegendasTVProvider.is_valid_title(title, title_id, sanitized_title, season, year) - - @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value) - def search_titles(self, titles, season, title_year, imdb_id): - """Search for titles matching the `title`. - - For episodes, each season has it own title - :param str titles: the titles to search for. - :param int season: season of the title - :param int title_year: year of the title - :return: found titles. - :rtype: dict - """ - titles_found = {} - - for title in titles: - sanitized_titles = [sanitize(title)] - ignore_characters = {'\'', '.'} - if any(c in title for c in ignore_characters): - sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters)) - - for sanitized_title in sanitized_titles: - # make the query - if season: - logger.info('Searching episode title %r for season %r', sanitized_title, season) - else: - logger.info('Searching movie title %r', sanitized_title) - - r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10) - raise_for_status(r) - results = json.loads(r.text) - - # loop over results - for result in results: - source = result['_source'] - - # extract id - title_id = int(source['id_filme']) - - # extract type - title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None} - - # extract title, year and country - name, year, country = title_re.match(source['dsc_nome']).groups() - title['title'] = name - - if "dsc_nome_br" in source: - name2, ommit1, ommit2 = title_re.match(source['dsc_nome_br']).groups() - title['title2'] = name2 - - # extract imdb_id - if source['id_imdb'] != '0': - if not source['id_imdb'].startswith('tt'): - title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7) - else: - title['imdb_id'] = source['id_imdb'] - - # extract season - if title['type'] == 'episode': - if source['temporada'] and source['temporada'].isdigit(): - title['season'] = int(source['temporada']) - else: - match = season_re.search(source['dsc_nome_br']) - if match: - title['season'] = int(match.group('season')) - else: - logger.debug('No season detected for title %d (%s)', title_id, name) - - # extract year - if year: - title['year'] = int(year) - elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit(): - # year is based on season air date hence the adjustment - title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1 - - # add title only if is valid - # Check against title without ignored chars - if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, 
imdb_id): - logger.debug(u'Found title: %s', title) - titles_found[title_id] = title - - logger.debug('Found %d titles', len(titles_found)) - - return titles_found - - @reinitialize_on_error((RequestException, ServiceUnavailable), attempts=1) - def query(self, language, titles, season=None, episode=None, year=None, imdb_id=None): - # search for titles - titles_found = self.search_titles(titles, season, year, imdb_id) - - subtitles = [] - # iterate over titles - for title_id, t in titles_found.items(): - # Skip episodes or movies if it's not what was requested - if (season and t['type'] == 'movie') or (not season and t['type'] == 'episode'): - continue - - # Skip if season isn't matching - if season and season != t.get('season'): - continue - - # Skip if season wasn't provided (not an episode) but one is returned by provider (wrong type) - if not season and t.get('season'): - continue - - logger.info('Getting archives for title %d and language %d', title_id, language.legendastv) - archives = self.get_archives(title_id, language.legendastv, t['type'], season, episode) - if not archives: - logger.info('No archives found for title %d and language %d', title_id, language.legendastv) - - # iterate over title's archives - for a in archives: - - # Check if featured - if self.featured_only and a.featured == False: - logger.info('Subtitle is not featured, skipping') - continue - - # compute an expiration time based on the archive timestamp - expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds() - - # attempt to get the releases from the cache - cache_key = str(a.id + "|" + a.name) - releases = region.get(cache_key, expiration_time=expiration_time) - - # the releases are not in cache or cache is expired - if releases == NO_VALUE: - logger.info('Releases not found in cache') - - # download archive - self.download_archive(a) - - # extract the releases - releases = [] - for name in a.content.namelist(): - # discard the legendastv file - if name.startswith('Legendas.tv'): - continue - - # discard hidden files - if os.path.split(name)[-1].startswith('.'): - continue - - # discard non-subtitle files - if not name.lower().endswith(SUBTITLE_EXTENSIONS): - continue - - releases.append(name) - - # cache the releases - region.set(cache_key, releases) - - # iterate over releases - for r in releases: - subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'), - t.get('season'), a, r) - logger.debug('Found subtitle %r', subtitle) - subtitles.append(subtitle) - - return subtitles - - def list_subtitles(self, video, languages): - season = episode = None - if isinstance(video, Episode): - titles = [video.series] + video.alternative_series - season = video.season - episode = video.episode - imdb = video.series_imdb_id - else: - titles = [video.title] + video.alternative_titles - imdb = video.imdb_id - - subtitles = [s for l in languages for s in - self.query(l, titles, season=season, episode=episode, year=video.year, imdb_id=imdb)] - if subtitles: - return subtitles - else: - return [] - - def download_subtitle(self, subtitle): - super(LegendasTVProvider, self).download_subtitle(subtitle) - subtitle.archive.content = None - - def get_archives(self, title_id, language_code, title_type, season, episode): - return super(LegendasTVProvider, self).get_archives.original(self, title_id, language_code, title_type, - season, episode) From 2622a0896eec60ed0647175cdc42be5a971aeab0 Mon Sep 17 00:00:00 2001 From: morpheus65535 Date: Wed, 14 Dec 2022 
12:56:33 -0500 Subject: [PATCH 13/25] Fixed Plex webhook trying to search for subtitles for pre-roll video. #2013 --- bazarr/api/webhooks/plex.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bazarr/api/webhooks/plex.py b/bazarr/api/webhooks/plex.py index f1b6d27a0..89cfe89aa 100644 --- a/bazarr/api/webhooks/plex.py +++ b/bazarr/api/webhooks/plex.py @@ -35,6 +35,9 @@ class WebHooksPlex(Resource): args = self.post_request_parser.parse_args() json_webhook = args.get('payload') parsed_json_webhook = json.loads(json_webhook) + if 'Guid' not in parsed_json_webhook['Metadata']: + logging.debug('No GUID provided in Plex json payload. Probably a pre-roll video.') + return "No GUID found in JSON request body", 200 event = parsed_json_webhook['event'] if event not in ['media.play']: From 71e749724f1faec40698b7f2c046eed3beb79954 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Bart=C3=ADk?= <63553146+sambartik@users.noreply.github.com> Date: Wed, 14 Dec 2022 23:05:41 +0100 Subject: [PATCH 14/25] Improve titulky session expiration handling (#2014) --- libs/subliminal_patch/providers/titulky.py | 31 ++++++++++++++-------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/libs/subliminal_patch/providers/titulky.py b/libs/subliminal_patch/providers/titulky.py index 9fdc1661c..6d2a9aef3 100644 --- a/libs/subliminal_patch/providers/titulky.py +++ b/libs/subliminal_patch/providers/titulky.py @@ -5,7 +5,7 @@ import logging import re import zipfile from random import randint -from urllib.parse import urlparse, parse_qs, quote +from urllib.parse import urljoin, urlparse, parse_qs, quote import rarfile from guessit import guessit @@ -179,7 +179,7 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin): # If the response is a redirect and doesnt point to an error message page, then we are logged in if res.status_code == 302 and location_qs['msg_type'][0] == 'i': - if 'omezené' in location_qs['msg'][0]: + if 'omezené' in location_qs['msg'][0].lower(): raise AuthenticationError("V.I.P. account is required for this provider to work!") else: logger.info("Titulky.com: Successfully logged in, caching cookies for future connections...") @@ -203,35 +203,44 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin): cache.delete('titulky_user_agent') # If the response is a redirect and doesnt point to an error message page, then we are logged out - if res.status_code == 302 and location_qs['msg_type'][0] == 'i': + if res.is_redirect and location_qs['msg_type'][0] == 'i': return True else: raise AuthenticationError("Logout failed.") # GET request a page. This functions acts as a requests.session.get proxy handling expired cached cookies # and subsequent relogging and sending the original request again. If all went well, returns the response. + # Additionally handle allow_redirects by ourselves to follow redirects UNLESS they are redirecting to an + # error page. In such case we would like to know what has happend and act accordingly. def get_request(self, url, ref=server_url, allow_redirects=False, _recursion=0): # That's deep... recursion... Stop. We don't have infinite memmory. And don't want to # spam titulky's server either. So we have to just accept the defeat. Let it throw! - if _recursion >= 5: - raise AuthenticationError("Got into a loop and couldn't get authenticated!") + if _recursion >= 10: + raise AuthenticationError("Got into a redirect loop! 
Oops.") logger.debug(f"Titulky.com: Fetching url: {url}") res = self.session.get( url, timeout=self.timeout, - allow_redirects=allow_redirects, + allow_redirects=False, headers={'Referer': quote(ref) if ref else None}) # URL encode ref if it has value - # Check if we got redirected because login cookies expired. - # Note: microoptimization - don't bother parsing qs for non 302 responses. - if res.status_code == 302: + if res.is_redirect: + # Dont bother doing anything if we do not want to redirect. Just return the original response.. + if allow_redirects is False: + return res + location_qs = parse_qs(urlparse(res.headers['Location']).query) - if location_qs['msg_type'][0] == 'e' and "Přihlašte se" in location_qs['msg'][0]: + # If the msg_type query parameter does NOT equal to 'e' or is absent, follow the URL in the Location header. + if allow_redirects is True and ('msg_type' not in location_qs or ('msg_type' in location_qs and location_qs['msg_type'][0] != 'e')): + return self.get_request(urljoin(res.headers['Origin'] or self.server_url, res.headers['Location']), ref=url, allow_redirects=True, _recursion=(_recursion + 1)) + + # Check if we got redirected because login cookies expired. + if "přihlašte" in location_qs['msg'][0].lower(): logger.info(f"Titulky.com: Login cookies expired.") self.login(True) - return self.get_request(url, ref=ref, _recursion=(_recursion + 1)) + return self.get_request(url, ref=ref, allow_redirects=True, _recursion=(_recursion + 1)) return res From 6693b988115450e7823d1735cb2eb431ab72db39 Mon Sep 17 00:00:00 2001 From: morpheus65535 Date: Thu, 15 Dec 2022 11:00:56 -0500 Subject: [PATCH 15/25] Fixed issues with new database creation since e6d089381285ab3e945971dad7899e17062062a6 --- bazarr/app/get_providers.py | 8 -------- bazarr/init.py | 10 +++++++--- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/bazarr/app/get_providers.py b/bazarr/app/get_providers.py index ba41a0ea2..72842dc36 100644 --- a/bazarr/app/get_providers.py +++ b/bazarr/app/get_providers.py @@ -108,14 +108,6 @@ PROVIDERS_FORCED_OFF = ["addic7ed", "tvsubtitles", "legendasdivx", "legendastv", throttle_count = {} -def clean_enabled_providers(): - existing_providers = provider_registry.names() - enabled_providers = get_array_from(settings.general.enabled_providers) - settings.general.enabled_providers = str([x for x in enabled_providers if x in existing_providers]) - with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle: - settings.write(handle) - - def provider_pool(): if settings.general.getboolean('multithreading'): return subliminal_patch.core.SZAsyncProviderPool diff --git a/bazarr/init.py b/bazarr/init.py index b9110dfb9..ce434acbf 100644 --- a/bazarr/init.py +++ b/bazarr/init.py @@ -10,11 +10,11 @@ import time import rarfile from dogpile.cache.region import register_backend as register_cache_backend +from subliminal_patch.extensions import provider_registry -from app.config import settings, configure_captcha_func +from app.config import settings, configure_captcha_func, get_array_from from app.get_args import args from app.logger import configure_logging -from app.get_providers import clean_enabled_providers from utilities.binaries import get_binary, BinaryNotFound from utilities.path_mappings import path_mappings from utilities.backup import restore_from_backup @@ -195,7 +195,11 @@ with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini') # Remove deprecated providers from enabled providers in config.ini 
-clean_enabled_providers() +existing_providers = provider_registry.names() +enabled_providers = get_array_from(settings.general.enabled_providers) +settings.general.enabled_providers = str([x for x in enabled_providers if x in existing_providers]) +with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle: + settings.write(handle) def init_binaries(): From 7640a6d5a420db2c74648208be62513d1b788da0 Mon Sep 17 00:00:00 2001 From: Vitiko Date: Thu, 15 Dec 2022 19:03:44 -0400 Subject: [PATCH 16/25] Embedded Subtitles provider: improve streams parsing Fix made updating fese to latest version --- libs/fese/__init__.py | 2 +- libs/fese/stream.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/libs/fese/__init__.py b/libs/fese/__init__.py index e22c03152..243201ef2 100755 --- a/libs/fese/__init__.py +++ b/libs/fese/__init__.py @@ -4,4 +4,4 @@ from .container import FFprobeVideoContainer from .stream import FFprobeSubtitleStream -__version__ = "0.2.5" +__version__ = "0.2.6" diff --git a/libs/fese/stream.py b/libs/fese/stream.py index 340951380..1685239ce 100755 --- a/libs/fese/stream.py +++ b/libs/fese/stream.py @@ -41,8 +41,7 @@ class FFprobeSubtitleStream: ) self.disposition = FFprobeSubtitleDisposition(stream.get("disposition", {})) - if stream.get("tags") is not None: - self.disposition.update_from_tags(stream["tags"]) + self.disposition.update_from_tags(stream.get("tags", {}) or {}) def convert_args(self, convert_format, outfile): """ From e503afdc2c287d2965a390fbba78cf2ce8fd0351 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Dec 2022 13:43:51 +0800 Subject: [PATCH 17/25] no log: Bump husky from 8.0.1 to 8.0.2 in /frontend (#2010) Bumps [husky](https://github.com/typicode/husky) from 8.0.1 to 8.0.2. - [Release notes](https://github.com/typicode/husky/releases) - [Commits](https://github.com/typicode/husky/compare/v8.0.1...v8.0.2) --- updated-dependencies: - dependency-name: husky dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- frontend/package-lock.json | 14 +++++++------- frontend/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 10563e019..41bceb7ba 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -43,7 +43,7 @@ "eslint": "^8.26.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-react-hooks": "^4.6.0", - "husky": "^8.0.0", + "husky": "^8.0.2", "jsdom": "^20.0.1", "lodash": "^4.17.0", "moment": "^2.29", @@ -6307,9 +6307,9 @@ } }, "node_modules/husky": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.1.tgz", - "integrity": "sha512-xs7/chUH/CKdOCs7Zy0Aev9e/dKOMZf3K1Az1nar3tzlv0jfqnYtu235bstsWTmXOR0EfINrPa97yy4Lz6RiKw==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.2.tgz", + "integrity": "sha512-Tkv80jtvbnkK3mYWxPZePGFpQ/tT3HNSs/sasF9P2YfkMezDl3ON37YN6jUUI4eTg5LcyVynlb6r4eyvOmspvg==", "dev": true, "bin": { "husky": "lib/bin.js" @@ -14406,9 +14406,9 @@ "dev": true }, "husky": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.1.tgz", - "integrity": "sha512-xs7/chUH/CKdOCs7Zy0Aev9e/dKOMZf3K1Az1nar3tzlv0jfqnYtu235bstsWTmXOR0EfINrPa97yy4Lz6RiKw==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/husky/-/husky-8.0.2.tgz", + "integrity": "sha512-Tkv80jtvbnkK3mYWxPZePGFpQ/tT3HNSs/sasF9P2YfkMezDl3ON37YN6jUUI4eTg5LcyVynlb6r4eyvOmspvg==", "dev": true }, "iconv-lite": { diff --git a/frontend/package.json b/frontend/package.json index 1595e1189..9a6e6dc5b 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -47,7 +47,7 @@ "eslint": "^8.26.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-react-hooks": "^4.6.0", - "husky": "^8.0.0", + "husky": "^8.0.2", "jsdom": "^20.0.1", "lodash": "^4.17.0", "moment": "^2.29", From 577a0d65428f950b0074307e21145258120f5769 Mon Sep 17 00:00:00 2001 From: idanlah Date: Sat, 17 Dec 2022 23:26:35 +0200 Subject: [PATCH 18/25] Fixed wizdom provider urls --- libs/subliminal_patch/providers/wizdom.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/subliminal_patch/providers/wizdom.py b/libs/subliminal_patch/providers/wizdom.py index ed5482ad9..12666f541 100644 --- a/libs/subliminal_patch/providers/wizdom.py +++ b/libs/subliminal_patch/providers/wizdom.py @@ -135,7 +135,7 @@ class WizdomProvider(Provider): # search logger.debug('Using IMDB ID %r', imdb_id) url = 'https://{}/api/releases/{}'.format(self.server_url, imdb_id) - page_link = 'http://{}/#/{}/{}'.format(self.server_url, 'movies' if is_movie else 'series', imdb_id) + page_link = 'http://{}/{}/{}'.format(self.server_url, 'movies' if is_movie else 'series', imdb_id) # get the list of subtitles logger.debug('Getting the list of subtitles') @@ -202,7 +202,7 @@ class WizdomProvider(Provider): def download_subtitle(self, subtitle): # download - url = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle.subtitle_id) + url = 'http://{}/api/files/sub/{}'.format(self.server_url, subtitle.subtitle_id) r = self.session.get(url, headers={'Referer': subtitle.page_link}, timeout=10) r.raise_for_status() From 82c9e1434108f5c030da08a09375d4ba3ed8d50b Mon Sep 17 00:00:00 2001 From: Alexandru Catalin Ene Date: Tue, 20 Dec 2022 13:26:52 +0200 Subject: [PATCH 19/25] Updated regielive provider to use a search proxy --- 
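Note: a rough sketch of what a search against the proxy might look like. The endpoint URL, the API key and the RL-API header come from this diff; the query parameter names ('nume', 'sezon', 'episod') and the assumption that the proxy answers with JSON, as the old API did, are illustrative guesses not confirmed by the patch.

    from requests import Session


    def search_regielive(title, season=None, episode=None):
        # subtitles.24-7.ro proxies search requests to subtitrari.regielive.ro
        session = Session()
        url = 'http://subtitles.24-7.ro/index.php'
        headers = {'RL-API': 'API-KODI-KINGUL'}
        params = {'nume': title}  # hypothetical parameter names
        if season is not None:
            params['sezon'] = season
        if episode is not None:
            params['episod'] = episode
        response = session.get(url, params=params, headers=headers, timeout=10)
        response.raise_for_status()
        return response.json()  # assumed JSON payload listing matching subtitles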
libs/subliminal_patch/providers/regielive.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libs/subliminal_patch/providers/regielive.py b/libs/subliminal_patch/providers/regielive.py index b35ae5c36..94fceef88 100644 --- a/libs/subliminal_patch/providers/regielive.py +++ b/libs/subliminal_patch/providers/regielive.py @@ -70,7 +70,9 @@ class RegieLiveProvider(Provider): def initialize(self): self.session = Session() - self.url = 'http://api.regielive.ro/kodi/cauta.php' + #self.url = 'http://api.regielive.ro/kodi/cauta.php' + # this is a proxy API/scraper for subtitrari.regielive.ro used for subtitles search only + self.url = 'http://subtitles.24-7.ro/index.php' self.api = 'API-KODI-KINGUL' self.headers = {'RL-API': self.api} From e6864a9001474ede32ccc980c0399ab2ee097956 Mon Sep 17 00:00:00 2001 From: Vitiko Date: Sat, 17 Dec 2022 01:48:05 -0400 Subject: [PATCH 20/25] Subf2m provider: handle 403 --- libs/subliminal_patch/providers/subf2m.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/libs/subliminal_patch/providers/subf2m.py b/libs/subliminal_patch/providers/subf2m.py index 262ebc309..5ab637c6f 100644 --- a/libs/subliminal_patch/providers/subf2m.py +++ b/libs/subliminal_patch/providers/subf2m.py @@ -147,6 +147,11 @@ class Subf2mProvider(Provider): for n in range(retry): req = self._session.get(url, stream=True) + + if req.status_code == 403: + logger.debug("Access to this resource is forbidden: %s", url) + break + # Sometimes subf2m will return a 503 code. This error usually disappears # retrying the query if req.status_code == 503: From 1233026adc896055d73576e23e826cb6b8e3c5b2 Mon Sep 17 00:00:00 2001 From: Vitiko Date: Tue, 20 Dec 2022 17:22:49 -0400 Subject: [PATCH 21/25] Subtitrarinoi provider: add several improvements * Fix episode matches (#2018) * Use standard utils to download subtitles * Use standard utils to guess matches (partially implemented) --- .../providers/subtitrarinoi.py | 85 ++++++------------- tests/subliminal_patch/test_subtitrarinoi.py | 54 ++++++++++++ 2 files changed, 80 insertions(+), 59 deletions(-) create mode 100644 tests/subliminal_patch/test_subtitrarinoi.py diff --git a/libs/subliminal_patch/providers/subtitrarinoi.py b/libs/subliminal_patch/providers/subtitrarinoi.py index 0593ce055..d9795666a 100644 --- a/libs/subliminal_patch/providers/subtitrarinoi.py +++ b/libs/subliminal_patch/providers/subtitrarinoi.py @@ -1,28 +1,30 @@ # coding=utf-8 from __future__ import absolute_import -import os -import io + import logging import re -from zipfile import ZipFile, is_zipfile -from rarfile import RarFile, is_rarfile -from guessit import guessit +from subliminal.providers import ParserBeautifulSoup +from subliminal.video import Episode +from subliminal.video import Movie from subliminal_patch.providers import Provider from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin -from subliminal_patch.subtitle import Subtitle, guess_matches -from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming -from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST -from subliminal.exceptions import ProviderError -from subliminal.providers import ParserBeautifulSoup -from subliminal.video import Episode, Movie -from subliminal.subtitle import SUBTITLE_EXTENSIONS +from subliminal_patch.providers.utils import get_archive_from_bytes +from subliminal_patch.providers.utils import get_subtitle_from_archive +from subliminal_patch.providers.utils import update_matches +from 
subliminal_patch.subtitle import guess_matches +from subliminal_patch.subtitle import Subtitle +from subliminal_patch.utils import \ + fix_inconsistent_naming as _fix_inconsistent_naming +from subliminal_patch.utils import sanitize from subzero.language import Language # parsing regex definitions title_re = re.compile(r'(?P(?:.+(?= [Aa][Kk][Aa] ))|.+)(?:(?:.+)(?P<altitle>(?<= [Aa][Kk][Aa] ).+))?') +_SEASON_RE = re.compile(r"(s|(season|sezonul)\s)(?P<x>\d{1,2})", flags=re.IGNORECASE) + def fix_inconsistent_naming(title): """Fix titles with inconsistent naming using dictionary and sanitize them. @@ -48,7 +50,7 @@ class SubtitrarinoiSubtitle(Subtitle): super(SubtitrarinoiSubtitle, self).__init__(language) self.sid = sid self.title = title - self.imdb_id = imdb_id + self.imdb_id = (imdb_id or "").rstrip("/") self.download_link = download_link self.year = year self.download_count = download_count @@ -87,8 +89,7 @@ class SubtitrarinoiSubtitle(Subtitle): if video.imdb_id and self.imdb_id == video.imdb_id: matches.add('imdb_id') - # guess match others - matches |= guess_matches(video, guessit(self.comments, {"type": "movie"})) + update_matches(matches, video, self.comments) else: # title @@ -100,16 +101,19 @@ class SubtitrarinoiSubtitle(Subtitle): if video.series_imdb_id and self.imdb_id == video.series_imdb_id: matches.add('imdb_id') - # season - if f"Sezonul {video.season}" in self.comments: - matches.add('season') + season = _SEASON_RE.search(self.comments) + if season is not None: + season = int(season.group("x")) + if season == video.season: + matches.add('season') + + logger.debug("Season matched? %s [%s -> %s]", "season" in matches, video.season, self.comments) # episode if {"imdb_id", "season"}.issubset(matches): matches.add('episode') - # guess match others - matches |= guess_matches(video, guessit(self.comments, {"type": "episode"})) + update_matches(matches, video, self.comments) self.matches = matches @@ -277,42 +281,5 @@ class SubtitrarinoiProvider(Provider, ProviderSubtitleArchiveMixin): r = self.session.get(subtitle.download_link, headers={'Referer': self.api_url}, timeout=10) r.raise_for_status() - # open the archive - archive_stream = io.BytesIO(r.content) - if is_rarfile(archive_stream): - logger.debug('Archive identified as rar') - archive = RarFile(archive_stream) - elif is_zipfile(archive_stream): - logger.debug('Archive identified as zip') - archive = ZipFile(archive_stream) - else: - subtitle.content = r.content - if subtitle.is_valid(): - return - subtitle.content = None - - raise ProviderError('Unidentified archive type') - - if subtitle.is_episode: - subtitle.content = self._get_subtitle_from_archive(subtitle, archive) - else: - subtitle.content = self.get_subtitle_from_archive(subtitle, archive) - - @staticmethod - def _get_subtitle_from_archive(subtitle, archive): - for name in archive.namelist(): - # discard hidden files - if os.path.split(name)[-1].startswith('.'): - continue - - # discard non-subtitle files - if not name.lower().endswith(SUBTITLE_EXTENSIONS): - continue - - _guess = guessit(name) - if subtitle.desired_episode == _guess['episode']: - return archive.read(name) - - return None - -# vim: set expandtab ts=4 sw=4: + archive = get_archive_from_bytes(r.content) + subtitle.content = get_subtitle_from_archive(archive, episode=subtitle.desired_episode) diff --git a/tests/subliminal_patch/test_subtitrarinoi.py b/tests/subliminal_patch/test_subtitrarinoi.py new file mode 100644 index 000000000..816e8db4f --- /dev/null +++ 
b/tests/subliminal_patch/test_subtitrarinoi.py @@ -0,0 +1,54 @@ +import pytest +from subliminal_patch.providers.subtitrarinoi import SubtitrarinoiProvider +from subliminal_patch.providers.subtitrarinoi import SubtitrarinoiSubtitle +from subzero.language import Language + +romanian = Language("ron") + + +def test_list_subtitles(episodes): + episode = episodes["breaking_bad_s01e01"] + with SubtitrarinoiProvider() as provider: + assert provider.list_subtitles(episode, [romanian]) + + +@pytest.fixture +def subtitrari_subtitle(): + yield SubtitrarinoiSubtitle( + romanian, + "https://www.subtitrari-noi.ro/7493-subtitrari noi.ro\ ", + 3, + "Sezonul 1 ep. 1-7 Sincronizari si pentru variantele HDTV x264 (Sincro atty)", + "Breaking Bad", + "tt0903747/", + "Alice", + "https://www.subtitrari-noi.ro/index.php?page=movie_details&act=1&id=7493", + 2008, + 4230, + True, + 1, + ) + + +@pytest.mark.parametrize("comment", ["season 01", "Sezonul 1 ep. 1-7", "S01"]) +def test_subtitle_get_matches_episode(subtitrari_subtitle, episodes, comment): + episode = episodes["breaking_bad_s01e01"] + episode.episode = 1 + subtitrari_subtitle.comments = comment + assert {"season", "episode", "series", "imdb_id"}.issubset( + subtitrari_subtitle.get_matches(episode) + ) + + +@pytest.mark.parametrize("comment", ["season 02", "Sezonul 2 ep. 1-7", "version 01"]) +def test_subtitle_get_matches_episode_false(subtitrari_subtitle, episodes, comment): + episode = episodes["breaking_bad_s01e01"] + episode.episode = 1 + subtitrari_subtitle.comments = comment + assert not {"season", "episode"}.issubset(subtitrari_subtitle.get_matches(episode)) + + +def test_provider_download_subtitle(subtitrari_subtitle): + with SubtitrarinoiProvider() as provider: + provider.download_subtitle(subtitrari_subtitle) + assert subtitrari_subtitle.is_valid() From c4b8345e659e38992456e31a4c21bbf0a7d38545 Mon Sep 17 00:00:00 2001 From: morpheus65535 <louis_vezina@hotmail.com> Date: Tue, 20 Dec 2022 23:37:52 -0500 Subject: [PATCH 22/25] Added mediainfo as potential embedded subtitles parser. 
#2007 --- bazarr/app/config.py | 3 +- bazarr/subtitles/refiners/ffprobe.py | 45 +++++++++++-------- .../subtitles/tools/embedded_subs_reader.py | 33 ++++++++++++-- .../src/pages/Settings/Subtitles/index.tsx | 9 ++++ .../src/pages/Settings/Subtitles/options.ts | 12 +++++ 5 files changed, 79 insertions(+), 23 deletions(-) diff --git a/bazarr/app/config.py b/bazarr/app/config.py index faee369e1..0ce861856 100644 --- a/bazarr/app/config.py +++ b/bazarr/app/config.py @@ -78,7 +78,8 @@ defaults = { 'wanted_search_frequency_movie': '3', 'subzero_mods': '[]', 'dont_notify_manual_actions': 'False', - 'hi_extension': 'hi' + 'hi_extension': 'hi', + 'embedded_subtitles_parser': 'ffprobe' }, 'auth': { 'type': 'None', diff --git a/bazarr/subtitles/refiners/ffprobe.py b/bazarr/subtitles/refiners/ffprobe.py index 2ffcac1c6..f0e1ce0bd 100644 --- a/bazarr/subtitles/refiners/ffprobe.py +++ b/bazarr/subtitles/refiners/ffprobe.py @@ -32,38 +32,45 @@ def refine_from_ffprobe(path, video): data = parse_video_metadata(file=path, file_size=file_id['file_size'], episode_file_id=file_id['episode_file_id']) - if not data['ffprobe']: + if not data['ffprobe'] or data['mediainfo']: logging.debug("No FFprobe available in cache for this file: {}".format(path)) return video - logging.debug('FFprobe found: %s', data['ffprobe']) - - if 'video' not in data['ffprobe']: - logging.debug('BAZARR FFprobe was unable to find video tracks in the file!') + if data['ffprobe']: + logging.debug('FFprobe found: %s', data['ffprobe']) + parser_data = data['ffprobe'] + elif data['mediainfo']: + logging.debug('Mediainfo found: %s', data['mediainfo']) + parser_data = data['mediainfo'] else: - if 'resolution' in data['ffprobe']['video'][0]: + parser_data = {} + + if 'video' not in parser_data: + logging.debug('BAZARR parser was unable to find video tracks in the file!') + else: + if 'resolution' in parser_data['video'][0]: if not video.resolution: - video.resolution = data['ffprobe']['video'][0]['resolution'] - if 'codec' in data['ffprobe']['video'][0]: + video.resolution = parser_data['video'][0]['resolution'] + if 'codec' in parser_data['video'][0]: if not video.video_codec: - video.video_codec = data['ffprobe']['video'][0]['codec'] - if 'frame_rate' in data['ffprobe']['video'][0]: + video.video_codec = parser_data['video'][0]['codec'] + if 'frame_rate' in parser_data['video'][0]: if not video.fps: - if isinstance(data['ffprobe']['video'][0]['frame_rate'], float): - video.fps = data['ffprobe']['video'][0]['frame_rate'] + if isinstance(parser_data['video'][0]['frame_rate'], float): + video.fps = parser_data['video'][0]['frame_rate'] else: try: - video.fps = data['ffprobe']['video'][0]['frame_rate'].magnitude + video.fps = parser_data['video'][0]['frame_rate'].magnitude except AttributeError: - video.fps = data['ffprobe']['video'][0]['frame_rate'] + video.fps = parser_data['video'][0]['frame_rate'] - if 'audio' not in data['ffprobe']: - logging.debug('BAZARR FFprobe was unable to find audio tracks in the file!') + if 'audio' not in parser_data: + logging.debug('BAZARR parser was unable to find audio tracks in the file!') else: - if 'codec' in data['ffprobe']['audio'][0]: + if 'codec' in parser_data['audio'][0]: if not video.audio_codec: - video.audio_codec = data['ffprobe']['audio'][0]['codec'] - for track in data['ffprobe']['audio']: + video.audio_codec = parser_data['audio'][0]['codec'] + for track in parser_data['audio']: if 'language' in track: video.audio_languages.add(track['language'].alpha3) diff --git 
a/bazarr/subtitles/tools/embedded_subs_reader.py b/bazarr/subtitles/tools/embedded_subs_reader.py index 3a6ff50c5..99c8490f6 100644 --- a/bazarr/subtitles/tools/embedded_subs_reader.py +++ b/bazarr/subtitles/tools/embedded_subs_reader.py @@ -11,6 +11,7 @@ from enzyme.exceptions import MalformedMKVError from languages.custom_lang import CustomLanguage from app.database import TableEpisodes, TableMovies from utilities.path_mappings import path_mappings +from app.config import settings def _handle_alpha3(detected_language: dict): @@ -46,6 +47,24 @@ def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=No codec = detected_language.get("format") # or None subtitles_list.append([language, forced, hearing_impaired, codec]) + elif 'mediainfo' in data and data["mediainfo"] and "subtitle" in data["mediainfo"]: + for detected_language in data["mediainfo"]["subtitle"]: + if "language" not in detected_language: + continue + + # Avoid commentary subtitles + name = detected_language.get("name", "").lower() + if "commentary" in name: + logging.debug("Ignoring commentary subtitle: %s", name) + continue + + language = _handle_alpha3(detected_language) + + forced = detected_language.get("forced", False) + hearing_impaired = detected_language.get("hearing_impaired", False) + codec = detected_language.get("format") # or None + subtitles_list.append([language, forced, hearing_impaired, codec]) + elif data["enzyme"]: for subtitle_track in data["enzyme"].subtitle_tracks: hearing_impaired = ( @@ -68,6 +87,7 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No # Define default data keys value data = { "ffprobe": {}, + "mediainfo": {}, "enzyme": {}, "file_id": episode_file_id or movie_file_id, "file_size": file_size, @@ -102,12 +122,19 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No # if not, we retrieve the metadata from the file from utilities.binaries import get_binary - ffprobe_path = get_binary("ffprobe") + ffprobe_path = mediainfo_path = None + if settings.general.embedded_subtitles_parser == 'ffprobe': + ffprobe_path = get_binary("ffprobe") + elif settings.general.embedded_subtitles_parser == 'mediainfo': + mediainfo_path = get_binary("mediainfo") # if we have ffprobe available if ffprobe_path: data["ffprobe"] = know(video_path=file, context={"provider": "ffmpeg", "ffmpeg": ffprobe_path}) - # if not, we use enzyme for mkv files + # or if we have mediainfo available + elif mediainfo_path: + data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo", "mediainfo": mediainfo_path}) + # else, we use enzyme for mkv files else: if os.path.splitext(file)[1] == ".mkv": with open(file, "rb") as f: @@ -116,7 +143,7 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No except MalformedMKVError: logging.error( "BAZARR cannot analyze this MKV with our built-in MKV parser, you should install " - "ffmpeg/ffprobe: " + file + "ffmpeg/ffprobe or mediainfo: " + file ) else: data["enzyme"] = mkv diff --git a/frontend/src/pages/Settings/Subtitles/index.tsx b/frontend/src/pages/Settings/Subtitles/index.tsx index 2c71b3cdc..a1927682f 100644 --- a/frontend/src/pages/Settings/Subtitles/index.tsx +++ b/frontend/src/pages/Settings/Subtitles/index.tsx @@ -20,6 +20,7 @@ import { adaptiveSearchingDeltaOption, antiCaptchaOption, colorOptions, + embeddedSubtitlesParserOption, folderOptions, hiExtensionOptions, } from "./options"; @@ -278,6 +279,14 @@ const SettingsSubtitlesView: FunctionComponent = () => { 
Hide embedded subtitles for languages that are not currently desired.
         </Message>
+        <Selector
+          settingKey="settings-general-embedded_subtitles_parser"
+          settingOptions={{
+            onSaved: (v) => (v === undefined ? "ffprobe" : v),
+          }}
+          options={embeddedSubtitlesParserOption}
+        ></Selector>
+        <Message>Embedded subtitles video parser</Message>
       </CollapseBox>
     </Section>
     <Section header="Post-Processing">
diff --git a/frontend/src/pages/Settings/Subtitles/options.ts b/frontend/src/pages/Settings/Subtitles/options.ts
index 5549a4128..62c4f60b2 100644
--- a/frontend/src/pages/Settings/Subtitles/options.ts
+++ b/frontend/src/pages/Settings/Subtitles/options.ts
@@ -41,6 +41,18 @@ export const antiCaptchaOption: SelectorOption<string>[] = [
   },
 ];
 
+export const embeddedSubtitlesParserOption: SelectorOption<string>[] = [
+  {
+    label: "ffprobe (faster)",
+    value: "ffprobe",
+  },
+  {
+    label:
+      "mediainfo (slower but may give better results. Must already be installed)",
+    value: "mediainfo",
+  },
+];
+
 export const adaptiveSearchingDelayOption: SelectorOption<string>[] = [
   {
     label: "1 week",
From 0970f19d96443401b92099ceaa1e8b654258b077 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Wed, 21 Dec 2022 19:30:59 -0500
Subject: [PATCH 23/25] Fixed mediainfo integration issues. #2007

---
 bazarr/subtitles/refiners/ffprobe.py            |  7 +++++--
 bazarr/subtitles/tools/embedded_subs_reader.py  | 16 ++++++++++++----
 frontend/src/pages/Settings/Scheduler/index.tsx | 13 +++++++------
 3 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/bazarr/subtitles/refiners/ffprobe.py b/bazarr/subtitles/refiners/ffprobe.py
index f0e1ce0bd..9751814e6 100644
--- a/bazarr/subtitles/refiners/ffprobe.py
+++ b/bazarr/subtitles/refiners/ffprobe.py
@@ -32,8 +32,8 @@ def refine_from_ffprobe(path, video):
     data = parse_video_metadata(file=path, file_size=file_id['file_size'],
                                 episode_file_id=file_id['episode_file_id'])
 
-    if not data['ffprobe'] or data['mediainfo']:
-        logging.debug("No FFprobe available in cache for this file: {}".format(path))
+    if not any([data['ffprobe'], data['mediainfo'], data['enzyme']]):
+        logging.error("No cache available for this file: {}".format(path))
         return video
 
     if data['ffprobe']:
@@ -42,6 +42,9 @@ def refine_from_ffprobe(path, video):
     elif data['mediainfo']:
         logging.debug('Mediainfo found: %s', data['mediainfo'])
         parser_data = data['mediainfo']
+    elif data['enzyme']:
+        logging.debug('Enzyme found: %s', data['enzyme'])
+        parser_data = data['enzyme']
     else:
         parser_data = {}
 
diff --git a/bazarr/subtitles/tools/embedded_subs_reader.py b/bazarr/subtitles/tools/embedded_subs_reader.py
index 99c8490f6..94860ffb7 100644
--- a/bazarr/subtitles/tools/embedded_subs_reader.py
+++ b/bazarr/subtitles/tools/embedded_subs_reader.py
@@ -93,6 +93,8 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
         "file_size": file_size,
     }
 
+    embedded_subs_parser = settings.general.embedded_subtitles_parser
+
     if use_cache:
         # Get the actual cache value from database
         if episode_file_id:
@@ -115,17 +117,23 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
             except Exception:
                 pass
             else:
-                # Check if file size and file id matches and if so, we return the cached value
+                # Check if file size and file id match and if so, we return the cached value if available for the
+                # desired parser
                 if cached_value['file_size'] == file_size and cached_value['file_id'] in [episode_file_id, movie_file_id]:
-                    return cached_value
+                    if ((embedded_subs_parser == 'ffprobe' and 'ffprobe' in cached_value and cached_value['ffprobe']) or
+                            (embedded_subs_parser == 'mediainfo' and 'mediainfo' in cached_value and
+                             cached_value['mediainfo']) or
+                            (not any(k in cached_value for k in ('ffprobe', 'mediainfo')) and
+                             'enzyme' in cached_value and cached_value['enzyme'])):
+                        return cached_value
 
     # if not, we retrieve the metadata from the file
     from utilities.binaries import get_binary
 
     ffprobe_path = mediainfo_path = None
-    if settings.general.embedded_subtitles_parser == 'ffprobe':
+    if embedded_subs_parser == 'ffprobe':
         ffprobe_path = get_binary("ffprobe")
-    elif settings.general.embedded_subtitles_parser == 'mediainfo':
+    elif embedded_subs_parser == 'mediainfo':
         mediainfo_path = get_binary("mediainfo")
 
     # if we have ffprobe available
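
For clarity, the cache-reuse rule the hunk above implements can be read as a standalone predicate: a cached entry is only returned when it belongs to the same file and was produced by the parser currently selected in settings, with enzyme output accepted only for legacy entries that predate the parser keys. A minimal sketch, assuming cached_value is the unpickled dict shown above (cache_is_valid is an illustrative name, not a Bazarr function):

    def cache_is_valid(cached_value, parser, file_size, file_ids):
        # Entries cached for another file or an outdated revision are never reused.
        if cached_value['file_size'] != file_size or cached_value['file_id'] not in file_ids:
            return False
        # Reuse output produced by the currently selected parser (ffprobe or mediainfo).
        if cached_value.get(parser):
            return True
        # Legacy fallback: enzyme output cached before the parser keys existed.
        return ('ffprobe' not in cached_value and 'mediainfo' not in cached_value
                and bool(cached_value.get('enzyme')))
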
diff --git a/frontend/src/pages/Settings/Scheduler/index.tsx b/frontend/src/pages/Settings/Scheduler/index.tsx
index c885f6854..e2a236197 100644
--- a/frontend/src/pages/Settings/Scheduler/index.tsx
+++ b/frontend/src/pages/Settings/Scheduler/index.tsx
@@ -78,12 +78,13 @@ const SettingsSchedulerView: FunctionComponent = () => {
       </CollapseBox>
 
       <Check
-        label="Use cached ffprobe results"
+        label="Use cached embedded subtitles parser results"
         settingKey="settings-sonarr-use_ffprobe_cache"
       ></Check>
       <Message>
-        If disabled, Bazarr will use ffprobe to index video file properties on
-        each run. This will result in higher disk I/O.
+        If disabled, Bazarr will use the embedded subtitles parser to index
+        episode file properties on each run. This will result in higher disk
+        I/O.
       </Message>
 
       <Selector
@@ -114,12 +115,12 @@ const SettingsSchedulerView: FunctionComponent = () => {
       </CollapseBox>
 
       <Check
-        label="Use cached ffprobe results"
+        label="Use cached embedded subtitles parser results"
         settingKey="settings-radarr-use_ffprobe_cache"
       ></Check>
       <Message>
-        If disabled, Bazarr will use ffprobe to index video file properties on
-        each run. This will result in higher disk I/O.
+        If disabled, Bazarr will use the embedded subtitles parser to index
+        movie file properties on each run. This will result in higher disk I/O.
       </Message>
     </Section>
     <Section header="Search and Upgrade Subtitles">
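
The relabeled Scheduler toggles above keep their original setting keys (settings-sonarr-use_ffprobe_cache and settings-radarr-use_ffprobe_cache), so existing configurations are unaffected. A hypothetical call site sketching how such a toggle would feed the use_cache flag of parse_video_metadata (the variable names and the settings lookup are assumptions, not lines from these patches):

    # Illustrative only: reuse cached parser output for an episode when the
    # Sonarr toggle is enabled; otherwise fall through to a fresh parser run.
    data = parse_video_metadata(file=episode_path,
                                file_size=episode_file_size,
                                episode_file_id=episode_file_id,
                                use_cache=settings.sonarr.use_ffprobe_cache)
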
From 5d36b80c0753cff951d3198a197cf709f43efca2 Mon Sep 17 00:00:00 2001
From: morpheus65535 <louis_vezina@hotmail.com>
Date: Thu, 22 Dec 2022 11:37:37 -0500
Subject: [PATCH 24/25] Removed Enzyme for embedded subtitles indexing and
 fixed mediainfo integration issues. #2007

---
 bazarr/subtitles/refiners/ffprobe.py          |  7 +--
 .../subtitles/tools/embedded_subs_reader.py   | 47 +++++--------------
 libs/version.txt                              |  2 +-
 3 files changed, 14 insertions(+), 42 deletions(-)

diff --git a/bazarr/subtitles/refiners/ffprobe.py b/bazarr/subtitles/refiners/ffprobe.py
index 9751814e6..9e080ae51 100644
--- a/bazarr/subtitles/refiners/ffprobe.py
+++ b/bazarr/subtitles/refiners/ffprobe.py
@@ -32,8 +32,8 @@ def refine_from_ffprobe(path, video):
     data = parse_video_metadata(file=path, file_size=file_id['file_size'],
                                 episode_file_id=file_id['episode_file_id'])
 
-    if not any([data['ffprobe'], data['mediainfo'], data['enzyme']]):
-        logging.error("No cache available for this file: {}".format(path))
+    if not data or not any([data['ffprobe'], data['mediainfo']]):
+        logging.debug("No cache available for this file: {}".format(path))
         return video
 
     if data['ffprobe']:
@@ -42,9 +42,6 @@ def refine_from_ffprobe(path, video):
     elif data['mediainfo']:
         logging.debug('Mediainfo found: %s', data['mediainfo'])
         parser_data = data['mediainfo']
-    elif data['enzyme']:
-        logging.debug('Enzyme found: %s', data['enzyme'])
-        parser_data = data['enzyme']
     else:
         parser_data = {}
 
diff --git a/bazarr/subtitles/tools/embedded_subs_reader.py b/bazarr/subtitles/tools/embedded_subs_reader.py
index 94860ffb7..b4153ead1 100644
--- a/bazarr/subtitles/tools/embedded_subs_reader.py
+++ b/bazarr/subtitles/tools/embedded_subs_reader.py
@@ -1,12 +1,9 @@
 # coding=utf-8
 
 import logging
-import os
 import pickle
 
-import enzyme
 from knowit.api import know
-from enzyme.exceptions import MalformedMKVError
 
 from languages.custom_lang import CustomLanguage
 from app.database import TableEpisodes, TableMovies
@@ -65,21 +62,6 @@ def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=No
             codec = detected_language.get("format")  # or None
             subtitles_list.append([language, forced, hearing_impaired, codec])
 
-    elif data["enzyme"]:
-        for subtitle_track in data["enzyme"].subtitle_tracks:
-            hearing_impaired = (
-                subtitle_track.name and "sdh" in subtitle_track.name.lower()
-            )
-
-            subtitles_list.append(
-                [
-                    subtitle_track.language,
-                    subtitle_track.forced,
-                    hearing_impaired,
-                    subtitle_track.codec_id,
-                ]
-            )
-
     return subtitles_list
 
 
@@ -88,7 +70,6 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
     data = {
         "ffprobe": {},
         "mediainfo": {},
-        "enzyme": {},
         "file_id": episode_file_id or movie_file_id,
         "file_size": file_size,
     }
@@ -120,12 +101,14 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
                 # Check if file size and file id match and if so, we return the cached value if available for the
                 # desired parser
                 if cached_value['file_size'] == file_size and cached_value['file_id'] in [episode_file_id, movie_file_id]:
-                    if ((embedded_subs_parser == 'ffprobe' and 'ffprobe' in cached_value and cached_value['ffprobe']) or
-                            (embedded_subs_parser == 'mediainfo' and 'mediainfo' in cached_value and
-                             cached_value['mediainfo']) or
-                            (not any(k in cached_value for k in ('ffprobe', 'mediainfo')) and
-                             'enzyme' in cached_value and cached_value['enzyme'])):
+                    if embedded_subs_parser in cached_value and cached_value[embedded_subs_parser]:
                         return cached_value
+                    else:
+                        # no valid cache
+                        pass
+                else:
+                    # cache must be renewed
+                    pass
 
     # if not, we retrieve the metadata from the file
     from utilities.binaries import get_binary
@@ -142,19 +125,11 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
     # or if we have mediainfo available
     elif mediainfo_path:
         data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo", "mediainfo": mediainfo_path})
-    # else, we use enzyme for mkv files
+    # else, we warn user of missing binary
    else:
-        if os.path.splitext(file)[1] == ".mkv":
-            with open(file, "rb") as f:
-                try:
-                    mkv = enzyme.MKV(f)
-                except MalformedMKVError:
-                    logging.error(
-                        "BAZARR cannot analyze this MKV with our built-in MKV parser, you should install "
-                        "ffmpeg/ffprobe or mediainfo: " + file
-                    )
-                else:
-                    data["enzyme"] = mkv
+        logging.error("BAZARR requires ffmpeg/ffprobe or mediainfo, please install one of them and make sure to "
+                      "choose it in Settings-->Subtitles.")
+        return
 
     # we write to db the result and return the newly cached ffprobe dict
     if episode_file_id:
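
With the enzyme fallback gone, parse_video_metadata now returns None (the bare return above) when neither binary is available, instead of a dict with an empty "enzyme" key. Callers therefore have to treat a falsy result as "no metadata". A sketch under that assumption (the surrounding caller code is invented, not part of the patch):

    # Guard against the new early return before touching parser output.
    data = parse_video_metadata(file=path, file_size=size, episode_file_id=file_id)
    if not data:
        parser_data = {}  # ffprobe/mediainfo missing or not selected; nothing to index
    else:
        parser_data = data.get('ffprobe') or data.get('mediainfo') or {}
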
data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo", "mediainfo": mediainfo_path}) - # else, we use enzyme for mkv files + # else, we warn user of missing binary else: - if os.path.splitext(file)[1] == ".mkv": - with open(file, "rb") as f: - try: - mkv = enzyme.MKV(f) - except MalformedMKVError: - logging.error( - "BAZARR cannot analyze this MKV with our built-in MKV parser, you should install " - "ffmpeg/ffprobe or mediainfo: " + file - ) - else: - data["enzyme"] = mkv + logging.error("BAZARR require ffmpeg/ffprobe or mediainfo, please install it and make sure to choose it in " + "Settings-->Subtitles.") + return # we write to db the result and return the newly cached ffprobe dict if episode_file_id: diff --git a/libs/version.txt b/libs/version.txt index 54977eaaa..8316736be 100644 --- a/libs/version.txt +++ b/libs/version.txt @@ -7,7 +7,6 @@ attrs==22.1.0 charamel==1.0.0 deep-translator==1.9.1 dogpile.cache==1.1.8 -enzyme==0.4.1 fese==0.1.2 ffsubsync==0.4.20 flask-cors==3.0.10 @@ -110,6 +109,7 @@ cloudscraper==1.2.58 #deathbycaptcha # unknown version, only found on gist decorator==5.1.1 dnspython==2.2.1 +enzyme==0.4.1 ftfy==6.1.1 html5lib==1.1 Js2Py==0.74 From e57a99d39eb6f6095568a11642f80b42c4512095 Mon Sep 17 00:00:00 2001 From: morpheus65535 <louis_vezina@hotmail.com> Date: Sat, 31 Dec 2022 11:14:09 -0500 Subject: [PATCH 25/25] Fixed yifysubtitles provider. #2029 --- libs/subliminal_patch/providers/yifysubtitles.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/subliminal_patch/providers/yifysubtitles.py b/libs/subliminal_patch/providers/yifysubtitles.py index 5e5216b72..d8b0e88d3 100644 --- a/libs/subliminal_patch/providers/yifysubtitles.py +++ b/libs/subliminal_patch/providers/yifysubtitles.py @@ -29,7 +29,7 @@ class YifySubtitle(Subtitle): super(YifySubtitle, self).__init__(language) self.page_link = page_link self.hearing_impaired = hi - self.release_info = release + self.release_info = release.replace('\n', ', ') self.uploader = uploader self.rating = rating @@ -116,8 +116,8 @@ class YifySubtitlesProvider(Provider): td = row.findAll('td') rating = int(td[0].text) sub_lang = td[1].text - release = re.sub(r'^subtitle ', '', td[2].text) - page_link = server_url + td[2].find('a').get('href') + release = re.sub(r'^\nsubtitle ', '', td[2].text) + page_link = td[2].find('a').get('href') hi = True if td[3].find('span', {'class': 'hi-subtitle'}) else False uploader = td[4].text