Merge branch 'refs/heads/development' into non-hi-only

morpheus65535 2024-05-11 08:19:03 -04:00
commit 3aa504d83b
114 changed files with 3980 additions and 2897 deletions

View File

@ -8,6 +8,19 @@ updates:
prefix: "[bot]"
open-pull-requests-limit: 1
target-branch: "development"
groups:
fortawesome:
patterns:
- "@fortawesome*"
mantine:
patterns:
- "@mantine*"
react:
patterns:
- "react"
- "react-dom"
- "@types/react"
- "@types/react-dom"
- package-ecosystem: 'github-actions'
directory: '/'
schedule:

View File

@ -34,7 +34,7 @@ jobs:
restore-keys: ${{ runner.os }}-modules-
- name: Setup NodeJS
uses: actions/setup-node@v3
uses: actions/setup-node@v4
with:
node-version: "lts/*"
@ -76,7 +76,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.8"

View File

@ -36,7 +36,7 @@ jobs:
restore-keys: ${{ runner.os }}-modules-
- name: Setup NodeJS
uses: actions/setup-node@v3
uses: actions/setup-node@v4
with:
node-version: "lts/*"

View File

@ -38,7 +38,7 @@ jobs:
restore-keys: ${{ runner.os }}-modules-
- name: Setup NodeJS
uses: actions/setup-node@v3
uses: actions/setup-node@v4
with:
node-version: "lts/*"

View File

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Execute
uses: benc-uk/workflow-dispatch@v121
uses: benc-uk/workflow-dispatch@v1.2.3
with:
workflow: "release_beta_to_dev"
token: ${{ secrets.WF_GITHUB_TOKEN }}

View File

@ -22,7 +22,7 @@ jobs:
ref: development
- name: Setup NodeJS
uses: actions/setup-node@v3
uses: actions/setup-node@v4
with:
node-version: "lts/*"
@ -35,7 +35,7 @@ jobs:
working-directory: ${{ env.UI_DIRECTORY }}
- name: Set up Python 3.8
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.8"

View File

@ -48,7 +48,9 @@ If you need something that is not already part of Bazarr, feel free to create a
## Supported subtitles providers:
- Addic7ed
- Animetosho (requires AniDb HTTP API client described [here](https://wiki.anidb.net/HTTP_API_Definition))
- Assrt
- AvistaZ, CinemaZ (Get session cookies using method described [here](https://github.com/morpheus65535/bazarr/pull/2375#issuecomment-2057010996))
- BetaSeries
- BSplayer
- Embedded Subtitles

View File

@ -8,12 +8,14 @@ import sys
import time
from bazarr.app.get_args import args
from bazarr.literals import *
from bazarr.literals import EXIT_PYTHON_UPGRADE_NEEDED, EXIT_NORMAL, FILE_RESTART, FILE_STOP, ENV_RESTARTFILE, ENV_STOPFILE, EXIT_INTERRUPT
def exit_program(status_code):
print(f'Bazarr exited with status code {status_code}.')
raise SystemExit(status_code)
def check_python_version():
python_version = platform.python_version_tuple()
minimum_py3_tuple = (3, 8, 0)
@ -52,12 +54,13 @@ check_python_version()
dir_name = os.path.dirname(__file__)
def start_bazarr():
script = [get_python_path(), "-u", os.path.normcase(os.path.join(dir_name, 'bazarr', 'main.py'))] + sys.argv[1:]
ep = subprocess.Popen(script, stdout=None, stderr=None, stdin=subprocess.DEVNULL)
ep = subprocess.Popen(script, stdout=None, stderr=None, stdin=subprocess.DEVNULL, env=os.environ)
print(f"Bazarr starting child process with PID {ep.pid}...")
return ep
def terminate_child():
print(f"Terminating child process with PID {child_process.pid}")
@ -66,7 +69,7 @@ def terminate_child():
def get_stop_status_code(input_file):
try:
with open(input_file,'r') as file:
with open(input_file, 'r') as file:
# read status code from file, if it exists
line = file.readline()
try:
@ -74,33 +77,33 @@ def get_stop_status_code(input_file):
except (ValueError, TypeError):
status_code = EXIT_NORMAL
file.close()
except:
except Exception:
status_code = EXIT_NORMAL
return status_code
def check_status():
global child_process
if os.path.exists(stopfile):
status_code = get_stop_status_code(stopfile)
if os.path.exists(stop_file):
status_code = get_stop_status_code(stop_file)
try:
print(f"Deleting stop file...")
os.remove(stopfile)
except Exception as e:
print("Deleting stop file...")
os.remove(stop_file)
except Exception:
print('Unable to delete stop file.')
finally:
terminate_child()
exit_program(status_code)
if os.path.exists(restartfile):
if os.path.exists(restart_file):
try:
print(f"Deleting restart file...")
os.remove(restartfile)
print("Deleting restart file...")
os.remove(restart_file)
except Exception:
print('Unable to delete restart file.')
finally:
terminate_child()
print(f"Bazarr is restarting...")
print("Bazarr is restarting...")
child_process = start_bazarr()
@ -113,25 +116,25 @@ def interrupt_handler(signum, frame):
interrupted = True
print('Handling keyboard interrupt...')
else:
print(f"Stop doing that! I heard you the first time!")
print("Stop doing that! I heard you the first time!")
if __name__ == '__main__':
interrupted = False
signal.signal(signal.SIGINT, interrupt_handler)
restartfile = os.path.join(args.config_dir, FILE_RESTART)
stopfile = os.path.join(args.config_dir, FILE_STOP)
os.environ[ENV_STOPFILE] = stopfile
os.environ[ENV_RESTARTFILE] = restartfile
restart_file = os.path.join(args.config_dir, FILE_RESTART)
stop_file = os.path.join(args.config_dir, FILE_STOP)
os.environ[ENV_STOPFILE] = stop_file
os.environ[ENV_RESTARTFILE] = restart_file
# Cleanup leftover files
try:
os.remove(restartfile)
os.remove(restart_file)
except FileNotFoundError:
pass
try:
os.remove(stopfile)
os.remove(stop_file)
except FileNotFoundError:
pass
@ -145,5 +148,5 @@ if __name__ == '__main__':
time.sleep(5)
except (KeyboardInterrupt, SystemExit, ChildProcessError):
# this code should never be reached, if signal handling is working properly
print(f'Bazarr exited main script file via keyboard interrupt.')
print('Bazarr exited main script file via keyboard interrupt.')
exit_program(EXIT_INTERRUPT)
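For orientation: the launcher and the main process coordinate through plain files. The child writes an integer exit code into the stop file, and the loop above reads it back via get_stop_status_code() and exits with it. A minimal sketch of the child-side write, mirroring utilities.central.stop_bazarr further down in this commit (the helper name is hypothetical):

import os
from literals import ENV_STOPFILE  # same constant the launcher exports above

def request_stop(status_code=0):
    # hypothetical helper: write the desired exit code for the launcher to read back
    with open(os.environ[ENV_STOPFILE], 'w', encoding='UTF-8') as file:
        file.write(f'{status_code}\n')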

View File

@ -7,7 +7,6 @@ from flask_restx import Resource, Namespace, fields, marshal
from app.config import settings
from app.logger import empty_log
from app.get_args import args
from utilities.central import get_log_file_path
from ..utils import authenticate

View File

@ -1,6 +1,6 @@
# coding=utf-8
from flask import Flask, redirect
from flask import Flask, redirect, Request
from flask_compress import Compress
from flask_cors import CORS
@ -13,9 +13,17 @@ from .config import settings, base_url
socketio = SocketIO()
class CustomRequest(Request):
def __init__(self, *args, **kwargs):
super(CustomRequest, self).__init__(*args, **kwargs)
# raise the cap on multipart form parts so large form submissions don't fail with a 413
self.max_form_parts = 10000
def create_app():
# Flask Setup
app = Flask(__name__)
app.request_class = CustomRequest
app.config['COMPRESS_ALGORITHM'] = 'gzip'
Compress(app)
app.wsgi_app = ReverseProxied(app.wsgi_app)
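Pieced together, the hunk above is this pattern; a minimal sketch assuming Flask 2.x with Werkzeug 2.2.3+, where max_form_parts caps how many multipart form fields a request may carry before Werkzeug answers 413:

from flask import Flask, Request

class CustomRequest(Request):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_form_parts = 10000  # Werkzeug's default cap is 1000

app = Flask(__name__)
app.request_class = CustomRequest  # Flask builds every incoming request from this class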

View File

@ -25,7 +25,7 @@ def check_releases():
url_releases = 'https://api.github.com/repos/morpheus65535/Bazarr/releases?per_page=100'
try:
logging.debug(f'BAZARR getting releases from Github: {url_releases}')
r = requests.get(url_releases, allow_redirects=True)
r = requests.get(url_releases, allow_redirects=True, timeout=15)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("Error trying to get releases from Github. Http error.")
@ -160,8 +160,7 @@ def apply_update():
'BAZARR was unable to delete the previous build directory during upgrade process.')
for file in archive.namelist():
if file.startswith(zip_root_directory) and file != zip_root_directory and not \
file.endswith('bazarr.py'):
if file.startswith(zip_root_directory) and file != zip_root_directory:
file_path = os.path.join(bazarr_dir, file[len(zip_root_directory):])
parent_dir = os.path.dirname(file_path)
os.makedirs(parent_dir, exist_ok=True)

View File

@ -7,6 +7,7 @@ import logging
import re
from urllib.parse import quote_plus
from utilities.binaries import BinaryNotFound, get_binary
from literals import EXIT_VALIDATION_ERROR
from utilities.central import stop_bazarr
from subliminal.cache import region
@ -54,6 +55,14 @@ class Validator(OriginalValidator):
)
def check_parser_binary(value):
try:
get_binary(value)
except BinaryNotFound:
raise ValidationError(f"Executable '{value}' not found in search path. Please install before making this selection.")
return True
validators = [
# general section
Validator('general.flask_secret_key', must_exist=True, default=hexlify(os.urandom(16)).decode(),
@ -100,6 +109,7 @@ validators = [
Validator('general.adaptive_searching_delta', must_exist=True, default='1w', is_type_of=str,
is_in=['3d', '1w', '2w', '3w', '4w']),
Validator('general.enabled_providers', must_exist=True, default=[], is_type_of=list),
Validator('general.enabled_integrations', must_exist=True, default=[], is_type_of=list),
Validator('general.multithreading', must_exist=True, default=True, is_type_of=bool),
Validator('general.chmod_enabled', must_exist=True, default=False, is_type_of=bool),
Validator('general.chmod', must_exist=True, default='0640', is_type_of=str),
@ -119,7 +129,7 @@ validators = [
Validator('general.dont_notify_manual_actions', must_exist=True, default=False, is_type_of=bool),
Validator('general.hi_extension', must_exist=True, default='hi', is_type_of=str, is_in=['hi', 'cc', 'sdh']),
Validator('general.embedded_subtitles_parser', must_exist=True, default='ffprobe', is_type_of=str,
is_in=['ffprobe', 'mediainfo']),
is_in=['ffprobe', 'mediainfo'], condition=check_parser_binary),
Validator('general.default_und_audio_lang', must_exist=True, default='', is_type_of=str),
Validator('general.default_und_embedded_subtitles_lang', must_exist=True, default='', is_type_of=str),
Validator('general.parse_embedded_audio_track', must_exist=True, default=False, is_type_of=bool),
@ -225,6 +235,11 @@ validators = [
Validator('addic7ed.user_agent', must_exist=True, default='', is_type_of=str),
Validator('addic7ed.vip', must_exist=True, default=False, is_type_of=bool),
# animetosho section
Validator('animetosho.search_threshold', must_exist=True, default=6, is_type_of=int, gte=1, lte=15),
Validator('animetosho.anidb_api_client', must_exist=True, default='', is_type_of=str, cast=str),
Validator('animetosho.anidb_api_client_ver', must_exist=True, default=1, is_type_of=int, gte=1, lte=9),
# avistaz section
Validator('avistaz.cookies', must_exist=True, default='', is_type_of=str),
Validator('avistaz.user_agent', must_exist=True, default='', is_type_of=str),
@ -278,10 +293,6 @@ validators = [
Validator('napisy24.username', must_exist=True, default='', is_type_of=str, cast=str),
Validator('napisy24.password', must_exist=True, default='', is_type_of=str, cast=str),
# subscene section
Validator('subscene.username', must_exist=True, default='', is_type_of=str, cast=str),
Validator('subscene.password', must_exist=True, default='', is_type_of=str, cast=str),
# betaseries section
Validator('betaseries.token', must_exist=True, default='', is_type_of=str, cast=str),
@ -360,6 +371,10 @@ validators = [
Validator('postgresql.database', must_exist=True, default='', is_type_of=str),
Validator('postgresql.username', must_exist=True, default='', is_type_of=str, cast=str),
Validator('postgresql.password', must_exist=True, default='', is_type_of=str, cast=str),
# anidb section
Validator('anidb.api_client', must_exist=True, default='', is_type_of=str),
Validator('anidb.api_client_ver', must_exist=True, default=1, is_type_of=int),
]
@ -433,6 +448,7 @@ array_keys = ['excluded_tags',
'subzero_mods',
'excluded_series_types',
'enabled_providers',
'enabled_integrations',
'path_mappings',
'path_mappings_movie',
'language_equals',
@ -666,15 +682,6 @@ def save_settings(settings_items):
reset_providers = True
region.delete('oscom_token')
if key == 'settings-subscene-username':
if key != settings.subscene.username:
reset_providers = True
region.delete('subscene_cookies2')
elif key == 'settings-subscene-password':
if key != settings.subscene.password:
reset_providers = True
region.delete('subscene_cookies2')
if key == 'settings-titlovi-username':
if key != settings.titlovi.username:
reset_providers = True
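A minimal sketch of how the new condition hook behaves, assuming dynaconf 3.x semantics (a condition callable receives the value and must return truthy, or raise, for validation to pass); the get_binary stand-in here is hypothetical:

from dynaconf import Dynaconf, Validator
from dynaconf.validator import ValidationError

def check_parser_binary(value):
    if value not in ('ffprobe', 'mediainfo'):  # stand-in for the get_binary() search-path lookup
        raise ValidationError(f"Executable '{value}' not found in search path.")
    return True  # a condition callable must return truthy for validation to pass

settings = Dynaconf()
settings.validators.register(
    Validator('general.embedded_subtitles_parser', default='ffprobe',
              is_in=['ffprobe', 'mediainfo'], condition=check_parser_binary))
settings.validators.validate()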

View File

@ -125,7 +125,7 @@ def provider_throttle_map():
PROVIDERS_FORCED_OFF = ["addic7ed", "tvsubtitles", "legendasdivx", "napiprojekt", "shooter",
"hosszupuska", "supersubtitles", "titlovi", "assrt", "subscene"]
"hosszupuska", "supersubtitles", "titlovi", "assrt"]
throttle_count = {}
@ -259,11 +259,6 @@ def get_providers_auth():
'also_foreign': False, # fixme
'verify_ssl': settings.podnapisi.verify_ssl
},
'subscene': {
'username': settings.subscene.username,
'password': settings.subscene.password,
'only_foreign': False, # fixme
},
'legendasdivx': {
'username': settings.legendasdivx.username,
'password': settings.legendasdivx.password,
@ -324,6 +319,9 @@ def get_providers_auth():
'timeout': settings.whisperai.timeout,
'ffmpeg_path': _FFMPEG_BINARY,
'loglevel': settings.whisperai.loglevel,
},
"animetosho": {
'search_threshold': settings.animetosho.search_threshold,
}
}

View File

@ -11,7 +11,6 @@ from logging.handlers import TimedRotatingFileHandler
from utilities.central import get_log_file_path
from pytz_deprecation_shim import PytzUsageWarning
from .get_args import args
from .config import settings
@ -62,18 +61,18 @@ class UnwantedWaitressMessageFilter(logging.Filter):
if settings.general.debug:
# no filtering in debug mode
return True
unwantedMessages = [
"Exception while serving /api/socket.io/",
['Session is disconnected', 'Session not found' ],
"Exception while serving /api/socket.io/",
["'Session is disconnected'", "'Session not found'" ],
"Exception while serving /api/socket.io/",
['"Session is disconnected"', '"Session not found"' ],
"Exception when servicing %r",
unwantedMessages = [
"Exception while serving /api/socket.io/",
['Session is disconnected', 'Session not found'],
"Exception while serving /api/socket.io/",
["'Session is disconnected'", "'Session not found'"],
"Exception while serving /api/socket.io/",
['"Session is disconnected"', '"Session not found"'],
"Exception when servicing %r",
[],
]

View File

@ -10,7 +10,6 @@ from apscheduler.triggers.date import DateTrigger
from apscheduler.events import EVENT_JOB_SUBMITTED, EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from datetime import datetime, timedelta
from calendar import day_name
from math import floor
from random import randrange
from tzlocal import get_localzone
try:
@ -47,6 +46,10 @@ ONE_YEAR_IN_SECONDS = 60 * 60 * 24 * 365
def a_long_time_from_now(job):
# job isn't scheduled at all
if job.next_run_time is None:
return True
# currently defined as more than a year from now
delta = job.next_run_time - datetime.now(job.next_run_time.tzinfo)
return delta.total_seconds() > ONE_YEAR_IN_SECONDS
@ -321,8 +324,8 @@ class Scheduler:
self.aps_scheduler.modify_job(job.id,
next_run_time=datetime.now(tz=self.timezone) +
timedelta(seconds=randrange(
job.trigger.interval.total_seconds() * 0.75,
job.trigger.interval.total_seconds())))
int(job.trigger.interval.total_seconds() * 0.75),
int(job.trigger.interval.total_seconds()))))
def __no_task(self):
for job in self.aps_scheduler.get_jobs():
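The int() casts are not cosmetic: timedelta.total_seconds() returns a float, and random.randrange() accepts only integers (a DeprecationWarning on Python 3.10, a TypeError on 3.12). In isolation:

from datetime import timedelta
from random import randrange

interval = timedelta(hours=6).total_seconds()            # 21600.0, a float
# randrange(interval * 0.75, interval)                   # TypeError on Python 3.12
jitter = randrange(int(interval * 0.75), int(interval))  # portable across versions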

View File

@ -4,7 +4,7 @@ import signal
import warnings
import logging
import errno
from literals import EXIT_INTERRUPT, EXIT_NORMAL
from literals import EXIT_INTERRUPT, EXIT_NORMAL, EXIT_PORT_ALREADY_IN_USE_ERROR
from utilities.central import restart_bazarr, stop_bazarr
from waitress.server import create_server
@ -18,10 +18,7 @@ from .database import close_database
from .app import create_app
app = create_app()
ui_bp.register_blueprint(api_bp, url_prefix='/api')
# Mute UserWarning with flask-restx and Flask >= 2.2.0. Will be raised as an exception in 2.3.0
# https://github.com/python-restx/flask-restx/issues/485
warnings.filterwarnings('ignore', message='The setup method ')
app.register_blueprint(api_bp, url_prefix=base_url.rstrip('/') + '/api')
app.register_blueprint(ui_bp, url_prefix=base_url.rstrip('/'))
@ -56,10 +53,17 @@ class Server:
logging.exception("BAZARR cannot bind to specified IP, trying with default (0.0.0.0)")
self.address = '0.0.0.0'
self.connected = False
super(Server, self).__init__()
elif error.errno == errno.EADDRINUSE:
logging.exception("BAZARR cannot bind to specified TCP port, trying with default (6767)")
self.port = '6767'
self.connected = False
if self.port != '6767':
logging.exception("BAZARR cannot bind to specified TCP port, trying with default (6767)")
self.port = '6767'
self.connected = False
super(Server, self).__init__()
else:
logging.exception("BAZARR cannot bind to default TCP port (6767) because it's already in use, "
"exiting...")
self.shutdown(EXIT_PORT_ALREADY_IN_USE_ERROR)
else:
logging.exception("BAZARR cannot start because of unhandled exception.")
self.shutdown()
@ -83,9 +87,9 @@ class Server:
pass
def close_all(self):
print(f"Closing database...")
print("Closing database...")
close_database()
print(f"Closing webserver...")
print("Closing webserver...")
self.server.close()
def shutdown(self, status=EXIT_NORMAL):
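The new EADDRINUSE branch retries the default port exactly once and shuts down if that is taken too. The same policy in isolation, a sketch with the waitress specifics stripped out:

import errno
import socket

def bind_with_fallback(address, port, default_port=6767):
    try:
        return socket.create_server((address, int(port)))
    except OSError as error:
        if error.errno == errno.EADDRINUSE and int(port) != default_port:
            # one retry on the default port
            return socket.create_server((address, default_port))
        raise  # default port also in use, or another bind error: give up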

View File

@ -12,7 +12,7 @@ from signalrcore.hub_connection_builder import HubConnectionBuilder
from collections import deque
from time import sleep
from constants import headers
from constants import HEADERS
from app.event_handler import event_stream
from sonarr.sync.episodes import sync_episodes, sync_one_episode
from sonarr.sync.series import update_series, update_one_series
@ -39,7 +39,7 @@ class SonarrSignalrClientLegacy:
self.session = Session()
self.session.timeout = 60
self.session.verify = False
self.session.headers = headers
self.session.headers = HEADERS
self.connection = None
self.connected = False
@ -162,7 +162,7 @@ class SonarrSignalrClient:
.with_url(f"{url_sonarr()}/signalr/messages?access_token={self.apikey_sonarr}",
options={
"verify_ssl": False,
"headers": headers
"headers": HEADERS
}) \
.with_automatic_reconnect({
"type": "raw",
@ -229,7 +229,7 @@ class RadarrSignalrClient:
.with_url(f"{url_radarr()}/signalr/messages?access_token={self.apikey_radarr}",
options={
"verify_ssl": False,
"headers": headers
"headers": HEADERS
}) \
.with_automatic_reconnect({
"type": "raw",

View File

@ -4,11 +4,12 @@ import os
import requests
import mimetypes
from flask import request, abort, render_template, Response, session, send_file, stream_with_context, Blueprint
from flask import (request, abort, render_template, Response, session, send_file, stream_with_context, Blueprint,
redirect)
from functools import wraps
from urllib.parse import unquote
from constants import headers
from constants import HEADERS
from literals import FILE_LOG
from sonarr.info import url_api_sonarr
from radarr.info import url_api_radarr
@ -65,6 +66,10 @@ def check_login(actual_method):
@ui_bp.route('/', defaults={'path': ''})
@ui_bp.route('/<path:path>')
def catch_all(path):
if path.startswith('login') and settings.auth.type not in ['basic', 'form']:
# login page has been accessed when no authentication is enabled
return redirect(base_url or "/", code=302)
auth = True
if settings.auth.type == 'basic':
auth = request.authorization
@ -113,7 +118,7 @@ def series_images(url):
baseUrl = settings.sonarr.base_url
url_image = f'{url_api_sonarr()}{url.lstrip(baseUrl)}?apikey={apikey}'.replace('poster-250', 'poster-500')
try:
req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=headers)
req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=HEADERS)
except Exception:
return '', 404
else:
@ -127,7 +132,7 @@ def movies_images(url):
baseUrl = settings.radarr.base_url
url_image = f'{url_api_radarr()}{url.lstrip(baseUrl)}?apikey={apikey}'
try:
req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=headers)
req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=HEADERS)
except Exception:
return '', 404
else:
@ -168,7 +173,7 @@ def proxy(protocol, url):
url = f'{protocol}://{unquote(url)}'
params = request.args
try:
result = requests.get(url, params, allow_redirects=False, verify=False, timeout=5, headers=headers)
result = requests.get(url, params, allow_redirects=False, verify=False, timeout=5, headers=HEADERS)
except Exception as e:
return dict(status=False, error=repr(e))
else:

View File

@ -1,13 +1,12 @@
# coding=utf-8
import os
import re
# set Bazarr user-agent used to make requests
headers = {"User-Agent": os.environ["SZ_USER_AGENT"]}
# hearing-impaired detection regex
hi_regex = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')
HEADERS = {"User-Agent": os.environ["SZ_USER_AGENT"]}
# minimum file size for Bazarr to consider it a video
MINIMUM_VIDEO_SIZE = 20480
# maximum size for a subtitles file
MAXIMUM_SUBTITLE_SIZE = 1 * 1024 * 1024

View File

@ -19,7 +19,8 @@ from utilities.backup import restore_from_backup
from app.database import init_db
from literals import *
from literals import (EXIT_CONFIG_CREATE_ERROR, ENV_BAZARR_ROOT_DIR, DIR_BACKUP, DIR_CACHE, DIR_CONFIG, DIR_DB, DIR_LOG,
DIR_RESTORE, EXIT_REQUIREMENTS_ERROR)
from utilities.central import make_bazarr_dir, restart_bazarr, stop_bazarr
# set start time global variable as epoch

View File

@ -28,3 +28,4 @@ EXIT_VALIDATION_ERROR = -101
EXIT_CONFIG_CREATE_ERROR = -102
EXIT_PYTHON_UPGRADE_NEEDED = -103
EXIT_REQUIREMENTS_ERROR = -104
EXIT_PORT_ALREADY_IN_USE_ERROR = -105

View File

@ -1,7 +1,6 @@
# coding=utf-8
import os
import io
from threading import Thread
@ -42,6 +41,8 @@ from languages.get_languages import load_language_in_db # noqa E402
from app.signalr_client import sonarr_signalr_client, radarr_signalr_client # noqa E402
from app.server import webserver, app # noqa E402
from app.announcements import get_announcements_to_file # noqa E402
from utilities.central import stop_bazarr # noqa E402
from literals import EXIT_NORMAL # noqa E402
if args.create_db_revision:
create_db_revision(app)

View File

@ -5,7 +5,7 @@ import logging
from app.config import settings
from radarr.info import url_api_radarr
from constants import headers
from constants import HEADERS
def browse_radarr_filesystem(path='#'):
@ -16,7 +16,7 @@ def browse_radarr_filesystem(path='#'):
f"includeFiles=false&apikey={settings.radarr.apikey}")
try:
r = requests.get(url_radarr_api_filesystem, timeout=int(settings.radarr.http_timeout), verify=False,
headers=headers)
headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get series from Radarr. Http error.")

View File

@ -3,12 +3,12 @@
import logging
import requests
import datetime
import json
from requests.exceptions import JSONDecodeError
from dogpile.cache import make_region
from app.config import settings, empty_values
from constants import headers
from constants import HEADERS
region = make_region().configure('dogpile.cache.memory')
@ -30,17 +30,17 @@ class GetRadarrInfo:
try:
rv = f"{url_radarr()}/api/system/status?apikey={settings.radarr.apikey}"
radarr_json = requests.get(rv, timeout=int(settings.radarr.http_timeout), verify=False,
headers=headers).json()
headers=HEADERS).json()
if 'version' in radarr_json:
radarr_version = radarr_json['version']
else:
raise json.decoder.JSONDecodeError
except json.decoder.JSONDecodeError:
raise JSONDecodeError
except JSONDecodeError:
try:
rv = f"{url_radarr()}/api/v3/system/status?apikey={settings.radarr.apikey}"
radarr_version = requests.get(rv, timeout=int(settings.radarr.http_timeout), verify=False,
headers=headers).json()['version']
except json.decoder.JSONDecodeError:
headers=HEADERS).json()['version']
except JSONDecodeError:
logging.debug('BAZARR cannot get Radarr version')
radarr_version = 'unknown'
except Exception:
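The swap from json.decoder.JSONDecodeError to requests.exceptions.JSONDecodeError matters because requests raises its own class, backed by simplejson when that is installed, so catching only the stdlib exception can miss it. A sketch of the corrected pattern on requests 2.27+ (URL hypothetical):

import requests
from requests.exceptions import JSONDecodeError

try:
    status = requests.get('http://127.0.0.1:7878/api/system/status', timeout=10).json()
except JSONDecodeError:
    status = {}  # body was not valid JSON, e.g. an HTML error page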

View File

@ -5,7 +5,7 @@ import requests
from app.config import settings
from radarr.info import url_api_radarr
from constants import headers
from constants import HEADERS
def notify_radarr(radarr_id):
@ -15,6 +15,6 @@ def notify_radarr(radarr_id):
'name': 'RescanMovie',
'movieId': int(radarr_id)
}
requests.post(url, json=data, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
requests.post(url, json=data, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
except Exception:
logging.exception('BAZARR cannot notify Radarr')

View File

@ -8,7 +8,7 @@ from app.config import settings
from utilities.path_mappings import path_mappings
from app.database import TableMoviesRootfolder, TableMovies, database, delete, update, insert, select
from radarr.info import url_api_radarr
from constants import headers
from constants import HEADERS
def get_radarr_rootfolder():
@ -19,7 +19,7 @@ def get_radarr_rootfolder():
url_radarr_api_rootfolder = f"{url_api_radarr()}rootfolder?apikey={apikey_radarr}"
try:
rootfolder = requests.get(url_radarr_api_rootfolder, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
rootfolder = requests.get(url_radarr_api_rootfolder, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get rootfolder from Radarr. Connection Error.")
return []

View File

@ -5,7 +5,7 @@ import logging
from app.config import settings
from radarr.info import get_radarr_info, url_api_radarr
from constants import headers
from constants import HEADERS
def get_profile_list():
@ -16,7 +16,7 @@ def get_profile_list():
f"apikey={apikey_radarr}")
try:
profiles_json = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
profiles_json = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get profiles from Radarr. Connection Error.")
except requests.exceptions.Timeout:
@ -45,7 +45,7 @@ def get_tags():
url_radarr_api_series = f"{url_api_radarr()}tag?apikey={apikey_radarr}"
try:
tagsDict = requests.get(url_radarr_api_series, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
tagsDict = requests.get(url_radarr_api_series, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get tags from Radarr. Connection Error.")
return []
@ -69,7 +69,7 @@ def get_movies_from_radarr_api(apikey_radarr, radarr_id=None):
url_radarr_api_movies = f'{url_api_radarr()}movie{f"/{radarr_id}" if radarr_id else ""}?apikey={apikey_radarr}'
try:
r = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
r = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
if r.status_code == 404:
return
r.raise_for_status()
@ -100,7 +100,7 @@ def get_history_from_radarr_api(apikey_radarr, movie_id):
try:
r = requests.get(url_radarr_api_history, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers)
headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get history from Radarr. Http error.")

View File

@ -5,7 +5,7 @@ import logging
from app.config import settings
from sonarr.info import url_api_sonarr
from constants import headers
from constants import HEADERS
def browse_sonarr_filesystem(path='#'):
@ -15,7 +15,7 @@ def browse_sonarr_filesystem(path='#'):
f"includeFiles=false&apikey={settings.sonarr.apikey}")
try:
r = requests.get(url_sonarr_api_filesystem, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers)
headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get series from Sonarr. Http error.")

View File

@ -3,12 +3,12 @@
import logging
import requests
import datetime
import json
from requests.exceptions import JSONDecodeError
from dogpile.cache import make_region
from app.config import settings, empty_values
from constants import headers
from constants import HEADERS
region = make_region().configure('dogpile.cache.memory')
@ -30,17 +30,17 @@ class GetSonarrInfo:
try:
sv = f"{url_sonarr()}/api/system/status?apikey={settings.sonarr.apikey}"
sonarr_json = requests.get(sv, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers).json()
headers=HEADERS).json()
if 'version' in sonarr_json:
sonarr_version = sonarr_json['version']
else:
raise json.decoder.JSONDecodeError
except json.decoder.JSONDecodeError:
raise JSONDecodeError
except JSONDecodeError:
try:
sv = f"{url_sonarr()}/api/v3/system/status?apikey={settings.sonarr.apikey}"
sonarr_version = requests.get(sv, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers).json()['version']
except json.decoder.JSONDecodeError:
headers=HEADERS).json()['version']
except JSONDecodeError:
logging.debug('BAZARR cannot get Sonarr version')
sonarr_version = 'unknown'
except Exception:

View File

@ -5,7 +5,7 @@ import requests
from app.config import settings
from sonarr.info import url_api_sonarr
from constants import headers
from constants import HEADERS
def notify_sonarr(sonarr_series_id):
@ -15,6 +15,6 @@ def notify_sonarr(sonarr_series_id):
'name': 'RescanSeries',
'seriesId': int(sonarr_series_id)
}
requests.post(url, json=data, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
requests.post(url, json=data, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
except Exception:
logging.exception('BAZARR cannot notify Sonarr')

View File

@ -8,7 +8,7 @@ from app.config import settings
from app.database import TableShowsRootfolder, TableShows, database, insert, update, delete, select
from utilities.path_mappings import path_mappings
from sonarr.info import url_api_sonarr
from constants import headers
from constants import HEADERS
def get_sonarr_rootfolder():
@ -19,7 +19,7 @@ def get_sonarr_rootfolder():
url_sonarr_api_rootfolder = f"{url_api_sonarr()}rootfolder?apikey={apikey_sonarr}"
try:
rootfolder = requests.get(url_sonarr_api_rootfolder, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
rootfolder = requests.get(url_sonarr_api_rootfolder, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get rootfolder from Sonarr. Connection Error.")
return []

View File

@ -5,7 +5,7 @@ import logging
from app.config import settings
from sonarr.info import get_sonarr_info, url_api_sonarr
from constants import headers
from constants import HEADERS
def get_profile_list():
@ -23,7 +23,7 @@ def get_profile_list():
try:
profiles_json = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers)
headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get profiles from Sonarr. Connection Error.")
return None
@ -53,7 +53,7 @@ def get_tags():
url_sonarr_api_series = f"{url_api_sonarr()}tag?apikey={apikey_sonarr}"
try:
tagsDict = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
tagsDict = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get tags from Sonarr. Connection Error.")
return []
@ -71,7 +71,7 @@ def get_series_from_sonarr_api(apikey_sonarr, sonarr_series_id=None):
url_sonarr_api_series = (f"{url_api_sonarr()}series/{sonarr_series_id if sonarr_series_id else ''}?"
f"apikey={apikey_sonarr}")
try:
r = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
r = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code:
@ -110,7 +110,7 @@ def get_episodes_from_sonarr_api(apikey_sonarr, series_id=None, episode_id=None)
return
try:
r = requests.get(url_sonarr_api_episode, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
r = requests.get(url_sonarr_api_episode, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get episodes from Sonarr. Http error.")
@ -144,7 +144,7 @@ def get_episodesFiles_from_sonarr_api(apikey_sonarr, series_id=None, episode_fil
try:
r = requests.get(url_sonarr_api_episodeFiles, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers)
headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get episodeFiles from Sonarr. Http error.")
@ -173,7 +173,7 @@ def get_history_from_sonarr_api(apikey_sonarr, episode_id):
try:
r = requests.get(url_sonarr_api_history, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers)
headers=HEADERS)
r.raise_for_status()
except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get history from Sonarr. Http error.")

View File

@ -24,8 +24,9 @@ from .processing import process_subtitle
@update_pools
def generate_subtitles(path, languages, audio_language, sceneName, title, media_type,
forced_minimum_score=None, is_upgrade=False, profile_id=None, check_if_still_required=False):
def generate_subtitles(path, languages, audio_language, sceneName, title, media_type, forced_minimum_score=None,
is_upgrade=False, profile_id=None, check_if_still_required=False,
previous_subtitles_to_delete=None):
if not languages:
return None
@ -88,6 +89,13 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
fld = get_target_folder(path)
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
'win') and settings.general.chmod_enabled else None
if is_upgrade and previous_subtitles_to_delete:
try:
# delete previously downloaded subtitles when upgrading, to prevent an edge case
# where the upgrade loops endlessly on its own previous file
os.remove(previous_subtitles_to_delete)
except (OSError, FileNotFoundError):
pass
saved_subtitles = save_subtitles(video.original_path, subtitles,
single=settings.general.single_language,
tags=None, # fixme
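The ordering is the point of the new block: the previous subtitle file is removed just before save_subtitles() writes its replacement, so an upgrade that lands on the same file name cannot loop on itself. Reduced to a sketch (helper name hypothetical):

import os

def replace_subtitle(previous_path, write_new):
    try:
        os.remove(previous_path)  # drop the old file first; the name may be reused
    except OSError:               # covers FileNotFoundError: nothing left to delete
        pass
    return write_new()            # then let the save routine write the upgrade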

View File

@ -9,8 +9,8 @@ from subliminal_patch import core
from subzero.language import Language
from charset_normalizer import detect
from constants import MAXIMUM_SUBTITLE_SIZE
from app.config import settings
from constants import hi_regex
from utilities.path_mappings import path_mappings
@ -68,7 +68,7 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
forced = True if os.path.splitext(os.path.splitext(subtitle)[0])[1] == '.forced' else False
# to improve performance, skip detection of files larger that 1M
if os.path.getsize(subtitle_path) > 1 * 1024 * 1024:
if os.path.getsize(subtitle_path) > MAXIMUM_SUBTITLE_SIZE:
logging.debug(f"BAZARR subtitles file is too large to be text based. Skipping this file: "
f"{subtitle_path}")
continue
@ -119,7 +119,7 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
# check if file exist:
if os.path.exists(subtitle_path) and os.path.splitext(subtitle_path)[1] in core.SUBTITLE_EXTENSIONS:
# to improve performance, skip detection of files larger that 1M
if os.path.getsize(subtitle_path) > 1 * 1024 * 1024:
if os.path.getsize(subtitle_path) > MAXIMUM_SUBTITLE_SIZE:
logging.debug(f"BAZARR subtitles file is too large to be text based. Skipping this file: "
f"{subtitle_path}")
continue
@ -136,6 +136,6 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
continue
text = text.decode(encoding)
if bool(re.search(hi_regex, text)):
if bool(re.search(core.HI_REGEX, text)):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
return subtitles

View File

@ -18,7 +18,7 @@ from app.config import get_scores, settings, get_array_from
from utilities.helper import get_target_folder, force_unicode
from app.database import get_profiles_list
from .pool import update_pools, _get_pool, _init_pool
from .pool import update_pools, _get_pool
from .utils import get_video, _get_lang_obj, _get_scores, _set_forced_providers
from .processing import process_subtitle
@ -46,21 +46,7 @@ def manual_search(path, profile_id, providers, sceneName, title, media_type):
try:
if providers:
subtitles = list_all_subtitles([video], language_set, pool)
if 'subscene' in providers:
s_pool = _init_pool("movie", profile_id, {"subscene"})
subscene_language_set = set()
for language in language_set:
if language.forced:
subscene_language_set.add(language)
if len(subscene_language_set):
s_pool.provider_configs.update({"subscene": {"only_foreign": True}})
subtitles_subscene = list_all_subtitles([video], subscene_language_set, s_pool)
s_pool.provider_configs.update({"subscene": {"only_foreign": False}})
subtitles[video] += subtitles_subscene[video]
else:
subtitles = []
logging.info("BAZARR All providers are throttled")
return 'All providers are throttled'
except Exception:

View File

@ -3,9 +3,11 @@
from .ffprobe import refine_from_ffprobe
from .database import refine_from_db
from .arr_history import refine_from_arr_history
from .anidb import refine_from_anidb
registered = {
"database": refine_from_db,
"ffprobe": refine_from_ffprobe,
"arr_history": refine_from_arr_history,
"anidb": refine_from_anidb,
}
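Each refiner shares the refine_from_*(path, video) shape seen in the new anidb module and mutates the video in place, so the dict doubles as a dispatch table. A hypothetical consumer:

def run_refiners(path, video, names=('database', 'ffprobe', 'arr_history', 'anidb')):
    for name in names:
        registered[name](path, video)  # each refiner enriches the video in place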

View File

@ -0,0 +1,140 @@
# coding=utf-8
# fmt: off
import logging
import requests
from collections import namedtuple
from datetime import timedelta
from requests.exceptions import HTTPError
from app.config import settings
from subliminal import Episode, region
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
refined_providers = {'animetosho'}
api_url = 'http://api.anidb.net:9001/httpapi'
class AniDBClient(object):
def __init__(self, api_client_key=None, api_client_ver=1, session=None):
self.session = session or requests.Session()
self.api_client_key = api_client_key
self.api_client_ver = api_client_ver
AnimeInfo = namedtuple('AnimeInfo', ['anime', 'episode_offset'])
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_series_mappings(self):
r = self.session.get(
'https://raw.githubusercontent.com/Anime-Lists/anime-lists/master/anime-list.xml',
timeout=10
)
r.raise_for_status()
return r.content
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_series_id(self, mappings, tvdb_series_season, tvdb_series_id, episode):
# Enrich the collection of anime with the episode offset
animes = [
self.AnimeInfo(anime, int(anime.attrib.get('episodeoffset', 0)))
for anime in mappings.findall(
f".//anime[@tvdbid='{tvdb_series_id}'][@defaulttvdbseason='{tvdb_series_season}']"
)
]
if not animes:
return None, None
# Sort the anime by offset in ascending order
animes.sort(key=lambda a: a.episode_offset)
# Unlike TVDB, AniDB uses a separate id for each part of a season
anidb_id = None
offset = 0

for index, anime_info in enumerate(animes):
    anime, episode_offset = anime_info
    # keep the last part whose offset is still below the requested episode
    if episode > episode_offset:
        anidb_id = int(anime.attrib.get('anidbid'))
        offset = episode_offset

return anidb_id, episode - offset
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_series_episodes_ids(self, tvdb_series_id, season, episode):
mappings = etree.fromstring(self.get_series_mappings())
series_id, episode_no = self.get_series_id(mappings, season, tvdb_series_id, episode)
if not series_id:
return None, None
episodes = etree.fromstring(self.get_episodes(series_id))
return series_id, int(episodes.find(f".//episode[epno='{episode_no}']").attrib.get('id'))
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_episodes(self, series_id):
r = self.session.get(
api_url,
params={
'request': 'anime',
'client': self.api_client_key,
'clientver': self.api_client_ver,
'protover': 1,
'aid': series_id
},
timeout=10)
r.raise_for_status()
xml_root = etree.fromstring(r.content)
response_code = xml_root.attrib.get('code')
if response_code == '500':
raise HTTPError('AniDB API Abuse detected. Banned status.')
elif response_code == '302':
raise HTTPError('AniDB API Client error. Client is disabled or does not exist.')
episode_elements = xml_root.find('episodes')
if not episode_elements:
raise ValueError
return etree.tostring(episode_elements, encoding='utf8', method='xml')
def refine_from_anidb(path, video):
if not isinstance(video, Episode) or not video.series_tvdb_id:
logging.debug(f'Video is not an Anime TV series, skipping refinement for {video}')
return
if refined_providers.intersection(settings.general.enabled_providers) and video.series_anidb_id is None:
refine_anidb_ids(video)
def refine_anidb_ids(video):
anidb_client = AniDBClient(settings.anidb.api_client, settings.anidb.api_client_ver)
season = video.season if video.season else 0
anidb_series_id, anidb_episode_id = anidb_client.get_series_episodes_ids(video.series_tvdb_id, season, video.episode)
if not anidb_episode_id:
logging.error(f'Could not find anime series {video.series}')
return video
video.series_anidb_id = anidb_series_id
video.series_anidb_episode_id = anidb_episode_id
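A hypothetical end-to-end use of the client above, assuming a registered AniDB HTTP API client key and that subliminal's cache region is already configured, as Bazarr does at startup (the ids are made up):

client = AniDBClient(api_client_key='my-registered-client', api_client_ver=1)
# resolve (TVDB series id, season, episode) to AniDB ids through the anime-lists mapping
series_id, episode_id = client.get_series_episodes_ids(12345, 1, 15)  # ids are made up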

View File

@ -33,9 +33,9 @@ def sync_subtitles(video_path, srt_path, srt_lang, forced, percent_score, sonarr
'max_offset_seconds': str(settings.subsync.max_offset_seconds),
'no_fix_framerate': settings.subsync.no_fix_framerate,
'gss': settings.subsync.gss,
'reference': None, # means choose automatically within video file
'sonarr_series_id': sonarr_series_id,
'sonarr_episode_id': sonarr_episode_id,
'reference': None, # means choose automatically within video file
'sonarr_series_id': sonarr_series_id,
'sonarr_episode_id': sonarr_episode_id,
'radarr_id': radarr_id,
}
subsync.sync(**sync_kwargs)

View File

@ -30,8 +30,8 @@ class SubSyncer:
self.vad = 'subs_then_webrtc'
self.log_dir_path = os.path.join(args.config_dir, 'log')
def sync(self, video_path, srt_path, srt_lang,
max_offset_seconds, no_fix_framerate, gss, reference=None,
def sync(self, video_path, srt_path, srt_lang,
max_offset_seconds, no_fix_framerate, gss, reference=None,
sonarr_series_id=None, sonarr_episode_id=None, radarr_id=None):
self.reference = video_path
self.srtin = srt_path

View File

@ -110,7 +110,9 @@ def upgrade_subtitles():
episode['seriesTitle'],
'series',
forced_minimum_score=int(episode['score']),
is_upgrade=True))
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace(
episode['subtitles_path'])))
if result:
if isinstance(result, list) and len(result):
@ -195,7 +197,9 @@ def upgrade_subtitles():
movie['title'],
'movie',
forced_minimum_score=int(movie['score']),
is_upgrade=True))
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace_movie(
movie['subtitles_path'])))
if result:
if isinstance(result, list) and len(result):
result = result[0]

View File

@ -97,7 +97,6 @@ def _set_forced_providers(pool, also_forced=False, forced_required=False):
pool.provider_configs.update(
{
"podnapisi": {'also_foreign': also_forced, "only_foreign": forced_required},
"subscene": {"only_foreign": forced_required},
"opensubtitles": {'also_foreign': also_forced, "only_foreign": forced_required}
}
)

View File

@ -33,7 +33,7 @@ def get_restore_path():
def get_backup_files(fullpath=True):
backup_file_pattern = os.path.join(get_backup_path(), 'bazarr_backup_v*.zip')
file_list = glob(backup_file_pattern)
file_list.sort(key=os.path.getmtime)
file_list.sort(key=os.path.getmtime, reverse=True)
if fullpath:
return file_list
else:

View File

@ -6,30 +6,37 @@
import logging
import os
from pathlib import Path
from literals import *
from literals import ENV_BAZARR_ROOT_DIR, DIR_LOG, ENV_STOPFILE, ENV_RESTARTFILE, EXIT_NORMAL, FILE_LOG
def get_bazarr_dir(sub_dir):
path = os.path.join(os.environ[ENV_BAZARR_ROOT_DIR], sub_dir)
return path
def make_bazarr_dir(sub_dir):
path = get_bazarr_dir(sub_dir)
if not os.path.exists(path):
os.mkdir(path)
def get_log_file_path():
path = os.path.join(get_bazarr_dir(DIR_LOG), FILE_LOG)
return path
def get_stop_file_path():
return os.environ[ENV_STOPFILE]
def get_restart_file_path():
return os.environ[ENV_RESTARTFILE]
def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True):
try:
with open(get_stop_file_path(),'w', encoding='UTF-8') as file:
with open(get_stop_file_path(), 'w', encoding='UTF-8') as file:
# write out status code for final exit
file.write(f'{status_code}\n')
file.close()
@ -39,6 +46,7 @@ def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True):
if exit_main:
raise SystemExit(status_code)
def restart_bazarr():
try:
Path(get_restart_file_path()).touch()
@ -46,4 +54,3 @@ def restart_bazarr():
logging.error(f'BAZARR Cannot create restart file: {repr(e)}')
logging.info('Bazarr is being restarted...')
raise SystemExit(EXIT_NORMAL)

View File

@ -15,5 +15,4 @@ deathbycaptcha # unknown version, only found on gist
git+https://github.com/pannal/libfilebot#egg=libfilebot
git+https://github.com/RobinDavid/pyADS.git@28a2f6dbfb357f85b2c2f49add770b336e88840d#egg=pyads
py7zr==0.7.0 # modified to prevent importing of modules that can't be vendored
subscene-api==1.0.0 # modified specificaly for Bazarr
subliminal==2.1.0 # modified specifically for Bazarr

View File

@ -50,7 +50,7 @@ def default_xattr(fn):
XATTR_MAP = {
"default": (
default_xattr,
lambda result: re.search('(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
lambda result: re.search(r'(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
result).group(2)
),
# "darwin": (
@ -60,7 +60,7 @@ XATTR_MAP = {
# ),
"darwin": (
lambda fn: ["filebot", "-script", "fn:xattr", fn],
lambda result: re.search('(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
lambda result: re.search(r'(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
result).group(2)
),
"win32": (

View File

@ -591,7 +591,7 @@ def scan_videos(path, age=None, archives=True):
def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
"""Refine a video using :ref:`refiners`.
r"""Refine a video using :ref:`refiners`.
.. note::
@ -619,7 +619,7 @@ def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
"""List subtitles.
r"""List subtitles.
The `videos` must pass the `languages` check of :func:`check_video`.
@ -660,7 +660,7 @@ def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
r"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
:param subtitles: subtitles to download.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
@ -677,7 +677,7 @@ def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None,
pool_class=ProviderPool, **kwargs):
"""List and download the best matching subtitles.
r"""List and download the best matching subtitles.
The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.

View File

@ -6,7 +6,7 @@ from stevedore import ExtensionManager
class RegistrableExtensionManager(ExtensionManager):
""":class:~stevedore.extensions.ExtensionManager` with support for registration.
r""":class:~stevedore.extensions.ExtensionManager` with support for registration.
It allows loading of internal extensions without setup and registering/unregistering additional extensions.

View File

@ -1,4 +1,4 @@
"""
r"""
Refiners enrich a :class:`~subliminal.video.Video` object by adding information to it.
A refiner is a simple function:

View File

@ -115,7 +115,7 @@ class Video(object):
class Episode(Video):
"""Episode :class:`Video`.
r"""Episode :class:`Video`.
:param str series: series of the episode.
:param int season: season number of the episode.
@ -129,7 +129,8 @@ class Episode(Video):
"""
def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, alternative_series=None, **kwargs):
series_tvdb_id=None, series_imdb_id=None, alternative_series=None, series_anidb_id=None,
series_anidb_episode_id=None, **kwargs):
super(Episode, self).__init__(name, **kwargs)
#: Series of the episode
@ -162,6 +163,9 @@ class Episode(Video):
#: Alternative names of the series
self.alternative_series = alternative_series or []
self.series_anidb_episode_id = series_anidb_episode_id
self.series_anidb_id = series_anidb_id
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'episode':
@ -198,7 +202,7 @@ class Episode(Video):
class Movie(Video):
"""Movie :class:`Video`.
r"""Movie :class:`Video`.
:param str title: title of the movie.
:param int year: year of the movie.

View File

@ -1,92 +0,0 @@
# coding=utf-8
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from subliminal.exceptions import ConfigurationError
from subzero.language import Language
# alpha3 codes extracted from `https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes`
# Subscene language list extracted from its upload form
from_subscene = {
'Farsi/Persian': 'fas', 'Greek': 'ell', 'Greenlandic': 'kal',
'Malay': 'msa', 'Pashto': 'pus', 'Punjabi': 'pan', 'Swahili': 'swa'
}
from_subscene_with_country = {
'Brazillian Portuguese': ('por', 'BR')
}
to_subscene_with_country = {val: key for key, val in from_subscene_with_country.items()}
to_subscene = {v: k for k, v in from_subscene.items()}
exact_languages_alpha3 = [
'ara', 'aze', 'bel', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu',
'eng', 'epo', 'est', 'eus', 'fin', 'fra', 'heb', 'hin', 'hrv', 'hun',
'hye', 'ind', 'isl', 'ita', 'jpn', 'kat', 'kor', 'kur', 'lav', 'lit',
'mal', 'mkd', 'mni', 'mon', 'mya', 'nld', 'nor', 'pol', 'por', 'ron',
'rus', 'sin', 'slk', 'slv', 'som', 'spa', 'sqi', 'srp', 'sun', 'swe',
'tam', 'tel', 'tgl', 'tha', 'tur', 'ukr', 'urd', 'vie', 'yor'
]
language_ids = {
'ara': 2, 'dan': 10, 'nld': 11, 'eng': 13, 'fas': 46, 'fin': 17,
'fra': 18, 'heb': 22, 'ind': 44, 'ita': 26, 'msa': 50, 'nor': 30,
'ron': 33, 'spa': 38, 'swe': 39, 'vie': 45, 'sqi': 1, 'hye': 73,
'aze': 55, 'eus': 74, 'bel': 68, 'ben': 54, 'bos': 60, 'bul': 5,
'mya': 61, 'cat': 49, 'hrv': 8, 'ces': 9, 'epo': 47, 'est': 16,
'kat': 62, 'deu': 19, 'ell': 21, 'kal': 57, 'hin': 51, 'hun': 23,
'isl': 25, 'jpn': 27, 'kor': 28, 'kur': 52, 'lav': 29, 'lit': 43,
'mkd': 48, 'mal': 64, 'mni': 65, 'mon': 72, 'pus': 67, 'pol': 31,
'por': 32, 'pan': 66, 'rus': 34, 'srp': 35, 'sin': 58, 'slk': 36,
'slv': 37, 'som': 70, 'tgl': 53, 'tam': 59, 'tel': 63, 'tha': 40,
'tur': 41, 'ukr': 56, 'urd': 42, 'yor': 71, 'pt-BR': 4
}
# TODO: specify codes for unspecified_languages
unspecified_languages = [
'Big 5 code', 'Bulgarian/ English',
'Chinese BG code', 'Dutch/ English', 'English/ German',
'Hungarian/ English', 'Rohingya'
]
supported_languages = {Language(l) for l in exact_languages_alpha3}
alpha3_of_code = {l.name: l.alpha3 for l in supported_languages}
supported_languages.update({Language(l) for l in to_subscene})
supported_languages.update({Language(lang, cr) for lang, cr in to_subscene_with_country})
class SubsceneConverter(LanguageReverseConverter):
codes = {l.name for l in supported_languages}
def convert(self, alpha3, country=None, script=None):
if alpha3 in exact_languages_alpha3:
return Language(alpha3).name
if alpha3 in to_subscene:
return to_subscene[alpha3]
if (alpha3, country) in to_subscene_with_country:
return to_subscene_with_country[(alpha3, country)]
raise ConfigurationError('Unsupported language for subscene: %s, %s, %s' % (alpha3, country, script))
def reverse(self, code):
if code in from_subscene_with_country:
return from_subscene_with_country[code]
if code in from_subscene:
return (from_subscene[code],)
if code in alpha3_of_code:
return (alpha3_of_code[code],)
if code in unspecified_languages:
raise NotImplementedError("currently this language is unspecified: %s" % code)
raise ConfigurationError('Unsupported language code for subscene: %s' % code)

View File

@ -49,6 +49,8 @@ SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.smi', '.txt', '.ssa', '.ass', '.mpl', '
_POOL_LIFETIME = datetime.timedelta(hours=12)
HI_REGEX = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')
def remove_crap_from_fn(fn):
# in case of the second regex part, the legit release group name will be in group(2), if it's followed by [string]
@ -539,6 +541,7 @@ class SZProviderPool(ProviderPool):
use_hearing_impaired = hearing_impaired in ("prefer", "force HI")
is_episode = isinstance(video, Episode)
max_score = sum(val for key, val in compute_score._scores['episode' if is_episode else 'movie'].items() if key != "hash")
# sort subtitles by score
unsorted_subtitles = []
@ -570,7 +573,9 @@ class SZProviderPool(ProviderPool):
for subtitle, score, score_without_hash, matches, orig_matches in scored_subtitles:
# check score
if score < min_score:
logger.info('%r: Score %d is below min_score (%d)', subtitle, score, min_score)
min_score_in_percent = round(min_score * 100 / max_score, 2) if min_score > 0 else 0
logger.info('%r: Score %d is below min_score: %d out of %d (or %r%%)',
subtitle, score, min_score, max_score, min_score_in_percent)
break
# stop when all languages are downloaded
@ -1054,7 +1059,7 @@ def list_supported_video_types(pool_class, **kwargs):
def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
r"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
:param subtitles: subtitles to download.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
@ -1071,7 +1076,7 @@ def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None,
pool_class=ProviderPool, throttle_time=0, **kwargs):
"""List and download the best matching subtitles.
r"""List and download the best matching subtitles.
The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.
@ -1198,6 +1203,8 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
continue
# create subtitle path
if bool(re.search(HI_REGEX, subtitle.text)):
subtitle.language.hi = True
subtitle_path = get_subtitle_path(file_path, None if single else subtitle.language,
forced_tag=subtitle.language.forced,
hi_tag=False if must_remove_hi else subtitle.language.hi, tags=tags)
@ -1242,7 +1249,7 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
"""Refine a video using :ref:`refiners`.
r"""Refine a video using :ref:`refiners`.
patch: add traceback logging
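Two behaviors from this file are worth pinning down: HI_REGEX flags bracketed sound cues while its lookbehind spares ASS positioning tags, and the new log line expresses min_score as a percentage of the theoretical maximum. A small self-check, with made-up scores:

import re

HI_REGEX = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')
assert HI_REGEX.search('[DOOR SLAMS]')   # bracketed cue: tagged as hearing-impaired
assert not HI_REGEX.search(r'{\an8}')    # ASS positioning tag: left alone

min_score, max_score = 240, 360          # hypothetical scores
percent = round(min_score * 100 / max_score, 2) if min_score > 0 else 0  # 66.67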

View File

@ -64,4 +64,3 @@ subliminal.refiner_manager.register('drone = subliminal_patch.refiners.drone:ref
subliminal.refiner_manager.register('filebot = subliminal_patch.refiners.filebot:refine')
subliminal.refiner_manager.register('file_info_file = subliminal_patch.refiners.file_info_file:refine')
subliminal.refiner_manager.register('symlinks = subliminal_patch.refiners.symlinks:refine')

View File

@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import lzma
from guessit import guessit
from requests import Session
from subzero.language import Language
from subliminal.exceptions import ConfigurationError, ProviderError
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.video import Episode
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
logger = logging.getLogger(__name__)
supported_languages = [
"ara", # Arabic
"eng", # English
"fin", # Finnish
"fra", # French
"heb", # Hebrew
"ita", # Italian
"jpn", # Japanese
"por", # Portuguese
"pol", # Polish
"spa", # Spanish
"swe", # Swedish
"tha", # Thai
"tur", # Turkish
]
class AnimeToshoSubtitle(Subtitle):
"""AnimeTosho.org Subtitle."""
provider_name = 'animetosho'
def __init__(self, language, download_link, meta, release_info):
super(AnimeToshoSubtitle, self).__init__(language, page_link=download_link)
self.meta = meta
self.download_link = download_link
self.release_info = release_info
@property
def id(self):
return self.download_link
def get_matches(self, video):
matches = set()
matches |= guess_matches(video, guessit(self.meta['filename']))
# These fields are extracted explicitly from the API, so they always have to match;
# anything that didn't would have been filtered out earlier, in list_subtitles.
matches.update(['title', 'series', 'tvdb_id', 'season', 'episode'])
return matches
class AnimeToshoProvider(Provider, ProviderSubtitleArchiveMixin):
"""AnimeTosho.org Provider."""
subtitle_class = AnimeToshoSubtitle
languages = {Language('por', 'BR')} | {Language(sl) for sl in supported_languages}
video_types = Episode
def __init__(self, search_threshold=None):
self.session = None
if not search_threshold:
raise ConfigurationError("Search threshold must be specified!")
self.search_threshold = search_threshold
def initialize(self):
self.session = Session()
def terminate(self):
self.session.close()
def list_subtitles(self, video, languages):
if not video.series_anidb_episode_id:
logger.debug('Skipping video %r. It is not an anime or the anidb_episode_id could not be identified', video)
return []
return [s for s in self._get_series(video.series_anidb_episode_id) if s.language in languages]
def download_subtitle(self, subtitle):
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(subtitle.page_link, timeout=10)
r.raise_for_status()
# Check if the bytes content starts with the xz magic number of the xz archives
if not self._is_xz_file(r.content):
raise ProviderError('Unidentified archive type')
subtitle.content = lzma.decompress(r.content)
return subtitle
@staticmethod
def _is_xz_file(content):
return content.startswith(b'\xFD\x37\x7A\x58\x5A\x00')
def _get_series(self, episode_id):
storage_download_url = 'https://animetosho.org/storage/attach/'
feed_api_url = 'https://feed.animetosho.org/json'
subtitles = []
entries = self._get_series_entries(episode_id)
for entry in entries:
r = self.session.get(
feed_api_url,
params={
'show': 'torrent',
'id': entry['id'],
},
timeout=10
)
r.raise_for_status()
for file in r.json()['files']:
if 'attachments' not in file:
continue
subtitle_files = list(filter(lambda f: f['type'] == 'subtitle', file['attachments']))
for subtitle_file in subtitle_files:
hex_id = format(subtitle_file['id'], '08x')
lang = Language.fromalpha3b(subtitle_file['info']['lang'])
# Portuguese and Brazilian Portuguese share the same alpha3 code; the attachment name is the
# only identifier AnimeTosho provides. Some subtitles have no name at all, which may yield a
# false negative, but with nothing to confirm PT-BR we would rather skip them.
if lang.alpha3 == 'por' and 'brazil' in subtitle_file['info'].get('name', '').lower():
lang = Language('por', 'BR')
subtitle = self.subtitle_class(
lang,
storage_download_url + '{}/{}.xz'.format(hex_id, subtitle_file['id']),
meta=file,
release_info=entry.get('title'),
)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def _get_series_entries(self, episode_id):
api_url = 'https://feed.animetosho.org/json'
r = self.session.get(
api_url,
params={
'eid': episode_id,
},
timeout=10
)
r.raise_for_status()
j = r.json()
# Ignore records that are not yet ready or have been abandoned by AnimeTosho.
entries = list(filter(lambda t: t['status'] == 'complete', j))
# Newest entries first, then cut the list off at the user-configured search threshold.
entries.sort(key=lambda t: t['timestamp'], reverse=True)
return entries[:self.search_threshold]
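Two provider details worth noting: attachments are served as raw .xz streams identified by the six-byte xz magic number, and they are decompressed entirely in memory. A runnable sketch of that download path, reduced to the archive handling:

import lzma

XZ_MAGIC = b'\xFD\x37\x7A\x58\x5A\x00'  # first six bytes of every xz stream

def decompress_subtitle(payload):
    # Same check as _is_xz_file() above, followed by in-memory decompression.
    if not payload.startswith(XZ_MAGIC):
        raise ValueError('Unidentified archive type')
    return lzma.decompress(payload)

assert decompress_subtitle(lzma.compress(b'subtitle body')) == b'subtitle body'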

View File

@ -83,6 +83,14 @@ class BetaSeriesProvider(Provider):
logger.debug('Searching subtitles %r', params)
res = self.session.get(
server_url + 'episodes/display', params=params, timeout=10)
try:
if res.status_code == 400 and res.json()['errors'][0]['code'] == 4001:
# this is to catch no series found
return []
elif res.status_code == 400 and res.json()['errors'][0]['code'] == 1001:
raise AuthenticationError("Invalid token provided")
except Exception:
pass
res.raise_for_status()
result = res.json()
matches.add('tvdb_id')
@ -96,8 +104,14 @@ class BetaSeriesProvider(Provider):
logger.debug('Searching subtitles %r', params)
res = self.session.get(
server_url + 'shows/episodes', params=params, timeout=10)
if res.status_code == 400:
raise AuthenticationError("Invalid token provided")
try:
if res.status_code == 400 and res.json()['errors'][0]['code'] == 4001:
# this is to catch no series found
return []
elif res.status_code == 400 and res.json()['errors'][0]['code'] == 1001:
raise AuthenticationError("Invalid token provided")
except Exception:
pass
res.raise_for_status()
result = res.json()
matches.add('series_tvdb_id')
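Both hunks share one pattern: BetaSeries answers HTTP 400 for unrelated failures, so the JSON error code has to be inspected before raise_for_status() collapses them into a single exception. The decision table as a standalone sketch:

import requests

def classify_betaseries_400(res: requests.Response):
    """Return 'no_series' (code 4001), 'bad_token' (code 1001) or None."""
    if res.status_code != 400:
        return None
    try:
        code = res.json()['errors'][0]['code']
    except Exception:
        # Not the expected error envelope; fall through to raise_for_status().
        return None
    return {4001: 'no_series', 1001: 'bad_token'}.get(code)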

View File

@ -208,8 +208,11 @@ class EmbeddedSubtitlesProvider(Provider):
except Exception as error:
logger.debug("'%s' raised running modifier", error)
with open(path, "rb") as sub:
subtitle.content = sub.read()
if os.path.exists(path):
with open(path, "rb") as sub:
subtitle.content = sub.read()
else:
logger.error("%s not found in filesystem", path)
def _get_subtitle_path(self, subtitle: EmbeddedSubtitle):
container = subtitle.container
@ -379,7 +382,7 @@ def _clean_ass_subtitles(path, output_path):
logger.debug("Cleaned lines: %d", abs(len(lines) - len(clean_lines)))
with open(output_path, "w") as f:
with open(output_path, "w", encoding="utf-8", errors="ignore") as f:
f.writelines(clean_lines)
logger.debug("Lines written to output path: %s", output_path)

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
import functools
from json import JSONDecodeError
from requests.exceptions import JSONDecodeError
import logging
import re
import time

View File

@ -3,6 +3,7 @@ import io
import logging
import os
import json
from requests.exceptions import JSONDecodeError
from subzero.language import Language
from guessit import guessit
@ -144,7 +145,7 @@ class KtuvitProvider(Provider):
self.session.headers["Pragma"] = "no-cache"
self.session.headers["Cache-Control"] = "no-cache"
self.session.headers["Content-Type"] = "application/json"
self.session.headers["User-Agent"]: os.environ.get(
self.session.headers["User-Agent"] = os.environ.get(
"SZ_USER_AGENT", "Sub-Zero/2"
)
@ -161,13 +162,13 @@ class KtuvitProvider(Provider):
is_success = self.parse_d_response(
r, "IsSuccess", False, "Authentication to the provider"
)
except json.decoder.JSONDecodeError:
except JSONDecodeError:
logger.info("Failed to Login to Ktuvit")
if not is_success:
error_message = ''
try:
error_message = self.parse_d_response(r, "ErrorMessage", "[None]")
except json.decode.JSONDecoderError:
except JSONDecodeError:
raise AuthenticationError(
"Error Logging in to Ktuvit Provider: " + str(r.content)
)
@ -473,8 +474,8 @@ class KtuvitProvider(Provider):
try:
response_content = response.json()
except json.decoder.JSONDecodeError as ex:
raise json.decoder.JSONDecodeError(
except JSONDecodeError as ex:
raise JSONDecodeError(
"Unable to parse JSON returned while getting " + message, ex.doc, ex.pos
)
else:
@ -486,11 +487,11 @@ class KtuvitProvider(Provider):
value = response_content.get(field, default_value)
if not value and value != default_value:
raise json.decoder.JSONDecodeError(
raise JSONDecodeError(
"Missing " + message, str(response_content), 0
)
else:
raise json.decoder.JSONDecodeError(
raise JSONDecodeError(
"Incomplete JSON returned while getting " + message,
str(response_content),
0
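The Ktuvit changes are all one migration: Response.json() raises requests.exceptions.JSONDecodeError (available since requests 2.27), which wraps whichever JSON backend requests delegates to, so catching the requests class is reliable where json.decoder.JSONDecodeError was not. The pattern in isolation:

import requests
from requests.exceptions import JSONDecodeError

def parse_field(response: requests.Response, field, default=None):
    # Catching the requests exception covers both the stdlib json backend and simplejson.
    try:
        return response.json().get(field, default)
    except JSONDecodeError:
        return default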

View File

@ -324,7 +324,7 @@ class LegendasdivxProvider(Provider):
# for series, if no results found, try again just with series and season (subtitle packs)
if isinstance(video, Episode):
logger.debug("Legendasdivx.pt :: trying again with just series and season on query.")
querytext = re.sub("(e|E)(\d{2})", "", querytext)
querytext = re.sub(r"(e|E)(\d{2})", "", querytext)
# sleep for a 1 second before another request
sleep(1)
res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from json import JSONDecodeError
from requests.exceptions import JSONDecodeError
import logging
import random
import re

View File

@ -1,366 +0,0 @@
# coding=utf-8
import io
import logging
import os
import time
import traceback
from urllib import parse
import requests
import inflect
import re
import json
import html
import zipfile
import rarfile
from babelfish import language_converters
from guessit import guessit
from dogpile.cache.api import NO_VALUE
from requests.exceptions import RequestException
from subliminal import Episode, ProviderError
from subliminal.video import Episode, Movie
from subliminal.exceptions import ConfigurationError, ServiceUnavailable
from subliminal.utils import sanitize_release_group
from subliminal.cache import region
from subliminal_patch.http import RetryingCFSession
from subliminal_patch.providers import Provider, reinitialize_on_error
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.converters.subscene import language_ids, supported_languages
from subscene_api.subscene import search, SearchTypes, Subtitle as APISubtitle, SITE_DOMAIN
from subzero.language import Language
p = inflect.engine()
language_converters.register('subscene = subliminal_patch.converters.subscene:SubsceneConverter')
logger = logging.getLogger(__name__)
class SubsceneSubtitle(Subtitle):
provider_name = 'subscene'
hearing_impaired_verifiable = True
is_pack = False
page_link = None
season = None
episode = None
releases = None
def __init__(self, language, release_info, hearing_impaired=False, page_link=None, encoding=None, mods=None,
asked_for_release_group=None, asked_for_episode=None):
super(SubsceneSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link,
encoding=encoding, mods=mods)
self.release_info = self.releases = release_info
self.asked_for_episode = asked_for_episode
self.asked_for_release_group = asked_for_release_group
self.season = None
self.episode = None
@classmethod
def from_api(cls, s):
return cls(Language.fromsubscene(s.language.strip()), s.title, hearing_impaired=s.hearing_impaired,
page_link=s.url)
@property
def id(self):
return self.page_link
@property
def numeric_id(self):
return self.page_link.split("/")[-1]
def get_matches(self, video):
matches = set()
if self.release_info.strip() == get_video_filename(video):
logger.debug("Using hash match as the release name is the same")
matches |= {"hash"}
# episode
if isinstance(video, Episode):
guess = guessit(self.release_info, {'type': 'episode'})
self.season = guess.get("season")
self.episode = guess.get("episode")
matches |= guess_matches(video, guess)
if "season" in matches and "episode" not in guess:
# pack
matches.add("episode")
logger.debug("%r is a pack", self)
self.is_pack = True
if "title" in guess and "year" in matches:
if video.series in guess['title']:
matches.add("series")
# movie
else:
guess = guessit(self.release_info, {'type': 'movie'})
matches |= guess_matches(video, guess)
if video.release_group and "release_group" not in matches and "release_group" in guess:
if sanitize_release_group(video.release_group) in sanitize_release_group(guess["release_group"]):
matches.add("release_group")
self.matches = matches
return matches
def get_download_link(self, session):
return APISubtitle.get_zipped_url(self.page_link, session)
def get_video_filename(video):
return os.path.splitext(os.path.basename(video.original_name))[0]
class SubsceneProvider(Provider, ProviderSubtitleArchiveMixin):
"""
This currently only searches for the filename on SubScene. It doesn't open every found subtitle page to avoid
massive hammering, thus it can't determine whether a subtitle is only-foreign or not.
"""
subtitle_class = SubsceneSubtitle
languages = supported_languages
languages.update(set(Language.rebuild(l, forced=True) for l in languages))
languages.update(set(Language.rebuild(l, hi=True) for l in languages))
video_types = (Episode, Movie)
session = None
skip_wrong_fps = False
hearing_impaired_verifiable = True
only_foreign = False
username = None
password = None
search_throttle = 8 # seconds
def __init__(self, only_foreign=False, username=None, password=None):
if not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.only_foreign = only_foreign
self.username = username
self.password = password
def initialize(self):
logger.info("Creating session")
self.session = RetryingCFSession()
prev_cookies = region.get("subscene_cookies2")
if prev_cookies != NO_VALUE:
logger.debug("Re-using old subscene cookies: %r", prev_cookies)
self.session.cookies.update(prev_cookies)
else:
logger.debug("Logging in")
self.login()
def login(self):
r = self.session.get("https://subscene.com/account/login")
if "Server Error" in r.text:
logger.error("Login unavailable; Maintenance?")
raise ServiceUnavailable("Login unavailable; Maintenance?")
match = re.search(r"<script id='modelJson' type='application/json'>\s*(.+)\s*</script>", r.text)
if match:
h = html
data = json.loads(h.unescape(match.group(1)))
login_url = parse.urljoin(data["siteUrl"], data["loginUrl"])
time.sleep(1.0)
r = self.session.post(login_url,
{
"username": self.username,
"password": self.password,
data["antiForgery"]["name"]: data["antiForgery"]["value"]
})
pep_content = re.search(r"<form method=\"post\" action=\"https://subscene\.com/\">"
r".+name=\"id_token\".+?value=\"(?P<id_token>.+?)\".*?"
r"access_token\".+?value=\"(?P<access_token>.+?)\".+?"
r"token_type.+?value=\"(?P<token_type>.+?)\".+?"
r"expires_in.+?value=\"(?P<expires_in>.+?)\".+?"
r"scope.+?value=\"(?P<scope>.+?)\".+?"
r"state.+?value=\"(?P<state>.+?)\".+?"
r"session_state.+?value=\"(?P<session_state>.+?)\"",
r.text, re.MULTILINE | re.DOTALL)
if pep_content:
r = self.session.post(SITE_DOMAIN, pep_content.groupdict())
try:
r.raise_for_status()
except Exception:
raise ProviderError("Something went wrong when trying to log in: %s", traceback.format_exc())
else:
cj = self.session.cookies.copy()
store_cks = ("scene", "idsrv", "idsrv.xsrf", "idsvr.clients", "idsvr.session", "idsvr.username")
for cn in self.session.cookies.keys():
if cn not in store_cks:
del cj[cn]
logger.debug("Storing cookies: %r", cj)
region.set("subscene_cookies2", cj)
return
raise ProviderError("Something went wrong when trying to log in #1")
def terminate(self):
logger.info("Closing session")
self.session.close()
def _create_filters(self, languages):
self.filters = dict(HearingImpaired="2")
acc_filters = self.filters.copy()
if self.only_foreign:
self.filters["ForeignOnly"] = "True"
acc_filters["ForeignOnly"] = self.filters["ForeignOnly"].lower()
logger.info("Only searching for foreign/forced subtitles")
selected_ids = []
for l in languages:
lid = language_ids.get(l.basename, language_ids.get(l.alpha3, None))
if lid:
selected_ids.append(str(lid))
acc_filters["SelectedIds"] = selected_ids
self.filters["LanguageFilter"] = ",".join(acc_filters["SelectedIds"])
last_filters = region.get("subscene_filters")
if last_filters != acc_filters:
region.set("subscene_filters", acc_filters)
logger.debug("Setting account filters to %r", acc_filters)
self.session.post("https://u.subscene.com/filter", acc_filters, allow_redirects=False)
logger.debug("Filter created: '%s'" % self.filters)
def _enable_filters(self):
self.session.cookies.update(self.filters)
logger.debug("Filters applied")
def list_subtitles(self, video, languages):
if not video.original_name:
logger.info("Skipping search because we don't know the original release name")
return []
self._create_filters(languages)
self._enable_filters()
if isinstance(video, Episode):
international_titles = list(set([video.series] + video.alternative_series[:1]))
subtitles = [s for s in self.query(video, international_titles) if s.language in languages]
if not len(subtitles):
us_titles = [x + ' (US)' for x in international_titles]
subtitles = [s for s in self.query(video, us_titles) if s.language in languages]
return subtitles
else:
titles = list(set([video.title] + video.alternative_titles[:1]))
return [s for s in self.query(video, titles) if s.language in languages]
def download_subtitle(self, subtitle):
if subtitle.pack_data:
logger.info("Using previously downloaded pack data")
if rarfile.is_rarfile(io.BytesIO(subtitle.pack_data)):
logger.debug('Identified rar archive')
archive = rarfile.RarFile(io.BytesIO(subtitle.pack_data))
elif zipfile.is_zipfile(io.BytesIO(subtitle.pack_data)):
logger.debug('Identified zip archive')
archive = zipfile.ZipFile(io.BytesIO(subtitle.pack_data))
else:
logger.error('Unsupported compressed format')
return
subtitle.pack_data = None
try:
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
return
except ProviderError:
pass
# open the archive
r = self.session.get(subtitle.get_download_link(self.session), timeout=10)
r.raise_for_status()
archive_stream = io.BytesIO(r.content)
if rarfile.is_rarfile(archive_stream):
logger.debug('Identified rar archive')
archive = rarfile.RarFile(archive_stream)
elif zipfile.is_zipfile(archive_stream):
logger.debug('Identified zip archive')
archive = zipfile.ZipFile(archive_stream)
else:
logger.error('Unsupported compressed format')
return
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
# store archive as pack_data for later caching
subtitle.pack_data = r.content
def parse_results(self, video, film):
subtitles = []
for s in film.subtitles:
try:
subtitle = SubsceneSubtitle.from_api(s)
except NotImplementedError as e:
logger.info(e)
continue
subtitle.asked_for_release_group = video.release_group
if isinstance(video, Episode):
subtitle.asked_for_episode = video.episode
if self.only_foreign:
subtitle.language = Language.rebuild(subtitle.language, forced=True)
# set subtitle language to hi if it's hearing_impaired
if subtitle.hearing_impaired:
subtitle.language = Language.rebuild(subtitle.language, hi=True)
subtitles.append(subtitle)
logger.debug('Found subtitle %r', subtitle)
return subtitles
def do_search(self, *args, **kwargs):
try:
return search(*args, **kwargs)
except requests.HTTPError:
region.delete("subscene_cookies2")
raise
@reinitialize_on_error((RequestException,), attempts=1)
def query(self, video, titles):
subtitles = []
if isinstance(video, Episode):
more_than_one = len(titles) > 1
for series in titles:
term = u"%s - %s Season" % (series, p.number_to_words("%sth" % video.season).capitalize())
logger.debug('Searching with series and season: %s', term)
film = self.do_search(term, session=self.session, release=False, throttle=self.search_throttle,
limit_to=SearchTypes.TvSerie)
if not film and video.season == 1:
logger.debug('Searching with series name: %s', series)
film = self.do_search(series, session=self.session, release=False, throttle=self.search_throttle,
limit_to=SearchTypes.TvSerie)
if film and film.subtitles:
logger.debug('Searching found: %s', len(film.subtitles))
subtitles += self.parse_results(video, film)
else:
logger.debug('No results found')
if more_than_one:
time.sleep(self.search_throttle)
else:
more_than_one = len(titles) > 1
for title in titles:
logger.debug('Searching for movie results: %r', title)
film = self.do_search(title, year=video.year, session=self.session, limit_to=None, release=False,
throttle=self.search_throttle)
if film and film.subtitles:
subtitles += self.parse_results(video, film)
if more_than_one:
time.sleep(self.search_throttle)
logger.info("%s subtitles found" % len(subtitles))
return subtitles

View File

@ -1,410 +0,0 @@
# -*- coding: utf-8 -*-
from difflib import SequenceMatcher
import functools
import logging
import re
import time
import urllib.parse
from bs4 import BeautifulSoup as bso
import cloudscraper
from guessit import guessit
from requests import Session
from requests.exceptions import HTTPError
from subliminal.exceptions import ProviderError
from subliminal_patch.core import Episode
from subliminal_patch.core import Movie
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers import Provider
from subliminal_patch.providers.utils import get_archive_from_bytes
from subliminal_patch.providers.utils import get_subtitle_from_archive
from subliminal_patch.providers.utils import update_matches
from subliminal_patch.subtitle import Subtitle
from subzero.language import Language
logger = logging.getLogger(__name__)
class SubsceneSubtitle(Subtitle):
provider_name = "subscene_cloudscraper"
hash_verifiable = False
def __init__(self, language, page_link, release_info, episode_number=None):
super().__init__(language, page_link=page_link)
self.release_info = release_info
self.episode_number = episode_number
self.episode_title = None
self._matches = set(
("title", "year")
if episode_number is None
else ("title", "series", "year", "season", "episode")
)
def get_matches(self, video):
update_matches(self._matches, video, self.release_info)
return self._matches
@property
def id(self):
return self.page_link
_BASE_URL = "https://subscene.com"
# TODO: add more seasons and languages
_SEASONS = (
"First",
"Second",
"Third",
"Fourth",
"Fifth",
"Sixth",
"Seventh",
"Eighth",
"Ninth",
"Tenth",
"Eleventh",
"Twelfth",
"Thirdteenth",
"Fourthteenth",
"Fifteenth",
"Sixteenth",
"Seventeenth",
"Eightheenth",
"Nineteenth",
"Tweentieth",
)
_LANGUAGE_MAP = {
"english": "eng",
"farsi_persian": "per",
"arabic": "ara",
"spanish": "spa",
"portuguese": "por",
"italian": "ita",
"dutch": "dut",
"hebrew": "heb",
"indonesian": "ind",
"danish": "dan",
"norwegian": "nor",
"bengali": "ben",
"bulgarian": "bul",
"croatian": "hrv",
"swedish": "swe",
"vietnamese": "vie",
"czech": "cze",
"finnish": "fin",
"french": "fre",
"german": "ger",
"greek": "gre",
"hungarian": "hun",
"icelandic": "ice",
"japanese": "jpn",
"macedonian": "mac",
"malay": "may",
"polish": "pol",
"romanian": "rum",
"russian": "rus",
"serbian": "srp",
"thai": "tha",
"turkish": "tur",
}
class SubsceneProvider(Provider):
provider_name = "subscene_cloudscraper"
_movie_title_regex = re.compile(r"^(.+?)( \((\d{4})\))?$")
_tv_show_title_regex = re.compile(
r"^(.+?) [-\(]\s?(.*?) (season|series)\)?( \((\d{4})\))?$"
)
_supported_languages = {}
_supported_languages["brazillian-portuguese"] = Language("por", "BR")
for key, val in _LANGUAGE_MAP.items():
_supported_languages[key] = Language.fromalpha3b(val)
_supported_languages_reversed = {
val: key for key, val in _supported_languages.items()
}
languages = set(_supported_languages.values())
video_types = (Episode, Movie)
subtitle_class = SubsceneSubtitle
def initialize(self):
pass
def terminate(self):
pass
def _scraper_call(self, url, retry=7, method="GET", sleep=5, **kwargs):
last_exc = None
for n in range(retry):
# Creating an instance for every try in order to avoid dropped connections.
# This could probably be improved!
scraper = cloudscraper.create_scraper()
if method == "GET":
req = scraper.get(url, **kwargs)
elif method == "POST":
req = scraper.post(url, **kwargs)
else:
raise NotImplementedError(f"{method} not allowed")
try:
req.raise_for_status()
except HTTPError as error:
logger.debug(
"'%s' returned. Trying again [%d] in %s", error, n + 1, sleep
)
last_exc = error
time.sleep(sleep)
else:
return req
raise ProviderError("403 Retry count exceeded") from last_exc
def _gen_results(self, query):
url = (
f"{_BASE_URL}/subtitles/searchbytitle?query={urllib.parse.quote(query)}&l="
)
result = self._scraper_call(url, method="POST")
soup = bso(result.content, "html.parser")
for title in soup.select("li div[class='title'] a"):
yield title
def _search_movie(self, title, year):
title = title.lower()
year = str(year)
found_movie = None
results = []
for result in self._gen_results(title):
text = result.text.lower()
match = self._movie_title_regex.match(text)
if not match:
continue
match_title = match.group(1)
match_year = match.group(3)
if year == match_year:
results.append(
{
"href": result.get("href"),
"similarity": SequenceMatcher(None, title, match_title).ratio(),
}
)
if results:
results.sort(key=lambda x: x["similarity"], reverse=True)
found_movie = results[0]["href"]
logger.debug("Movie found: %s", results[0])
return found_movie
def _search_tv_show_season(self, title, season, year=None):
try:
season_str = _SEASONS[season - 1].lower()
except IndexError:
logger.debug("Season number not supported: %s", season)
return None
found_tv_show_season = None
results = []
for result in self._gen_results(title):
text = result.text.lower()
match = self._tv_show_title_regex.match(text)
if not match:
logger.debug("Series title not matched: %s", text)
continue
else:
logger.debug("Series title matched: %s", text)
match_title = match.group(1)
match_season = match.group(2)
# Match "complete series" titles as they usually contain season packs
if season_str == match_season or "complete" in match_season:
plus = 0.1 if year and str(year) in text else 0
results.append(
{
"href": result.get("href"),
"similarity": SequenceMatcher(None, title, match_title).ratio()
+ plus,
}
)
if results:
results.sort(key=lambda x: x["similarity"], reverse=True)
found_tv_show_season = results[0]["href"]
logger.debug("TV Show season found: %s", results[0])
return found_tv_show_season
def _find_movie_subtitles(self, path, language):
soup = self._get_subtitle_page_soup(path, language)
subtitles = []
for item in soup.select("tr"):
subtitle = _get_subtitle_from_item(item, language)
if subtitle is None:
continue
logger.debug("Found subtitle: %s", subtitle)
subtitles.append(subtitle)
return subtitles
def _find_episode_subtitles(
self, path, season, episode, language, episode_title=None
):
soup = self._get_subtitle_page_soup(path, language)
subtitles = []
for item in soup.select("tr"):
valid_item = None
clean_text = " ".join(item.text.split())
if not clean_text:
continue
# It will return list values
guess = _memoized_episode_guess(clean_text)
if "season" not in guess:
if "complete series" in clean_text.lower():
logger.debug("Complete series pack found: %s", clean_text)
guess["season"] = [season]
else:
logger.debug("Nothing guessed from release: %s", clean_text)
continue
if season in guess["season"] and episode in guess.get("episode", []):
logger.debug("Episode match found: %s - %s", guess, clean_text)
valid_item = item
elif season in guess["season"] and not "episode" in guess:
logger.debug("Season pack found: %s", clean_text)
valid_item = item
if valid_item is None:
continue
subtitle = _get_subtitle_from_item(item, language, episode)
if subtitle is None:
continue
subtitle.episode_title = episode_title
logger.debug("Found subtitle: %s", subtitle)
subtitles.append(subtitle)
return subtitles
def _get_subtitle_page_soup(self, path, language):
language_path = self._supported_languages_reversed[language]
result = self._scraper_call(f"{_BASE_URL}{path}/{language_path}")
return bso(result.content, "html.parser")
def list_subtitles(self, video, languages):
is_episode = isinstance(video, Episode)
if is_episode:
result = self._search_tv_show_season(video.series, video.season, video.year)
else:
result = self._search_movie(video.title, video.year)
if result is None:
logger.debug("No results")
return []
subtitles = []
for language in languages:
if is_episode:
subtitles.extend(
self._find_episode_subtitles(
result, video.season, video.episode, language, video.title
)
)
else:
subtitles.extend(self._find_movie_subtitles(result, language))
return subtitles
def download_subtitle(self, subtitle):
# TODO: add MustGetBlacklisted support
result = self._scraper_call(subtitle.page_link)
soup = bso(result.content, "html.parser")
try:
download_url = _BASE_URL + str(
soup.select_one("a[id='downloadButton']")["href"] # type: ignore
)
except (AttributeError, KeyError, TypeError):
raise APIThrottled(f"Couldn't get download url from {subtitle.page_link}")
downloaded = self._scraper_call(download_url)
archive = get_archive_from_bytes(downloaded.content)
if archive is None:
raise APIThrottled(f"Invalid archive: {subtitle.page_link}")
subtitle.content = get_subtitle_from_archive(
archive,
episode=subtitle.episode_number,
episode_title=subtitle.episode_title,
)
@functools.lru_cache(2048)
def _memoized_episode_guess(content):
# Use include to save time from unnecessary checks
return guessit(
content,
{
"type": "episode",
# Add codec keys to avoid matching x264, 5.1, etc as episode info
"includes": ["season", "episode", "video_codec", "audio_codec"],
"enforce_list": True,
},
)
def _get_subtitle_from_item(item, language, episode_number=None):
release_infos = []
try:
release_infos.append(item.find("td", {"class": "a6"}).text.strip())
except (AttributeError, KeyError):
pass
try:
release_infos.append(
item.find("td", {"class": "a1"}).find_all("span")[-1].text.strip()
)
except (AttributeError, KeyError):
pass
release_info = "".join(r_info for r_info in release_infos if r_info)
try:
path = item.find("td", {"class": "a1"}).find("a")["href"]
except (AttributeError, KeyError):
logger.debug("Couldn't get path: %s", item)
return None
return SubsceneSubtitle(language, _BASE_URL + path, release_info, episode_number)

View File

@ -110,7 +110,7 @@ class SubsSabBzSubtitle(Subtitle):
guess_filename = guessit(self.filename, video.hints)
matches |= guess_matches(video, guess_filename)
if isinstance(video, Movie) and (self.num_cds > 1 or 'cd' in guess_filename):
if isinstance(video, Movie) and ((isinstance(self.num_cds, int) and self.num_cds > 1) or 'cd' in guess_filename):
# reduce score of subtitles for multi-disc movie releases
return set()

View File

@ -108,7 +108,7 @@ class SubsUnacsSubtitle(Subtitle):
guess_filename = guessit(self.filename, video.hints)
matches |= guess_matches(video, guess_filename)
if isinstance(video, Movie) and (self.num_cds > 1 or 'cd' in guess_filename):
if isinstance(video, Movie) and ((isinstance(self.num_cds, int) and self.num_cds > 1) or 'cd' in guess_filename):
# reduce score of subtitles for multi-disc movie releases
return set()

View File

@ -169,7 +169,7 @@ def whisper_get_language_reverse(alpha3):
lan = whisper_get_language(wl, whisper_languages[wl])
if lan.alpha3 == alpha3:
return wl
raise ValueError
return None
def language_from_alpha3(lang):
name = Language(lang).name
@ -317,7 +317,7 @@ class WhisperAIProvider(Provider):
if out == None:
logger.info(f"Whisper cannot process {subtitle.video.original_path} because of missing/bad audio track")
subtitle.content = None
return
return
logger.debug(f'Audio stream length (in WAV format) is {len(out):,} bytes')
@ -326,11 +326,23 @@ class WhisperAIProvider(Provider):
else:
output_language = "eng"
input_language = whisper_get_language_reverse(subtitle.audio_language)
if input_language is None:
if output_language == "eng":
# guess that audio track is mislabelled English and let whisper try to transcribe it
input_language = "en"
subtitle.task = "transcribe"
logger.info(f"Whisper treating unsupported audio track language: '{subtitle.audio_language}' as English")
else:
logger.info(f"Whisper cannot process {subtitle.video.original_path} because of unsupported audio track language: '{subtitle.audio_language}'")
subtitle.content = None
return
logger.info(f'Starting WhisperAI {subtitle.task} to {language_from_alpha3(output_language)} for {subtitle.video.original_path}')
startTime = time.time()
r = self.session.post(f"{self.endpoint}/asr",
params={'task': subtitle.task, 'language': whisper_get_language_reverse(subtitle.audio_language), 'output': 'srt', 'encode': 'false'},
params={'task': subtitle.task, 'language': input_language, 'output': 'srt', 'encode': 'false'},
files={'audio_file': out},
timeout=(self.response, self.timeout))
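The new branch encodes a simple policy: when the audio language has no Whisper mapping, assume a mislabelled English track only if English output was requested, otherwise skip the file instead of sending a request Whisper cannot honour. Condensed as a sketch, with an illustrative subset standing in for the real language table:

WHISPER_LANGUAGES = {"eng": "en", "fra": "fr", "jpn": "ja"}  # illustrative subset

def resolve_input_language(audio_language, output_language):
    input_language = WHISPER_LANGUAGES.get(audio_language)
    if input_language is None:
        if output_language == "eng":
            # Guess the track is mislabelled English and let Whisper transcribe it.
            return "en", "transcribe"
        return None, None  # unsupported: the caller clears subtitle.content and returns
    return input_language, None  # keep the task selected earlier

assert resolve_input_language("xxx", "eng") == ("en", "transcribe")
assert resolve_input_language("fra", "eng") == ("fr", None)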

View File

@ -33,6 +33,8 @@ class Video(Video_):
edition=None,
other=None,
info_url=None,
series_anidb_id=None,
series_anidb_episode_id=None,
**kwargs
):
super(Video, self).__init__(
@ -57,3 +59,5 @@ class Video(Video_):
self.original_path = name
self.other = other
self.info_url = info_url
self.series_anidb_series_id = series_anidb_id
self.series_anidb_episode_id = series_anidb_episode_id

View File

@ -1,299 +0,0 @@
# -*- coding: utf-8 -*-
# vim: fenc=utf-8 ts=4 et sw=4 sts=4
# This file is part of Subscene-API.
#
# Subscene-API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Subscene-API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Python wrapper for Subscene subtitle database.
since Subscene doesn't provide an official API, I wrote
this script that does the job by parsing the website"s pages.
"""
# imports
import re
import enum
import sys
import requests
import time
import logging
is_PY2 = sys.version_info[0] < 3
if is_PY2:
from contextlib2 import suppress
from urllib2 import Request, urlopen
else:
from contextlib import suppress
from urllib.request import Request, urlopen
from dogpile.cache.api import NO_VALUE
from subliminal.cache import region
from bs4 import BeautifulSoup, NavigableString
logger = logging.getLogger(__name__)
# constants
HEADERS = {
}
SITE_DOMAIN = "https://subscene.com"
DEFAULT_USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWeb"\
"Kit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
ENDPOINT_RE = re.compile(r'(?uis)<form.+?action="/subtitles/(.+)">.*?<input type="text"')
class NewEndpoint(Exception):
pass
# utils
def soup_for(url, data=None, session=None, user_agent=DEFAULT_USER_AGENT):
url = re.sub("\s", "+", url)
if not session:
r = Request(url, data=None, headers=dict(HEADERS, **{"User-Agent": user_agent}))
html = urlopen(r).read().decode("utf-8")
else:
ret = session.post(url, data=data)
ret.raise_for_status()
html = ret.text
return BeautifulSoup(html, "html.parser")
class AttrDict(object):
def __init__(self, *attrs):
self._attrs = attrs
for attr in attrs:
setattr(self, attr, "")
def to_dict(self):
return {k: getattr(self, k) for k in self._attrs}
# models
@enum.unique
class SearchTypes(enum.Enum):
Exact = 1
TvSerie = 2
Popular = 3
Close = 4
SectionsParts = {
SearchTypes.Exact: "Exact",
SearchTypes.TvSerie: "TV-Series",
SearchTypes.Popular: "Popular",
SearchTypes.Close: "Close"
}
class Subtitle(object):
def __init__(self, title, url, language, owner_username, owner_url,
description, hearing_impaired):
self.title = title
self.url = url
self.language = language
self.owner_username = owner_username
self.owner_url = owner_url
self.description = description
self.hearing_impaired = hearing_impaired
self._zipped_url = None
def __str__(self):
return self.title
@classmethod
def from_rows(cls, rows):
subtitles = []
for row in rows:
if row.td.a is not None and row.td.get("class", ["lazy"])[0] != "empty":
subtitles.append(cls.from_row(row))
return subtitles
@classmethod
def from_row(cls, row):
attrs = AttrDict("title", "url", "language", "owner_username",
"owner_url", "description", "hearing_impaired")
with suppress(Exception):
attrs.title = row.find("td", "a1").a.find_all("span")[1].text \
.strip()
with suppress(Exception):
attrs.url = SITE_DOMAIN + row.find("td", "a1").a.get("href")
with suppress(Exception):
attrs.language = row.find("td", "a1").a.find_all("span")[0].text \
.strip()
with suppress(Exception):
attrs.owner_username = row.find("td", "a5").a.text.strip()
with suppress(Exception):
attrs.owner_page = SITE_DOMAIN + row.find("td", "a5").a \
.get("href").strip()
with suppress(Exception):
attrs.description = row.find("td", "a6").div.text.strip()
with suppress(Exception):
attrs.hearing_impaired = bool(row.find("td", "a41"))
return cls(**attrs.to_dict())
@classmethod
def get_zipped_url(cls, url, session=None):
soup = soup_for(url, session=session)
return SITE_DOMAIN + soup.find("div", "download").a.get("href")
@property
def zipped_url(self):
if self._zipped_url:
return self._zipped_url
self._zipped_url = Subtitle.get_zipped_url(self.url)
return self._zipped_url
class Film(object):
def __init__(self, title, year=None, imdb=None, cover=None,
subtitles=None):
self.title = title
self.year = year
self.imdb = imdb
self.cover = cover
self.subtitles = subtitles
def __str__(self):
return self.title
@classmethod
def from_url(cls, url, session=None):
soup = soup_for(url, session=session)
content = soup.find("div", "subtitles")
header = content.find("div", "box clearfix")
cover = None
try:
cover = header.find("div", "poster").img.get("src")
except AttributeError:
pass
title = header.find("div", "header").h2.text[:-12].strip()
imdb = header.find("div", "header").h2.find("a", "imdb").get("href")
year = header.find("div", "header").ul.li.text
year = int(re.findall(r"[0-9]+", year)[0])
rows = content.find("table").tbody.find_all("tr")
subtitles = Subtitle.from_rows(rows)
return cls(title, year, imdb, cover, subtitles)
# functions
def section_exists(soup, section):
tag_part = SectionsParts[section]
try:
headers = soup.find("div", "search-result").find_all("h2")
except AttributeError:
return False
for header in headers:
if tag_part in header.text:
return True
return False
def get_first_film(soup, section, year=None, session=None):
tag_part = SectionsParts[section]
tag = None
headers = soup.find("div", "search-result").find_all("h2")
for header in headers:
if tag_part in header.text:
tag = header
break
if not tag:
return
url = None
url = SITE_DOMAIN + tag.findNext("ul").find("li").div.a.get("href")
for t in tag.findNext("ul").findAll("li"):
if isinstance(t, NavigableString) or not t.div:
continue
if str(year) in t.div.a.string:
url = SITE_DOMAIN + t.div.a.get("href")
break
return Film.from_url(url, session=session)
def find_endpoint(session, content=None):
endpoint = region.get("subscene_endpoint2")
if endpoint is NO_VALUE:
if not content:
content = session.get(SITE_DOMAIN).text
m = ENDPOINT_RE.search(content)
if m:
endpoint = m.group(1).strip()
logger.debug("Switching main endpoint to %s", endpoint)
region.set("subscene_endpoint2", endpoint)
return endpoint
def search(term, release=True, session=None, year=None, limit_to=SearchTypes.Exact, throttle=0):
# note to subscene: if you actually start to randomize the endpoint, we'll have to query your server even more
if release:
endpoint = "release"
else:
endpoint = find_endpoint(session)
time.sleep(throttle)
if not endpoint:
logger.error("Couldn't find endpoint, exiting")
return
soup = soup_for("%s/subtitles/%s" % (SITE_DOMAIN, endpoint), data={"query": term},
session=session)
if soup:
if "Subtitle search by" in str(soup):
rows = soup.find("table").tbody.find_all("tr")
subtitles = Subtitle.from_rows(rows)
return Film(term, subtitles=subtitles)
for junk, search_type in SearchTypes.__members__.items():
if section_exists(soup, search_type):
return get_first_film(soup, search_type, year=year, session=session)
if limit_to == search_type:
return

File diff suppressed because it is too large

View File

@ -19,31 +19,31 @@
"@mantine/hooks": "^6.0.21",
"@mantine/modals": "^6.0.21",
"@mantine/notifications": "^6.0.21",
"axios": "^1.6.7",
"axios": "^1.6.8",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-query": "^3.39.3",
"react-router-dom": "^6.22.3",
"socket.io-client": "^4.7.4"
"socket.io-client": "^4.7.5"
},
"devDependencies": {
"@fontsource/roboto": "^5.0.12",
"@fortawesome/fontawesome-svg-core": "^6.5.1",
"@fortawesome/free-brands-svg-icons": "^6.5.1",
"@fortawesome/free-regular-svg-icons": "^6.5.1",
"@fortawesome/free-solid-svg-icons": "^6.5.1",
"@fortawesome/fontawesome-svg-core": "^6.5.2",
"@fortawesome/free-brands-svg-icons": "^6.5.2",
"@fortawesome/free-regular-svg-icons": "^6.5.2",
"@fortawesome/free-solid-svg-icons": "^6.5.2",
"@fortawesome/react-fontawesome": "^0.2.0",
"@testing-library/jest-dom": "^6.4.2",
"@testing-library/react": "^14.2.1",
"@testing-library/react": "^15.0.5",
"@testing-library/user-event": "^14.5.2",
"@types/jest": "^29.5.12",
"@types/lodash": "^4.17.0",
"@types/node": "^20.11.26",
"@types/react": "^18.2.65",
"@types/react-dom": "^18.2.21",
"@types/react-table": "^7.7.19",
"@types/node": "^20.12.6",
"@types/react": "^18.2.75",
"@types/react-dom": "^18.2.24",
"@types/react-table": "^7.7.20",
"@vitejs/plugin-react": "^4.2.1",
"@vitest/coverage-v8": "^1.3.1",
"@vitest/coverage-v8": "^1.4.0",
"@vitest/ui": "^1.2.2",
"clsx": "^2.1.0",
"eslint": "^8.57.0",
@ -53,15 +53,14 @@
"husky": "^9.0.11",
"jsdom": "^24.0.0",
"lodash": "^4.17.21",
"moment": "^2.30.1",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^3.2.4",
"pretty-quick": "^4.0.0",
"react-table": "^7.8.0",
"recharts": "^2.12.2",
"sass": "^1.71.1",
"typescript": "^5.4.2",
"vite": "^5.1.6",
"recharts": "^2.12.6",
"sass": "^1.74.1",
"typescript": "^5.4.4",
"vite": "^5.2.8",
"vite-plugin-checker": "^0.6.4",
"vitest": "^1.2.2",
"yaml": "^2.4.1"
@ -77,7 +76,7 @@
"test:ui": "vitest --ui",
"coverage": "vitest run --coverage",
"format": "prettier -w .",
"prepare": "cd .. && husky install frontend/.husky"
"prepare": "cd .. && husky frontend/.husky"
},
"browserslist": {
"production": [

View File

@ -140,7 +140,7 @@ const SubtitleToolsMenu: FunctionComponent<Props> = ({
const disabledTools = selections.length === 0;
return (
<Menu withArrow position="left-end" {...menu}>
<Menu withArrow withinPortal position="left-end" {...menu}>
<Menu.Target>{children}</Menu.Target>
<Menu.Dropdown>
<Menu.Label>Tools</Menu.Label>

View File

@ -19,6 +19,7 @@ import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import {
Button,
Checkbox,
createStyles,
Divider,
MantineColor,
Stack,
@ -78,12 +79,21 @@ interface Props {
onComplete?: () => void;
}
const useStyles = createStyles((theme) => {
return {
wrapper: {
overflowWrap: "anywhere",
},
};
});
const MovieUploadForm: FunctionComponent<Props> = ({
files,
movie,
onComplete,
}) => {
const modals = useModals();
const { classes } = useStyles();
const profile = useLanguageProfileBy(movie.profileId);
@ -279,7 +289,7 @@ const MovieUploadForm: FunctionComponent<Props> = ({
modals.closeSelf();
})}
>
<Stack>
<Stack className={classes.wrapper}>
<SimpleTable columns={columns} data={form.values.files}></SimpleTable>
<Divider></Divider>
<Button type="submit">Upload</Button>

View File

@ -86,7 +86,12 @@ const ProfileEditForm: FunctionComponent<Props> = ({
const itemCutoffOptions = useSelectorOptions(
form.values.items,
(v) => v.language,
(v) => {
const suffix =
v.hi === "True" ? ":hi" : v.forced === "True" ? ":forced" : "";
return v.language + suffix;
},
(v) => String(v.id),
);

View File

@ -23,6 +23,7 @@ import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import {
Button,
Checkbox,
createStyles,
Divider,
MantineColor,
Stack,
@ -85,12 +86,21 @@ interface Props {
onComplete?: VoidFunction;
}
const useStyles = createStyles((theme) => {
return {
wrapper: {
overflowWrap: "anywhere",
},
};
});
const SeriesUploadForm: FunctionComponent<Props> = ({
series,
files,
onComplete,
}) => {
const modals = useModals();
const { classes } = useStyles();
const episodes = useEpisodesBySeriesId(series.sonarrSeriesId);
const episodeOptions = useSelectorOptions(
episodes.data ?? [],
@ -358,7 +368,7 @@ const SeriesUploadForm: FunctionComponent<Props> = ({
modals.closeSelf();
})}
>
<Stack>
<Stack className={classes.wrapper}>
<SimpleTable columns={columns} data={form.values.files}></SimpleTable>
<Divider></Divider>
<Button type="submit">Upload</Button>

View File

@ -39,14 +39,24 @@ import {
} from "../utilities/FormValues";
import { SettingsProvider, useSettings } from "../utilities/SettingsProvider";
import { useSettingValue } from "../utilities/hooks";
import { ProviderInfo, ProviderList } from "./list";
import { ProviderInfo } from "./list";
const ProviderKey = "settings-general-enabled_providers";
type SettingsKey =
| "settings-general-enabled_providers"
| "settings-general-enabled_integrations";
export const ProviderView: FunctionComponent = () => {
interface ProviderViewProps {
availableOptions: Readonly<ProviderInfo[]>;
settingsKey: SettingsKey;
}
export const ProviderView: FunctionComponent<ProviderViewProps> = ({
availableOptions,
settingsKey,
}) => {
const settings = useSettings();
const staged = useStagedValues();
const providers = useSettingValue<string[]>(ProviderKey);
const providers = useSettingValue<string[]>(settingsKey);
const { update } = useFormActions();
@ -61,17 +71,27 @@ export const ProviderView: FunctionComponent = () => {
staged,
settings,
onChange: update,
availableOptions: availableOptions,
settingsKey: settingsKey,
});
}
},
[modals, providers, settings, staged, update],
[
modals,
providers,
settings,
staged,
update,
availableOptions,
settingsKey,
],
);
const cards = useMemo(() => {
if (providers) {
return providers
.flatMap((v) => {
const item = ProviderList.find((inn) => inn.key === v);
const item = availableOptions.find((inn) => inn.key === v);
if (item) {
return item;
} else {
@ -89,7 +109,7 @@ export const ProviderView: FunctionComponent = () => {
} else {
return [];
}
}, [providers, select]);
}, [providers, select, availableOptions]);
return (
<SimpleGrid cols={3}>
@ -106,6 +126,8 @@ interface ProviderToolProps {
staged: LooseObject;
settings: Settings;
onChange: (v: LooseObject) => void;
availableOptions: Readonly<ProviderInfo[]>;
settingsKey: Readonly<SettingsKey>;
}
const SelectItem = forwardRef<
@ -126,6 +148,8 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
staged,
settings,
onChange,
availableOptions,
settingsKey,
}) => {
const modals = useModals();
@ -147,11 +171,11 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
if (idx !== -1) {
const newProviders = [...enabledProviders];
newProviders.splice(idx, 1);
onChangeRef.current({ [ProviderKey]: newProviders });
onChangeRef.current({ [settingsKey]: newProviders });
modals.closeAll();
}
}
}, [payload, enabledProviders, modals]);
}, [payload, enabledProviders, modals, settingsKey]);
const submit = useCallback(
(values: FormValues) => {
@ -161,8 +185,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
// Add this provider if not exist
if (enabledProviders.find((v) => v === info.key) === undefined) {
const newProviders = [...enabledProviders, info.key];
changes[ProviderKey] = newProviders;
changes[settingsKey] = [...enabledProviders, info.key];
}
// Apply submit hooks
@ -172,7 +195,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
modals.closeAll();
}
},
[info, enabledProviders, modals],
[info, enabledProviders, modals, settingsKey],
);
const canSave = info !== null;
@ -188,18 +211,18 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
}
}, []);
const availableOptions = useMemo(
const options = useMemo(
() =>
ProviderList.filter(
availableOptions.filter(
(v) =>
enabledProviders?.find((p) => p === v.key && p !== info?.key) ===
undefined,
),
[info?.key, enabledProviders],
[info?.key, enabledProviders, availableOptions],
);
const options = useSelectorOptions(
availableOptions,
const selectorOptions = useSelectorOptions(
options,
(v) => v.name ?? capitalize(v.key),
);
@ -289,7 +312,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
placeholder="Click to Select a Provider"
itemComponent={SelectItem}
disabled={payload !== null}
{...options}
{...selectorOptions}
value={info}
onChange={onSelect}
></Selector>

View File

@ -11,12 +11,16 @@ import {
Text,
} from "../components";
import { ProviderView } from "./components";
import { IntegrationList, ProviderList } from "./list";
const SettingsProvidersView: FunctionComponent = () => {
return (
<Layout name="Providers">
<Section header="Providers">
<ProviderView></ProviderView>
<ProviderView
availableOptions={ProviderList}
settingsKey="settings-general-enabled_providers"
></ProviderView>
</Section>
<Section header="Anti-Captcha Options">
<Selector
@ -58,6 +62,12 @@ const SettingsProvidersView: FunctionComponent = () => {
<Message>Link to subscribe</Message>
</CollapseBox>
</Section>
<Section header="Integrations">
<ProviderView
availableOptions={IntegrationList}
settingsKey="settings-general-enabled_integrations"
></ProviderView>
</Section>
</Layout>
);
};

View File

@ -64,6 +64,21 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
},
],
},
{
key: "animetosho",
name: "Anime Tosho",
description:
"Anime Tosho is a free, completely automated service which mirrors most torrents posted on TokyoTosho's anime category, Nyaa.si's English translated anime category and AniDex's anime category.",
inputs: [
{
type: "text",
key: "search_threshold",
defaultValue: 6,
name: "Search Threshold. Increase if you often cannot find subtitles for your Anime. Note that increasing the value will decrease the performance of the search for each Episode.",
},
],
message: "Requires AniDB Integration.",
},
{
key: "argenteam_dump",
name: "Argenteam Dump",
@ -359,7 +374,6 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
{
key: "subf2m",
name: "subf2m.co",
description: "Subscene Alternative Provider",
inputs: [
{
type: "switch",
@ -391,20 +405,6 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
description:
"Greek Subtitles Provider.\nRequires anti-captcha provider to solve captchas for each download.",
},
{
key: "subscene",
inputs: [
{
type: "text",
key: "username",
},
{
type: "password",
key: "password",
},
],
description: "Broken, may not work for some. Use subf2m instead.",
},
{ key: "subscenter", description: "Hebrew Subtitles Provider" },
{
key: "subsunacs",
@ -538,3 +538,24 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
description: "Chinese Subtitles Provider. Anti-captcha required.",
},
];
export const IntegrationList: Readonly<ProviderInfo[]> = [
{
key: "anidb",
name: "AniDB",
description:
"AniDB is non-profit database of anime information that is freely open to the public.",
inputs: [
{
type: "text",
key: "api_client",
name: "API Client",
},
{
type: "text",
key: "api_client_ver",
name: "API Client Version",
},
],
},
];
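The api_client and api_client_ver fields map onto AniDB's HTTP API, which identifies callers by a registered client name and version. A sketch of how such credentials are typically sent; the endpoint and parameter names follow the AniDB HTTP API definition linked from the README, and the aid value is illustrative:

import requests

def fetch_anidb_anime(api_client: str, api_client_ver: str, aid: int) -> str:
    # client/clientver identify the registered application; protover is fixed at 1.
    r = requests.get(
        "http://api.anidb.net:9001/httpapi",
        params={
            "request": "anime",
            "client": api_client,
            "clientver": api_client_ver,
            "protover": 1,
            "aid": aid,  # illustrative AniDB anime id
        },
        timeout=10,
    )
    r.raise_for_status()
    return r.text  # XML payload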

View File

@ -409,8 +409,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
settingKey="settings-subsync-use_subsync"
></Check>
<Message>
Enable automatic subtitles synchronization after downloading a
subtitle.
Enable automatic synchronization after downloading subtitles.
</Message>
<CollapseBox indent settingKey="settings-subsync-use_subsync">
<MultiSelector

View File

@ -20,7 +20,6 @@ import {
Text,
} from "@mantine/core";
import { useDocumentTitle } from "@mantine/hooks";
import moment from "moment";
import {
FunctionComponent,
PropsWithChildren,
@ -28,6 +27,13 @@ import {
useCallback,
useState,
} from "react";
import {
divisorDay,
divisorHour,
divisorMinute,
divisorSecond,
formatTime,
} from "@/utilities/time";
import Table from "./table";
interface InfoProps {
@ -98,15 +104,19 @@ const SystemStatusView: FunctionComponent = () => {
const update = useCallback(() => {
const startTime = status?.start_time;
if (startTime) {
const duration = moment.duration(
moment().utc().unix() - startTime,
"seconds",
),
days = duration.days(),
hours = duration.hours().toString().padStart(2, "0"),
minutes = duration.minutes().toString().padStart(2, "0"),
seconds = duration.seconds().toString().padStart(2, "0");
setUptime(days + "d " + hours + ":" + minutes + ":" + seconds);
// Current time in seconds
const currentTime = Math.floor(Date.now() / 1000);
const uptimeInSeconds = currentTime - startTime;
const uptime: string = formatTime(uptimeInSeconds, [
{ unit: "d", divisor: divisorDay },
{ unit: "h", divisor: divisorHour },
{ unit: "m", divisor: divisorMinute },
{ unit: "s", divisor: divisorSecond },
]);
setUptime(uptime);
}
}, [status?.start_time]);

View File

@ -20,7 +20,6 @@ interface Settings {
xsubs: Settings.XSubs;
assrt: Settings.Assrt;
napisy24: Settings.Napisy24;
subscene: Settings.Subscene;
betaseries: Settings.Betaseries;
titlovi: Settings.Titlovi;
ktuvit: Settings.Ktuvit;
@ -211,8 +210,6 @@ declare namespace Settings {
interface Napisy24 extends BaseProvider {}
interface Subscene extends BaseProvider {}
interface Titlovi extends BaseProvider {}
interface Ktuvit {

View File

@ -0,0 +1,60 @@
import {
divisorDay,
divisorHour,
divisorMinute,
divisorSecond,
formatTime,
} from "./time";
describe("formatTime", () => {
it("should format day hour minute and second", () => {
const uptimeInSeconds = 3661;
const formattedTime = formatTime(uptimeInSeconds, [
{ unit: "d", divisor: divisorDay },
{ unit: "h", divisor: divisorHour },
{ unit: "m", divisor: divisorMinute },
{ unit: "s", divisor: divisorSecond },
]);
expect(formattedTime).toBe("0d 01:01:01");
});
it("should format multiple digits of days", () => {
const uptimeInSeconds = 50203661;
const formattedTime = formatTime(uptimeInSeconds, [
{ unit: "d", divisor: divisorDay },
{ unit: "h", divisor: divisorHour },
{ unit: "m", divisor: divisorMinute },
{ unit: "s", divisor: divisorSecond },
]);
expect(formattedTime).toBe("581d 25:27:41");
});
it("should format time day hour minute", () => {
const uptimeInSeconds = 3661;
const formattedTime = formatTime(uptimeInSeconds, [
{ unit: "d", divisor: divisorDay },
{ unit: "h", divisor: divisorHour },
{ unit: "m", divisor: divisorMinute },
]);
expect(formattedTime).toBe("0d 01:01");
});
it("should format zero uptime", () => {
const uptimeInSeconds = 0;
const formattedTime = formatTime(uptimeInSeconds, [
{ unit: "d", divisor: divisorDay },
{ unit: "h", divisor: divisorHour },
{ unit: "m", divisor: divisorMinute },
{ unit: "s", divisor: divisorSecond },
]);
expect(formattedTime).toBe("0d 00:00:00");
});
});

View File

@ -0,0 +1,29 @@
interface TimeFormat {
unit: string;
divisor: number;
}
export const divisorDay = 24 * 60 * 60;
export const divisorHour = 60 * 60;
export const divisorMinute = 60;
export const divisorSecond = 1;
export const formatTime = (
timeInSeconds: number,
formats: TimeFormat[],
): string =>
formats.reduce(
(formattedTime: string, { unit, divisor }: TimeFormat, index: number) => {
const timeValue: number =
index === 0
? Math.floor(timeInSeconds / divisor)
: Math.floor(timeInSeconds / divisor) %
(formats[index - 1].divisor / divisor);
return (
formattedTime +
(index === 0
? `${timeValue}${unit} `
: `${timeValue.toString().padStart(2, "0")}${index < formats.length - 1 ? ":" : ""}`)
);
},
"",
);
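The reduce above is dense; the same arithmetic in divmod form (a Python sketch, not part of the change) makes the unit math easy to verify: each unit after the first is the remainder left by the previous one.

def format_uptime(seconds: int) -> str:
    # Equivalent of formatTime with the d/h/m/s format list.
    days, rem = divmod(seconds, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, secs = divmod(rem, 60)
    return f"{days}d {hours:02d}:{minutes:02d}:{secs:02d}"

assert format_uptime(3661) == "0d 01:01:01"
assert format_uptime(50203661) == "581d 01:27:41"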

View File

@ -1,12 +1,12 @@
Metadata-Version: 2.1
Name: apprise
Version: 1.7.4
Version: 1.7.6
Summary: Push Notifications that work with just about every platform!
Home-page: https://github.com/caronc/apprise
Author: Chris Caron
Author-email: lead2gold@gmail.com
License: BSD
Keywords: Alerts Apprise API Automated Packet Reporting System AWS Boxcar BulkSMS BulkVS Burst SMS Chat CLI ClickSend D7Networks Dapnet DBus DingTalk Discord Email Emby Enigma2 Faast FCM Flock Form Gnome Google Chat Gotify Growl Guilded Home Assistant httpSMS IFTTT Join JSON Kavenegar KODI Kumulos LaMetric Line LunaSea MacOSX Mailgun Mastodon Matrix Mattermost MessageBird Microsoft Misskey MQTT MSG91 MSTeams Nextcloud NextcloudTalk Notica Notifiarr Notifico Ntfy Office365 OneSignal Opsgenie PagerDuty PagerTree ParsePlatform PopcornNotify Prowl PushBullet Pushed Pushjet PushMe Push Notifications Pushover PushSafer Pushy PushDeer Reddit Revolt Rocket.Chat RSyslog Ryver SendGrid ServerChan SES Signal SimplePush Sinch Slack SMSEagle SMS Manager SMTP2Go SNS SparkPost Streamlabs Stride Synology Chat Syslog Techulus Telegram Threema Gateway Twilio Twist Twitter Voipms Vonage Webex WeCom Bot WhatsApp Windows XBMC XML Zulip
Keywords: Alerts Apprise API Automated Packet Reporting System AWS Boxcar BulkSMS BulkVS Burst SMS Chantify Chat CLI ClickSend D7Networks Dapnet DBus DingTalk Discord Email Emby Enigma2 FCM Feishu Flock Form Free Mobile Gnome Google Chat Gotify Growl Guilded Home Assistant httpSMS IFTTT Join JSON Kavenegar KODI Kumulos LaMetric Line LunaSea MacOSX Mailgun Mastodon Matrix Mattermost MessageBird Microsoft Misskey MQTT MSG91 MSTeams Nextcloud NextcloudTalk Notica Notifiarr Notifico Ntfy Office365 OneSignal Opsgenie PagerDuty PagerTree ParsePlatform PopcornNotify Prowl PushBullet Pushed Pushjet PushMe Push Notifications Pushover PushSafer Pushy PushDeer Reddit Revolt Rocket.Chat RSyslog Ryver SendGrid ServerChan SES Signal SimplePush Sinch Slack SMSEagle SMS Manager SMTP2Go SNS SparkPost Streamlabs Stride Synology Chat Syslog Techulus Telegram Threema Gateway Twilio Twist Twitter Voipms Vonage Webex WeCom Bot WhatsApp Windows XBMC XML Zulip
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: System Administrators
@ -20,6 +20,7 @@ Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: License :: OSI Approved :: BSD License
@ -98,11 +99,12 @@ The table below identifies the services this tool supports and some example serv
| [AWS SES](https://github.com/caronc/apprise/wiki/Notify_ses) | ses:// | (TCP) 443 | ses://user@domain/AccessKeyID/AccessSecretKey/RegionName<br/>ses://user@domain/AccessKeyID/AccessSecretKey/RegionName/email1/email2/emailN
| [Bark](https://github.com/caronc/apprise/wiki/Notify_bark) | bark:// | (TCP) 80 or 443 | bark://hostname<br />bark://hostname/device_key<br />bark://hostname/device_key1/device_key2/device_keyN<br/>barks://hostname<br />barks://hostname/device_key<br />barks://hostname/device_key1/device_key2/device_keyN
| [Boxcar](https://github.com/caronc/apprise/wiki/Notify_boxcar) | boxcar:// | (TCP) 443 | boxcar://hostname<br />boxcar://hostname/@tag<br/>boxcar://hostname/device_token<br />boxcar://hostname/device_token1/device_token2/device_tokenN<br />boxcar://hostname/@tag/@tag2/device_token
| [Chantify](https://github.com/caronc/apprise/wiki/Notify_chantify) | chantify:// | (TCP) 443 | chantify://token
| [Discord](https://github.com/caronc/apprise/wiki/Notify_discord) | discord:// | (TCP) 443 | discord://webhook_id/webhook_token<br />discord://avatar@webhook_id/webhook_token
| [Emby](https://github.com/caronc/apprise/wiki/Notify_emby) | emby:// or embys:// | (TCP) 8096 | emby://user@hostname/<br />emby://user:password@hostname
| [Enigma2](https://github.com/caronc/apprise/wiki/Notify_enigma2) | enigma2:// or enigma2s:// | (TCP) 80 or 443 | enigma2://hostname
| [Faast](https://github.com/caronc/apprise/wiki/Notify_faast) | faast:// | (TCP) 443 | faast://authorizationtoken
| [FCM](https://github.com/caronc/apprise/wiki/Notify_fcm) | fcm:// | (TCP) 443 | fcm://project@apikey/DEVICE_ID<br />fcm://project@apikey/#TOPIC<br/>fcm://project@apikey/DEVICE_ID1/#topic1/#topic2/DEVICE_ID2/
| [Feishu](https://github.com/caronc/apprise/wiki/Notify_feishu) | feishu:// | (TCP) 443 | feishu://token
| [Flock](https://github.com/caronc/apprise/wiki/Notify_flock) | flock:// | (TCP) 443 | flock://token<br/>flock://botname@token<br/>flock://app_token/u:userid<br/>flock://app_token/g:channel_id<br/>flock://app_token/u:userid/g:channel_id
| [Google Chat](https://github.com/caronc/apprise/wiki/Notify_googlechat) | gchat:// | (TCP) 443 | gchat://workspace/key/token
| [Gotify](https://github.com/caronc/apprise/wiki/Notify_gotify) | gotify:// or gotifys:// | (TCP) 80 or 443 | gotify://hostname/token<br />gotifys://hostname/token?priority=high
@ -184,6 +186,7 @@ The table below identifies the services this tool supports and some example serv
| [DAPNET](https://github.com/caronc/apprise/wiki/Notify_dapnet) | dapnet:// | (TCP) 80 | dapnet://user:pass@callsign<br/>dapnet://user:pass@callsign1/callsign2/callsignN
| [D7 Networks](https://github.com/caronc/apprise/wiki/Notify_d7networks) | d7sms:// | (TCP) 443 | d7sms://token@PhoneNo<br/>d7sms://token@ToPhoneNo1/ToPhoneNo2/ToPhoneNoN
| [DingTalk](https://github.com/caronc/apprise/wiki/Notify_dingtalk) | dingtalk:// | (TCP) 443 | dingtalk://token/<br />dingtalk://token/ToPhoneNo<br />dingtalk://token/ToPhoneNo1/ToPhoneNo2/ToPhoneNo1/
| [Free-Mobile](https://github.com/caronc/apprise/wiki/Notify_freemobile) | freemobile:// | (TCP) 443 | freemobile://user@password/
| [httpSMS](https://github.com/caronc/apprise/wiki/Notify_httpsms) | httpsms:// | (TCP) 443 | httpsms://ApiKey@FromPhoneNo<br/>httpsms://ApiKey@FromPhoneNo/ToPhoneNo<br/>httpsms://ApiKey@FromPhoneNo/ToPhoneNo1/ToPhoneNo2/ToPhoneNoN/
| [Kavenegar](https://github.com/caronc/apprise/wiki/Notify_kavenegar) | kavenegar:// | (TCP) 443 | kavenegar://ApiKey/ToPhoneNo<br/>kavenegar://FromPhoneNo@ApiKey/ToPhoneNo<br/>kavenegar://ApiKey/ToPhoneNo1/ToPhoneNo2/ToPhoneNoN
| [MessageBird](https://github.com/caronc/apprise/wiki/Notify_messagebird) | msgbird:// | (TCP) 443 | msgbird://ApiKey/FromPhoneNo<br/>msgbird://ApiKey/FromPhoneNo/ToPhoneNo<br/>msgbird://ApiKey/FromPhoneNo/ToPhoneNo1/ToPhoneNo2/ToPhoneNoN/

View File

@ -1,12 +1,12 @@
../../bin/apprise,sha256=ZJ-e4qqxNLtdW_DAvpuPPX5iROIiQd8I6nvg7vtAv-g,233
apprise-1.7.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
apprise-1.7.4.dist-info/LICENSE,sha256=gt7qKBxRhVcdmXCYVtrWP6DtYjD0DzONet600dkU994,1343
apprise-1.7.4.dist-info/METADATA,sha256=Lc66iPsSCFv0zmoQX8NFuc_V5CqFYN5Yrx_gqeN8OF8,44502
apprise-1.7.4.dist-info/RECORD,,
apprise-1.7.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apprise-1.7.4.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
apprise-1.7.4.dist-info/entry_points.txt,sha256=71YypBuNdjAKiaLsiMG40HEfLHxkU4Mi7o_S0s0d8wI,45
apprise-1.7.4.dist-info/top_level.txt,sha256=JrCRn-_rXw5LMKXkIgMSE4E0t1Ks9TYrBH54Pflwjkk,8
apprise-1.7.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
apprise-1.7.6.dist-info/LICENSE,sha256=gt7qKBxRhVcdmXCYVtrWP6DtYjD0DzONet600dkU994,1343
apprise-1.7.6.dist-info/METADATA,sha256=z_gaX2IdNJqw4T9q7AYQri9jcIs-OTGCo3t2EgEY-mw,44823
apprise-1.7.6.dist-info/RECORD,,
apprise-1.7.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apprise-1.7.6.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
apprise-1.7.6.dist-info/entry_points.txt,sha256=71YypBuNdjAKiaLsiMG40HEfLHxkU4Mi7o_S0s0d8wI,45
apprise-1.7.6.dist-info/top_level.txt,sha256=JrCRn-_rXw5LMKXkIgMSE4E0t1Ks9TYrBH54Pflwjkk,8
apprise/Apprise.py,sha256=Stm2NhJprWRaMwQfTiIQG_nR1bLpHi_zcdwEcsCpa-A,32865
apprise/Apprise.pyi,sha256=_4TBKvT-QVj3s6PuTh3YX-BbQMeJTdBGdVpubLMY4_k,2203
apprise/AppriseAsset.py,sha256=jRW8Y1EcAvjVA9h_mINmsjO4DM3S0aDl6INIFVMcUCs,11647
@ -15,13 +15,13 @@ apprise/AppriseAttachment.py,sha256=vhrktSrp8GLr32aK4KqV6BX83IpI1lxZe-pGo1wiSFM,
apprise/AppriseAttachment.pyi,sha256=R9-0dVqWpeaFrVpcREwPhGy3qHWztG5jEjYIOsbE5dM,1145
apprise/AppriseConfig.py,sha256=wfuR6Mb3ZLHvjvqWdFp9lVmjjDRWs65unY9qa92RkCg,16909
apprise/AppriseConfig.pyi,sha256=_mUlCnncqAq8sL01WxQTgZjnb2ic9kZXvtqZmVl-fc8,1568
apprise/AppriseLocale.py,sha256=ISth7xC7M1WhsSNXdGZFouaA4bi07KP35m9RX-ExG48,8852
apprise/AppriseLocale.py,sha256=4uSr4Nj_rz6ISMMAfRVRk58wZVLKOofJgk2x0_E8NkQ,8994
apprise/AttachmentManager.py,sha256=EwlnjuKn3fv_pioWcmMCkyDTsO178t6vkEOD8AjAPsw,2053
apprise/ConfigurationManager.py,sha256=MUmGajxjgnr6FGN7xb3q0nD0VVgdTdvapBBR7CsI-rc,2058
apprise/NotificationManager.py,sha256=ZJgkiCgcJ7Bz_6bwQ47flrcxvLMbA4Vbw0HG_yTsGdE,2041
apprise/URLBase.py,sha256=ZWjHz69790EfVNDIBzWzRZzjw-gwC3db_t3_3an6cWI,28388
apprise/URLBase.py,sha256=xRP0-blocp9UudYh04Hb3fIEmTZWJaTv_tzjrqaB9fg,29423
apprise/URLBase.pyi,sha256=WLaRREH7FzZ5x3-qkDkupojWGFC4uFwJ1EDt02lVs8c,520
apprise/__init__.py,sha256=oBHq9Zbcwz9DTkurqnEhbu9Q79a0TdVAZrWFIhlk__8,3368
apprise/__init__.py,sha256=ArtvoarAMnBcSfXF7L_hzq5CUJ9TUnHopiC7xafCe3c,3368
apprise/assets/NotifyXML-1.0.xsd,sha256=292qQ_IUl5EWDhPyzm9UTT0C2rVvJkyGar8jiODkJs8,986
apprise/assets/NotifyXML-1.1.xsd,sha256=bjR3CGG4AEXoJjYkGCbDttKHSkPP1FlIWO02E7G59g4,1758
apprise/assets/themes/default/apprise-failure-128x128.ico,sha256=Mt0ptfHJaN3Wsv5UCNDn9_3lyEDHxVDv1JdaDEI_xCA,67646
@ -45,22 +45,22 @@ apprise/assets/themes/default/apprise-warning-128x128.png,sha256=pf5c4Ph7jWH7gf3
apprise/assets/themes/default/apprise-warning-256x256.png,sha256=SY-xlaiXaj420iEYKC2_fJxU-yj2SuaQg6xfPNi83bw,43708
apprise/assets/themes/default/apprise-warning-32x32.png,sha256=97R2ywNvcwczhBoWEIgajVtWjgT8fLs4FCCz4wu0dwc,2472
apprise/assets/themes/default/apprise-warning-72x72.png,sha256=L8moEInkO_OLxoOcuvN7rmrGZo64iJeH20o-24MQghE,7913
apprise/attachment/AttachBase.py,sha256=ik3hRFnr8Z9bXt69P9Ej1VST4gQbnE0C_9WQvEE-72A,13592
apprise/attachment/AttachBase.py,sha256=T3WreGrTsqqGplXJO36jm-N14X7ymSc9xt7XdTYuXVE,13656
apprise/attachment/AttachBase.pyi,sha256=w0XG_QKauiMLJ7eQ4S57IiLIURZHm_Snw7l6-ih9GP8,961
apprise/attachment/AttachFile.py,sha256=MbHY_av0GeM_AIBKV02Hq7SHiZ9eCr1yTfvDMUgi2I4,4765
apprise/attachment/AttachHTTP.py,sha256=dyDy3U47cI28ENhaw1r5nQlGh8FWHZlHI8n9__k8wcY,11995
apprise/attachment/AttachHTTP.py,sha256=_CMPp4QGLATfGO2-Nw57sxsQyed9z3ywgoB0vpK3KZk,13779
apprise/attachment/__init__.py,sha256=xabgXpvV05X-YRuqIt3uGYMXwYNXjHyF6Dwd8HfZCFE,1658
apprise/cli.py,sha256=h-pWSQPqQficH6J-OEp3MTGydWyt6vMYnDZvHCeAt4Y,20697
apprise/common.py,sha256=I6wfrndggCL7l7KAl7Cm4uwAX9n0l3SN4-BVvTE0L0M,5593
apprise/common.pyi,sha256=luF3QRiClDCk8Z23rI6FCGYsVmodOt_JYfYyzGogdNM,447
apprise/config/ConfigBase.py,sha256=A4p_N9vSxOK37x9kuYeZFzHhAeEt-TCe2oweNi2KGg4,53062
apprise/config/ConfigBase.py,sha256=d1efIuQFCJr66WgpudV2DWtxY3-tuZAyMAhHXBzJ8p0,53194
apprise/config/ConfigBase.pyi,sha256=cngfobwH6v2vxYbQrObDi5Z-t5wcquWF-wR0kBCr3Eg,54
apprise/config/ConfigFile.py,sha256=u_SDaN3OHMyaAq2X7k_T4_PRKkVsDwleqBz9YIN5lbA,6138
apprise/config/ConfigHTTP.py,sha256=Iy6Ji8_nX3xDjFgJGLrz4ftrMlMiyKiFGzYGJ7rMSMQ,9457
apprise/config/ConfigMemory.py,sha256=epEAgNy-eJVWoQaUOvjivMWxXTofy6wAQ-NbCqYmuyE,2829
apprise/config/__init__.py,sha256=lbsxrUpB1IYM2q7kjYhsXQGgPF-yZXJrKFE361tdIPY,1663
apprise/conversion.py,sha256=bvTu-3TU2CPEhdroLRtd_XpDzzXqe_wyUql089IpYxs,6197
apprise/decorators/CustomNotifyPlugin.py,sha256=F49vOM2EVy43Pn3j8z7tgTacweMUxGhw0UX-1n2Y3c8,7836
apprise/conversion.py,sha256=0VZ0eCZfksN-97Vl0TjVjwnCTgus3XTRioceSFnP-gc,6277
apprise/decorators/CustomNotifyPlugin.py,sha256=i4D-sgOsBWsxO5auWCN2bgXLLPuADaaLlJ1gUKLj2bU,7972
apprise/decorators/__init__.py,sha256=e_PDAm0kQNzwDPx-NJZLPfLMd2VAABvNZtxx_iDviRM,1487
apprise/decorators/notify.py,sha256=a2WupErNw1_SMAld7jPC273bskiChMpYy95BOog5A9w,5111
apprise/emojis.py,sha256=ONF0t8dY9f2XlEkLUG79-ybKVAj2GqbPj2-Be97vAoI,87738
@ -69,21 +69,22 @@ apprise/i18n/en/LC_MESSAGES/apprise.mo,sha256=oUTuHREmLEYN07oqYqRMJ_kU71-o5o37Ns
apprise/logger.py,sha256=131hqhed8cUj9x_mfXDEvwA2YbcYDFAYiWVK1HgxRVY,6921
apprise/manager.py,sha256=R9w8jxQRNy6Z_XDcobkt4JYbrC4jtj2OwRw9Zrib3CA,26857
apprise/plugins/NotifyAppriseAPI.py,sha256=ISBE0brD3eQdyw3XrGXd4Uc4kSYvIuI3SSUVCt-bkdo,16654
apprise/plugins/NotifyAprs.py,sha256=IS1uxIl391L3i2LOK6x8xmlOG1W58k4o793Oq2W5Wao,24220
apprise/plugins/NotifyAprs.py,sha256=xdL_aIVgb4ggxRFeCdkZAbgHYZ8DWLw9pRpLZQ0rHoE,25523
apprise/plugins/NotifyBark.py,sha256=bsDvKooRy4k1Gg7tvBjv3DIx7-WZiV_mbTrkTwMtd9Q,15698
apprise/plugins/NotifyBase.py,sha256=9MB2uv4Rv8BnoXjU52k5Mv4YQppkNPv4Y_iPwauKxKQ,29716
apprise/plugins/NotifyBase.py,sha256=G3xkF_a2BWqNSxsrnOW7NUgHjOqBCYC5zihCifWemo8,30360
apprise/plugins/NotifyBase.pyi,sha256=aKlZXRYUgG8lz_ZgGkYYJ_GKhuf18youTmMU-FlG7z8,21
apprise/plugins/NotifyBoxcar.py,sha256=vR00-WggHa1nHYWyb-f5P2V-G4f683fU_-GBlIeJvD0,12867
apprise/plugins/NotifyBulkSMS.py,sha256=stPWAFCfhBP617zYK9Dgk6pNJBN_WcyJtODzo0jR1QQ,16005
apprise/plugins/NotifyBulkVS.py,sha256=viLGeyUDiirRRM7CgRqqElHSLYFnMugDtWE6Ytjqfaw,13290
apprise/plugins/NotifyBurstSMS.py,sha256=cN2kRETKIK5LhwpQEA8C68LKv8KEUPmXYe-nTSegGls,15550
apprise/plugins/NotifyChantify.py,sha256=GJJOAtSnVoIfKbJF_W1DTu7WsvS_zHdjO4T1XTKT87g,6673
apprise/plugins/NotifyClickSend.py,sha256=UfOJqsas6WLjQskojuJE7I_-lrb5QrkMiBZv-po_Q9c,11229
apprise/plugins/NotifyD7Networks.py,sha256=4E6Fh0kQoDlMMwgZJDOXky7c7KrdMMvqprcfm29scWU,15043
apprise/plugins/NotifyDBus.py,sha256=1eVJHIL3XkFjDePMqfcll35Ie1vxggJ1iBsVFAIaF00,14379
apprise/plugins/NotifyDapnet.py,sha256=KuXjBU0ZrIYtoDei85NeLZ-IP810T4w5oFXH9sWiSh0,13624
apprise/plugins/NotifyDingTalk.py,sha256=NJyETgN6QjtRqtxQjfBLFVuFpURyWykRftm6WpQJVbY,12009
apprise/plugins/NotifyDiscord.py,sha256=M_qmTzB7NNL5_agjYDX38KBN1jRzDBp2EMSNwEF_9Tw,26072
apprise/plugins/NotifyEmail.py,sha256=DhAzLFX4pzzuS07QQFcv0VUOYu2PzQE7TTjlPokJcPY,38883
apprise/plugins/NotifyEmail.py,sha256=Y_ZOrdK6hTUKHLvogKpV5VqD8byzDyDSvwIVmfdsC2g,39789
apprise/plugins/NotifyEmby.py,sha256=OMVO8XsVl_XCBYNNNQi8ni2lS4voLfU8Puk1xJOAvHs,24039
apprise/plugins/NotifyEnigma2.py,sha256=Hj0Q9YOeljSwbfiuMKLqXTVX_1g_mjNUGEts7wfrwno,11498
apprise/plugins/NotifyFCM/__init__.py,sha256=mBFtIgIJuLIFnMB5ndx5Makjs9orVMc2oLoD7LaVT48,21669
@ -91,9 +92,10 @@ apprise/plugins/NotifyFCM/color.py,sha256=8iqDtadloQh2TMxkFmIFwenHqKp1pHHn1bwyWO
apprise/plugins/NotifyFCM/common.py,sha256=978uBUoNdtopCtylipGiKQdsQ8FTONxkFBp7uJMZHc8,1718
apprise/plugins/NotifyFCM/oauth.py,sha256=Vvbd0-rd5BPIjAneG3rILU153JIzfSZ0kaDov6hm96M,11197
apprise/plugins/NotifyFCM/priority.py,sha256=0WuRW1y1HVnybgjlTeCZPHzt7j8SwWnC7faNcjioAOc,8163
apprise/plugins/NotifyFaast.py,sha256=_F1633tQhk8gCfaNpZZm808f2G0S6fP0OOEetSiv0h8,6972
apprise/plugins/NotifyFeishu.py,sha256=IpcABdLZJ1vcQdZHlmASVbNOiOCIrmgKFhz1hbdskY4,7266
apprise/plugins/NotifyFlock.py,sha256=0rUIa9nToGsO8BTUgixh8Z_qdVixJeH479UNYjcE4EM,12748
apprise/plugins/NotifyForm.py,sha256=38nL-2m1cf4gEQFQ4NpvA4j9i5_nNUgelReWFSjyV5U,17905
apprise/plugins/NotifyFreeMobile.py,sha256=XCkgZLc3KKGlx_9UdeoMJVcHpeQrOml9T93S-DGf4bs,6644
apprise/plugins/NotifyGnome.py,sha256=8MXTa8gZg1wTgNJfLlmq7_fl3WaYK-SX6VR91u308C4,9059
apprise/plugins/NotifyGoogleChat.py,sha256=lnoN17m6lZANaXcElDTP8lcuVWjIZEK8C6_iqJNAnw4,12622
apprise/plugins/NotifyGotify.py,sha256=DNlOIHyuYitO5use9oa_REPm2Fant7y9QSaatrZFNI0,10551
@ -109,7 +111,7 @@ apprise/plugins/NotifyKumulos.py,sha256=eCEW2ZverZqETOLHVWMC4E8Ll6rEhhEWOSD73RD8
apprise/plugins/NotifyLametric.py,sha256=h8vZoX-Ll5NBZRprBlxTO2H9w0lOiMxglGvUgJtK4_8,37534
apprise/plugins/NotifyLine.py,sha256=OVI0ozMJcq_-dI8dodVX52dzUzgENlAbOik-Kw4l-rI,10676
apprise/plugins/NotifyLunaSea.py,sha256=woN8XdkwAjhgxAXp7Zj4XsWLybNL80l4W3Dx5BvobZg,14459
apprise/plugins/NotifyMQTT.py,sha256=PFLwESgR8dMZvVFHxmOZ8xfy-YqyX5b2kl_e8Z1lo-0,19537
apprise/plugins/NotifyMQTT.py,sha256=cnuG4f3bYYNPhEj9qDX8SLmnxLVT9G1b8J5w6-mQGKY,19545
apprise/plugins/NotifyMSG91.py,sha256=P7JPyT1xmucnaEeCZPf_6aJfe1gS_STYYwEM7hJ7QBw,12677
apprise/plugins/NotifyMSTeams.py,sha256=dFH575hoLL3zRddbBKfozlYjxvPJGbj3BKvfJSIkvD0,22976
apprise/plugins/NotifyMacOSX.py,sha256=y2fGpSZXomFiNwKbWImrXQUMVM4JR4uPCnsWpnxQrFA,8271
@ -124,7 +126,7 @@ apprise/plugins/NotifyNextcloudTalk.py,sha256=dLl_g7Knq5PVcadbzDuQsxbGHTZlC4r-pQ
apprise/plugins/NotifyNotica.py,sha256=yHmk8HiNFjzoI4Gewo_nBRrx9liEmhT95k1d10wqhYg,12990
apprise/plugins/NotifyNotifiarr.py,sha256=ADwLJO9eenfLkNa09tXMGSBTM4c3zTY0SEePvyB8WYA,15857
apprise/plugins/NotifyNotifico.py,sha256=Qe9jMN_M3GL4XlYIWkAf-w_Hf65g9Hde4bVuytGhUW4,12035
apprise/plugins/NotifyNtfy.py,sha256=TkDs6jOc30XQn2O2BJ14-nE_cohPdJiSS8DpYXc9hoE,27953
apprise/plugins/NotifyNtfy.py,sha256=AtJt2zH35mMQTwRDxKia93NPy6-4rtixplP53zIYV2M,27979
apprise/plugins/NotifyOffice365.py,sha256=8TxsVsdbUghmNj0kceMlmoZzTOKQTgn3priI8JuRuHE,25190
apprise/plugins/NotifyOneSignal.py,sha256=gsw7ckW7xLiJDRUb7eJHNe_4bvdBXmt6_YsB1u_ghjw,18153
apprise/plugins/NotifyOpsgenie.py,sha256=zJWpknjoHq35Iv9w88ucR62odaeIN3nrGFPtYnhDdjA,20515
@ -144,7 +146,7 @@ apprise/plugins/NotifyPushy.py,sha256=mmWcnu905Fvc8ihYXvZ7lVYErGZH5Q-GbBNS20v5r4
apprise/plugins/NotifyRSyslog.py,sha256=W42LT90X65-pNoU7KdhdX1PBcmsz9RyV376CDa_H3CI,11982
apprise/plugins/NotifyReddit.py,sha256=E78OSyDQfUalBEcg71sdMsNBOwdj7cVBnELrhrZEAXY,25785
apprise/plugins/NotifyRevolt.py,sha256=DRA9Xylwl6leVjVFuJcP4L1cG49CIBtnQdxh4BKnAZ4,14500
apprise/plugins/NotifyRocketChat.py,sha256=GTEfT-upQ56tJgE0kuc59l4uQGySj_d15wjdcARR9Ko,24624
apprise/plugins/NotifyRocketChat.py,sha256=Cb_nasX0-G3FoPMYvNk55RJ-tHuXUCTLUn2wTSi4IcI,25738
apprise/plugins/NotifyRyver.py,sha256=yhHPMLGeJtcHwBKSPPk0OBfp59DgTvXio1R59JhrJu4,11823
apprise/plugins/NotifySES.py,sha256=wtRmpAZkS5mQma6sdiaPT6U1xcgoj77CB9mNFvSEAw8,33545
apprise/plugins/NotifySMSEagle.py,sha256=voFNqOewD9OC1eRctD0YdUB_ZSWsb06rjUwBfCcxPYA,24161
@ -162,7 +164,7 @@ apprise/plugins/NotifyStreamlabs.py,sha256=lx3N8T2ufUWFYIZ-kU_rOv50YyGWBqLSCKk7x
apprise/plugins/NotifySynology.py,sha256=_jTqfgWeOuSi_I8geMOraHBVFtDkvm9mempzymrmeAo,11105
apprise/plugins/NotifySyslog.py,sha256=J9Kain2bb-PDNiG5Ydb0q678cYjNE_NjZFqMG9oEXM0,10617
apprise/plugins/NotifyTechulusPush.py,sha256=m43_Qj1scPcgCRX5Dr2Ul7nxMbaiVxNzm_HRuNmfgoA,7253
apprise/plugins/NotifyTelegram.py,sha256=Bim4mmPcefHNpvbNSy3pmLuCXRw5IVVWUNUB1SkIhDM,35624
apprise/plugins/NotifyTelegram.py,sha256=XE7PC9LRzcrfE2bpLKyor5lO_7B9LS4Xw1UlUmA4a2A,37187
apprise/plugins/NotifyThreema.py,sha256=C_C3j0fJWgeF2uB7ceJFXOdC6Lt0TFBInFMs5Xlg04M,11885
apprise/plugins/NotifyTwilio.py,sha256=WCo8eTI9OF1rtg3ueHHRDXt4Lp45eZ6h3IdTZVf5HM8,15976
apprise/plugins/NotifyTwist.py,sha256=nZA73CYVe-p0tkVMy5q3vFRyflLM4yjUo9LECvkUwgc,28841
@ -175,7 +177,7 @@ apprise/plugins/NotifyWhatsApp.py,sha256=PtzW0ue3d2wZ8Pva_LG29jUcpRRP03TFxO5SME_
apprise/plugins/NotifyWindows.py,sha256=QgWJfJF8AE6RWr-L81YYVZNWrnImK9Qr3B991HWanqU,8563
apprise/plugins/NotifyXBMC.py,sha256=5hDuOTP3Kwtp4NEMaokNjWyEKEkQcN_fSx-cUPJvhaU,12096
apprise/plugins/NotifyXML.py,sha256=WJnmdvXseuTRgioVMRqpR8a09cDfTpPTfuFlTnT_TfI,16973
apprise/plugins/NotifyZulip.py,sha256=mbZoPiQXFbcaJ5UYDbkX4HJPAvRzPEAB-rsOlF9SD4o,13755
apprise/plugins/NotifyZulip.py,sha256=M8cSL7nZvtBYyTX6045g34tyn2vyybltgD1CoI4Xa7A,13968
apprise/plugins/__init__.py,sha256=jTfLmW47kZC_Wf5eFFta2NoD2J-7_E7JaPrrVMIECkU,18725
apprise/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apprise/utils.py,sha256=SjRU2tb1UsVnTCTXPUyXVz3WpRbDWwAHH-d3ll38EHY,53185

View File

View File

@ -219,6 +219,9 @@ class AppriseLocale:
try:
# Acquire our locale
lang = locale.getlocale()[0]
# Compatibility for Python >= 3.12
if lang == 'C':
lang = AppriseLocale._default_language
except (ValueError, TypeError) as e:
# This occurs when an invalid locale was parsed from the

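Context for the guard above: on Python 3.12, locale.getlocale() can report the POSIX 'C' locale in minimal environments, and 'C' is not a usable language code. A minimal sketch of the fallback, assuming a default of 'en' (the real default lives on AppriseLocale._default_language):

import locale

_default_language = 'en'  # assumed default for this sketch

try:
    lang = locale.getlocale()[0]
    # Python >= 3.12 may report the POSIX 'C' locale here; treat it as
    # "no language detected" and fall back to the default instead
    if lang == 'C':
        lang = _default_language
except (ValueError, TypeError):
    # invalid or unparseable locales raise; fall back here as well
    lang = _default_language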
View File

@ -669,6 +669,79 @@ class URLBase:
'verify': 'yes' if self.verify_certificate else 'no',
}
@staticmethod
def post_process_parse_url_results(results):
"""
After parsing the URL, this function applies a bit of extra logic to
support extra entries like `pass` becoming `password`, etc.
This function assumes that parse_url() was called previously setting
up the basics to be checked
"""
# if our URL ends with an 's', then assume our secure flag is set.
results['secure'] = (results['schema'][-1] == 's')
# QSD Checking (over-rides all)
qsd_exists = True if isinstance(results.get('qsd'), dict) else False
if qsd_exists and 'verify' in results['qsd']:
# Pulled from URL String
results['verify'] = parse_bool(
results['qsd'].get('verify', True))
elif 'verify' in results:
# Pulled from YAML Configuration
results['verify'] = parse_bool(results.get('verify', True))
else:
# Support SSL Certificate 'verify' keyword. Default to being
# enabled
results['verify'] = True
# Password overrides
if 'pass' in results:
results['password'] = results['pass']
del results['pass']
if qsd_exists:
if 'password' in results['qsd']:
results['password'] = results['qsd']['password']
if 'pass' in results['qsd']:
results['password'] = results['qsd']['pass']
# User overrides
if 'user' in results['qsd']:
results['user'] = results['qsd']['user']
# parse_url() always creates a 'password' and 'user' entry in the
# results returned. Entries are set to None if they weren't
# specified
if results['password'] is None and 'user' in results['qsd']:
# Handle cases where user= is provided in 2 locations; we want
# the original to fall back to being the password (if one
# wasn't otherwise defined) e.g.
# mailtos://PASSWORD@hostname?user=admin@mail-domain.com
# - in the above, the PASSWORD gets lost in the parse url()
# since a user= over-ride is specified.
presults = parse_url(results['url'])
if presults:
# Store our Password
results['password'] = presults['user']
# Store our socket read timeout if specified
if 'rto' in results['qsd']:
results['rto'] = results['qsd']['rto']
# Store our socket connect timeout if specified
if 'cto' in results['qsd']:
results['cto'] = results['qsd']['cto']
if 'port' in results['qsd']:
results['port'] = results['qsd']['port']
return results
@staticmethod
def parse_url(url, verify_host=True, plus_to_space=False,
strict_port=False):
@ -698,53 +771,7 @@ class URLBase:
# We're done; we failed to parse our url
return results
# if our URL ends with an 's', then assume our secure flag is set.
results['secure'] = (results['schema'][-1] == 's')
# Support SSL Certificate 'verify' keyword. Default to being enabled
results['verify'] = True
if 'verify' in results['qsd']:
results['verify'] = parse_bool(
results['qsd'].get('verify', True))
# Password overrides
if 'password' in results['qsd']:
results['password'] = results['qsd']['password']
if 'pass' in results['qsd']:
results['password'] = results['qsd']['pass']
# User overrides
if 'user' in results['qsd']:
results['user'] = results['qsd']['user']
# parse_url() always creates a 'password' and 'user' entry in the
# results returned. Entries are set to None if they weren't specified
if results['password'] is None and 'user' in results['qsd']:
# Handle cases where user= is provided in 2 locations; we want
# the original to fall back to being the password (if one wasn't
# otherwise defined)
# e.g.
# mailtos://PASSWORD@hostname?user=admin@mail-domain.com
# - the PASSWORD gets lost in the parse url() since a user=
# over-ride is specified.
presults = parse_url(results['url'])
if presults:
# Store our Password
results['password'] = presults['user']
# Store our socket read timeout if specified
if 'rto' in results['qsd']:
results['rto'] = results['qsd']['rto']
# Store our socket connect timeout if specified
if 'cto' in results['qsd']:
results['cto'] = results['qsd']['cto']
if 'port' in results['qsd']:
results['port'] = results['qsd']['port']
return results
return URLBase.post_process_parse_url_results(results)
@staticmethod
def http_response_code_lookup(code, response_mask=None):

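The refactor above extracts the query-string override handling into post_process_parse_url_results() so that entries loaded from YAML configuration (see the ConfigBase hunk below) get the same treatment as parsed URLs. A compressed sketch of the alias and override rules, written against a plain dict rather than apprise's real parse_url() output:

def apply_overrides(results):
    # `pass` is accepted as a top-level alias of `password`
    if 'pass' in results:
        results['password'] = results.pop('pass')

    qsd = results.get('qsd') if isinstance(results.get('qsd'), dict) else {}

    # query-string entries override whatever was parsed from the URL body;
    # `pass` is checked after `password`, so it wins when both are present
    for key, target in (('password', 'password'), ('pass', 'password'),
                        ('user', 'user'), ('rto', 'rto'),
                        ('cto', 'cto'), ('port', 'port')):
        if key in qsd:
            results[target] = qsd[key]
    return results

# apply_overrides({'password': None, 'qsd': {'pass': 's3cret'}})
#   -> {'password': 's3cret', 'qsd': {'pass': 's3cret'}}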
View File

@ -27,7 +27,7 @@
# POSSIBILITY OF SUCH DAMAGE.
__title__ = 'Apprise'
__version__ = '1.7.4'
__version__ = '1.7.6'
__author__ = 'Chris Caron'
__license__ = 'BSD'
__copywrite__ = 'Copyright (C) 2024 Chris Caron <lead2gold@gmail.com>'

View File

@ -253,7 +253,7 @@ class AttachBase(URLBase):
return self.detected_mimetype \
if self.detected_mimetype else self.unknown_mimetype
def exists(self):
def exists(self, retrieve_if_missing=True):
"""
Simply returns true if the object has downloaded and stored the
attachment AND the attachment has not expired.
@ -282,7 +282,7 @@ class AttachBase(URLBase):
# The file is not present
pass
return self.download()
return False if not retrieve_if_missing else self.download()
def invalidate(self):
"""

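The new retrieve_if_missing flag turns exists() into a pure cache probe when set to False; previously a cache miss always fell through to download(). Roughly, with a simplified cache check standing in for the real expiry logic:

class Attachment:
    """Stripped-down stand-in for AttachBase."""

    def __init__(self):
        self.download_path = None    # set once content has been stored

    def download(self):
        # the real network retrieval lives here; True on success
        return False

    def exists(self, retrieve_if_missing=True):
        if self.download_path:       # stand-in for the real freshness checks
            return True
        # only fall through to network i/o when the caller allows it
        return self.download() if retrieve_if_missing else False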
View File

@ -29,6 +29,7 @@
import re
import os
import requests
import threading
from tempfile import NamedTemporaryFile
from .AttachBase import AttachBase
from ..common import ContentLocation
@ -56,6 +57,9 @@ class AttachHTTP(AttachBase):
# Web based requests are remote/external to our current location
location = ContentLocation.HOSTED
# thread safe loading
_lock = threading.Lock()
def __init__(self, headers=None, **kwargs):
"""
Initialize HTTP Object
@ -96,9 +100,6 @@ class AttachHTTP(AttachBase):
# our content is inaccessible
return False
# Ensure any existing content set has been invalidated
self.invalidate()
# prepare header
headers = {
'User-Agent': self.app_id,
@ -117,134 +118,154 @@ class AttachHTTP(AttachBase):
url += self.fullpath
self.logger.debug('HTTP POST URL: %s (cert_verify=%r)' % (
url, self.verify_certificate,
))
# Where our request object will temporarily live.
r = None
# Always call throttle before any remote server i/o is made
self.throttle()
try:
# Make our request
with requests.get(
url,
headers=headers,
auth=auth,
params=self.qsd,
verify=self.verify_certificate,
timeout=self.request_timeout,
stream=True) as r:
with self._lock:
if self.exists(retrieve_if_missing=False):
# Due to locking, it's possible a concurrent thread already
# handled the retrieval, in which case we can safely move on
self.logger.trace(
'HTTP Attachment %s already retrieved',
self._temp_file.name)
return True
# Handle Errors
r.raise_for_status()
# Get our file-size (if known)
try:
file_size = int(r.headers.get('Content-Length', '0'))
except (TypeError, ValueError):
# Handle edge case where Content-Length is a bad value
file_size = 0
# Perform a little Q/A on file limitations and restrictions
if self.max_file_size > 0 and file_size > self.max_file_size:
# The content retrieved is too large
self.logger.error(
'HTTP response exceeds allowable maximum file length '
'({}KB): {}'.format(
int(self.max_file_size / 1024),
self.url(privacy=True)))
# Return False (signifying a failure)
return False
# Detect config format based on mime if the format isn't
# already enforced
self.detected_mimetype = r.headers.get('Content-Type')
d = r.headers.get('Content-Disposition', '')
result = re.search(
"filename=['\"]?(?P<name>[^'\"]+)['\"]?", d, re.I)
if result:
self.detected_name = result.group('name').strip()
# Create a temporary file to work with
self._temp_file = NamedTemporaryFile()
# Get our chunk size
chunk_size = self.chunk_size
# Track all bytes written to disk
bytes_written = 0
# If we get here, we can now safely write our content to disk
for chunk in r.iter_content(chunk_size=chunk_size):
# filter out keep-alive chunks
if chunk:
self._temp_file.write(chunk)
bytes_written = self._temp_file.tell()
# Prevent a case where Content-Length isn't provided
# we don't want to fetch beyond our limits
if self.max_file_size > 0:
if bytes_written > self.max_file_size:
# The content retrieved is too large
self.logger.error(
'HTTP response exceeds allowable maximum '
'file length ({}KB): {}'.format(
int(self.max_file_size / 1024),
self.url(privacy=True)))
# Invalidate any variables previously set
self.invalidate()
# Return False (signifying a failure)
return False
elif bytes_written + chunk_size \
> self.max_file_size:
# Adjust our next read to accommodate up to our
# limit +1. This will prevent us from reading
# too much into our memory buffer
self.max_file_size - bytes_written + 1
# Ensure our content is flushed to disk for post-processing
self._temp_file.flush()
# Set our minimum requirements for a successful download() call
self.download_path = self._temp_file.name
if not self.detected_name:
self.detected_name = os.path.basename(self.fullpath)
except requests.RequestException as e:
self.logger.error(
'A Connection error occurred retrieving HTTP '
'configuration from %s.' % self.host)
self.logger.debug('Socket Exception: %s' % str(e))
# Invalidate any variables previously set
# Ensure any existing content set has been invalidated
self.invalidate()
# Return False (signifying a failure)
return False
self.logger.debug(
'HTTP Attachment Fetch URL: %s (cert_verify=%r)' % (
url, self.verify_certificate))
except (IOError, OSError):
# IOError is present for backwards compatibility with Python
# versions older than 3.3; >= 3.3 throws OSError now.
try:
# Make our request
with requests.get(
url,
headers=headers,
auth=auth,
params=self.qsd,
verify=self.verify_certificate,
timeout=self.request_timeout,
stream=True) as r:
# Could not open and/or write the temporary file
self.logger.error(
'Could not write attachment to disk: {}'.format(
self.url(privacy=True)))
# Handle Errors
r.raise_for_status()
# Invalidate any variables previously set
self.invalidate()
# Get our file-size (if known)
try:
file_size = int(r.headers.get('Content-Length', '0'))
except (TypeError, ValueError):
# Handle edge case where Content-Length is a bad value
file_size = 0
# Return False (signifying a failure)
return False
# Perform a little Q/A on file limitations and restrictions
if self.max_file_size > 0 and \
file_size > self.max_file_size:
# The content retrieved is too large
self.logger.error(
'HTTP response exceeds allowable maximum file '
'length ({}KB): {}'.format(
int(self.max_file_size / 1024),
self.url(privacy=True)))
# Return False (signifying a failure)
return False
# Detect config format based on mime if the format isn't
# already enforced
self.detected_mimetype = r.headers.get('Content-Type')
d = r.headers.get('Content-Disposition', '')
result = re.search(
"filename=['\"]?(?P<name>[^'\"]+)['\"]?", d, re.I)
if result:
self.detected_name = result.group('name').strip()
# Create a temporary file to work with; delete must be set
# to False or it isn't compatible with Microsoft Windows
# instances. Instead, __del__ will clean up the
# file for us.
self._temp_file = NamedTemporaryFile(delete=False)
# Get our chunk size
chunk_size = self.chunk_size
# Track all bytes written to disk
bytes_written = 0
# If we get here, we can now safely write our content to
# disk
for chunk in r.iter_content(chunk_size=chunk_size):
# filter out keep-alive chunks
if chunk:
self._temp_file.write(chunk)
bytes_written = self._temp_file.tell()
# Prevent a case where Content-Length isn't
# provided. In this case we don't want to fetch
# beyond our limits
if self.max_file_size > 0:
if bytes_written > self.max_file_size:
# The content retrieved is too large
self.logger.error(
'HTTP response exceeds allowable '
'maximum file length '
'({}KB): {}'.format(
int(self.max_file_size / 1024),
self.url(privacy=True)))
# Invalidate any variables previously set
self.invalidate()
# Return False (signifying a failure)
return False
elif bytes_written + chunk_size \
> self.max_file_size:
# Adjust our next read to accommodate up to
# our limit +1. This will prevent us from
# reading too much into our memory buffer
self.max_file_size - bytes_written + 1
# Ensure our content is flushed to disk for post-processing
self._temp_file.flush()
# Set our minimum requirements for a successful download()
# call
self.download_path = self._temp_file.name
if not self.detected_name:
self.detected_name = os.path.basename(self.fullpath)
except requests.RequestException as e:
self.logger.error(
'A Connection error occurred retrieving HTTP '
'configuration from %s.' % self.host)
self.logger.debug('Socket Exception: %s' % str(e))
# Invalidate any variables previously set
self.invalidate()
# Return False (signifying a failure)
return False
except (IOError, OSError):
# IOError is present for backwards compatibility with Python
# versions older than 3.3; >= 3.3 throws OSError now.
# Could not open and/or write the temporary file
self.logger.error(
'Could not write attachment to disk: {}'.format(
self.url(privacy=True)))
# Invalidate any variables previously set
self.invalidate()
# Return False (signifying a failure)
return False
# Return our success
return True
@ -254,11 +275,30 @@ class AttachHTTP(AttachBase):
Close our temporary file
"""
if self._temp_file:
self.logger.trace(
'Attachment cleanup of %s', self._temp_file.name)
self._temp_file.close()
try:
# Ensure our file is removed (if it exists)
os.unlink(self._temp_file.name)
except OSError:
pass
# Reset our temporary file to prevent us from entering
# this block again
self._temp_file = None
super().invalidate()
def __del__(self):
"""
Tidy memory if open
"""
with self._lock:
self.invalidate()
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.

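Taken together, the class-level threading.Lock and the exists(retrieve_if_missing=False) probe give a check-inside-the-lock pattern: a thread that loses the download race returns the already-fetched file instead of re-downloading it. A stripped-down sketch of that shape, with the HTTP fetch itself elided:

import threading

class CachedFetcher:
    _lock = threading.Lock()         # shared, like AttachHTTP._lock

    def __init__(self):
        self._path = None            # stands in for the NamedTemporaryFile

    def download(self):
        with self._lock:
            if self._path is not None:
                # a concurrent thread already handled the retrieval
                return self._path
            # ... perform the HTTP fetch and write a temp file here ...
            self._path = '/tmp/attachment'   # placeholder result
            return self._path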
View File

@ -1184,6 +1184,9 @@ class ConfigBase(URLBase):
# Prepare our Asset Object
_results['asset'] = asset
# Handle post processing of result set
_results = URLBase.post_process_parse_url_results(_results)
# Store our preloaded entries
preloaded.append({
'results': _results,

View File

@ -58,8 +58,8 @@ def markdown_to_html(content):
"""
Converts specified content from markdown to HTML.
"""
return markdown(content)
return markdown(content, extensions=[
'markdown.extensions.nl2br', 'markdown.extensions.tables'])
def text_to_html(content):

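The two extensions change the rendering in visible ways: nl2br turns single newlines into explicit <br /> tags and tables enables pipe-table syntax. A quick check with the Python-Markdown package (assumed installed) shows the newline difference:

from markdown import markdown

text = "line one\nline two"

print(markdown(text))
# <p>line one
# line two</p>            -- a bare newline is not a line break by default

print(markdown(text, extensions=['markdown.extensions.nl2br']))
# <p>line one<br />
# line two</p>            -- nl2br inserts the explicit break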
View File

@ -147,6 +147,10 @@ class CustomNotifyPlugin(NotifyBase):
self._default_args = {}
# Some variables do not need to be set
if 'secure' in kwargs:
del kwargs['secure']
# Apply our updates based on what was parsed
dict_full_update(self._default_args, self._base_args)
dict_full_update(self._default_args, kwargs)

View File

@ -203,6 +203,13 @@ class NotifyAprs(NotifyBase):
"type": "string",
"map_to": "targets",
},
"delay": {
"name": _("Resend Delay"),
"type": "float",
"min": 0.0,
"max": 5.0,
"default": 0.0,
},
"locale": {
"name": _("Locale"),
"type": "choice:string",
@ -212,7 +219,7 @@ class NotifyAprs(NotifyBase):
}
)
def __init__(self, targets=None, locale=None, **kwargs):
def __init__(self, targets=None, locale=None, delay=None, **kwargs):
"""
Initialize APRS Object
"""
@ -272,6 +279,28 @@ class NotifyAprs(NotifyBase):
self.logger.warning(msg)
raise TypeError(msg)
# Update our delay
if delay is None:
self.delay = NotifyAprs.template_args["delay"]["default"]
else:
try:
self.delay = float(delay)
if self.delay < NotifyAprs.template_args["delay"]["min"]:
raise ValueError()
elif self.delay >= NotifyAprs.template_args["delay"]["max"]:
raise ValueError()
except (TypeError, ValueError):
msg = "Unsupported APRS-IS delay ({}) specified. ".format(
delay)
self.logger.warning(msg)
raise TypeError(msg)
# Bump up our request_rate
self.request_rate_per_sec += self.delay
# Set the transmitter group
self.locale = \
NotifyAprs.template_args["locale"]["default"] \
@ -674,6 +703,10 @@ class NotifyAprs(NotifyBase):
# Store our locale if not default
params['locale'] = self.locale
if self.delay != NotifyAprs.template_args["delay"]["default"]:
# Store our delay if not default
params['delay'] = "{:.2f}".format(self.delay)
# Extend our parameters
params.update(self.url_parameters(privacy=privacy, *args, **kwargs))
@ -727,6 +760,10 @@ class NotifyAprs(NotifyBase):
# All entries after the hostname are additional targets
results["targets"].extend(NotifyAprs.split_path(results["fullpath"]))
# Get Delay (if set)
if 'delay' in results['qsd'] and len(results['qsd']['delay']):
results['delay'] = NotifyAprs.unquote(results['qsd']['delay'])
# Support the 'to' variable so that we can support rooms this way too
# The 'to' makes it easier to use yaml configuration
if "to" in results["qsd"] and len(results["qsd"]["to"]):

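The delay travels as a ?delay= query parameter (e.g. aprs://user:pass@CALLSIGN?delay=2.5, a hypothetical URL) and is validated against the template bounds before being folded into request_rate_per_sec. A self-contained sketch of that validation; the bounds mirror template_args above, and the upper bound is exclusive, exactly as in the plugin's check:

DELAY_MIN, DELAY_MAX, DELAY_DEFAULT = 0.0, 5.0, 0.0

def parse_delay(delay=None):
    # None means the option was not supplied at all
    if delay is None:
        return DELAY_DEFAULT
    try:
        value = float(delay)
        # reject out-of-range values (the max is exclusive)
        if value < DELAY_MIN or value >= DELAY_MAX:
            raise ValueError()
    except (TypeError, ValueError):
        raise TypeError(f'Unsupported APRS-IS delay ({delay}) specified.')
    return value

# parse_delay('2.5') -> 2.5; parse_delay(5) raises TypeError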
View File

@ -457,6 +457,19 @@ class NotifyBase(URLBase):
# Handle situations where the title is None
title = '' if not title else title
# Truncate flag set with attachments ensures that only 1
# attachment passes through. In the event there could be many
# services specified, we only want to do this logic once.
# The logic is only applicable if there was more than 1
# attachment specified
overflow = self.overflow_mode if overflow is None else overflow
if attach and len(attach) > 1 and overflow == OverflowMode.TRUNCATE:
# Save first attachment
_attach = AppriseAttachment(attach[0], asset=self.asset)
else:
# reference same attachment
_attach = attach
# Apply our overflow (if defined)
for chunk in self._apply_overflow(
body=body, title=title, overflow=overflow,
@ -465,7 +478,7 @@ class NotifyBase(URLBase):
# Send notification
yield dict(
body=chunk['body'], title=chunk['title'],
notify_type=notify_type, attach=attach,
notify_type=notify_type, attach=_attach,
body_format=body_format
)
@ -485,7 +498,7 @@ class NotifyBase(URLBase):
},
{
title: 'the title goes here',
body: 'the message body goes here',
body: 'the continued message body goes here',
},
]

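With the TRUNCATE overflow mode only the first chunk is ever delivered, and the comment above notes the attachment list should be rebuilt once rather than per service; the net effect is that at most one attachment passes through. A condensed sketch of the decision, using a plain string for the mode and a list slice in place of the AppriseAttachment re-wrap:

def choose_attachments(attach, instance_mode, overflow=None):
    # fall back to the instance-level mode when no per-call override is given
    overflow = instance_mode if overflow is None else overflow
    if attach and len(attach) > 1 and overflow == 'truncate':
        return attach[:1]    # keep only the first attachment
    return attach            # otherwise pass the set through untouched

# choose_attachments(['a.png', 'b.png'], 'truncate') -> ['a.png']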
View File

@ -26,118 +26,111 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Chantify
# 1. Visit https://chanify.net/
# The API URL will look something like this:
# https://api.chanify.net/v1/sender/token
#
import requests
from .NotifyBase import NotifyBase
from ..common import NotifyImageSize
from ..common import NotifyType
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
class NotifyFaast(NotifyBase):
class NotifyChantify(NotifyBase):
"""
A wrapper for Faast Notifications
A wrapper for Chantify Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Faast'
service_name = _('Chantify')
# The services URL
service_url = 'http://www.faast.io/'
service_url = 'https://chanify.net/'
# The default protocol (this is secure for faast)
protocol = 'faast'
# The default secure protocol
secure_protocol = 'chantify'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_faast'
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_chantify'
# Faast uses the http protocol with JSON requests
notify_url = 'https://www.appnotifications.com/account/notifications.json'
# Allows the user to specify the NotifyImageSize object
image_size = NotifyImageSize.XY_72
# Notification URL
notify_url = 'https://api.chanify.net/v1/sender/{token}/'
# Define object templates
templates = (
'{schema}://{authtoken}',
'{schema}://{token}',
)
# Define our template tokens
# The title is not used
title_maxlen = 0
# Define our tokens; these are the minimum tokens required to
# be passed into this function (as arguments). The syntax appends any
# previously defined in the base package and builds onto them
template_tokens = dict(NotifyBase.template_tokens, **{
'authtoken': {
'name': _('Authorization Token'),
'token': {
'name': _('Token'),
'type': 'string',
'private': True,
'required': True,
'regex': (r'^[A-Z0-9_-]+$', 'i'),
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'image': {
'name': _('Include Image'),
'type': 'bool',
'default': True,
'map_to': 'include_image',
'token': {
'alias_of': 'token',
},
})
def __init__(self, authtoken, include_image=True, **kwargs):
def __init__(self, token, **kwargs):
"""
Initialize Faast Object
Initialize Chantify Object
"""
super().__init__(**kwargs)
# Store the Authentication Token
self.authtoken = validate_regex(authtoken)
if not self.authtoken:
msg = 'An invalid Faast Authentication Token ' \
'({}) was specified.'.format(authtoken)
self.token = validate_regex(
token, *self.template_tokens['token']['regex'])
if not self.token:
msg = 'The Chantify token specified ({}) is invalid.'\
.format(token)
self.logger.warning(msg)
raise TypeError(msg)
# Associate an image with our post
self.include_image = include_image
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Faast Notification
Send our notification
"""
# prepare our headers
headers = {
'User-Agent': self.app_id,
'Content-Type': 'multipart/form-data'
'Content-Type': 'application/x-www-form-urlencoded',
}
# prepare JSON Object
# Our Message
payload = {
'user_credentials': self.authtoken,
'title': title,
'message': body,
'text': body
}
# Acquire our image if we're configured to do so
image_url = None if not self.include_image \
else self.image_url(notify_type)
if image_url:
payload['icon_url'] = image_url
self.logger.debug('Faast POST URL: %s (cert_verify=%r)' % (
self.notify_url, self.verify_certificate,
))
self.logger.debug('Faast Payload: %s' % str(payload))
self.logger.debug('Chantify POST URL: %s (cert_verify=%r)' % (
self.notify_url, self.verify_certificate))
self.logger.debug('Chantify Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
self.notify_url,
self.notify_url.format(token=self.token),
data=payload,
headers=headers,
verify=self.verify_certificate,
@ -146,10 +139,10 @@ class NotifyFaast(NotifyBase):
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyFaast.http_response_code_lookup(r.status_code)
NotifyChantify.http_response_code_lookup(r.status_code)
self.logger.warning(
'Failed to send Faast notification:'
'Failed to send Chantify notification: '
'{}{}error={}.'.format(
status_str,
', ' if status_str else '',
@ -161,12 +154,12 @@ class NotifyFaast(NotifyBase):
return False
else:
self.logger.info('Sent Faast notification.')
self.logger.info('Sent Chantify notification.')
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Faast notification.',
)
'A Connection error occurred sending Chantify '
'notification.')
self.logger.debug('Socket Exception: %s' % str(e))
# Return; we're done
@ -179,18 +172,13 @@ class NotifyFaast(NotifyBase):
Returns the URL built dynamically based on specified arguments.
"""
# Define any URL parameters
params = {
'image': 'yes' if self.include_image else 'no',
}
# Prepare our parameters
params = self.url_parameters(privacy=privacy, *args, **kwargs)
# Extend our parameters
params.update(self.url_parameters(privacy=privacy, *args, **kwargs))
return '{schema}://{authtoken}/?{params}'.format(
schema=self.protocol,
authtoken=self.pprint(self.authtoken, privacy, safe=''),
params=NotifyFaast.urlencode(params),
return '{schema}://{token}/?{params}'.format(
schema=self.secure_protocol,
token=self.pprint(self.token, privacy, safe=''),
params=NotifyChantify.urlencode(params),
)
@staticmethod
@ -200,16 +188,19 @@ class NotifyFaast(NotifyBase):
us to re-instantiate this object.
"""
# parse_url already handles getting the `user` and `password` fields
# populated.
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Store our authtoken using the host
results['authtoken'] = NotifyFaast.unquote(results['host'])
# Allow over-ride
if 'token' in results['qsd'] and len(results['qsd']['token']):
results['token'] = NotifyChantify.unquote(results['qsd']['token'])
# Include image with our post
results['include_image'] = \
parse_bool(results['qsd'].get('image', True))
else:
results['token'] = NotifyChantify.unquote(results['host'])
return results

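The new plugin reduces to a form-encoded POST of the message body against Chanify's sender endpoint. A minimal standalone equivalent, with a placeholder token and the requests package assumed installed:

import requests

def send_chantify(token, body):
    # endpoint mirrors the plugin's notify_url template
    url = f'https://api.chanify.net/v1/sender/{token}/'
    r = requests.post(
        url,
        data={'text': body},    # the title is unused by this service
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        timeout=10,
    )
    return r.status_code == requests.codes.ok

# send_chantify('YOUR_TOKEN', 'hello') -> True on HTTP 200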
View File

@ -45,7 +45,7 @@ from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyFormat, NotifyType
from ..conversion import convert_between
from ..utils import is_email, parse_emails
from ..utils import is_email, parse_emails, is_hostname
from ..AppriseLocale import gettext_lazy as _
from ..logger import logger
@ -566,12 +566,20 @@ class NotifyEmail(NotifyBase):
# Apply any defaults based on certain known configurations
self.NotifyEmailDefaults(secure_mode=secure_mode, **kwargs)
if self.user and self.host:
# Prepare the bases of our email
self.from_addr = [self.app_id, '{}@{}'.format(
re.split(r'[\s@]+', self.user)[0],
self.host,
)]
if self.user:
if self.host:
# Prepare the bases of our email
self.from_addr = [self.app_id, '{}@{}'.format(
re.split(r'[\s@]+', self.user)[0],
self.host,
)]
else:
result = is_email(self.user)
if result:
# Prepare the bases of our email and include domain
self.host = result['domain']
self.from_addr = [self.app_id, self.user]
if from_addr:
result = is_email(from_addr)
@ -1037,11 +1045,25 @@ class NotifyEmail(NotifyBase):
us to re-instantiate this object.
"""
results = NotifyBase.parse_url(url)
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Prepare our target lists
results['targets'] = []
if not is_hostname(results['host'], ipv4=False, ipv6=False,
underscore=False):
if is_email(NotifyEmail.unquote(results['host'])):
# Don't lose defined email addresses
results['targets'].append(NotifyEmail.unquote(results['host']))
# Detect if we have a valid hostname or not; be sure to reset its
# value if invalid; we'll attempt to figure this out later on
results['host'] = ''
# The From address is a must; either through the use of templates
# from= entry and/or merging the user and hostname together, this
# must be calculated or parse_url will fail.
@ -1052,7 +1074,7 @@ class NotifyEmail(NotifyBase):
# Get our potential email targets; if none our found we'll just
# add one to ourselves
results['targets'] = NotifyEmail.split_path(results['fullpath'])
results['targets'] += NotifyEmail.split_path(results['fullpath'])
# Attempt to detect 'to' email address
if 'to' in results['qsd'] and len(results['qsd']['to']):

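parse_url() is now called with verify_host=False so a URL whose host slot actually holds an email address still parses; the host is then checked by hand, kept as a target when it is an address, and cleared so the real host can be derived later. A rough sketch of that branch, with simplified regexes standing in for apprise's is_hostname() and is_email():

import re

def classify_host(host):
    """Keep an email-shaped 'host' as a target; clear it otherwise."""
    targets = []
    # crude hostname test standing in for is_hostname()
    if not re.fullmatch(r'[A-Za-z0-9](?:[A-Za-z0-9.-]*[A-Za-z0-9])?', host):
        # loose email test standing in for is_email()
        if re.fullmatch(r'[^@\s]+@[^@\s]+\.[^@\s]+', host):
            targets.append(host)    # don't lose a defined email address
        host = ''                   # derive the real host later instead
    return host, targets

# classify_host('smtp.gmail.com')   -> ('smtp.gmail.com', [])
# classify_host('user@example.com') -> ('', ['user@example.com'])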
Some files were not shown because too many files have changed in this diff