diff --git a/bazarr/api.py b/bazarr/api.py
index 08d81cf51..97ea20c4a 100644
--- a/bazarr/api.py
+++ b/bazarr/api.py
@@ -23,6 +23,7 @@ from bs4 import BeautifulSoup as bso
 from get_args import args
 from config import settings, base_url, save_settings, get_settings
 from logger import empty_log
+from init import startTime
 from init import *
 
 import logging
@@ -606,6 +607,7 @@ class SystemStatus(Resource):
         system_status.update({'python_version': platform.python_version()})
         system_status.update({'bazarr_directory': os.path.dirname(os.path.dirname(__file__))})
         system_status.update({'bazarr_config_directory': args.config_dir})
+        system_status.update({'start_time': startTime})
         return jsonify(data=system_status)
 
 
diff --git a/bazarr/app.py b/bazarr/app.py
index 1a9e25f4f..a67810c57 100644
--- a/bazarr/app.py
+++ b/bazarr/app.py
@@ -28,7 +28,7 @@ def create_app():
     else:
         app.config["DEBUG"] = False
 
-    socketio.init_app(app, path=base_url.rstrip('/')+'/api/socket.io', cors_allowed_origins='*', async_mode='gevent')
+    socketio.init_app(app, path=base_url.rstrip('/')+'/api/socket.io', cors_allowed_origins='*', async_mode='threading')
     return app
 
 
diff --git a/bazarr/config.py b/bazarr/config.py
index 57e0d3ef0..af581c352 100644
--- a/bazarr/config.py
+++ b/bazarr/config.py
@@ -185,7 +185,10 @@ defaults = {
     },
     'titulky': {
         'username': '',
-        'password': ''
+        'password': '',
+        'skip_wrong_fps': 'False',
+        'approved_only': 'False',
+        'multithreading': 'True'
     },
     'subsync': {
         'use_subsync': 'False',
diff --git a/bazarr/database.py b/bazarr/database.py
index 9924f8d72..85f420110 100644
--- a/bazarr/database.py
+++ b/bazarr/database.py
@@ -3,7 +3,7 @@ import atexit
 import json
 import ast
 import logging
-import gevent
+import time
 from peewee import *
 from playhouse.sqliteq import SqliteQueueDatabase
 from playhouse.shortcuts import model_to_dict
@@ -15,7 +15,7 @@ from config import settings, get_array_from
 from get_args import args
 
 database = SqliteQueueDatabase(os.path.join(args.config_dir, 'db', 'bazarr.db'),
-                               use_gevent=True,
+                               use_gevent=False,
                                autostart=True,
                                queue_max_size=256)
 migrator = SqliteMigrator(database)
@@ -284,7 +284,7 @@ def init_db():
             if not System.select().count():
                 System.insert({System.configured: '0', System.updated: '0'}).execute()
         except:
-            gevent.sleep(0.1)
+            time.sleep(0.1)
         else:
             tables_created = True
 
diff --git a/bazarr/get_episodes.py b/bazarr/get_episodes.py
index ebbec73ec..c93f2a693 100644
--- a/bazarr/get_episodes.py
+++ b/bazarr/get_episodes.py
@@ -3,7 +3,6 @@
 import os
 import requests
 import logging
-from gevent import sleep
 from peewee import DoesNotExist
 
 from database import get_exclusion_clause, TableEpisodes, TableShows
@@ -45,7 +44,6 @@ def sync_episodes(series_id=None, send_event=True):
     series_count = len(seriesIdList)
 
     for i, seriesId in enumerate(seriesIdList):
-        sleep()
         if send_event:
             show_progress(id='episodes_progress',
                           header='Syncing episodes...',
@@ -70,7 +68,6 @@ def sync_episodes(series_id=None, send_event=True):
                         episode['episodeFile'] = item[0]
 
             for episode in episodes:
-                sleep()
                 if 'hasFile' in episode:
                     if episode['hasFile'] is True:
                         if 'episodeFile' in episode:
@@ -91,7 +88,6 @@ def sync_episodes(series_id=None, send_event=True):
     removed_episodes = list(set(current_episodes_db_list) - set(current_episodes_sonarr))
 
     for removed_episode in removed_episodes:
-        sleep()
         episode_to_delete = TableEpisodes.select(TableEpisodes.sonarrSeriesId, TableEpisodes.sonarrEpisodeId)\
             .where(TableEpisodes.sonarrEpisodeId == removed_episode)\
             .dicts()\
@@ -124,7 +120,6 @@ def sync_episodes(series_id=None, send_event=True):
     episodes_to_update_list = [i for i in episodes_to_update if i not in episode_in_db_list]
 
     for updated_episode in episodes_to_update_list:
-        sleep()
         TableEpisodes.update(updated_episode).where(TableEpisodes.sonarrEpisodeId ==
                                                     updated_episode['sonarrEpisodeId']).execute()
         altered_episodes.append([updated_episode['sonarrEpisodeId'],
@@ -133,7 +128,6 @@ def sync_episodes(series_id=None, send_event=True):
 
     # Insert new episodes in DB
     for added_episode in episodes_to_add:
-        sleep()
         result = TableEpisodes.insert(added_episode).on_conflict(action='IGNORE').execute()
         if result > 0:
             altered_episodes.append([added_episode['sonarrEpisodeId'],
@@ -147,7 +141,6 @@ def sync_episodes(series_id=None, send_event=True):
 
     # Store subtitles for added or modified episodes
     for i, altered_episode in enumerate(altered_episodes, 1):
-        sleep()
         store_subtitles(altered_episode[1], path_mappings.path_replace(altered_episode[1]))
 
     logging.debug('BAZARR All episodes synced from Sonarr into database.')
diff --git a/bazarr/get_movies.py b/bazarr/get_movies.py
index 8749ff6fd..3ff4bd951 100644
--- a/bazarr/get_movies.py
+++ b/bazarr/get_movies.py
@@ -5,7 +5,6 @@ import requests
 import logging
 import operator
 from functools import reduce
-from gevent import sleep
 from peewee import DoesNotExist
 
 from config import settings, url_radarr
@@ -17,6 +16,7 @@ from get_rootfolder import check_radarr_rootfolder
 from get_subtitle import movies_download_subtitles
 from database import get_exclusion_clause, TableMovies
 from event_handler import event_stream, show_progress, hide_progress
+from get_languages import language_from_alpha2
 
 headers = {"User-Agent": os.environ["SZ_USER_AGENT"]}
 
@@ -64,7 +64,6 @@ def update_movies(send_event=True):
     # Build new and updated movies
     movies_count = len(movies)
     for i, movie in enumerate(movies):
-        sleep()
         if send_event:
             show_progress(id='movies_progress',
                           header='Syncing movies...',
@@ -96,7 +95,6 @@ def update_movies(send_event=True):
     removed_movies = list(set(current_movies_db_list) - set(current_movies_radarr))
 
     for removed_movie in removed_movies:
-        sleep()
         TableMovies.delete().where(TableMovies.tmdbId == removed_movie).execute()
 
     # Update movies in DB
@@ -129,7 +127,6 @@ def update_movies(send_event=True):
     movies_to_update_list = [i for i in movies_to_update if i not in movies_in_db_list]
 
     for updated_movie in movies_to_update_list:
-        sleep()
         TableMovies.update(updated_movie).where(TableMovies.tmdbId == updated_movie['tmdbId']).execute()
         altered_movies.append([updated_movie['tmdbId'],
                                updated_movie['path'],
@@ -138,7 +135,6 @@ def update_movies(send_event=True):
 
     # Insert new movies in DB
     for added_movie in movies_to_add:
-        sleep()
         result = TableMovies.insert(added_movie).on_conflict(action='IGNORE').execute()
         if result > 0:
             altered_movies.append([added_movie['tmdbId'],
@@ -153,7 +149,6 @@ def update_movies(send_event=True):
 
     # Store subtitles for added or modified movies
     for i, altered_movie in enumerate(altered_movies, 1):
-        sleep()
         store_subtitles_movie(altered_movie[1], path_mappings.path_replace_movie(altered_movie[1]))
 
     logging.debug('BAZARR All movies synced from Radarr into database.')
@@ -456,7 +451,10 @@ def movieParser(movie, action, tags_dict, movie_default_profile, audio_profiles)
     for item in movie['movieFile']['languages']:
         if isinstance(item, dict):
             if 'name' in item:
-                audio_language.append(item['name'])
+                language = item['name']
+                if item['name'] == 'Portuguese (Brazil)':
+                    language = language_from_alpha2('pb')
+                audio_language.append(language)
tags = [d['label'] for d in tags_dict if d['id'] in movie['tags']] diff --git a/bazarr/get_providers.py b/bazarr/get_providers.py index fa23b40cc..ab3974c4b 100644 --- a/bazarr/get_providers.py +++ b/bazarr/get_providers.py @@ -1,6 +1,7 @@ # coding=utf-8 import os import datetime +import pytz import logging import subliminal_patch import pretty @@ -29,6 +30,9 @@ def time_until_end_of_day(dt=None): tomorrow = dt + datetime.timedelta(days=1) return datetime.datetime.combine(tomorrow, datetime.time.min) - dt +# Titulky resets its download limits at the start of a new day from its perspective - the Europe/Prague timezone +titulky_server_local_time = datetime.datetime.now(tz=pytz.timezone('Europe/Prague')).replace(tzinfo=None) # Needs to convert to offset-naive dt +titulky_limit_reset_datetime = time_until_end_of_day(dt=titulky_server_local_time) hours_until_end_of_day = time_until_end_of_day().seconds // 3600 + 1 @@ -65,8 +69,7 @@ PROVIDER_THROTTLE_MAP = { IPAddressBlocked : (datetime.timedelta(hours=1), "1 hours"), }, "titulky" : { - DownloadLimitExceeded: ( - datetime.timedelta(hours=hours_until_end_of_day), "{} hours".format(str(hours_until_end_of_day))) + DownloadLimitExceeded: (titulky_limit_reset_datetime, f"{titulky_limit_reset_datetime.seconds // 3600 + 1} hours") }, "legendasdivx" : { TooManyRequests : (datetime.timedelta(hours=3), "3 hours"), @@ -183,6 +186,9 @@ def get_providers_auth(): 'titulky' : { 'username': settings.titulky.username, 'password': settings.titulky.password, + 'skip_wrong_fps': settings.titulky.getboolean('skip_wrong_fps'), + 'approved_only': settings.titulky.getboolean('approved_only'), + 'multithreading': settings.titulky.getboolean('multithreading'), }, 'titlovi' : { 'username': settings.titlovi.username, diff --git a/bazarr/get_series.py b/bazarr/get_series.py index 45b0941f6..c92af286b 100644 --- a/bazarr/get_series.py +++ b/bazarr/get_series.py @@ -3,7 +3,6 @@ import os import requests import logging -from gevent import sleep from peewee import DoesNotExist from config import settings, url_sonarr @@ -51,7 +50,6 @@ def update_series(send_event=True): series_count = len(series) for i, show in enumerate(series): - sleep() if send_event: show_progress(id='series_progress', header='Syncing series...', @@ -78,7 +76,6 @@ def update_series(send_event=True): removed_series = list(set(current_shows_db_list) - set(current_shows_sonarr)) for series in removed_series: - sleep() TableShows.delete().where(TableShows.sonarrSeriesId == series).execute() if send_event: event_stream(type='series', action='delete', payload=series) @@ -106,7 +103,6 @@ def update_series(send_event=True): series_to_update_list = [i for i in series_to_update if i not in series_in_db_list] for updated_series in series_to_update_list: - sleep() TableShows.update(updated_series).where(TableShows.sonarrSeriesId == updated_series['sonarrSeriesId']).execute() if send_event: @@ -114,7 +110,6 @@ def update_series(send_event=True): # Insert new series in DB for added_series in series_to_add: - sleep() result = TableShows.insert(added_series).on_conflict(action='IGNORE').execute() if result: list_missing_subtitles(no=added_series['sonarrSeriesId']) diff --git a/bazarr/get_subtitle.py b/bazarr/get_subtitle.py index 02956ad17..9bf0875f9 100644 --- a/bazarr/get_subtitle.py +++ b/bazarr/get_subtitle.py @@ -271,7 +271,9 @@ def download_subtitle(path, language, audio_language, hi, forced, providers, pro reversed_path = path_mappings.path_replace_reverse(path) reversed_subtitles_path = 
path_mappings.path_replace_reverse(downloaded_path) notify_sonarr(episode_metadata['sonarrSeriesId']) - event_stream(type='episode-wanted', action='delete', payload=episode_metadata['sonarrEpisodeId']) + event_stream(type='series', action='update', payload=episode_metadata['sonarrSeriesId']) + event_stream(type='episode-wanted', action='delete', + payload=episode_metadata['sonarrEpisodeId']) else: reversed_path = path_mappings.path_replace_reverse_movie(path) @@ -717,10 +719,14 @@ def manual_upload_subtitle(path, language, forced, hi, title, scene_name, media_ reversed_path = path_mappings.path_replace_reverse(path) reversed_subtitles_path = path_mappings.path_replace_reverse(subtitle_path) notify_sonarr(episode_metadata['sonarrSeriesId']) + event_stream(type='series', action='update', payload=episode_metadata['sonarrSeriesId']) + event_stream(type='episode-wanted', action='delete', payload=episode_metadata['sonarrEpisodeId']) else: reversed_path = path_mappings.path_replace_reverse_movie(path) reversed_subtitles_path = path_mappings.path_replace_reverse_movie(subtitle_path) notify_radarr(movie_metadata['radarrId']) + event_stream(type='movie', action='update', payload=movie_metadata['radarrId']) + event_stream(type='movie-wanted', action='delete', payload=movie_metadata['radarrId']) return message, reversed_path, reversed_subtitles_path @@ -1066,6 +1072,7 @@ def wanted_download_subtitles(sonarr_episode_id): store_subtitles(episode['path'], path_mappings.path_replace(episode['path'])) history_log(1, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message, path, language_code, provider, score, subs_id, subs_path) + event_stream(type='series', action='update', payload=episode['sonarrSeriesId']) event_stream(type='episode-wanted', action='delete', payload=episode['sonarrEpisodeId']) send_notifications(episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message) else: diff --git a/bazarr/init.py b/bazarr/init.py index 6560ad911..78e86942c 100644 --- a/bazarr/init.py +++ b/bazarr/init.py @@ -14,6 +14,11 @@ from helper import path_mappings from dogpile.cache.region import register_backend as register_cache_backend import subliminal import datetime +import time + +# set start time global variable as epoch +global startTime +startTime = time.time() # set subliminal_patch user agent os.environ["SZ_USER_AGENT"] = "Bazarr/{}".format(os.environ["BAZARR_VERSION"]) @@ -54,7 +59,7 @@ def is_virtualenv(): # deploy requirements.txt if not args.no_update: try: - import lxml, numpy, webrtcvad, gevent, geventwebsocket, setuptools + import lxml, numpy, webrtcvad, setuptools except ImportError: try: import pip diff --git a/bazarr/list_subtitles.py b/bazarr/list_subtitles.py index 10d1a87c2..1f4dcc029 100644 --- a/bazarr/list_subtitles.py +++ b/bazarr/list_subtitles.py @@ -8,7 +8,6 @@ import re from guess_language import guess_language from subliminal_patch import core, search_external_subtitles from subzero.language import Language -from gevent import sleep from custom_lang import CustomLanguage from database import get_profiles_list, get_profile_cutoff, TableEpisodes, TableShows, TableMovies @@ -19,6 +18,7 @@ from helper import path_mappings, get_subtitle_destination_folder from embedded_subs_reader import embedded_subs_reader from event_handler import event_stream, show_progress, hide_progress from charamel import Detector +from peewee import DoesNotExist gc.enable() @@ -37,33 +37,39 @@ def store_subtitles(original_path, reversed_path, use_cache=True): .where(TableEpisodes.path == original_path)\ 
.dicts()\ .get() - subtitle_languages = embedded_subs_reader(reversed_path, - file_size=item['file_size'], - episode_file_id=item['episode_file_id'], - use_cache=use_cache) - for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages: - try: - if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \ - (settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() == - "vobsub") or \ - (settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() == - "ass"): - logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language))) - continue + except DoesNotExist: + logging.exception(f"BAZARR error when trying to select this episode from database: {reversed_path}") + else: + try: + subtitle_languages = embedded_subs_reader(reversed_path, + file_size=item['file_size'], + episode_file_id=item['episode_file_id'], + use_cache=use_cache) + for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages: + try: + if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \ + (settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() == + "vobsub") or \ + (settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() == + "ass"): + logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language))) + continue - if alpha2_from_alpha3(subtitle_language) is not None: - lang = str(alpha2_from_alpha3(subtitle_language)) - if subtitle_forced: - lang = lang + ":forced" - if subtitle_hi: - lang = lang + ":hi" - logging.debug("BAZARR embedded subtitles detected: " + lang) - actual_subtitles.append([lang, None]) - except Exception as error: - logging.debug("BAZARR unable to index this unrecognized language: %s (%s)", subtitle_language, error) - except Exception as e: - logging.exception( - "BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path)) + if alpha2_from_alpha3(subtitle_language) is not None: + lang = str(alpha2_from_alpha3(subtitle_language)) + if subtitle_forced: + lang = lang + ":forced" + if subtitle_hi: + lang = lang + ":hi" + logging.debug("BAZARR embedded subtitles detected: " + lang) + actual_subtitles.append([lang, None]) + except Exception as error: + logging.debug("BAZARR unable to index this unrecognized language: %s (%s)", subtitle_language, error) + except Exception as e: + logging.exception( + "BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], + reversed_path)) + pass try: dest_folder = get_subtitle_destination_folder() core.CUSTOM_PATHS = [dest_folder] if dest_folder else [] @@ -131,35 +137,40 @@ def store_subtitles_movie(original_path, reversed_path, use_cache=True): .where(TableMovies.path == original_path)\ .dicts()\ .get() - subtitle_languages = embedded_subs_reader(reversed_path, - file_size=item['file_size'], - movie_file_id=item['movie_file_id'], - use_cache=use_cache) - for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages: - try: - if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \ - (settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() == - "vobsub") or \ - (settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() == - "ass"): - logging.debug("BAZARR skipping %s sub for language: %s" % 
(subtitle_codec, alpha2_from_alpha3(subtitle_language))) - continue + except DoesNotExist: + logging.exception(f"BAZARR error when trying to select this movie from database: {reversed_path}") + else: + try: + subtitle_languages = embedded_subs_reader(reversed_path, + file_size=item['file_size'], + movie_file_id=item['movie_file_id'], + use_cache=use_cache) + for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages: + try: + if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \ + (settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() == + "vobsub") or \ + (settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() == + "ass"): + logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language))) + continue - if alpha2_from_alpha3(subtitle_language) is not None: - lang = str(alpha2_from_alpha3(subtitle_language)) - if subtitle_forced: - lang = lang + ':forced' - if subtitle_hi: - lang = lang + ':hi' - logging.debug("BAZARR embedded subtitles detected: " + lang) - actual_subtitles.append([lang, None]) - except: - logging.debug("BAZARR unable to index this unrecognized language: " + subtitle_language) - pass - except Exception: - logging.exception( - "BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path)) - pass + if alpha2_from_alpha3(subtitle_language) is not None: + lang = str(alpha2_from_alpha3(subtitle_language)) + if subtitle_forced: + lang = lang + ':forced' + if subtitle_hi: + lang = lang + ':hi' + logging.debug("BAZARR embedded subtitles detected: " + lang) + actual_subtitles.append([lang, None]) + except: + logging.debug("BAZARR unable to index this unrecognized language: " + subtitle_language) + pass + except Exception: + logging.exception( + "BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], + reversed_path)) + pass try: dest_folder = get_subtitle_destination_folder() or '' @@ -237,7 +248,6 @@ def list_missing_subtitles(no=None, epno=None, send_event=True): use_embedded_subs = settings.general.getboolean('use_embedded_subs') for episode_subtitles in episodes_subtitles: - sleep() missing_subtitles_text = '[]' if episode_subtitles['profileId']: # get desired subtitles @@ -348,7 +358,6 @@ def list_missing_subtitles_movies(no=None, send_event=True): use_embedded_subs = settings.general.getboolean('use_embedded_subs') for movie_subtitles in movies_subtitles: - sleep() missing_subtitles_text = '[]' if movie_subtitles['profileId']: # get desired subtitles @@ -416,7 +425,7 @@ def list_missing_subtitles_movies(no=None, send_event=True): # remove missing that have forced or hi subtitles for this language in existing for item in actual_subtitles_list: - if item[1] == 'True' or item[2] == 'True': + if item[2] == 'True': try: missing_subtitles_list.remove([item[0], 'False', 'False']) except ValueError: @@ -450,7 +459,6 @@ def series_full_scan_subtitles(): count_episodes = len(episodes) for i, episode in enumerate(episodes): - sleep() show_progress(id='episodes_disk_scan', header='Full disk scan...', name='Episodes subtitles', @@ -470,7 +478,6 @@ def movies_full_scan_subtitles(): count_movies = len(movies) for i, movie in enumerate(movies): - sleep() show_progress(id='movies_disk_scan', header='Full disk scan...', name='Movies subtitles', @@ -491,7 +498,6 @@ def series_scan_subtitles(no): .dicts() for episode in episodes: - sleep() 
store_subtitles(episode['path'], path_mappings.path_replace(episode['path']), use_cache=False) @@ -502,7 +508,6 @@ def movies_scan_subtitles(no): .dicts() for movie in movies: - sleep() store_subtitles_movie(movie['path'], path_mappings.path_replace_movie(movie['path']), use_cache=False) diff --git a/bazarr/logger.py b/bazarr/logger.py index 44812a90f..7b1e09683 100644 --- a/bazarr/logger.py +++ b/bazarr/logger.py @@ -117,10 +117,8 @@ def configure_logging(debug=False): logging.getLogger("srt").setLevel(logging.ERROR) logging.getLogger("SignalRCoreClient").setLevel(logging.CRITICAL) logging.getLogger("websocket").setLevel(logging.CRITICAL) - logging.getLogger("geventwebsocket.handler").setLevel(logging.WARNING) - logging.getLogger("geventwebsocket.handler").setLevel(logging.WARNING) - logging.getLogger("engineio.server").setLevel(logging.WARNING) + logging.getLogger("waitress").setLevel(logging.ERROR) logging.getLogger("knowit").setLevel(logging.CRITICAL) logging.getLogger("enzyme").setLevel(logging.CRITICAL) logging.getLogger("guessit").setLevel(logging.WARNING) diff --git a/bazarr/main.py b/bazarr/main.py index 92b077817..9b84ed0c8 100644 --- a/bazarr/main.py +++ b/bazarr/main.py @@ -1,13 +1,5 @@ # coding=utf-8 -# Gevent monkey patch if gevent available. If not, it will be installed on during the init process. -try: - from gevent import monkey, Greenlet, joinall -except ImportError: - pass -else: - monkey.patch_all() - import os bazarr_version = 'unknown' @@ -34,6 +26,7 @@ from urllib.parse import unquote from get_languages import load_language_in_db from flask import make_response, request, redirect, abort, render_template, Response, session, flash, url_for, \ send_file, stream_with_context +from threading import Thread from get_series import * from get_episodes import * @@ -202,11 +195,10 @@ def proxy(protocol, url): return dict(status=False, error=result.raise_for_status()) -greenlets = [] if settings.general.getboolean('use_sonarr'): - greenlets.append(Greenlet.spawn(sonarr_signalr_client.start)) + Thread(target=sonarr_signalr_client.start).start() if settings.general.getboolean('use_radarr'): - greenlets.append(Greenlet.spawn(radarr_signalr_client.start)) + Thread(target=radarr_signalr_client.start).start() if __name__ == "__main__": diff --git a/bazarr/scheduler.py b/bazarr/scheduler.py index 36d18191d..8cde8262c 100644 --- a/bazarr/scheduler.py +++ b/bazarr/scheduler.py @@ -12,7 +12,7 @@ if not args.no_update: from check_update import check_if_new_update, check_releases else: from check_update import check_releases -from apscheduler.schedulers.gevent import GeventScheduler +from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.interval import IntervalTrigger from apscheduler.triggers.cron import CronTrigger from apscheduler.triggers.date import DateTrigger @@ -30,7 +30,7 @@ class Scheduler: def __init__(self): self.__running_tasks = [] - self.aps_scheduler = GeventScheduler() + self.aps_scheduler = BackgroundScheduler() # task listener def task_listener_add(event): diff --git a/bazarr/server.py b/bazarr/server.py index 1a9053ee2..b414d8ee6 100644 --- a/bazarr/server.py +++ b/bazarr/server.py @@ -4,8 +4,7 @@ import warnings import logging import os import io -from gevent import pywsgi -from geventwebsocket.handler import WebSocketHandler +from waitress.server import create_server from get_args import args from config import settings, base_url @@ -27,23 +26,23 @@ class Server: # Mute Python3 BrokenPipeError warnings.simplefilter("ignore", 
BrokenPipeError) - self.server = pywsgi.WSGIServer((str(settings.general.ip), - int(args.port) if args.port else int(settings.general.port)), - app, - handler_class=WebSocketHandler) + self.server = create_server(app, + host=str(settings.general.ip), + port=int(args.port) if args.port else int(settings.general.port), + threads=100) def start(self): try: logging.info( 'BAZARR is started and waiting for request on http://' + str(settings.general.ip) + ':' + (str( args.port) if args.port else str(settings.general.port)) + str(base_url)) - self.server.serve_forever() + self.server.run() except KeyboardInterrupt: self.shutdown() def shutdown(self): try: - self.server.stop() + self.server.close() except Exception as e: logging.error('BAZARR Cannot stop Waitress: ' + repr(e)) else: @@ -60,7 +59,7 @@ class Server: def restart(self): try: - self.server.stop() + self.server.close() except Exception as e: logging.error('BAZARR Cannot stop Waitress: ' + repr(e)) else: diff --git a/bazarr/signalr_client.py b/bazarr/signalr_client.py index 9968bacfb..f8c3c7e8f 100644 --- a/bazarr/signalr_client.py +++ b/bazarr/signalr_client.py @@ -2,9 +2,9 @@ import logging -import gevent import json import os +import time from requests import Session from signalr import Connection from requests.exceptions import ConnectionError @@ -36,7 +36,6 @@ class SonarrSignalrClient: if get_sonarr_info.is_legacy(): logging.warning('BAZARR can only sync from Sonarr v3 SignalR feed to get real-time update. You should ' 'consider upgrading your version({}).'.format(get_sonarr_info.version())) - raise gevent.GreenletExit else: logging.info('BAZARR trying to connect to Sonarr SignalR feed...') self.configure() @@ -44,14 +43,13 @@ class SonarrSignalrClient: try: self.connection.start() except ConnectionError: - gevent.sleep(5) + time.sleep(5) except json.decoder.JSONDecodeError: logging.error("BAZARR cannot parse JSON returned by SignalR feed. This is caused by a permissions " "issue when Sonarr try to access its /config/.config directory. You should fix " "permissions on that directory and restart Sonarr. Also, if you're a Docker image " "user, you should make sure you properly defined PUID/PGID environment variables. 
" "Otherwise, please contact Sonarr support.") - raise gevent.GreenletExit else: logging.info('BAZARR SignalR client for Sonarr is connected and waiting for events.') finally: @@ -107,7 +105,7 @@ class RadarrSignalrClient: try: self.connection.start() except ConnectionError: - gevent.sleep(5) + time.sleep(5) def stop(self): logging.info('BAZARR SignalR client for Radarr is now disconnected.') diff --git a/bazarr/utils.py b/bazarr/utils.py index a0f68bcff..dfb1c7799 100644 --- a/bazarr/utils.py +++ b/bazarr/utils.py @@ -315,8 +315,10 @@ class GetRadarrInfo: if 'version' in radarr_json: radarr_version = radarr_json['version'] else: - rv = url_radarr() + "/api/v3/system/status?apikey=" + settings.radarr.apikey - radarr_version = requests.get(rv, timeout=60, verify=False, headers=headers).json()['version'] + raise json.decoder.JSONDecodeError + except json.decoder.JSONDecodeError: + rv = url_radarr() + "/api/v3/system/status?apikey=" + settings.radarr.apikey + radarr_version = requests.get(rv, timeout=60, verify=False, headers=headers).json()['version'] except Exception as e: logging.debug('BAZARR cannot get Radarr version') radarr_version = 'unknown' @@ -384,6 +386,7 @@ def delete_subtitles(media_type, language, forced, hi, media_path, subtitles_pat subtitles_path=path_mappings.path_replace_reverse(subtitles_path)) store_subtitles(path_mappings.path_replace_reverse(media_path), media_path) notify_sonarr(sonarr_series_id) + event_stream(type='series', action='update', payload=sonarr_series_id) event_stream(type='episode-wanted', action='update', payload=sonarr_episode_id) return True else: diff --git a/frontend/package-lock.json b/frontend/package-lock.json index f50bb3f88..879175837 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -9,16 +9,18 @@ "version": "1.0.0", "license": "GPL-3", "dependencies": { - "@fontsource/roboto": "^4.2.2", + "@fontsource/roboto": "^4.5.1", "@fortawesome/fontawesome-svg-core": "^1.2", "@fortawesome/free-brands-svg-icons": "^5.15", "@fortawesome/free-regular-svg-icons": "^5.15", "@fortawesome/free-solid-svg-icons": "^5.15", "@fortawesome/react-fontawesome": "^0.1.11", "@reduxjs/toolkit": "^1.6", - "axios": "^0.21", + "axios": "^0.23", "bootstrap": "^4", "lodash": "^4", + "moment": "^2.29.1", + "package.json": "^2.0.1", "rc-slider": "^9.7", "react": "^17", "react-bootstrap": "^1", @@ -27,7 +29,7 @@ "react-redux": "^7.2", "react-router-dom": "^5.3", "react-scripts": "^4", - "react-select": "^4", + "react-select": "^5.0.1", "react-table": "^7", "recharts": "^2.0.8", "rooks": "^5.7.1", @@ -44,7 +46,7 @@ "@types/react-dom": "^17", "@types/react-helmet": "^6.1", "@types/react-router-dom": "^5", - "@types/react-select": "^4.0.3", + "@types/react-select": "^5.0.1", "@types/react-table": "^7", "http-proxy-middleware": "^2", "husky": "^7", @@ -2083,9 +2085,9 @@ } }, "node_modules/@fontsource/roboto": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/@fontsource/roboto/-/roboto-4.5.0.tgz", - "integrity": "sha512-ja4XYw/9kNRFM5Ndk9vwzHWsdBMXczyBazFkTXJQ74QQBnT0BbSsHn0pF60AU0Iznig1Wt9x3rADfG8LANvMpw==" + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/@fontsource/roboto/-/roboto-4.5.1.tgz", + "integrity": "sha512-3mhfL+eNPG/woMNqwD/OHaW5qMpeGEBsDwzmhFmjB1yUV+M+M9P0NhP/AyHvnGz3DrqkvZ7CPzNMa+UkVLeELg==" }, "node_modules/@fortawesome/fontawesome-common-types": { "version": "0.2.36", @@ -3668,15 +3670,13 @@ } }, "node_modules/@types/react-select": { - "version": "4.0.17", - "resolved": 
"https://registry.npmjs.org/@types/react-select/-/react-select-4.0.17.tgz", - "integrity": "sha512-ZK5wcBhJaqC8ntQl0CJvK2KXNNsk1k5flM7jO+vNPPlceRzdJQazA6zTtQUyNr6exp5yrAiwiudtYxgGlgGHLg==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@types/react-select/-/react-select-5.0.1.tgz", + "integrity": "sha512-h5Im0AP0dr4AVeHtrcvQrLV+gmPa7SA0AGdxl2jOhtwiE6KgXBFSogWw8az32/nusE6AQHlCOHQWjP1S/+oMWA==", + "deprecated": "This is a stub types definition. react-select provides its own type definitions, so you do not need this installed.", "dev": true, "dependencies": { - "@emotion/serialize": "^1.0.0", - "@types/react": "*", - "@types/react-dom": "*", - "@types/react-transition-group": "*" + "react-select": "*" } }, "node_modules/@types/react-table": { @@ -4220,6 +4220,14 @@ "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.5.tgz", "integrity": "sha512-9IK9EadsbHo6jLWIpxpR6pL0sazTXV6+SQv25ZB+F7Bj9mJNaOc4nCRabwd5M/JwmUa8idz6Eci6eKfJryPs6Q==" }, + "node_modules/abs": { + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/abs/-/abs-1.3.14.tgz", + "integrity": "sha512-PrS26IzwKLWwuURpiKl8wRmJ2KdR/azaVrLEBWG/TALwT20Y7qjtYp1qcMLHA4206hBHY5phv3w4pjf9NPv4Vw==", + "dependencies": { + "ul": "^5.0.0" + } + }, "node_modules/accepts": { "version": "1.3.7", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", @@ -4722,11 +4730,11 @@ } }, "node_modules/axios": { - "version": "0.21.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.23.0.tgz", + "integrity": "sha512-NmvAE4i0YAv5cKq8zlDoPd1VLKAqX5oLuZKs8xkJa4qi6RGn0uhCYFjWtHHC9EM/MwOwYWOs53W+V0aqEXq1sg==", "dependencies": { - "follow-redirects": "^1.10.0" + "follow-redirects": "^1.14.4" } }, "node_modules/axobject-query": { @@ -5883,6 +5891,14 @@ "node": "6.* || 8.* || >= 10.*" } }, + "node_modules/capture-stack-trace": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/capture-stack-trace/-/capture-stack-trace-1.0.1.tgz", + "integrity": "sha512-mYQLZnx5Qt1JgB1WEiMCf2647plpGeQ2NMR/5L0HNZzGQo4fuSPnK+wjfPnKZV0aiJDgzmWqqkV/g7JD+DW0qw==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/case-sensitive-paths-webpack-plugin": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.3.0.tgz", @@ -6564,6 +6580,17 @@ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" }, + "node_modules/create-error-class": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/create-error-class/-/create-error-class-3.0.2.tgz", + "integrity": "sha1-Br56vvlHo/FKMP1hBnHUAbyot7Y=", + "dependencies": { + "capture-stack-trace": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/create-hash": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", @@ -7189,6 +7216,14 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, "node_modules/deep-is": { 
"version": "0.1.3", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", @@ -7322,6 +7357,14 @@ "which": "bin/which" } }, + "node_modules/deffy": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/deffy/-/deffy-2.2.4.tgz", + "integrity": "sha512-pLc9lsbsWjr6RxmJ2OLyvm+9l4j1yK69h+TML/gUit/t3vTijpkNGh8LioaJYTGO7F25m6HZndADcUOo2PsiUg==", + "dependencies": { + "typpy": "^2.0.0" + } + }, "node_modules/define-properties": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", @@ -7696,6 +7739,14 @@ "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" }, + "node_modules/duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "dependencies": { + "readable-stream": "^2.0.2" + } + }, "node_modules/duplexify": { "version": "3.7.1", "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", @@ -7877,6 +7928,14 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/err": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/err/-/err-1.1.1.tgz", + "integrity": "sha1-65KOLhGjFmSPeCgz0PlyWLpDwvg=", + "dependencies": { + "typpy": "^2.2.0" + } + }, "node_modules/errno": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", @@ -9019,6 +9078,15 @@ "safe-buffer": "^5.1.1" } }, + "node_modules/exec-limiter": { + "version": "3.2.13", + "resolved": "https://registry.npmjs.org/exec-limiter/-/exec-limiter-3.2.13.tgz", + "integrity": "sha512-86Ri699bwiHZVBzTzNj8gspqAhCPchg70zPVWIh3qzUOA1pUMcb272Em3LPk8AE0mS95B9yMJhtqF8vFJAn0dA==", + "dependencies": { + "limit-it": "^3.0.0", + "typpy": "^2.1.0" + } + }, "node_modules/exec-sh": { "version": "0.3.6", "resolved": "https://registry.npmjs.org/exec-sh/-/exec-sh-0.3.6.tgz", @@ -9648,9 +9716,9 @@ } }, "node_modules/follow-redirects": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.1.tgz", - "integrity": "sha512-HWqDgT7ZEkqRzBvc2s64vSZ/hfOceEol3ac/7tKwzuvEyWx3/4UegXh5oBOIotkGsObyk3xznnSRVADBgWSQVg==", + "version": "1.14.4", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.4.tgz", + "integrity": "sha512-zwGkiSXC1MUJG/qmeIFH2HBJx9u0V46QGUe3YR1fXG8bXQxq7fLj0RjLZQ5nubr9qNJUZrH+xUcwXEoXNpfS+g==", "funding": [ { "type": "individual", @@ -9872,6 +9940,14 @@ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, + "node_modules/function.name": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/function.name/-/function.name-1.0.13.tgz", + "integrity": "sha512-mVrqdoy5npWZyoXl4DxCeuVF6delDcQjVS9aPdvLYlBxtMTZDR2B5GVEQEoM1jJyspCqg3C0v4ABkLE7tp9xFA==", + "dependencies": { + "noop6": "^1.0.1" + } + }, "node_modules/functional-red-black-tree": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", @@ -9941,6 +10017,47 @@ "node": ">=0.10.0" } }, + "node_modules/git-package-json": { + "version": "1.4.10", + "resolved": "https://registry.npmjs.org/git-package-json/-/git-package-json-1.4.10.tgz", + "integrity": 
"sha512-DRAcvbzd2SxGK7w8OgYfvKqhFliT5keX0lmSmVdgScgf1kkl5tbbo7Pam6uYoCa1liOiipKxQZG8quCtGWl/fA==", + "dependencies": { + "deffy": "^2.2.1", + "err": "^1.1.1", + "gry": "^5.0.0", + "normalize-package-data": "^2.3.5", + "oargv": "^3.4.1", + "one-by-one": "^3.1.0", + "r-json": "^1.2.1", + "r-package-json": "^1.0.0", + "tmp": "0.0.28" + } + }, + "node_modules/git-source": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/git-source/-/git-source-1.1.10.tgz", + "integrity": "sha512-XZZ7ZgnLL35oLgM/xjnLYgtlKlxJG0FohC1kWDvGkU7s1VKGXK0pFF/g1itQEwQ3D+uTQzBnzPi8XbqOv7Wc1Q==", + "dependencies": { + "git-url-parse": "^5.0.1" + } + }, + "node_modules/git-up": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/git-up/-/git-up-1.2.1.tgz", + "integrity": "sha1-JkSAoAax2EJhrB/gmjpRacV+oZ0=", + "dependencies": { + "is-ssh": "^1.0.0", + "parse-url": "^1.0.0" + } + }, + "node_modules/git-url-parse": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/git-url-parse/-/git-url-parse-5.0.1.tgz", + "integrity": "sha1-/j15xnRq4FBIz6UIyB553du6OEM=", + "dependencies": { + "git-up": "^1.0.0" + } + }, "node_modules/glob": { "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", @@ -10033,6 +10150,51 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/got": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/got/-/got-5.6.0.tgz", + "integrity": "sha1-ux1+4WO3gIK7yOuDbz85UATqb78=", + "dependencies": { + "create-error-class": "^3.0.1", + "duplexer2": "^0.1.4", + "is-plain-obj": "^1.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "node-status-codes": "^1.0.0", + "object-assign": "^4.0.1", + "parse-json": "^2.1.0", + "pinkie-promise": "^2.0.0", + "read-all-stream": "^3.0.0", + "readable-stream": "^2.0.5", + "timed-out": "^2.0.0", + "unzip-response": "^1.0.0", + "url-parse-lax": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/got/node_modules/is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/got/node_modules/parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dependencies": { + "error-ex": "^1.2.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/graceful-fs": { "version": "4.2.8", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz", @@ -10044,6 +10206,17 @@ "integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=", "optional": true }, + "node_modules/gry": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/gry/-/gry-5.0.8.tgz", + "integrity": "sha512-meq9ZjYVpLzZh3ojhTg7IMad9grGsx6rUUKHLqPnhLXzJkRQvEL2U3tQpS5/WentYTtHtxkT3Ew/mb10D6F6/g==", + "dependencies": { + "abs": "^1.2.1", + "exec-limiter": "^3.0.0", + "one-by-one": "^3.0.0", + "ul": "^5.0.0" + } + }, "node_modules/gzip-size": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz", @@ -11261,6 +11434,14 @@ "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==" }, + "node_modules/is-redirect": { + "version": "1.0.0", + 
"resolved": "https://registry.npmjs.org/is-redirect/-/is-redirect-1.0.0.tgz", + "integrity": "sha1-HQPd7VO9jbDzDCbk+V02/HyH3CQ=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-regex": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", @@ -11289,6 +11470,14 @@ "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" }, + "node_modules/is-retry-allowed": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", + "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-root": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", @@ -11297,6 +11486,14 @@ "node": ">=6" } }, + "node_modules/is-ssh": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/is-ssh/-/is-ssh-1.3.3.tgz", + "integrity": "sha512-NKzJmQzJfEEma3w5cJNcUMxoXfDjz0Zj0eyCalHn2E6VOwlzjZo0yuO2fcBSf8zhFuVCL/82/r5gRcoi6aEPVQ==", + "dependencies": { + "protocols": "^1.1.0" + } + }, "node_modules/is-stream": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", @@ -11497,6 +11694,11 @@ "node": ">=8" } }, + "node_modules/iterate-object": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/iterate-object/-/iterate-object-1.3.4.tgz", + "integrity": "sha512-4dG1D1x/7g8PwHS9aK6QV5V94+ZvyP4+d19qDv43EzImmrndysIl4prmJ1hWWIGCqrZHyaHBm6BSEWHOLnpoNw==" + }, "node_modules/jest": { "version": "26.6.0", "resolved": "https://registry.npmjs.org/jest/-/jest-26.6.0.tgz", @@ -13211,6 +13413,14 @@ "node": ">= 0.8.0" } }, + "node_modules/limit-it": { + "version": "3.2.10", + "resolved": "https://registry.npmjs.org/limit-it/-/limit-it-3.2.10.tgz", + "integrity": "sha512-T0NK99pHnkimldr1WUqvbGV1oWDku/xC9J/OqzJFsV1jeOS6Bwl8W7vkeQIBqwiON9dTALws+rX/XPMQqWerDQ==", + "dependencies": { + "typpy": "^2.0.0" + } + }, "node_modules/lines-and-columns": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", @@ -13372,6 +13582,14 @@ "tslib": "^2.0.3" } }, + "node_modules/lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", @@ -13780,6 +13998,14 @@ "mkdirp": "bin/cmd.js" } }, + "node_modules/moment": { + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz", + "integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==", + "engines": { + "node": "*" + } + }, "node_modules/move-concurrently": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", @@ -14009,6 +14235,19 @@ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.73.tgz", "integrity": "sha512-uW7fodD6pyW2FZNZnp/Z3hvWKeEW1Y8R1+1CnErE8cXFXzl5blBOoVB41CvMer6P6Q0S5FXDwcHgFd1Wj0U9zg==" }, + "node_modules/node-status-codes": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/node-status-codes/-/node-status-codes-1.0.0.tgz", + "integrity": "sha1-WuVUHQJGRdMqWPzdyc7s6nrjrC8=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/noop6": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/noop6/-/noop6-1.0.9.tgz", + "integrity": "sha512-DB3Hwyd89dPr5HqEPg3YHjzvwh/mCqizC1zZ8vyofqc+TQRyPDnT4wgXXbLGF4z9YAzwwTLi8pNLhGqcbSjgkA==" + }, "node_modules/normalize-package-data": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", @@ -14090,6 +14329,23 @@ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.0.tgz", "integrity": "sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==" }, + "node_modules/oargv": { + "version": "3.4.10", + "resolved": "https://registry.npmjs.org/oargv/-/oargv-3.4.10.tgz", + "integrity": "sha512-SXaMANv9sr7S/dP0vj0+Ybipa47UE1ntTWQ2rpPRhC6Bsvfl+Jg03Xif7jfL0sWKOYWK8oPjcZ5eJ82t8AP/8g==", + "dependencies": { + "iterate-object": "^1.1.0", + "ul": "^5.0.0" + } + }, + "node_modules/obj-def": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/obj-def/-/obj-def-1.0.9.tgz", + "integrity": "sha512-bQ4ya3VYD6FAA1+s6mEhaURRHSmw4+sKaXE6UyXZ1XDYc5D+c7look25dFdydmLd18epUegh398gdDkMUZI9xg==", + "dependencies": { + "deffy": "^2.2.2" + } + }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -14340,6 +14596,15 @@ "wrappy": "1" } }, + "node_modules/one-by-one": { + "version": "3.2.8", + "resolved": "https://registry.npmjs.org/one-by-one/-/one-by-one-3.2.8.tgz", + "integrity": "sha512-HR/pSzZdm46Xqj58K+Bu64kMbSTw8/u77AwWvV+rprO/OsuR++pPlkUJn+SmwqBGRgHKwSKQ974V3uls7crIeQ==", + "dependencies": { + "obj-def": "^1.0.0", + "sliced": "^1.0.1" + } + }, "node_modules/onetime": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", @@ -14429,6 +14694,14 @@ "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/p-each-series": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-2.2.0.tgz", @@ -14495,6 +14768,47 @@ "node": ">=6" } }, + "node_modules/package-json": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-2.4.0.tgz", + "integrity": "sha1-DRW9Z9HLvduyyiIv8u24a8sxqLs=", + "dependencies": { + "got": "^5.0.0", + "registry-auth-token": "^3.0.1", + "registry-url": "^3.0.3", + "semver": "^5.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/package-json-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/package-json-path/-/package-json-path-1.0.9.tgz", + "integrity": "sha512-uNu7f6Ef7tQHZRnkyVnCtzdSYVN9uBtge/sG7wzcUaawFWkPYUq67iXxRGrQSg/q0tzxIB8jSyIYUKjG2Jn//A==", + "dependencies": { + "abs": "^1.2.1" + } + }, + "node_modules/package-json/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/package.json": 
{ + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/package.json/-/package.json-2.0.1.tgz", + "integrity": "sha1-+IYFnSpJ7QduZIg2ldc7K0bSHW0=", + "deprecated": "Use pkg.json instead.", + "dependencies": { + "git-package-json": "^1.4.0", + "git-source": "^1.1.0", + "package-json": "^2.3.1" + } + }, "node_modules/pako": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", @@ -14559,6 +14873,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/parse-url": { + "version": "1.3.11", + "resolved": "https://registry.npmjs.org/parse-url/-/parse-url-1.3.11.tgz", + "integrity": "sha1-V8FUKKuKiSsfQ4aWRccR0OFEtVQ=", + "dependencies": { + "is-ssh": "^1.3.0", + "protocols": "^1.4.0" + } + }, "node_modules/parse5": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", @@ -16138,9 +16461,9 @@ } }, "node_modules/prettier": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.3.2.tgz", - "integrity": "sha512-lnJzDfJ66zkMy58OL5/NY5zp70S7Nz6KqcKkXYzn2tMVrNxvbqaBpg7H3qHaLxCJ5lNMsGuM8+ohS7cZrthdLQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.4.1.tgz", + "integrity": "sha512-9fbDAXSBcc6Bs1mZrDYb3XKzDLm4EXXL9sC1LqKP5rZkT6KRr/rf9amVUcODVXgguK/isJz0d0hP72WeaKWsvA==", "dev": true, "bin": { "prettier": "bin-prettier.js" @@ -16354,6 +16677,11 @@ "react": ">=0.14.0" } }, + "node_modules/protocols": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/protocols/-/protocols-1.4.8.tgz", + "integrity": "sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg==" + }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -16500,6 +16828,20 @@ } ] }, + "node_modules/r-json": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/r-json/-/r-json-1.2.10.tgz", + "integrity": "sha512-hu9vyLjSlHXT62NAS7DjI9WazDlvjN0lgp3n431dCVnirVcLkZIpzSwA3orhZEKzdDD2jqNYI+w0yG0aFf4kpA==" + }, + "node_modules/r-package-json": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/r-package-json/-/r-package-json-1.0.9.tgz", + "integrity": "sha512-G4Vpf1KImWmmPFGdtWQTU0L9zk0SjqEC4qs/jE7AQ+Ylmr5kizMzGeC4wnHp5+ijPqNN+2ZPpvyjVNdN1CDVcg==", + "dependencies": { + "package-json-path": "^1.0.0", + "r-json": "^1.2.1" + } + }, "node_modules/raf": { "version": "3.4.1", "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz", @@ -16555,6 +16897,20 @@ "node": ">= 0.8" } }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, "node_modules/rc-align": { "version": "4.0.9", "resolved": "https://registry.npmjs.org/rc-align/-/rc-align-4.0.9.tgz", @@ -16650,6 +17006,14 @@ "react-dom": ">=16.9.0" } }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/react": { "version": "17.0.2", "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", @@ -17007,17 +17371,6 @@ 
"react": ">=16.3.0" } }, - "node_modules/react-input-autosize": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/react-input-autosize/-/react-input-autosize-3.0.0.tgz", - "integrity": "sha512-nL9uS7jEs/zu8sqwFE5MAPx6pPkNAriACQ2rGLlqmKr2sPGtN7TXTyDdQt4lbNXVx7Uzadb40x8qotIuru6Rhg==", - "dependencies": { - "prop-types": "^15.5.8" - }, - "peerDependencies": { - "react": "^16.3.0 || ^17.0.0" - } - }, "node_modules/react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", @@ -17274,16 +17627,16 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "node_modules/react-select": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/react-select/-/react-select-4.3.1.tgz", - "integrity": "sha512-HBBd0dYwkF5aZk1zP81Wx5UsLIIT2lSvAY2JiJo199LjoLHoivjn9//KsmvQMEFGNhe58xyuOITjfxKCcGc62Q==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.2.0.tgz", + "integrity": "sha512-JLH2/ei/m1an0Vu4ERMKvGgDB6yHLsgxltho2QennOLTq4Fx35GwGwh+2MFnz5EWgslX6G9rjclUTZWAA2DuBg==", "dependencies": { "@babel/runtime": "^7.12.0", "@emotion/cache": "^11.4.0", "@emotion/react": "^11.1.1", + "@types/react-transition-group": "^4.4.0", "memoize-one": "^5.0.0", "prop-types": "^15.6.0", - "react-input-autosize": "^3.0.0", "react-transition-group": "^4.3.0" }, "peerDependencies": { @@ -17364,6 +17717,18 @@ "react-dom": ">=16.6.0" } }, + "node_modules/read-all-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/read-all-stream/-/read-all-stream-3.1.0.tgz", + "integrity": "sha1-NcPhd/IHjveJ7kv6+kNzB06u9Po=", + "dependencies": { + "pinkie-promise": "^2.0.0", + "readable-stream": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/read-pkg": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", @@ -17670,6 +18035,26 @@ "node": ">=4" } }, + "node_modules/registry-auth-token": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.4.0.tgz", + "integrity": "sha512-4LM6Fw8eBQdwMYcES4yTnn2TqIasbXuwDx3um+QRs7S55aMKCBKBxvPXl2RiUjHwuJLTyYfxSpmfSAjQpcuP+A==", + "dependencies": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/registry-url": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz", + "integrity": "sha1-PU74cPc93h138M+aOBQyRE4XSUI=", + "dependencies": { + "rc": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/regjsgen": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz", @@ -18694,6 +19079,11 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, + "node_modules/sliced": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz", + "integrity": "sha1-CzpmK10Ewxd7GSa+qCsD+Dei70E=" + }, "node_modules/snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", @@ -20106,6 +20496,14 @@ "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" }, + "node_modules/timed-out": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-2.0.0.tgz", + "integrity": "sha1-84sK6B03R9YoAB9B2vxlKs5nHAo=", + "engines": { + "node": ">=0.10.0" + } + }, 
"node_modules/timers-browserify": { "version": "2.0.12", "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", @@ -20132,6 +20530,17 @@ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" }, + "node_modules/tmp": { + "version": "0.0.28", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.28.tgz", + "integrity": "sha1-Fyc1t/YU6nrzlmT6hM8N5OUV0SA=", + "dependencies": { + "os-tmpdir": "~1.0.1" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/tmpl": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.4.tgz", @@ -20375,6 +20784,23 @@ "node": ">=4.2.0" } }, + "node_modules/typpy": { + "version": "2.3.13", + "resolved": "https://registry.npmjs.org/typpy/-/typpy-2.3.13.tgz", + "integrity": "sha512-vOxIcQz9sxHi+rT09SJ5aDgVgrPppQjwnnayTrMye1ODaU8gIZTDM19t9TxmEElbMihx2Nq/0/b/MtyKfayRqA==", + "dependencies": { + "function.name": "^1.0.3" + } + }, + "node_modules/ul": { + "version": "5.2.15", + "resolved": "https://registry.npmjs.org/ul/-/ul-5.2.15.tgz", + "integrity": "sha512-svLEUy8xSCip5IWnsRa0UOg+2zP0Wsj4qlbjTmX6GJSmvKMHADBuHOm1dpNkWqWPIGuVSqzUkV3Cris5JrlTRQ==", + "dependencies": { + "deffy": "^2.2.2", + "typpy": "^2.3.4" + } + }, "node_modules/unbox-primitive": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", @@ -20568,6 +20994,14 @@ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" }, + "node_modules/unzip-response": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/unzip-response/-/unzip-response-1.0.2.tgz", + "integrity": "sha1-uYTwh3/AqJwsdzzB73tbIytbBv4=", + "engines": { + "node": ">=0.10" + } + }, "node_modules/upath": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", @@ -20652,6 +21086,17 @@ "requires-port": "^1.0.0" } }, + "node_modules/url-parse-lax": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz", + "integrity": "sha1-evjzA2Rem9eaJy56FKxovAYJ2nM=", + "dependencies": { + "prepend-http": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/url/node_modules/punycode": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", @@ -23760,9 +24205,9 @@ } }, "@fontsource/roboto": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/@fontsource/roboto/-/roboto-4.5.0.tgz", - "integrity": "sha512-ja4XYw/9kNRFM5Ndk9vwzHWsdBMXczyBazFkTXJQ74QQBnT0BbSsHn0pF60AU0Iznig1Wt9x3rADfG8LANvMpw==" + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/@fontsource/roboto/-/roboto-4.5.1.tgz", + "integrity": "sha512-3mhfL+eNPG/woMNqwD/OHaW5qMpeGEBsDwzmhFmjB1yUV+M+M9P0NhP/AyHvnGz3DrqkvZ7CPzNMa+UkVLeELg==" }, "@fortawesome/fontawesome-common-types": { "version": "0.2.36", @@ -24958,15 +25403,12 @@ } }, "@types/react-select": { - "version": "4.0.17", - "resolved": "https://registry.npmjs.org/@types/react-select/-/react-select-4.0.17.tgz", - "integrity": "sha512-ZK5wcBhJaqC8ntQl0CJvK2KXNNsk1k5flM7jO+vNPPlceRzdJQazA6zTtQUyNr6exp5yrAiwiudtYxgGlgGHLg==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@types/react-select/-/react-select-5.0.1.tgz", + "integrity": "sha512-h5Im0AP0dr4AVeHtrcvQrLV+gmPa7SA0AGdxl2jOhtwiE6KgXBFSogWw8az32/nusE6AQHlCOHQWjP1S/+oMWA==", "dev": true, 
"requires": { - "@emotion/serialize": "^1.0.0", - "@types/react": "*", - "@types/react-dom": "*", - "@types/react-transition-group": "*" + "react-select": "*" } }, "@types/react-table": { @@ -25403,6 +25845,14 @@ "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.5.tgz", "integrity": "sha512-9IK9EadsbHo6jLWIpxpR6pL0sazTXV6+SQv25ZB+F7Bj9mJNaOc4nCRabwd5M/JwmUa8idz6Eci6eKfJryPs6Q==" }, + "abs": { + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/abs/-/abs-1.3.14.tgz", + "integrity": "sha512-PrS26IzwKLWwuURpiKl8wRmJ2KdR/azaVrLEBWG/TALwT20Y7qjtYp1qcMLHA4206hBHY5phv3w4pjf9NPv4Vw==", + "requires": { + "ul": "^5.0.0" + } + }, "accepts": { "version": "1.3.7", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", @@ -25772,11 +26222,11 @@ "integrity": "sha512-5LMaDRWm8ZFPAEdzTYmgjjEdj1YnQcpfrVajO/sn/LhbpGp0Y0H64c2hLZI1gRMxfA+w1S71Uc/nHaOXgcCvGg==" }, "axios": { - "version": "0.21.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.23.0.tgz", + "integrity": "sha512-NmvAE4i0YAv5cKq8zlDoPd1VLKAqX5oLuZKs8xkJa4qi6RGn0uhCYFjWtHHC9EM/MwOwYWOs53W+V0aqEXq1sg==", "requires": { - "follow-redirects": "^1.10.0" + "follow-redirects": "^1.14.4" } }, "axobject-query": { @@ -26711,6 +27161,11 @@ "rsvp": "^4.8.4" } }, + "capture-stack-trace": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/capture-stack-trace/-/capture-stack-trace-1.0.1.tgz", + "integrity": "sha512-mYQLZnx5Qt1JgB1WEiMCf2647plpGeQ2NMR/5L0HNZzGQo4fuSPnK+wjfPnKZV0aiJDgzmWqqkV/g7JD+DW0qw==" + }, "case-sensitive-paths-webpack-plugin": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.3.0.tgz", @@ -27260,6 +27715,14 @@ } } }, + "create-error-class": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/create-error-class/-/create-error-class-3.0.2.tgz", + "integrity": "sha1-Br56vvlHo/FKMP1hBnHUAbyot7Y=", + "requires": { + "capture-stack-trace": "^1.0.0" + } + }, "create-hash": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", @@ -27767,6 +28230,11 @@ "regexp.prototype.flags": "^1.2.0" } }, + "deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" + }, "deep-is": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", @@ -27866,6 +28334,14 @@ } } }, + "deffy": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/deffy/-/deffy-2.2.4.tgz", + "integrity": "sha512-pLc9lsbsWjr6RxmJ2OLyvm+9l4j1yK69h+TML/gUit/t3vTijpkNGh8LioaJYTGO7F25m6HZndADcUOo2PsiUg==", + "requires": { + "typpy": "^2.0.0" + } + }, "define-properties": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", @@ -28161,6 +28637,14 @@ "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" }, + "duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha1-ixLauHjA1p4+eJEFFmKjL8a93ME=", + "requires": { + 
"readable-stream": "^2.0.2" + } + }, "duplexify": { "version": "3.7.1", "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", @@ -28309,6 +28793,14 @@ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" }, + "err": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/err/-/err-1.1.1.tgz", + "integrity": "sha1-65KOLhGjFmSPeCgz0PlyWLpDwvg=", + "requires": { + "typpy": "^2.2.0" + } + }, "errno": { "version": "0.1.8", "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", @@ -29113,6 +29605,15 @@ "safe-buffer": "^5.1.1" } }, + "exec-limiter": { + "version": "3.2.13", + "resolved": "https://registry.npmjs.org/exec-limiter/-/exec-limiter-3.2.13.tgz", + "integrity": "sha512-86Ri699bwiHZVBzTzNj8gspqAhCPchg70zPVWIh3qzUOA1pUMcb272Em3LPk8AE0mS95B9yMJhtqF8vFJAn0dA==", + "requires": { + "limit-it": "^3.0.0", + "typpy": "^2.1.0" + } + }, "exec-sh": { "version": "0.3.6", "resolved": "https://registry.npmjs.org/exec-sh/-/exec-sh-0.3.6.tgz", @@ -29619,9 +30120,9 @@ } }, "follow-redirects": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.1.tgz", - "integrity": "sha512-HWqDgT7ZEkqRzBvc2s64vSZ/hfOceEol3ac/7tKwzuvEyWx3/4UegXh5oBOIotkGsObyk3xznnSRVADBgWSQVg==" + "version": "1.14.4", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.4.tgz", + "integrity": "sha512-zwGkiSXC1MUJG/qmeIFH2HBJx9u0V46QGUe3YR1fXG8bXQxq7fLj0RjLZQ5nubr9qNJUZrH+xUcwXEoXNpfS+g==" }, "for-in": { "version": "1.0.2", @@ -29781,6 +30282,14 @@ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, + "function.name": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/function.name/-/function.name-1.0.13.tgz", + "integrity": "sha512-mVrqdoy5npWZyoXl4DxCeuVF6delDcQjVS9aPdvLYlBxtMTZDR2B5GVEQEoM1jJyspCqg3C0v4ABkLE7tp9xFA==", + "requires": { + "noop6": "^1.0.1" + } + }, "functional-red-black-tree": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", @@ -29829,6 +30338,47 @@ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=" }, + "git-package-json": { + "version": "1.4.10", + "resolved": "https://registry.npmjs.org/git-package-json/-/git-package-json-1.4.10.tgz", + "integrity": "sha512-DRAcvbzd2SxGK7w8OgYfvKqhFliT5keX0lmSmVdgScgf1kkl5tbbo7Pam6uYoCa1liOiipKxQZG8quCtGWl/fA==", + "requires": { + "deffy": "^2.2.1", + "err": "^1.1.1", + "gry": "^5.0.0", + "normalize-package-data": "^2.3.5", + "oargv": "^3.4.1", + "one-by-one": "^3.1.0", + "r-json": "^1.2.1", + "r-package-json": "^1.0.0", + "tmp": "0.0.28" + } + }, + "git-source": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/git-source/-/git-source-1.1.10.tgz", + "integrity": "sha512-XZZ7ZgnLL35oLgM/xjnLYgtlKlxJG0FohC1kWDvGkU7s1VKGXK0pFF/g1itQEwQ3D+uTQzBnzPi8XbqOv7Wc1Q==", + "requires": { + "git-url-parse": "^5.0.1" + } + }, + "git-up": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/git-up/-/git-up-1.2.1.tgz", + "integrity": "sha1-JkSAoAax2EJhrB/gmjpRacV+oZ0=", + "requires": { + "is-ssh": "^1.0.0", + "parse-url": "^1.0.0" + } + }, + "git-url-parse": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/git-url-parse/-/git-url-parse-5.0.1.tgz", + "integrity": "sha1-/j15xnRq4FBIz6UIyB553du6OEM=", + "requires": { + "git-up": "^1.0.0" + } + }, "glob": { "version": "7.1.7", "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", @@ -29896,6 +30446,44 @@ "slash": "^3.0.0" } }, + "got": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/got/-/got-5.6.0.tgz", + "integrity": "sha1-ux1+4WO3gIK7yOuDbz85UATqb78=", + "requires": { + "create-error-class": "^3.0.1", + "duplexer2": "^0.1.4", + "is-plain-obj": "^1.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "node-status-codes": "^1.0.0", + "object-assign": "^4.0.1", + "parse-json": "^2.1.0", + "pinkie-promise": "^2.0.0", + "read-all-stream": "^3.0.0", + "readable-stream": "^2.0.5", + "timed-out": "^2.0.0", + "unzip-response": "^1.0.0", + "url-parse-lax": "^1.0.0" + }, + "dependencies": { + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" + }, + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "requires": { + "error-ex": "^1.2.0" + } + } + } + }, "graceful-fs": { "version": "4.2.8", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz", @@ -29907,6 +30495,17 @@ "integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=", "optional": true }, + "gry": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/gry/-/gry-5.0.8.tgz", + "integrity": "sha512-meq9ZjYVpLzZh3ojhTg7IMad9grGsx6rUUKHLqPnhLXzJkRQvEL2U3tQpS5/WentYTtHtxkT3Ew/mb10D6F6/g==", + "requires": { + "abs": "^1.2.1", + "exec-limiter": "^3.0.0", + "one-by-one": "^3.0.0", + "ul": "^5.0.0" + } + }, "gzip-size": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-5.1.1.tgz", @@ -30795,6 +31394,11 @@ "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==" }, + "is-redirect": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-redirect/-/is-redirect-1.0.0.tgz", + "integrity": "sha1-HQPd7VO9jbDzDCbk+V02/HyH3CQ=" + }, "is-regex": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", @@ -30814,11 +31418,24 @@ "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" }, + "is-retry-allowed": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", + "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==" + }, "is-root": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==" }, + "is-ssh": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/is-ssh/-/is-ssh-1.3.3.tgz", + "integrity": "sha512-NKzJmQzJfEEma3w5cJNcUMxoXfDjz0Zj0eyCalHn2E6VOwlzjZo0yuO2fcBSf8zhFuVCL/82/r5gRcoi6aEPVQ==", + "requires": { + "protocols": "^1.1.0" + } + }, "is-stream": { "version": "2.0.1", 
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", @@ -30960,6 +31577,11 @@ "istanbul-lib-report": "^3.0.0" } }, + "iterate-object": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/iterate-object/-/iterate-object-1.3.4.tgz", + "integrity": "sha512-4dG1D1x/7g8PwHS9aK6QV5V94+ZvyP4+d19qDv43EzImmrndysIl4prmJ1hWWIGCqrZHyaHBm6BSEWHOLnpoNw==" + }, "jest": { "version": "26.6.0", "resolved": "https://registry.npmjs.org/jest/-/jest-26.6.0.tgz", @@ -32257,6 +32879,14 @@ "type-check": "~0.4.0" } }, + "limit-it": { + "version": "3.2.10", + "resolved": "https://registry.npmjs.org/limit-it/-/limit-it-3.2.10.tgz", + "integrity": "sha512-T0NK99pHnkimldr1WUqvbGV1oWDku/xC9J/OqzJFsV1jeOS6Bwl8W7vkeQIBqwiON9dTALws+rX/XPMQqWerDQ==", + "requires": { + "typpy": "^2.0.0" + } + }, "lines-and-columns": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", @@ -32392,6 +33022,11 @@ "tslib": "^2.0.3" } }, + "lowercase-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" + }, "lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", @@ -32711,6 +33346,11 @@ "minimist": "^1.2.5" } }, + "moment": { + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz", + "integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==" + }, "move-concurrently": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", @@ -32914,6 +33554,16 @@ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.73.tgz", "integrity": "sha512-uW7fodD6pyW2FZNZnp/Z3hvWKeEW1Y8R1+1CnErE8cXFXzl5blBOoVB41CvMer6P6Q0S5FXDwcHgFd1Wj0U9zg==" }, + "node-status-codes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-status-codes/-/node-status-codes-1.0.0.tgz", + "integrity": "sha1-WuVUHQJGRdMqWPzdyc7s6nrjrC8=" + }, + "noop6": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/noop6/-/noop6-1.0.9.tgz", + "integrity": "sha512-DB3Hwyd89dPr5HqEPg3YHjzvwh/mCqizC1zZ8vyofqc+TQRyPDnT4wgXXbLGF4z9YAzwwTLi8pNLhGqcbSjgkA==" + }, "normalize-package-data": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", @@ -32979,6 +33629,23 @@ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.0.tgz", "integrity": "sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==" }, + "oargv": { + "version": "3.4.10", + "resolved": "https://registry.npmjs.org/oargv/-/oargv-3.4.10.tgz", + "integrity": "sha512-SXaMANv9sr7S/dP0vj0+Ybipa47UE1ntTWQ2rpPRhC6Bsvfl+Jg03Xif7jfL0sWKOYWK8oPjcZ5eJ82t8AP/8g==", + "requires": { + "iterate-object": "^1.1.0", + "ul": "^5.0.0" + } + }, + "obj-def": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/obj-def/-/obj-def-1.0.9.tgz", + "integrity": "sha512-bQ4ya3VYD6FAA1+s6mEhaURRHSmw4+sKaXE6UyXZ1XDYc5D+c7look25dFdydmLd18epUegh398gdDkMUZI9xg==", + "requires": { + "deffy": "^2.2.2" + } + }, "object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -33158,6 +33825,15 @@ "wrappy": "1" } }, + "one-by-one": { + "version": "3.2.8", + "resolved": 
"https://registry.npmjs.org/one-by-one/-/one-by-one-3.2.8.tgz", + "integrity": "sha512-HR/pSzZdm46Xqj58K+Bu64kMbSTw8/u77AwWvV+rprO/OsuR++pPlkUJn+SmwqBGRgHKwSKQ974V3uls7crIeQ==", + "requires": { + "obj-def": "^1.0.0", + "sliced": "^1.0.1" + } + }, "onetime": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", @@ -33225,6 +33901,11 @@ "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=" + }, "p-each-series": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-2.2.0.tgz", @@ -33264,6 +33945,42 @@ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" }, + "package-json": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-2.4.0.tgz", + "integrity": "sha1-DRW9Z9HLvduyyiIv8u24a8sxqLs=", + "requires": { + "got": "^5.0.0", + "registry-auth-token": "^3.0.1", + "registry-url": "^3.0.3", + "semver": "^5.1.0" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } + } + }, + "package-json-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/package-json-path/-/package-json-path-1.0.9.tgz", + "integrity": "sha512-uNu7f6Ef7tQHZRnkyVnCtzdSYVN9uBtge/sG7wzcUaawFWkPYUq67iXxRGrQSg/q0tzxIB8jSyIYUKjG2Jn//A==", + "requires": { + "abs": "^1.2.1" + } + }, + "package.json": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/package.json/-/package.json-2.0.1.tgz", + "integrity": "sha1-+IYFnSpJ7QduZIg2ldc7K0bSHW0=", + "requires": { + "git-package-json": "^1.4.0", + "git-source": "^1.1.0", + "package-json": "^2.3.1" + } + }, "pako": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", @@ -33319,6 +34036,15 @@ "lines-and-columns": "^1.1.6" } }, + "parse-url": { + "version": "1.3.11", + "resolved": "https://registry.npmjs.org/parse-url/-/parse-url-1.3.11.tgz", + "integrity": "sha1-V8FUKKuKiSsfQ4aWRccR0OFEtVQ=", + "requires": { + "is-ssh": "^1.3.0", + "protocols": "^1.4.0" + } + }, "parse5": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", @@ -34596,9 +35322,9 @@ "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=" }, "prettier": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.3.2.tgz", - "integrity": "sha512-lnJzDfJ66zkMy58OL5/NY5zp70S7Nz6KqcKkXYzn2tMVrNxvbqaBpg7H3qHaLxCJ5lNMsGuM8+ohS7cZrthdLQ==", + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.4.1.tgz", + "integrity": "sha512-9fbDAXSBcc6Bs1mZrDYb3XKzDLm4EXXL9sC1LqKP5rZkT6KRr/rf9amVUcODVXgguK/isJz0d0hP72WeaKWsvA==", "dev": true }, "prettier-plugin-organize-imports": { @@ -34759,6 +35485,11 @@ "warning": "^4.0.0" } }, + "protocols": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/protocols/-/protocols-1.4.8.tgz", + "integrity": "sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg==" + }, "proxy-addr": { "version": "2.0.7", "resolved": 
"https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -34872,6 +35603,20 @@ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" }, + "r-json": { + "version": "1.2.10", + "resolved": "https://registry.npmjs.org/r-json/-/r-json-1.2.10.tgz", + "integrity": "sha512-hu9vyLjSlHXT62NAS7DjI9WazDlvjN0lgp3n431dCVnirVcLkZIpzSwA3orhZEKzdDD2jqNYI+w0yG0aFf4kpA==" + }, + "r-package-json": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/r-package-json/-/r-package-json-1.0.9.tgz", + "integrity": "sha512-G4Vpf1KImWmmPFGdtWQTU0L9zk0SjqEC4qs/jE7AQ+Ylmr5kizMzGeC4wnHp5+ijPqNN+2ZPpvyjVNdN1CDVcg==", + "requires": { + "package-json-path": "^1.0.0", + "r-json": "^1.2.1" + } + }, "raf": { "version": "3.4.1", "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz", @@ -34920,6 +35665,24 @@ } } }, + "rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "dependencies": { + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" + } + } + }, "rc-align": { "version": "4.0.9", "resolved": "https://registry.npmjs.org/rc-align/-/rc-align-4.0.9.tgz", @@ -35261,14 +36024,6 @@ "react-side-effect": "^2.1.0" } }, - "react-input-autosize": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/react-input-autosize/-/react-input-autosize-3.0.0.tgz", - "integrity": "sha512-nL9uS7jEs/zu8sqwFE5MAPx6pPkNAriACQ2rGLlqmKr2sPGtN7TXTyDdQt4lbNXVx7Uzadb40x8qotIuru6Rhg==", - "requires": { - "prop-types": "^15.5.8" - } - }, "react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", @@ -35466,16 +36221,16 @@ } }, "react-select": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/react-select/-/react-select-4.3.1.tgz", - "integrity": "sha512-HBBd0dYwkF5aZk1zP81Wx5UsLIIT2lSvAY2JiJo199LjoLHoivjn9//KsmvQMEFGNhe58xyuOITjfxKCcGc62Q==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.2.0.tgz", + "integrity": "sha512-JLH2/ei/m1an0Vu4ERMKvGgDB6yHLsgxltho2QennOLTq4Fx35GwGwh+2MFnz5EWgslX6G9rjclUTZWAA2DuBg==", "requires": { "@babel/runtime": "^7.12.0", "@emotion/cache": "^11.4.0", "@emotion/react": "^11.1.1", + "@types/react-transition-group": "^4.4.0", "memoize-one": "^5.0.0", "prop-types": "^15.6.0", - "react-input-autosize": "^3.0.0", "react-transition-group": "^4.3.0" } }, @@ -35533,6 +36288,15 @@ "prop-types": "^15.6.2" } }, + "read-all-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/read-all-stream/-/read-all-stream-3.1.0.tgz", + "integrity": "sha1-NcPhd/IHjveJ7kv6+kNzB06u9Po=", + "requires": { + "pinkie-promise": "^2.0.0", + "readable-stream": "^2.0.0" + } + }, "read-pkg": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", @@ -35788,6 +36552,23 @@ "unicode-match-property-value-ecmascript": "^1.2.0" } }, + "registry-auth-token": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.4.0.tgz", + "integrity": 
"sha512-4LM6Fw8eBQdwMYcES4yTnn2TqIasbXuwDx3um+QRs7S55aMKCBKBxvPXl2RiUjHwuJLTyYfxSpmfSAjQpcuP+A==", + "requires": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "registry-url": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz", + "integrity": "sha1-PU74cPc93h138M+aOBQyRE4XSUI=", + "requires": { + "rc": "^1.0.1" + } + }, "regjsgen": { "version": "0.5.2", "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz", @@ -36585,6 +37366,11 @@ "is-fullwidth-code-point": "^3.0.0" } }, + "sliced": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz", + "integrity": "sha1-CzpmK10Ewxd7GSa+qCsD+Dei70E=" + }, "snapdragon": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", @@ -37699,6 +38485,11 @@ "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" }, + "timed-out": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-2.0.0.tgz", + "integrity": "sha1-84sK6B03R9YoAB9B2vxlKs5nHAo=" + }, "timers-browserify": { "version": "2.0.12", "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", @@ -37722,6 +38513,14 @@ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" }, + "tmp": { + "version": "0.0.28", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.28.tgz", + "integrity": "sha1-Fyc1t/YU6nrzlmT6hM8N5OUV0SA=", + "requires": { + "os-tmpdir": "~1.0.1" + } + }, "tmpl": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.4.tgz", @@ -37907,6 +38706,23 @@ "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.4.3.tgz", "integrity": "sha512-4xfscpisVgqqDfPaJo5vkd+Qd/ItkoagnHpufr+i2QCHBsNYp+G7UAoyFl8aPtx879u38wPV65rZ8qbGZijalA==" }, + "typpy": { + "version": "2.3.13", + "resolved": "https://registry.npmjs.org/typpy/-/typpy-2.3.13.tgz", + "integrity": "sha512-vOxIcQz9sxHi+rT09SJ5aDgVgrPppQjwnnayTrMye1ODaU8gIZTDM19t9TxmEElbMihx2Nq/0/b/MtyKfayRqA==", + "requires": { + "function.name": "^1.0.3" + } + }, + "ul": { + "version": "5.2.15", + "resolved": "https://registry.npmjs.org/ul/-/ul-5.2.15.tgz", + "integrity": "sha512-svLEUy8xSCip5IWnsRa0UOg+2zP0Wsj4qlbjTmX6GJSmvKMHADBuHOm1dpNkWqWPIGuVSqzUkV3Cris5JrlTRQ==", + "requires": { + "deffy": "^2.2.2", + "typpy": "^2.3.4" + } + }, "unbox-primitive": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", @@ -38061,6 +38877,11 @@ } } }, + "unzip-response": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/unzip-response/-/unzip-response-1.0.2.tgz", + "integrity": "sha1-uYTwh3/AqJwsdzzB73tbIytbBv4=" + }, "upath": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", @@ -38131,6 +38952,14 @@ "requires-port": "^1.0.0" } }, + "url-parse-lax": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz", + "integrity": "sha1-evjzA2Rem9eaJy56FKxovAYJ2nM=", + "requires": { + "prepend-http": "^1.0.1" + } + }, "use": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", diff --git a/frontend/package.json b/frontend/package.json index 6eb2ca608..0ebd83d4a 100644 --- 
a/frontend/package.json +++ b/frontend/package.json @@ -14,16 +14,18 @@ "private": true, "homepage": "./", "dependencies": { - "@fontsource/roboto": "^4.2.2", + "@fontsource/roboto": "^4.5.1", "@fortawesome/fontawesome-svg-core": "^1.2", "@fortawesome/free-brands-svg-icons": "^5.15", "@fortawesome/free-regular-svg-icons": "^5.15", "@fortawesome/free-solid-svg-icons": "^5.15", "@fortawesome/react-fontawesome": "^0.1.11", "@reduxjs/toolkit": "^1.6", - "axios": "^0.21", + "axios": "^0.23", "bootstrap": "^4", "lodash": "^4", + "moment": "^2.29.1", + "package.json": "^2.0.1", "rc-slider": "^9.7", "react": "^17", "react-bootstrap": "^1", @@ -32,7 +34,7 @@ "react-redux": "^7.2", "react-router-dom": "^5.3", "react-scripts": "^4", - "react-select": "^4", + "react-select": "^5.0.1", "react-table": "^7", "recharts": "^2.0.8", "rooks": "^5.7.1", @@ -49,7 +51,7 @@ "@types/react-dom": "^17", "@types/react-helmet": "^6.1", "@types/react-router-dom": "^5", - "@types/react-select": "^4.0.3", + "@types/react-select": "^5.0.1", "@types/react-table": "^7", "http-proxy-middleware": "^2", "husky": "^7", diff --git a/frontend/src/@types/system.d.ts b/frontend/src/@types/system.d.ts index 154bb5b98..737bd63d7 100644 --- a/frontend/src/@types/system.d.ts +++ b/frontend/src/@types/system.d.ts @@ -16,6 +16,7 @@ declare namespace System { python_version: string; radarr_version: string; sonarr_version: string; + start_time: number; } interface Health { diff --git a/frontend/src/DisplayItem/MovieDetail/index.tsx b/frontend/src/DisplayItem/MovieDetail/index.tsx index dbd5041c4..758817d52 100644 --- a/frontend/src/DisplayItem/MovieDetail/index.tsx +++ b/frontend/src/DisplayItem/MovieDetail/index.tsx @@ -94,7 +94,7 @@ const MovieDetailView: FunctionComponent = ({ match }) => { MoviesApi.action.bind(MoviesApi), { action: "scan-disk", radarrid: id } ); - dispatchTask("Scaning Disk...", [task], "Scaning..."); + dispatchTask("Scanning Disk...", [task], "Scanning..."); }} > Scan Disk diff --git a/frontend/src/Settings/Notifications/components.tsx b/frontend/src/Settings/Notifications/components.tsx index 16530069d..84add66ca 100644 --- a/frontend/src/Settings/Notifications/components.tsx +++ b/frontend/src/Settings/Notifications/components.tsx @@ -92,8 +92,7 @@ const NotificationModal: FunctionComponent = ({ variant="danger" onClick={() => { if (current) { - current.enabled = false; - update(current); + update({ ...current, enabled: false }); } closeModal(); }} diff --git a/frontend/src/Settings/Providers/components.tsx b/frontend/src/Settings/Providers/components.tsx index 93000df8f..9e005d123 100644 --- a/frontend/src/Settings/Providers/components.tsx +++ b/frontend/src/Settings/Providers/components.tsx @@ -2,12 +2,13 @@ import { capitalize, isArray, isBoolean } from "lodash"; import React, { FunctionComponent, useCallback, + useEffect, useMemo, useState, } from "react"; import { Button, Col, Container, Row } from "react-bootstrap"; import { components } from "react-select"; -import { SelectComponents } from "react-select/src/components"; +import { SelectComponents } from "react-select/dist/declarations/src/components"; import { BaseModal, Selector, @@ -81,6 +82,10 @@ export const ProviderModal: FunctionComponent = () => { const [staged, setChange] = useState({}); + useEffect(() => { + setInfo(payload); + }, [payload]); + const [info, setInfo] = useState>(payload); useOnModalShow((p) => setInfo(p), ModalKey); @@ -213,11 +218,12 @@ export const ProviderModal: FunctionComponent = () => { }, [info]); const selectorComponents = 
useMemo< - Partial> + Partial> >( () => ({ Option: ({ data, ...other }) => { - const { label, value } = data as SelectorOption; + const { label, value } = + data as unknown as SelectorOption; return ( {label} diff --git a/frontend/src/Settings/Providers/list.ts b/frontend/src/Settings/Providers/list.ts index 3eec72d7d..dc076eb1c 100644 --- a/frontend/src/Settings/Providers/list.ts +++ b/frontend/src/Settings/Providers/list.ts @@ -220,7 +220,8 @@ export const ProviderList: Readonly = [ key: "tusubtitulo", name: "Tusubtitulo.com", description: - "LATAM Spanish / Spanish / English Subtitles Provider for TV Shows", + "Provider requested to be removed from Bazarr so it will always return no subtitles. Could potentially come back in the future with an upcoming premium account.", + // "LATAM Spanish / Spanish / English Subtitles Provider for TV Shows", }, { key: "titulky", @@ -229,6 +230,14 @@ export const ProviderList: Readonly = [ defaultKey: { username: "", password: "", + skip_wrong_fps: false, + approved_only: false, + multithreading: true, + }, + keyNameOverride: { + skip_wrong_fps: "Skip mismatching FPS", + approved_only: "Skip unapproved subtitles", + multithreading: "Enable multithreading", }, }, { key: "tvsubtitles", name: "TVSubtitles" }, diff --git a/frontend/src/System/Status/index.tsx b/frontend/src/System/Status/index.tsx index 44573f6ef..1ac6507c3 100644 --- a/frontend/src/System/Status/index.tsx +++ b/frontend/src/System/Status/index.tsx @@ -6,9 +6,11 @@ import { } from "@fortawesome/free-brands-svg-icons"; import { faPaperPlane } from "@fortawesome/free-solid-svg-icons"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; -import React, { FunctionComponent } from "react"; +import moment from "moment"; +import React, { FunctionComponent, useState } from "react"; import { Col, Container, Row } from "react-bootstrap"; import { Helmet } from "react-helmet"; +import { useIntervalWhen } from "rooks"; import { useSystemHealth, useSystemStatus } from "../../@redux/hooks"; import { AsyncOverlay } from "../../components"; import { GithubRepoRoot } from "../../constants"; @@ -69,6 +71,28 @@ const SystemStatusView: FunctionComponent = () => { const health = useSystemHealth(); const status = useSystemStatus(); + const [uptime, setState] = useState(); + const [intervalWhenState] = useState(true); + + useIntervalWhen( + () => { + if (status) { + let duration = moment.duration( + moment().utc().unix() - status.start_time, + "seconds" + ), + days = duration.days(), + hours = duration.hours().toString().padStart(2, "0"), + minutes = duration.minutes().toString().padStart(2, "0"), + seconds = duration.seconds().toString().padStart(2, "0"); + setState(days + "d " + hours + ":" + minutes + ":" + seconds); + } + }, + 1000, + intervalWhenState, + true + ); + return ( @@ -106,6 +130,9 @@ const SystemStatusView: FunctionComponent = () => { {status?.bazarr_config_directory} + + {uptime} + diff --git a/frontend/src/components/inputs/Selector.tsx b/frontend/src/components/inputs/Selector.tsx index 7ac6099c1..f3df67459 100644 --- a/frontend/src/components/inputs/Selector.tsx +++ b/frontend/src/components/inputs/Selector.tsx @@ -1,7 +1,7 @@ import { isArray } from "lodash"; import React, { useCallback, useMemo } from "react"; -import ReactSelect from "react-select"; -import { SelectComponents } from "react-select/src/components"; +import Select from "react-select"; +import { SelectComponents } from "react-select/dist/declarations/src/components"; import "./selector.scss"; export interface 
SelectorProps { @@ -17,7 +17,7 @@ export interface SelectorProps { label?: (item: T) => string; defaultValue?: SelectorValueType; value?: SelectorValueType; - components?: Partial>; + components?: Partial>; } export function Selector( @@ -69,15 +69,15 @@ export function Selector( [label, multiple, nameFromItems] ); - const defaultWrapper = useMemo(() => wrapper(defaultValue), [ - defaultValue, - wrapper, - ]); + const defaultWrapper = useMemo( + () => wrapper(defaultValue), + [defaultValue, wrapper] + ); const valueWrapper = useMemo(() => wrapper(value), [wrapper, value]); return ( - = 10} @@ -92,7 +92,7 @@ export function Selector( className={`custom-selector w-100 ${className ?? ""}`} classNamePrefix="selector" onFocus={onFocus} - onChange={(v) => { + onChange={(v: SelectorOption[]) => { if (onChange) { let res: T | T[] | null = null; if (isArray(v)) { @@ -106,6 +106,6 @@ export function Selector( onChange(res as any); } }} - > + > ); } diff --git a/libs/apscheduler/events.py b/libs/apscheduler/events.py index 890763eb6..016da03c5 100644 --- a/libs/apscheduler/events.py +++ b/libs/apscheduler/events.py @@ -3,7 +3,7 @@ __all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDUL 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED', 'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES', - 'SchedulerEvent', 'JobEvent', 'JobExecutionEvent') + 'SchedulerEvent', 'JobEvent', 'JobExecutionEvent', 'JobSubmissionEvent') EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0 diff --git a/libs/apscheduler/executors/asyncio.py b/libs/apscheduler/executors/asyncio.py index 5139622d1..06fc7f968 100644 --- a/libs/apscheduler/executors/asyncio.py +++ b/libs/apscheduler/executors/asyncio.py @@ -3,12 +3,11 @@ from __future__ import absolute_import import sys from apscheduler.executors.base import BaseExecutor, run_job +from apscheduler.util import iscoroutinefunction_partial try: - from asyncio import iscoroutinefunction from apscheduler.executors.base_py3 import run_coroutine_job except ImportError: - from trollius import iscoroutinefunction run_coroutine_job = None @@ -46,7 +45,7 @@ class AsyncIOExecutor(BaseExecutor): else: self._run_job_success(job.id, events) - if iscoroutinefunction(job.func): + if iscoroutinefunction_partial(job.func): if run_coroutine_job is not None: coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) f = self._eventloop.create_task(coro) diff --git a/libs/apscheduler/executors/base_py3.py b/libs/apscheduler/executors/base_py3.py index 61abd8424..7111d2aec 100644 --- a/libs/apscheduler/executors/base_py3.py +++ b/libs/apscheduler/executors/base_py3.py @@ -1,5 +1,6 @@ import logging import sys +import traceback from datetime import datetime, timedelta from traceback import format_tb @@ -33,6 +34,7 @@ async def run_coroutine_job(job, jobstore_alias, run_times, logger_name): events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, exception=exc, traceback=formatted_tb)) logger.exception('Job "%s" raised an exception', job) + traceback.clear_frames(tb) else: events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval)) diff --git a/libs/apscheduler/executors/pool.py b/libs/apscheduler/executors/pool.py index 2f4ef455c..c85896ec2 100644 --- a/libs/apscheduler/executors/pool.py +++ b/libs/apscheduler/executors/pool.py @@ -3,6 +3,11 @@ 
import concurrent.futures from apscheduler.executors.base import BaseExecutor, run_job +try: + from concurrent.futures.process import BrokenProcessPool +except ImportError: + BrokenProcessPool = None + class BasePoolExecutor(BaseExecutor): @abstractmethod @@ -19,7 +24,13 @@ class BasePoolExecutor(BaseExecutor): else: self._run_job_success(job.id, f.result()) - f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) + try: + f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) + except BrokenProcessPool: + self._logger.warning('Process pool is broken; replacing pool with a fresh instance') + self._pool = self._pool.__class__(self._pool._max_workers) + f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) + f.add_done_callback(callback) def shutdown(self, wait=True): @@ -33,10 +44,13 @@ class ThreadPoolExecutor(BasePoolExecutor): Plugin alias: ``threadpool`` :param max_workers: the maximum number of spawned threads. + :param pool_kwargs: dict of keyword arguments to pass to the underlying + ThreadPoolExecutor constructor """ - def __init__(self, max_workers=10): - pool = concurrent.futures.ThreadPoolExecutor(int(max_workers)) + def __init__(self, max_workers=10, pool_kwargs=None): + pool_kwargs = pool_kwargs or {} + pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs) super(ThreadPoolExecutor, self).__init__(pool) @@ -47,8 +61,11 @@ class ProcessPoolExecutor(BasePoolExecutor): Plugin alias: ``processpool`` :param max_workers: the maximum number of spawned processes. + :param pool_kwargs: dict of keyword arguments to pass to the underlying + ProcessPoolExecutor constructor """ - def __init__(self, max_workers=10): - pool = concurrent.futures.ProcessPoolExecutor(int(max_workers)) + def __init__(self, max_workers=10, pool_kwargs=None): + pool_kwargs = pool_kwargs or {} + pool = concurrent.futures.ProcessPoolExecutor(int(max_workers), **pool_kwargs) super(ProcessPoolExecutor, self).__init__(pool) diff --git a/libs/apscheduler/executors/tornado.py b/libs/apscheduler/executors/tornado.py index a4696ce79..3b97eec92 100644 --- a/libs/apscheduler/executors/tornado.py +++ b/libs/apscheduler/executors/tornado.py @@ -8,10 +8,10 @@ from tornado.gen import convert_yielded from apscheduler.executors.base import BaseExecutor, run_job try: - from inspect import iscoroutinefunction from apscheduler.executors.base_py3 import run_coroutine_job + from apscheduler.util import iscoroutinefunction_partial except ImportError: - def iscoroutinefunction(func): + def iscoroutinefunction_partial(func): return False @@ -44,7 +44,7 @@ class TornadoExecutor(BaseExecutor): else: self._run_job_success(job.id, events) - if iscoroutinefunction(job.func): + if iscoroutinefunction_partial(job.func): f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) else: f = self.executor.submit(run_job, job, job._jobstore_alias, run_times, diff --git a/libs/apscheduler/job.py b/libs/apscheduler/job.py index 4e24bec7a..445d9a868 100644 --- a/libs/apscheduler/job.py +++ b/libs/apscheduler/job.py @@ -1,4 +1,3 @@ -from collections import Iterable, Mapping from inspect import ismethod, isclass from uuid import uuid4 @@ -9,6 +8,11 @@ from apscheduler.util import ( ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, convert_to_datetime) +try: + from collections.abc import Iterable, Mapping +except ImportError: + from collections import Iterable, Mapping + class 
Job(object): """ @@ -24,7 +28,7 @@ class Job(object): :var trigger: the trigger object that controls the schedule of this job :var str executor: the name of the executor that will run this job :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to - be late + be late (``None`` means "allow the job to run no matter how late it is") :var int max_instances: the maximum number of concurrently executing instances allowed for this job :var datetime.datetime next_run_time: the next scheduled run time of this job @@ -36,7 +40,7 @@ class Job(object): __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', 'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances', - 'next_run_time') + 'next_run_time', '__weakref__') def __init__(self, scheduler, id=None, **kwargs): super(Job, self).__init__() @@ -238,8 +242,9 @@ class Job(object): # Instance methods cannot survive serialization as-is, so store the "self" argument # explicitly - if ismethod(self.func) and not isclass(self.func.__self__): - args = (self.func.__self__,) + tuple(self.args) + func = self.func + if ismethod(func) and not isclass(func.__self__) and obj_to_ref(func) == self.func_ref: + args = (func.__self__,) + tuple(self.args) else: args = self.args diff --git a/libs/apscheduler/jobstores/mongodb.py b/libs/apscheduler/jobstores/mongodb.py index 7dbc3b127..ea3097ddc 100644 --- a/libs/apscheduler/jobstores/mongodb.py +++ b/libs/apscheduler/jobstores/mongodb.py @@ -54,7 +54,7 @@ class MongoDBJobStore(BaseJobStore): def start(self, scheduler, alias): super(MongoDBJobStore, self).start(scheduler, alias) - self.collection.ensure_index('next_run_time', sparse=True) + self.collection.create_index('next_run_time', sparse=True) @property def connection(self): @@ -83,7 +83,7 @@ class MongoDBJobStore(BaseJobStore): def add_job(self, job): try: - self.collection.insert({ + self.collection.insert_one({ '_id': job.id, 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) @@ -96,13 +96,13 @@ class MongoDBJobStore(BaseJobStore): 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) } - result = self.collection.update({'_id': job.id}, {'$set': changes}) - if result and result['n'] == 0: + result = self.collection.update_one({'_id': job.id}, {'$set': changes}) + if result and result.matched_count == 0: raise JobLookupError(job.id) def remove_job(self, job_id): - result = self.collection.remove(job_id) - if result and result['n'] == 0: + result = self.collection.delete_one({'_id': job_id}) + if result and result.deleted_count == 0: raise JobLookupError(job_id) def remove_all_jobs(self): diff --git a/libs/apscheduler/jobstores/redis.py b/libs/apscheduler/jobstores/redis.py index 61f913e9e..5bb69d635 100644 --- a/libs/apscheduler/jobstores/redis.py +++ b/libs/apscheduler/jobstores/redis.py @@ -14,7 +14,7 @@ except ImportError: # pragma: nocover import pickle try: - from redis import StrictRedis + from redis import Redis except ImportError: # pragma: nocover raise ImportError('RedisJobStore requires redis installed') @@ -47,7 +47,7 @@ class RedisJobStore(BaseJobStore): self.pickle_protocol = pickle_protocol self.jobs_key = jobs_key self.run_times_key = run_times_key - self.redis = StrictRedis(db=int(db), **connect_args) + self.redis = Redis(db=int(db), **connect_args) def lookup_job(self, job_id): 
job_state = self.redis.hget(self.jobs_key, job_id) @@ -81,7 +81,9 @@ class RedisJobStore(BaseJobStore): pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol)) if job.next_run_time: - pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id) + pipe.zadd(self.run_times_key, + {job.id: datetime_to_utc_timestamp(job.next_run_time)}) + pipe.execute() def update_job(self, job): @@ -92,9 +94,11 @@ class RedisJobStore(BaseJobStore): pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol)) if job.next_run_time: - pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id) + pipe.zadd(self.run_times_key, + {job.id: datetime_to_utc_timestamp(job.next_run_time)}) else: pipe.zrem(self.run_times_key, job.id) + pipe.execute() def remove_job(self, job_id): diff --git a/libs/apscheduler/jobstores/rethinkdb.py b/libs/apscheduler/jobstores/rethinkdb.py index 2185c6cc1..d8a78cde3 100644 --- a/libs/apscheduler/jobstores/rethinkdb.py +++ b/libs/apscheduler/jobstores/rethinkdb.py @@ -10,7 +10,7 @@ except ImportError: # pragma: nocover import pickle try: - import rethinkdb as r + from rethinkdb import RethinkDB except ImportError: # pragma: nocover raise ImportError('RethinkDBJobStore requires rethinkdb installed') @@ -40,10 +40,12 @@ class RethinkDBJobStore(BaseJobStore): raise ValueError('The "table" parameter must not be empty') self.database = database - self.table = table + self.table_name = table + self.table = None self.client = client self.pickle_protocol = pickle_protocol self.connect_args = connect_args + self.r = RethinkDB() self.conn = None def start(self, scheduler, alias): @@ -52,31 +54,31 @@ class RethinkDBJobStore(BaseJobStore): if self.client: self.conn = maybe_ref(self.client) else: - self.conn = r.connect(db=self.database, **self.connect_args) + self.conn = self.r.connect(db=self.database, **self.connect_args) - if self.database not in r.db_list().run(self.conn): - r.db_create(self.database).run(self.conn) + if self.database not in self.r.db_list().run(self.conn): + self.r.db_create(self.database).run(self.conn) - if self.table not in r.table_list().run(self.conn): - r.table_create(self.table).run(self.conn) + if self.table_name not in self.r.table_list().run(self.conn): + self.r.table_create(self.table_name).run(self.conn) - if 'next_run_time' not in r.table(self.table).index_list().run(self.conn): - r.table(self.table).index_create('next_run_time').run(self.conn) + if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn): + self.r.table(self.table_name).index_create('next_run_time').run(self.conn) - self.table = r.db(self.database).table(self.table) + self.table = self.r.db(self.database).table(self.table_name) def lookup_job(self, job_id): results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn)) return self._reconstitute_job(results[0]['job_state']) if results else None def get_due_jobs(self, now): - return self._get_jobs(r.row['next_run_time'] <= datetime_to_utc_timestamp(now)) + return self._get_jobs(self.r.row['next_run_time'] <= datetime_to_utc_timestamp(now)) def get_next_run_time(self): results = list( self.table - .filter(r.row['next_run_time'] != None) # flake8: noqa - .order_by(r.asc('next_run_time')) + .filter(self.r.row['next_run_time'] != None) # noqa + .order_by(self.r.asc('next_run_time')) .map(lambda x: x['next_run_time']) .limit(1) .run(self.conn) @@ -92,7 +94,7 @@ class RethinkDBJobStore(BaseJobStore): job_dict = { 
'id': job.id, 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) + 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) } results = self.table.insert(job_dict).run(self.conn) if results['errors'] > 0: @@ -101,7 +103,7 @@ class RethinkDBJobStore(BaseJobStore): def update_job(self, job): changes = { 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) + 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) } results = self.table.get_all(job.id).update(changes).run(self.conn) skipped = False in map(lambda x: results[x] == 0, results.keys()) @@ -130,20 +132,20 @@ class RethinkDBJobStore(BaseJobStore): def _get_jobs(self, predicate=None): jobs = [] failed_job_ids = [] - query = (self.table.filter(r.row['next_run_time'] != None).filter(predicate) if - predicate else self.table) + query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate) # noqa + if predicate else self.table) query = query.order_by('next_run_time', 'id').pluck('id', 'job_state') for document in query.run(self.conn): try: jobs.append(self._reconstitute_job(document['job_state'])) - except: + except Exception: self._logger.exception('Unable to restore job "%s" -- removing it', document['id']) failed_job_ids.append(document['id']) # Remove all the jobs we failed to restore if failed_job_ids: - r.expr(failed_job_ids).for_each( + self.r.expr(failed_job_ids).for_each( lambda job_id: self.table.get_all(job_id).delete()).run(self.conn) return jobs diff --git a/libs/apscheduler/jobstores/sqlalchemy.py b/libs/apscheduler/jobstores/sqlalchemy.py index beb27fb56..dcfd3e565 100644 --- a/libs/apscheduler/jobstores/sqlalchemy.py +++ b/libs/apscheduler/jobstores/sqlalchemy.py @@ -11,7 +11,7 @@ except ImportError: # pragma: nocover try: from sqlalchemy import ( - create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select) + create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select, and_) from sqlalchemy.exc import IntegrityError from sqlalchemy.sql.expression import null except ImportError: # pragma: nocover @@ -106,7 +106,7 @@ class SQLAlchemyJobStore(BaseJobStore): }).where(self.jobs_t.c.id == job.id) result = self.engine.execute(update) if result.rowcount == 0: - raise JobLookupError(id) + raise JobLookupError(job.id) def remove_job(self, job_id): delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id) @@ -134,7 +134,7 @@ class SQLAlchemyJobStore(BaseJobStore): jobs = [] selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\ order_by(self.jobs_t.c.next_run_time) - selectable = selectable.where(*conditions) if conditions else selectable + selectable = selectable.where(and_(*conditions)) if conditions else selectable failed_job_ids = set() for row in self.engine.execute(selectable): try: diff --git a/libs/apscheduler/jobstores/zookeeper.py b/libs/apscheduler/jobstores/zookeeper.py index 2cca83e8f..525306936 100644 --- a/libs/apscheduler/jobstores/zookeeper.py +++ b/libs/apscheduler/jobstores/zookeeper.py @@ -1,6 +1,5 @@ from __future__ import absolute_import -import os from datetime import datetime from pytz import utc @@ -65,7 +64,7 @@ class ZooKeeperJobStore(BaseJobStore): def lookup_job(self, job_id): self._ensure_paths() - node_path = os.path.join(self.path, job_id) + node_path = self.path + "/" + str(job_id) try: content, _ = 
self.client.get(node_path) doc = pickle.loads(content) @@ -92,7 +91,7 @@ class ZooKeeperJobStore(BaseJobStore): def add_job(self, job): self._ensure_paths() - node_path = os.path.join(self.path, str(job.id)) + node_path = self.path + "/" + str(job.id) value = { 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': job.__getstate__() @@ -105,7 +104,7 @@ class ZooKeeperJobStore(BaseJobStore): def update_job(self, job): self._ensure_paths() - node_path = os.path.join(self.path, str(job.id)) + node_path = self.path + "/" + str(job.id) changes = { 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': job.__getstate__() @@ -118,7 +117,7 @@ class ZooKeeperJobStore(BaseJobStore): def remove_job(self, job_id): self._ensure_paths() - node_path = os.path.join(self.path, str(job_id)) + node_path = self.path + "/" + str(job_id) try: self.client.delete(node_path) except NoNodeError: @@ -151,7 +150,7 @@ class ZooKeeperJobStore(BaseJobStore): all_ids = self.client.get_children(self.path) for node_name in all_ids: try: - node_path = os.path.join(self.path, node_name) + node_path = self.path + "/" + node_name content, _ = self.client.get(node_path) doc = pickle.loads(content) job_def = { diff --git a/libs/apscheduler/schedulers/asyncio.py b/libs/apscheduler/schedulers/asyncio.py index 289ef13fe..70ebedeb6 100644 --- a/libs/apscheduler/schedulers/asyncio.py +++ b/libs/apscheduler/schedulers/asyncio.py @@ -38,13 +38,19 @@ class AsyncIOScheduler(BaseScheduler): _eventloop = None _timeout = None + def start(self, paused=False): + if not self._eventloop: + self._eventloop = asyncio.get_event_loop() + + super(AsyncIOScheduler, self).start(paused) + @run_in_event_loop def shutdown(self, wait=True): super(AsyncIOScheduler, self).shutdown(wait) self._stop_timer() def _configure(self, config): - self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop() + self._eventloop = maybe_ref(config.pop('event_loop', None)) super(AsyncIOScheduler, self)._configure(config) def _start_timer(self, wait_seconds): diff --git a/libs/apscheduler/schedulers/background.py b/libs/apscheduler/schedulers/background.py index 03f29822a..bb8f77da2 100644 --- a/libs/apscheduler/schedulers/background.py +++ b/libs/apscheduler/schedulers/background.py @@ -29,7 +29,9 @@ class BackgroundScheduler(BlockingScheduler): super(BackgroundScheduler, self)._configure(config) def start(self, *args, **kwargs): - self._event = Event() + if self._event is None or self._event.is_set(): + self._event = Event() + BaseScheduler.start(self, *args, **kwargs) self._thread = Thread(target=self._main_loop, name='APScheduler') self._thread.daemon = self._daemon diff --git a/libs/apscheduler/schedulers/base.py b/libs/apscheduler/schedulers/base.py index 8f910a653..3dfb74377 100644 --- a/libs/apscheduler/schedulers/base.py +++ b/libs/apscheduler/schedulers/base.py @@ -1,7 +1,6 @@ from __future__ import print_function from abc import ABCMeta, abstractmethod -from collections import MutableMapping from threading import RLock from datetime import datetime, timedelta from logging import getLogger @@ -27,6 +26,11 @@ from apscheduler.events import ( EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED, EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED) +try: + from collections.abc import MutableMapping +except ImportError: + from collections import MutableMapping + #: constant indicating a scheduler's stopped state STATE_STOPPED 
= 0 #: constant indicating a scheduler's running state (started and processing jobs) @@ -82,6 +86,11 @@ class BaseScheduler(six.with_metaclass(ABCMeta)): self.state = STATE_STOPPED self.configure(gconfig, **options) + def __getstate__(self): + raise TypeError("Schedulers cannot be serialized. Ensure that you are not passing a " + "scheduler instance as an argument to a job, or scheduling an instance " + "method where the instance contains a scheduler as an attribute.") + def configure(self, gconfig={}, prefix='apscheduler.', **options): """ Reconfigures the scheduler with the given options. @@ -398,7 +407,7 @@ class BaseScheduler(six.with_metaclass(ABCMeta)): :param str|unicode id: explicit identifier for the job (for modifying it later) :param str|unicode name: textual description of the job :param int misfire_grace_time: seconds after the designated runtime that the job is still - allowed to be run + allowed to be run (or ``None`` to allow the job to run no matter how late it is) :param bool coalesce: run once instead of many times if the scheduler determines that the job should be run more than once in succession :param int max_instances: maximum number of concurrently running instances allowed for this @@ -594,14 +603,13 @@ class BaseScheduler(six.with_metaclass(ABCMeta)): """ jobstore_alias = None with self._jobstores_lock: + # Check if the job is among the pending jobs if self.state == STATE_STOPPED: - # Check if the job is among the pending jobs - if self.state == STATE_STOPPED: - for i, (job, alias, replace_existing) in enumerate(self._pending_jobs): - if job.id == job_id and jobstore in (None, alias): - del self._pending_jobs[i] - jobstore_alias = alias - break + for i, (job, alias, replace_existing) in enumerate(self._pending_jobs): + if job.id == job_id and jobstore in (None, alias): + del self._pending_jobs[i] + jobstore_alias = alias + break else: # Otherwise, try to remove it from each store until it succeeds or we run out of # stores to check diff --git a/libs/apscheduler/schedulers/blocking.py b/libs/apscheduler/schedulers/blocking.py index e61715757..4ecc9f6f1 100644 --- a/libs/apscheduler/schedulers/blocking.py +++ b/libs/apscheduler/schedulers/blocking.py @@ -14,7 +14,9 @@ class BlockingScheduler(BaseScheduler): _event = None def start(self, *args, **kwargs): - self._event = Event() + if self._event is None or self._event.is_set(): + self._event = Event() + super(BlockingScheduler, self).start(*args, **kwargs) self._main_loop() diff --git a/libs/apscheduler/schedulers/qt.py b/libs/apscheduler/schedulers/qt.py index 6ee5d332a..dda77d796 100644 --- a/libs/apscheduler/schedulers/qt.py +++ b/libs/apscheduler/schedulers/qt.py @@ -9,9 +9,13 @@ except (ImportError, RuntimeError): # pragma: nocover from PyQt4.QtCore import QObject, QTimer except ImportError: try: - from PySide.QtCore import QObject, QTimer # flake8: noqa + from PySide2.QtCore import QObject, QTimer # noqa except ImportError: - raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed') + try: + from PySide.QtCore import QObject, QTimer # noqa + except ImportError: + raise ImportError('QtScheduler requires either PyQt5, PyQt4, PySide2 ' + 'or PySide installed') class QtScheduler(BaseScheduler): @@ -26,7 +30,8 @@ class QtScheduler(BaseScheduler): def _start_timer(self, wait_seconds): self._stop_timer() if wait_seconds is not None: - self._timer = QTimer.singleShot(wait_seconds * 1000, self._process_jobs) + wait_time = min(wait_seconds * 1000, 2147483647) + self._timer = 
QTimer.singleShot(wait_time, self._process_jobs) def _stop_timer(self): if self._timer: diff --git a/libs/apscheduler/triggers/base.py b/libs/apscheduler/triggers/base.py index ce2526a88..55d010dba 100644 --- a/libs/apscheduler/triggers/base.py +++ b/libs/apscheduler/triggers/base.py @@ -22,27 +22,16 @@ class BaseTrigger(six.with_metaclass(ABCMeta)): def _apply_jitter(self, next_fire_time, jitter, now): """ - Randomize ``next_fire_time`` by adding or subtracting a random value (the jitter). If the - resulting datetime is in the past, returns the initial ``next_fire_time`` without jitter. - - ``next_fire_time - jitter <= result <= next_fire_time + jitter`` + Randomize ``next_fire_time`` by adding a random value (the jitter). :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If ``None``, returns ``None``. - :param int|None jitter: maximum number of seconds to add or subtract to - ``next_fire_time``. If ``None`` or ``0``, returns ``next_fire_time`` + :param int|None jitter: maximum number of seconds to add to ``next_fire_time`` + (if ``None`` or ``0``, returns ``next_fire_time``) :param datetime.datetime now: current datetime :return datetime.datetime|None: next fire time with a jitter. """ if next_fire_time is None or not jitter: return next_fire_time - next_fire_time_with_jitter = next_fire_time + timedelta( - seconds=random.uniform(-jitter, jitter)) - - if next_fire_time_with_jitter < now: - # Next fire time with jitter is in the past. - # Ignore jitter to avoid false misfire. - return next_fire_time - - return next_fire_time_with_jitter + return next_fire_time + timedelta(seconds=random.uniform(0, jitter)) diff --git a/libs/apscheduler/triggers/combining.py b/libs/apscheduler/triggers/combining.py index 64f83011a..bb9000618 100644 --- a/libs/apscheduler/triggers/combining.py +++ b/libs/apscheduler/triggers/combining.py @@ -45,7 +45,7 @@ class AndTrigger(BaseCombiningTrigger): Trigger alias: ``and`` :param list triggers: triggers to combine - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. + :param int|None jitter: delay the job execution by ``jitter`` seconds at most """ __slots__ = () @@ -73,7 +73,7 @@ class OrTrigger(BaseCombiningTrigger): Trigger alias: ``or`` :param list triggers: triggers to combine - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. + :param int|None jitter: delay the job execution by ``jitter`` seconds at most .. 
note:: Triggers that depends on the previous fire time, such as the interval trigger, may seem to behave strangely since they are always passed the previous fire time produced by diff --git a/libs/apscheduler/triggers/cron/__init__.py b/libs/apscheduler/triggers/cron/__init__.py index ce675dd93..fec6e3b5c 100644 --- a/libs/apscheduler/triggers/cron/__init__.py +++ b/libs/apscheduler/triggers/cron/__init__.py @@ -16,7 +16,7 @@ class CronTrigger(BaseTrigger): :param int|str year: 4-digit year :param int|str month: month (1-12) - :param int|str day: day of the (1-31) + :param int|str day: day of month (1-31) :param int|str week: ISO week (1-53) :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun) :param int|str hour: hour (0-23) @@ -26,7 +26,7 @@ class CronTrigger(BaseTrigger): :param datetime|str end_date: latest possible date/time to trigger on (inclusive) :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults to scheduler timezone) - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. + :param int|None jitter: delay the job execution by ``jitter`` seconds at most .. note:: The first weekday is always **monday**. """ diff --git a/libs/apscheduler/triggers/interval.py b/libs/apscheduler/triggers/interval.py index 831ba3830..61094aa13 100644 --- a/libs/apscheduler/triggers/interval.py +++ b/libs/apscheduler/triggers/interval.py @@ -20,7 +20,7 @@ class IntervalTrigger(BaseTrigger): :param datetime|str start_date: starting point for the interval calculation :param datetime|str end_date: latest possible date/time to trigger on :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. 
+ :param int|None jitter: delay the job execution by ``jitter`` seconds at most """ __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter' diff --git a/libs/apscheduler/util.py b/libs/apscheduler/util.py index 3c48e550b..1e643bffa 100644 --- a/libs/apscheduler/util.py +++ b/libs/apscheduler/util.py @@ -5,8 +5,9 @@ from __future__ import division from datetime import date, datetime, time, timedelta, tzinfo from calendar import timegm from functools import partial -from inspect import isclass +from inspect import isclass, ismethod import re +import sys from pytz import timezone, utc, FixedOffset import six @@ -21,6 +22,15 @@ try: except ImportError: TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows +try: + from asyncio import iscoroutinefunction +except ImportError: + try: + from trollius import iscoroutinefunction + except ImportError: + def iscoroutinefunction(func): + return False + __all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp', 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args', @@ -263,7 +273,18 @@ def obj_to_ref(obj): if '' in name: raise ValueError('Cannot create a reference to a nested function') - return '%s:%s' % (obj.__module__, name) + if ismethod(obj): + if hasattr(obj, 'im_self') and obj.im_self: + # bound method + module = obj.im_self.__module__ + elif hasattr(obj, 'im_class') and obj.im_class: + # unbound method + module = obj.im_class.__module__ + else: + module = obj.__module__ + else: + module = obj.__module__ + return '%s:%s' % (module, name) def ref_to_obj(ref): @@ -332,7 +353,10 @@ def check_callable_args(func, args, kwargs): has_varargs = has_var_kwargs = False try: - sig = signature(func) + if sys.version_info >= (3, 5): + sig = signature(func, follow_wrapped=False) + else: + sig = signature(func) except ValueError: # signature() doesn't work against every kind of callable return @@ -398,3 +422,12 @@ def check_callable_args(func, args, kwargs): raise ValueError( 'The target callable does not accept the following keyword arguments: %s' % ', '.join(unmatched_kwargs)) + + +def iscoroutinefunction_partial(f): + while isinstance(f, partial): + f = f.func + + # The asyncio version of iscoroutinefunction includes testing for @coroutine + # decorations vs. the inspect version which does not. 
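# Illustration (an assumption, not part of the patch): the while-loop above means a
# job target wrapped in nested partials, e.g.
#     functools.partial(functools.partial(some_async_callable, retries=3), url='...')
# is reduced to some_async_callable before the coroutine check below runs.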
+ return iscoroutinefunction(f) diff --git a/libs/signalr/__init__.py b/libs/signalr/__init__.py index 3d155c5c6..7742eeb58 100644 --- a/libs/signalr/__init__.py +++ b/libs/signalr/__init__.py @@ -1,8 +1,3 @@ -from gevent import monkey - -monkey.patch_socket() -monkey.patch_ssl() - from ._connection import Connection -__version__ = '0.0.7' +__version__ = '0.0.12' diff --git a/libs/signalr/_connection.py b/libs/signalr/_connection.py index 377606f99..6471ba670 100644 --- a/libs/signalr/_connection.py +++ b/libs/signalr/_connection.py @@ -1,6 +1,6 @@ import json -import gevent import sys +from threading import Thread from signalr.events import EventHook from signalr.hubs import Hub from signalr.transports import AutoTransport @@ -15,14 +15,16 @@ class Connection: self.qs = {} self.__send_counter = -1 self.token = None + self.id = None self.data = None self.received = EventHook() self.error = EventHook() self.starting = EventHook() self.stopping = EventHook() self.exception = EventHook() + self.is_open = False self.__transport = AutoTransport(session, self) - self.__greenlet = None + self.__listener_thread = None self.started = False def handle_error(**kwargs): @@ -48,27 +50,32 @@ class Connection: negotiate_data = self.__transport.negotiate() self.token = negotiate_data['ConnectionToken'] + self.id = negotiate_data['ConnectionId'] listener = self.__transport.start() def wrapped_listener(): - try: - listener() - gevent.sleep() - except: - self.exception.fire(*sys.exc_info()) + while self.is_open: + try: + listener() + except: + self.exception.fire(*sys.exc_info()) + self.is_open = False - self.__greenlet = gevent.spawn(wrapped_listener) + self.is_open = True + self.__listener_thread = Thread(target=wrapped_listener) + self.__listener_thread.start() self.started = True def wait(self, timeout=30): - gevent.joinall([self.__greenlet], timeout) + Thread.join(self.__listener_thread, timeout) def send(self, data): self.__transport.send(data) def close(self): - gevent.kill(self.__greenlet) + self.is_open = False + self.__listener_thread.join() self.__transport.close() def register_hub(self, name): diff --git a/libs/signalr/transports/_sse_transport.py b/libs/signalr/transports/_sse_transport.py index 63d978643..7faaf936a 100644 --- a/libs/signalr/transports/_sse_transport.py +++ b/libs/signalr/transports/_sse_transport.py @@ -12,11 +12,16 @@ class ServerSentEventsTransport(Transport): return 'serverSentEvents' def start(self): - self.__response = sseclient.SSEClient(self._get_url('connect'), session=self._session) + connect_url = self._get_url('connect') + self.__response = iter(sseclient.SSEClient(connect_url, session=self._session)) self._session.get(self._get_url('start')) def _receive(): - for notification in self.__response: + try: + notification = next(self.__response) + except StopIteration: + return + else: if notification.data != 'initialized': self._handle_notification(notification.data) diff --git a/libs/signalr/transports/_transport.py b/libs/signalr/transports/_transport.py index c0d0d4278..af62672fd 100644 --- a/libs/signalr/transports/_transport.py +++ b/libs/signalr/transports/_transport.py @@ -1,13 +1,12 @@ from abc import abstractmethod import json import sys - +import threading if sys.version_info[0] < 3: from urllib import quote_plus else: from urllib.parse import quote_plus -import gevent class Transport: @@ -48,7 +47,7 @@ class Transport: if len(message) > 0: data = json.loads(message) self._connection.received.fire(**data) - gevent.sleep() + #thread.sleep() #TODO: investigate 
if we should sleep here def _get_url(self, action, **kwargs): args = kwargs.copy() diff --git a/libs/signalr/transports/_ws_transport.py b/libs/signalr/transports/_ws_transport.py index 14fefa6cc..4d9a80ad1 100644 --- a/libs/signalr/transports/_ws_transport.py +++ b/libs/signalr/transports/_ws_transport.py @@ -1,7 +1,6 @@ import json import sys -import gevent if sys.version_info[0] < 3: from urlparse import urlparse, urlunparse @@ -39,14 +38,14 @@ class WebSocketsTransport(Transport): self._session.get(self._get_url('start')) def _receive(): - for notification in self.ws: - self._handle_notification(notification) + notification = self.ws.recv() + self._handle_notification(notification) return _receive def send(self, data): self.ws.send(json.dumps(data)) - gevent.sleep() + #thread.sleep() #TODO: inveistage if we should sleep here or not def close(self): self.ws.close() diff --git a/libs/subliminal/score.py b/libs/subliminal/score.py index 749774ffd..229e2f3a1 100644 --- a/libs/subliminal/score.py +++ b/libs/subliminal/score.py @@ -45,7 +45,7 @@ movie_scores = {'hash': 119, 'title': 60, 'year': 30, 'release_group': 15, 'source': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1} #: Equivalent release groups -equivalent_release_groups = ({'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'}) +equivalent_release_groups = ({'FraMeSToR', 'W4NK3R', 'BHDStudio'}, {'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'}) def get_equivalent_release_groups(release_group): diff --git a/libs/subliminal_patch/core.py b/libs/subliminal_patch/core.py index 4025bce90..b6629526a 100644 --- a/libs/subliminal_patch/core.py +++ b/libs/subliminal_patch/core.py @@ -21,6 +21,7 @@ from bs4 import UnicodeDammit from babelfish import LanguageReverseError from guessit.jsonutils import GuessitEncoder from subliminal import ProviderError, refiner_manager +from concurrent.futures import as_completed from .extensions import provider_registry from subliminal.exceptions import ServiceUnavailable, DownloadLimitExceeded @@ -427,6 +428,58 @@ class SZProviderPool(ProviderPool): return downloaded_subtitles + def list_supported_languages(self): + """List supported languages. + + :return: languages supported by the providers. + :rtype: list of dicts + + """ + languages = [] + + for name in self.providers: + # list supported languages for a single provider + try: + provider_languages = self[name].languages + except AttributeError: + logger.exception(f"{name} provider doesn't have a languages attribute") + continue + + if provider_languages is None: + logger.info(f"Skipping provider {name} because it doesn't support any languages.") + continue + + # add the languages for this provider + languages.append({'provider': name, 'languages': provider_languages}) + + return languages + + def list_supported_video_types(self): + """List supported video types. + + :return: video types supported by the providers. 
+ :rtype: tuple of video types + + """ + video_types = [] + + for name in self.providers: + # list supported video types for a single provider + try: + provider_video_type = self[name].video_types + except AttributeError: + logger.exception(f"{name} provider doesn't have a video_types method") + continue + + if provider_video_type is None: + logger.info(f"Skipping provider {name} because it doesn't support any video type.") + continue + + # add the video types for this provider + video_types.append({'provider': name, 'video_types': provider_video_type}) + + return video_types + class SZAsyncProviderPool(SZProviderPool): """Subclass of :class:`ProviderPool` with asynchronous support for :meth:`~ProviderPool.list_subtitles`. @@ -474,6 +527,65 @@ class SZAsyncProviderPool(SZProviderPool): return subtitles + def list_supported_languages(self): + """List supported languages asynchronously. + + :return: languages supported by the providers. + :rtype: list of dicts + + """ + languages = [] + + def get_providers_languages(provider_name): + provider_languages = None + try: + provider_languages = {'provider': provider_name, 'languages': self[provider_name].languages} + except AttributeError: + logger.exception(f"{provider_name} provider doesn't have a languages attribute") + + return provider_languages + + with ThreadPoolExecutor(self.max_workers) as executor: + for future in as_completed([executor.submit(get_providers_languages, x) for x in self.providers]): + provider_languages = future.result() + if provider_languages is None: + continue + + # add the languages for this provider + languages.append(provider_languages) + + return languages + + def list_supported_video_types(self): + """List supported video types asynchronously. + + :return: video types supported by the providers. + :rtype: tuple of video types + + """ + video_types = [] + + def get_providers_video_types(provider_name): + provider_video_types = None + try: + provider_video_types = {'provider': provider_name, + 'video_types': self[provider_name].video_types} + except AttributeError: + logger.exception(f"{provider_name} provider doesn't have a video_types attribute") + + return provider_video_types + + with ThreadPoolExecutor(self.max_workers) as executor: + for future in as_completed([executor.submit(get_providers_video_types, x) for x in self.providers]): + provider_video_types = future.result() + if provider_video_types is None: + continue + + # add the languages for this provider + video_types.append(provider_video_types) + + return video_types + if is_windows_special_path: SZAsyncProviderPool = SZProviderPool @@ -758,6 +870,16 @@ def list_all_subtitles(videos, languages, **kwargs): return listed_subtitles +def list_supported_languages(pool_class, **kwargs): + with pool_class(**kwargs) as pool: + return pool.list_supported_languages() + + +def list_supported_video_types(pool_class, **kwargs): + with pool_class(**kwargs) as pool: + return pool.list_supported_video_types() + + def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs): """Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`. 
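For reference, the two module-level helpers added above (list_supported_languages and list_supported_video_types) wrap the new pool methods so callers do not have to manage the pool lifecycle themselves. A minimal usage sketch, assuming SZProviderPool still accepts the stock providers/provider_configs keyword arguments (the provider name and the empty config dict are placeholders):

from subliminal_patch.core import (SZProviderPool, list_supported_languages,
                                   list_supported_video_types)

# Placeholder pool configuration; real callers pass their configured providers.
pool_kwargs = {'providers': ['podnapisi'], 'provider_configs': {}}

# Each helper opens a pool, queries every configured provider and returns a
# list of {'provider': name, 'languages'/'video_types': ...} dicts.
for entry in list_supported_languages(SZProviderPool, **pool_kwargs):
    print(entry['provider'], sorted(str(lang) for lang in entry['languages']))

for entry in list_supported_video_types(SZProviderPool, **pool_kwargs):
    print(entry['provider'], entry['video_types'])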
diff --git a/libs/subliminal_patch/providers/assrt.py b/libs/subliminal_patch/providers/assrt.py index e8e6a499f..a1f7caba3 100644 --- a/libs/subliminal_patch/providers/assrt.py +++ b/libs/subliminal_patch/providers/assrt.py @@ -109,6 +109,7 @@ class AssrtSubtitle(Subtitle): class AssrtProvider(Provider): """Assrt Provider.""" languages = {Language(*l) for l in supported_languages} + video_types = (Episode, Movie) def __init__(self, token=None): if not token: diff --git a/libs/subliminal_patch/providers/bsplayer.py b/libs/subliminal_patch/providers/bsplayer.py index f062e5cf8..9b44089fe 100644 --- a/libs/subliminal_patch/providers/bsplayer.py +++ b/libs/subliminal_patch/providers/bsplayer.py @@ -72,6 +72,7 @@ class BSPlayerProvider(Provider): 'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por', 'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho' ]} + video_types = (Episode, Movie) SEARCH_THROTTLE = 8 hash_verifiable = True # fmt: on diff --git a/libs/subliminal_patch/providers/greeksubs.py b/libs/subliminal_patch/providers/greeksubs.py index bec0749c6..9b0e8074a 100644 --- a/libs/subliminal_patch/providers/greeksubs.py +++ b/libs/subliminal_patch/providers/greeksubs.py @@ -54,6 +54,7 @@ class GreekSubsSubtitle(Subtitle): class GreekSubsProvider(Provider): """GreekSubs Provider.""" languages = {Language('ell')} + video_types = (Episode, Movie) server_url = 'https://greeksubs.net/' subtitle_class = GreekSubsSubtitle diff --git a/libs/subliminal_patch/providers/greeksubtitles.py b/libs/subliminal_patch/providers/greeksubtitles.py index fbda6ab12..7ee26efce 100644 --- a/libs/subliminal_patch/providers/greeksubtitles.py +++ b/libs/subliminal_patch/providers/greeksubtitles.py @@ -52,6 +52,7 @@ class GreekSubtitlesSubtitle(Subtitle): class GreekSubtitlesProvider(Provider): """GreekSubtitles Provider.""" languages = {Language(l) for l in ['ell', 'eng']} + video_types = (Episode, Movie) server_url = 'http://gr.greek-subtitles.com/' search_url = 'search.php?name={}' download_url = 'http://www.greeksubtitles.info/getp.php?id={:d}' diff --git a/libs/subliminal_patch/providers/ktuvit.py b/libs/subliminal_patch/providers/ktuvit.py index bdf37bfc4..56c713c63 100644 --- a/libs/subliminal_patch/providers/ktuvit.py +++ b/libs/subliminal_patch/providers/ktuvit.py @@ -110,6 +110,7 @@ class KtuvitProvider(Provider): """Ktuvit Provider.""" languages = {Language(l) for l in ["heb"]} + video_types = (Episode, Movie) server_url = "https://www.ktuvit.me/" sign_in_url = "Services/MembershipService.svc/Login" search_url = "Services/ContentProvider.svc/SearchPage_search" diff --git a/libs/subliminal_patch/providers/legendasdivx.py b/libs/subliminal_patch/providers/legendasdivx.py index 8b535f59d..d72b762b4 100644 --- a/libs/subliminal_patch/providers/legendasdivx.py +++ b/libs/subliminal_patch/providers/legendasdivx.py @@ -121,6 +121,7 @@ class LegendasdivxSubtitle(Subtitle): class LegendasdivxProvider(Provider): """Legendasdivx Provider.""" languages = {Language('por', 'BR')} | {Language('por')} + video_types = (Episode, Movie) SEARCH_THROTTLE = 8 site = 'https://www.legendasdivx.pt' headers = { @@ -272,7 +273,7 @@ class LegendasdivxProvider(Provider): querytext = video.imdb_id if video.imdb_id else video.title if isinstance(video, Episode): - querytext = '{} S{:02d}E{:02d}'.format(video.series, video.season, video.episode) + querytext = '%22{}%20S{:02d}E{:02d}%22'.format(video.series, video.season, video.episode) querytext = quote(querytext.lower()) # language 
query filter @@ -430,13 +431,16 @@ class LegendasdivxProvider(Provider): _guess = guessit(name) if isinstance(subtitle.video, Episode): - logger.debug("Legendasdivx.pt :: guessing %s", name) - logger.debug("Legendasdivx.pt :: subtitle S%sE%s video S%sE%s", _guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode) + if all(key in _guess for key in ('season', 'episode')): + logger.debug("Legendasdivx.pt :: guessing %s", name) + logger.debug("Legendasdivx.pt :: subtitle S%sE%s video S%sE%s", _guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode) - if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']: - logger.debug('Legendasdivx.pt :: subtitle does not match video, skipping') + if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']: + logger.debug('Legendasdivx.pt :: subtitle does not match video, skipping') + continue + else: + logger.debug('Legendasdivx.pt :: no "season" and/or "episode" on "_guess" , skipping') continue - matches = set() matches |= guess_matches(subtitle.video, _guess) logger.debug('Legendasdivx.pt :: sub matches: %s', matches) diff --git a/libs/subliminal_patch/providers/legendastv.py b/libs/subliminal_patch/providers/legendastv.py index 43db667a7..638f332fb 100644 --- a/libs/subliminal_patch/providers/legendastv.py +++ b/libs/subliminal_patch/providers/legendastv.py @@ -68,6 +68,7 @@ class LegendasTVSubtitle(_LegendasTVSubtitle): class LegendasTVProvider(_LegendasTVProvider): languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()} + video_types = (Episode, Movie) subtitle_class = LegendasTVSubtitle def __init__(self, username=None, password=None, featured_only=False): diff --git a/libs/subliminal_patch/providers/napiprojekt.py b/libs/subliminal_patch/providers/napiprojekt.py index 119892b0a..647e8f196 100644 --- a/libs/subliminal_patch/providers/napiprojekt.py +++ b/libs/subliminal_patch/providers/napiprojekt.py @@ -5,6 +5,7 @@ import logging from subliminal.providers.napiprojekt import NapiProjektProvider as _NapiProjektProvider, \ NapiProjektSubtitle as _NapiProjektSubtitle, get_subhash from subzero.language import Language +from subliminal.video import Episode, Movie logger = logging.getLogger(__name__) @@ -21,6 +22,7 @@ class NapiProjektSubtitle(_NapiProjektSubtitle): class NapiProjektProvider(_NapiProjektProvider): languages = {Language.fromalpha2(l) for l in ['pl']} + video_types = (Episode, Movie) subtitle_class = NapiProjektSubtitle def query(self, language, hash): diff --git a/libs/subliminal_patch/providers/napisy24.py b/libs/subliminal_patch/providers/napisy24.py index acbc2bba9..337c1fb87 100644 --- a/libs/subliminal_patch/providers/napisy24.py +++ b/libs/subliminal_patch/providers/napisy24.py @@ -12,6 +12,7 @@ from subliminal import __short_version__ from subliminal.exceptions import AuthenticationError, ConfigurationError from subliminal.subtitle import fix_line_ending from subzero.language import Language +from subliminal.video import Episode, Movie logger = logging.getLogger(__name__) @@ -47,6 +48,7 @@ class Napisy24Subtitle(Subtitle): class Napisy24Provider(Provider): '''Napisy24 Provider.''' languages = {Language(l) for l in ['pol']} + video_types = (Episode, Movie) required_hash = 'napisy24' api_url = 'http://napisy24.pl/run/CheckSubAgent.php' diff --git a/libs/subliminal_patch/providers/nekur.py b/libs/subliminal_patch/providers/nekur.py index 112911266..096c34abd 100644 --- 
a/libs/subliminal_patch/providers/nekur.py +++ b/libs/subliminal_patch/providers/nekur.py @@ -104,6 +104,7 @@ class NekurProvider(Provider, ProviderSubtitleArchiveMixin): """Nekur Provider.""" subtitle_class = NekurSubtitle languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']} + video_types = (Movie,) server_url = 'http://subtitri.nekur.net/' search_url = server_url + 'modules/Subtitles.php' diff --git a/libs/subliminal_patch/providers/opensubtitles.py b/libs/subliminal_patch/providers/opensubtitles.py index 0152b37d6..cfa144670 100644 --- a/libs/subliminal_patch/providers/opensubtitles.py +++ b/libs/subliminal_patch/providers/opensubtitles.py @@ -140,6 +140,8 @@ class OpenSubtitlesProvider(ProviderRetryMixin, _OpenSubtitlesProvider): languages.update(set(Language.rebuild(l, forced=True) for l in languages)) languages.update(set(Language.rebuild(l, hi=True) for l in languages)) + video_types = (Episode, Movie) + def __init__(self, username=None, password=None, use_tag_search=False, only_foreign=False, also_foreign=False, skip_wrong_fps=True, is_vip=False, use_ssl=True, timeout=15): if any((username, password)) and not all((username, password)): diff --git a/libs/subliminal_patch/providers/opensubtitlescom.py b/libs/subliminal_patch/providers/opensubtitlescom.py index c97f765d1..f5ee0f8e9 100644 --- a/libs/subliminal_patch/providers/opensubtitlescom.py +++ b/libs/subliminal_patch/providers/opensubtitlescom.py @@ -51,7 +51,7 @@ class OpenSubtitlesComSubtitle(Subtitle): hash_verifiable = False def __init__(self, language, forced, hearing_impaired, page_link, file_id, releases, uploader, title, year, - hash_matched, hash=None, season=None, episode=None): + hash_matched, file_hash=None, season=None, episode=None): language = Language.rebuild(language, hi=hearing_impaired, forced=forced) self.title = title @@ -68,7 +68,7 @@ class OpenSubtitlesComSubtitle(Subtitle): self.download_link = None self.uploader = uploader self.matches = None - self.hash = hash + self.hash = file_hash self.encoding = 'utf-8' self.hash_matched = hash_matched @@ -123,8 +123,10 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider): """OpenSubtitlesCom Provider""" server_url = 'https://api.opensubtitles.com/api/v1/' - languages = {Language.fromopensubtitles(l) for l in language_converters['szopensubtitles'].codes} - languages.update(set(Language.rebuild(l, forced=True) for l in languages)) + languages = {Language.fromopensubtitles(lang) for lang in language_converters['szopensubtitles'].codes} + languages.update(set(Language.rebuild(lang, forced=True) for lang in languages)) + + video_types = (Episode, Movie) def __init__(self, username=None, password=None, use_hash=True, api_key=None): if not all((username, password)): @@ -183,26 +185,16 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider): @staticmethod def sanitize_external_ids(external_id): if isinstance(external_id, str): - external_id = external_id.lower().lstrip('tt') + external_id = external_id.lower().lstrip('tt').lstrip('0') sanitized_id = external_id[:-1].lstrip('0') + external_id[-1] return int(sanitized_id) @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) def search_titles(self, title): title_id = None - imdb_id = None - if isinstance(self.video, Episode) and self.video.series_imdb_id: - imdb_id = self.sanitize_external_ids(self.video.series_imdb_id) - elif isinstance(self.video, Movie) and self.video.imdb_id: - imdb_id = self.sanitize_external_ids(self.video.imdb_id) - - if imdb_id: - parameters = 
{'imdb_id': imdb_id} - logging.debug('Searching using this IMDB id: {}'.format(imdb_id)) - else: - parameters = {'query': title.lower()} - logging.debug('Searching using this title: {}'.format(title)) + parameters = {'query': title.lower()} + logging.debug('Searching using this title: {}'.format(title)) results = self.session.get(self.server_url + 'features', params=parameters, timeout=30) @@ -230,10 +222,19 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider): else: # loop over results for result in results_dict: - if fix_tv_naming(title).lower() == result['attributes']['title'].lower() and \ - (not self.video.year or self.video.year == int(result['attributes']['year'])): - title_id = result['id'] - break + if 'title' in result['attributes']: + if isinstance(self.video, Episode): + if fix_tv_naming(title).lower() == result['attributes']['title'].lower() and \ + (not self.video.year or self.video.year == int(result['attributes']['year'])): + title_id = result['id'] + break + else: + if fix_movie_naming(title).lower() == result['attributes']['title'].lower() and \ + (not self.video.year or self.video.year == int(result['attributes']['year'])): + title_id = result['id'] + break + else: + continue if title_id: logging.debug('Found this title ID: {}'.format(title_id)) @@ -245,19 +246,28 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider): def query(self, languages, video): self.video = video if self.use_hash: - hash = self.video.hashes.get('opensubtitlescom') + file_hash = self.video.hashes.get('opensubtitlescom') logging.debug('Searching using this hash: {}'.format(hash)) else: - hash = None + file_hash = None if isinstance(self.video, Episode): title = self.video.series else: title = self.video.title - title_id = self.search_titles(title) - if not title_id: - return [] + imdb_id = None + if isinstance(self.video, Episode) and self.video.series_imdb_id: + imdb_id = self.sanitize_external_ids(self.video.series_imdb_id) + elif isinstance(self.video, Movie) and self.video.imdb_id: + imdb_id = self.sanitize_external_ids(self.video.imdb_id) + + title_id = None + if not imdb_id: + title_id = self.search_titles(title) + if not title_id: + return [] + lang_strings = [str(lang.basename) for lang in languages] only_foreign = all([lang.forced for lang in languages]) also_foreign = any([lang.forced for lang in languages]) @@ -277,17 +287,17 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider): params=(('episode_number', self.video.episode), ('foreign_parts_only', forced), ('languages', langs.lower()), - ('moviehash', hash), - ('parent_feature_id', title_id), + ('moviehash', file_hash), + ('parent_feature_id', title_id) if title_id else ('imdb_id', imdb_id), ('season_number', self.video.season), ('query', os.path.basename(self.video.name))), timeout=30) else: res = self.session.get(self.server_url + 'subtitles', params=(('foreign_parts_only', forced), - ('id', title_id), + ('id', title_id) if title_id else ('imdb_id', imdb_id), ('languages', langs.lower()), - ('moviehash', hash), + ('moviehash', file_hash), ('query', os.path.basename(self.video.name))), timeout=30) diff --git a/libs/subliminal_patch/providers/podnapisi.py b/libs/subliminal_patch/providers/podnapisi.py index 9952240df..5aafaa8fe 100644 --- a/libs/subliminal_patch/providers/podnapisi.py +++ b/libs/subliminal_patch/providers/podnapisi.py @@ -27,8 +27,7 @@ except ImportError: except ImportError: import xml.etree.ElementTree as etree from babelfish import language_converters -from subliminal import Episode 
-from subliminal import Movie +from subliminal.video import Episode, Movie from subliminal.providers.podnapisi import PodnapisiProvider as _PodnapisiProvider, \ PodnapisiSubtitle as _PodnapisiSubtitle from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming @@ -130,6 +129,8 @@ class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin): languages.update(set(Language.rebuild(l, forced=True) for l in languages)) languages.update(set(Language.rebuild(l, hi=True) for l in languages)) + video_types = (Episode, Movie) + server_url = 'https://podnapisi.net/subtitles/' only_foreign = False also_foreign = False diff --git a/libs/subliminal_patch/providers/regielive.py b/libs/subliminal_patch/providers/regielive.py index 16bbdf2b0..91853f790 100644 --- a/libs/subliminal_patch/providers/regielive.py +++ b/libs/subliminal_patch/providers/regielive.py @@ -65,6 +65,7 @@ class RegieLiveProvider(Provider): """RegieLive Provider.""" languages = {Language(l) for l in ['ron']} language = list(languages)[0] + video_types = (Episode, Movie) SEARCH_THROTTLE = 8 def __init__(self): diff --git a/libs/subliminal_patch/providers/shooter.py b/libs/subliminal_patch/providers/shooter.py index 8b81856df..a20d9bfb4 100644 --- a/libs/subliminal_patch/providers/shooter.py +++ b/libs/subliminal_patch/providers/shooter.py @@ -2,6 +2,7 @@ from __future__ import absolute_import from subliminal.providers.shooter import ShooterProvider as _ShooterProvider, ShooterSubtitle as _ShooterSubtitle +from subliminal.video import Episode, Movie class ShooterSubtitle(_ShooterSubtitle): @@ -13,4 +14,4 @@ class ShooterSubtitle(_ShooterSubtitle): class ShooterProvider(_ShooterProvider): subtitle_class = ShooterSubtitle - + video_types = (Episode, Movie) diff --git a/libs/subliminal_patch/providers/soustitreseu.py b/libs/subliminal_patch/providers/soustitreseu.py index a26600ba3..4c7ca7d8e 100644 --- a/libs/subliminal_patch/providers/soustitreseu.py +++ b/libs/subliminal_patch/providers/soustitreseu.py @@ -102,6 +102,7 @@ class SoustitreseuProvider(Provider, ProviderSubtitleArchiveMixin): """Sous-Titres.eu Provider.""" subtitle_class = SoustitreseuSubtitle languages = {Language(l) for l in ['fra', 'eng']} + video_types = (Episode, Movie) server_url = 'https://www.sous-titres.eu/' search_url = server_url + 'search.html' diff --git a/libs/subliminal_patch/providers/subdivx.py b/libs/subliminal_patch/providers/subdivx.py index 6bdff363b..181b44118 100644 --- a/libs/subliminal_patch/providers/subdivx.py +++ b/libs/subliminal_patch/providers/subdivx.py @@ -83,6 +83,7 @@ class SubdivxSubtitlesProvider(Provider): provider_name = "subdivx" hash_verifiable = False languages = {Language("spa", "MX")} | {Language.fromalpha2("es")} + video_types = (Episode, Movie) subtitle_class = SubdivxSubtitle server_url = "https://www.subdivx.com/" diff --git a/libs/subliminal_patch/providers/subscene.py b/libs/subliminal_patch/providers/subscene.py index ad3e3b748..42f0221b0 100644 --- a/libs/subliminal_patch/providers/subscene.py +++ b/libs/subliminal_patch/providers/subscene.py @@ -21,6 +21,7 @@ from babelfish import language_converters from guessit import guessit from dogpile.cache.api import NO_VALUE from subliminal import Episode, ProviderError +from subliminal.video import Episode, Movie from subliminal.exceptions import ConfigurationError, ServiceUnavailable from subliminal.utils import sanitize_release_group from subliminal.cache import region @@ -124,7 +125,7 @@ class SubsceneProvider(Provider, 
ProviderSubtitleArchiveMixin): languages = supported_languages languages.update(set(Language.rebuild(l, forced=True) for l in languages)) languages.update(set(Language.rebuild(l, hi=True) for l in languages)) - + video_types = (Episode, Movie) session = None skip_wrong_fps = False hearing_impaired_verifiable = True diff --git a/libs/subliminal_patch/providers/subscenter.py b/libs/subliminal_patch/providers/subscenter.py index 92ccdeda7..5dd591296 100644 --- a/libs/subliminal_patch/providers/subscenter.py +++ b/libs/subliminal_patch/providers/subscenter.py @@ -3,7 +3,7 @@ from __future__ import absolute_import from guessit import guessit -from subliminal.video import Episode +from subliminal.video import Episode, Movie from subliminal.providers.subscenter import SubsCenterProvider as _SubsCenterProvider, \ SubsCenterSubtitle as _SubsCenterSubtitle from subzero.language import Language @@ -37,7 +37,7 @@ class SubsCenterSubtitle(_SubsCenterSubtitle): class SubsCenterProvider(_SubsCenterProvider): languages = {Language.fromalpha2(l) for l in ['he']} + video_types = (Episode, Movie) subtitle_class = SubsCenterSubtitle hearing_impaired_verifiable = True server_url = 'http://www.subscenter.info/he/' - diff --git a/libs/subliminal_patch/providers/subssabbz.py b/libs/subliminal_patch/providers/subssabbz.py index 9a0e246b5..18c9ffbef 100644 --- a/libs/subliminal_patch/providers/subssabbz.py +++ b/libs/subliminal_patch/providers/subssabbz.py @@ -122,6 +122,7 @@ class SubsSabBzProvider(Provider): languages = {Language(l) for l in [ 'bul', 'eng' ]} + video_types = (Episode, Movie) def initialize(self): self.session = Session() diff --git a/libs/subliminal_patch/providers/subsunacs.py b/libs/subliminal_patch/providers/subsunacs.py index 4cb771605..df969bcc8 100644 --- a/libs/subliminal_patch/providers/subsunacs.py +++ b/libs/subliminal_patch/providers/subsunacs.py @@ -120,6 +120,7 @@ class SubsUnacsProvider(Provider): languages = {Language(l) for l in [ 'bul', 'eng' ]} + video_types = (Episode, Movie) def initialize(self): self.session = Session() diff --git a/libs/subliminal_patch/providers/subtitrarinoi.py b/libs/subliminal_patch/providers/subtitrarinoi.py index e396d03bb..39ed5703f 100644 --- a/libs/subliminal_patch/providers/subtitrarinoi.py +++ b/libs/subliminal_patch/providers/subtitrarinoi.py @@ -124,6 +124,7 @@ class SubtitrarinoiProvider(Provider, ProviderSubtitleArchiveMixin): subtitle_class = SubtitrarinoiSubtitle languages = {Language(lang) for lang in ['ron']} languages.update(set(Language.rebuild(lang, forced=True) for lang in languages)) + video_types = (Episode, Movie) server_url = 'https://www.subtitrari-noi.ro/' api_url = server_url + 'paginare_filme.php' diff --git a/libs/subliminal_patch/providers/subtitriid.py b/libs/subliminal_patch/providers/subtitriid.py index 516cf02df..99062496f 100644 --- a/libs/subliminal_patch/providers/subtitriid.py +++ b/libs/subliminal_patch/providers/subtitriid.py @@ -94,6 +94,7 @@ class SubtitriIdProvider(Provider, ProviderSubtitleArchiveMixin): """subtitri.id.lv Provider.""" subtitle_class = SubtitriIdSubtitle languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']} + video_types = (Movie,) server_url = 'http://subtitri.id.lv' search_url = server_url + '/search/' diff --git a/libs/subliminal_patch/providers/titlovi.py b/libs/subliminal_patch/providers/titlovi.py index 0a0ebd927..5b9a39986 100644 --- a/libs/subliminal_patch/providers/titlovi.py +++ b/libs/subliminal_patch/providers/titlovi.py @@ -135,6 +135,7 @@ class 
TitloviSubtitle(Subtitle): class TitloviProvider(Provider, ProviderSubtitleArchiveMixin): subtitle_class = TitloviSubtitle languages = {Language.fromtitlovi(l) for l in language_converters['titlovi'].codes} | {Language.fromietf('sr-Latn')} + video_types = (Episode, Movie) api_url = 'https://kodi.titlovi.com/api/subtitles' api_gettoken_url = api_url + '/gettoken' api_search_url = api_url + '/search' diff --git a/libs/subliminal_patch/providers/titrari.py b/libs/subliminal_patch/providers/titrari.py index d2f8a9af9..534870e73 100644 --- a/libs/subliminal_patch/providers/titrari.py +++ b/libs/subliminal_patch/providers/titrari.py @@ -125,6 +125,7 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin): subtitle_class = TitrariSubtitle languages = {Language(lang) for lang in ['ron', 'eng']} languages.update(set(Language.rebuild(lang, forced=True) for lang in languages)) + video_types = (Episode, Movie) api_url = 'https://www.titrari.ro/' query_advanced_search = 'cautarepreaavansata' diff --git a/libs/subliminal_patch/providers/titulky.py b/libs/subliminal_patch/providers/titulky.py index 0639a042f..7e7b63d09 100644 --- a/libs/subliminal_patch/providers/titulky.py +++ b/libs/subliminal_patch/providers/titulky.py @@ -1,361 +1,886 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import import io import logging -import os +import math +import re import zipfile -import time +from random import randint +from threading import Thread import rarfile -from subzero.language import Language from guessit import guessit from requests import Session -from six import text_type +from requests.adapters import HTTPAdapter +from requests.exceptions import HTTPError -from subliminal import __short_version__ -from subliminal.exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded -from subliminal.providers import ParserBeautifulSoup, Provider -from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending, Subtitle -from subliminal_patch.subtitle import guess_matches +from subliminal.exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, Error, ProviderError +from subliminal.providers import ParserBeautifulSoup +from subliminal.subtitle import fix_line_ending from subliminal.video import Episode, Movie -from subliminal.utils import sanitize_release_group -from subliminal.score import get_equivalent_release_groups -from subliminal_patch.utils import sanitize + +from subliminal_patch.exceptions import ParseResponseError +from subliminal_patch.providers import Provider +from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin +from subliminal_patch.score import framerate_equal +from subliminal_patch.subtitle import Subtitle, guess_matches, sanitize + +from subzero.language import Language + +from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST logger = logging.getLogger(__name__) -# class TitulkySubtitle(Subtitle): -# """Titulky Subtitle.""" -# provider_name = 'Titulky' -# -# def __init__(self, language, page_link, year, version, download_link): -# super(TitulkySubtitle, self).__init__(language, page_link=page_link) -# self.year = year -# self.version = version -# self.download_link = download_link -# self.hearing_impaired = None -# self.encoding = 'UTF-8' -# -# @property -# def id(self): -# return self.download_link -# -# def get_matches(self, video): -# matches = set() -# -# # episode -# if isinstance(video, Episode): -# # other properties -# matches |= guess_matches(video, guessit(self.version, {'type': 
'episode'}), partial=True) -# # movie -# elif isinstance(video, Movie): -# # other properties -# matches |= guess_matches(video, guessit(self.version, {'type': 'movie'}), partial=True) -# -# return matches - class TitulkySubtitle(Subtitle): + """Titulky.com subtitle""" provider_name = 'titulky' - - def __init__(self, language, page_link, season, episode, version, download_link, year, title, asked_for_release_group=None, + + hash_verifiable = False + hearing_impaired_verifiable = False + + def __init__(self, + sub_id, + imdb_id, + language, + names, + season, + episode, + year, + releases, + fps, + uploader, + approved, + page_link, + download_link, + skip_wrong_fps=False, asked_for_episode=None): - super(TitulkySubtitle, self).__init__(language, page_link=page_link) + super().__init__(language, page_link=page_link) + + self.names = names + self.year = year + self.sub_id = sub_id + self.imdb_id = imdb_id + self.fps = fps self.season = season self.episode = episode - self.version = version - self.year = year - self.download_link = download_link - for t in title: - self.title = t - if year: - self.year = int(year) - + self.releases = releases + self.release_info = ', '.join(releases) + self.language = language + self.approved = approved self.page_link = page_link - self.asked_for_release_group = asked_for_release_group + self.uploader = uploader + self.download_link = download_link + self.skip_wrong_fps = skip_wrong_fps self.asked_for_episode = asked_for_episode - + self.matches = None + + # Try to parse S00E00 string from the main subtitle name + season_episode_string = re.findall(r'S(\d+)E(\d+)', self.names[0], + re.IGNORECASE) + + # If we did not search for subtitles with season and episode numbers in search query, + # try to parse it from the main subtitle name that most likely contains it + if season_episode_string: + if self.season is None: + self.season = int(season_episode_string[0][0]) + if self.episode is None: + self.episode = int(season_episode_string[0][1]) + @property def id(self): - return self.download_link - - def get_matches(self, video): - """ - patch: set guessit to single_value - :param video: - :return: - """ - matches = set() + return self.sub_id - # episode - if isinstance(video, Episode): - # series - if video.series: - matches.add('series') - # year - if video.original_series and self.year is None or video.year and video.year == self.year: - matches.add('year') - # season - if video.season and self.season == video.season: + def get_fps(self): + return self.fps + + def get_matches(self, video): + matches = set() + _type = 'movie' if isinstance(video, Movie) else 'episode' + + sub_names = self._remove_season_episode_string(self.names) + + if _type == 'episode': + ## EPISODE + + # match imdb_id of a series + if video.series_imdb_id and video.series_imdb_id == self.imdb_id: + matches.add('series_imdb_id') + + # match season/episode + if self.season and self.season == video.season: matches.add('season') - # episode - if video.episode and self.episode == video.episode: + if self.episode and self.episode == video.episode: matches.add('episode') - # guess - matches |= guess_matches(video, guessit(self.version, {'type': 'episode'})) - pass - # movie - elif isinstance(video, Movie): - # title - if video.title and (sanitize(self.title) in ( - sanitize(name) for name in [video.title] + video.alternative_titles)): + + # match series name + series_names = [video.series] + video.alternative_series + logger.debug( + f"Titulky.com: Finding exact match between subtitle names 
{sub_names} and series names {series_names}" + ) + if _contains_element(_from=series_names, + _in=sub_names, + exactly=True): + matches.add('series') + + # match episode title + episode_titles = [video.title] + logger.debug( + f"Titulky.com: Finding exact match between subtitle names {sub_names} and episode titles {episode_titles}" + ) + if _contains_element(_from=episode_titles, + _in=sub_names, + exactly=True): + matches.add('episode_title') + + elif _type == 'movie': + ## MOVIE + + # match imdb_id of a movie + if video.imdb_id and video.imdb_id == self.imdb_id: + matches.add('imdb_id') + + # match movie title + video_titles = [video.title] + video.alternative_titles + logger.debug( + f"Titulky.com: Finding exact match between subtitle names {sub_names} and video titles {video_titles}" + ) + if _contains_element(_from=video_titles, + _in=sub_names, + exactly=True): matches.add('title') - # year - if video.year and self.year == video.year: - matches.add('year') - # guess - matches |= guess_matches(video, guessit(self.version, {'type': 'movie'})) + + ## MOVIE OR EPISODE + + # match year + if video.year and video.year == self.year: + matches.add('year') + + # match other properties based on release infos + for release in self.releases: + matches |= guess_matches(video, guessit(release, {"type": _type})) + + # If turned on in settings, then do not match if video FPS is not equal to subtitle FPS + if self.skip_wrong_fps and video.fps and self.fps and not framerate_equal( + video.fps, self.fps): + logger.info(f"Titulky.com: Skipping subtitle {self}: wrong FPS") + matches.clear() self.matches = matches return matches - - # def get_matches(self, video): - # matches = set() - # - # # episode - # if isinstance(video, Episode): - # # series - # if video.series and (sanitize(self.series_name) in ( - # sanitize(name) for name in [video.series] + video.alternative_series)): - # matches.add('series') - # # movie - # elif isinstance(video, Movie): - # # title - # if video.title and (sanitize(self.movie_name) in ( - # sanitize(name) for name in [video.title] + video.alternative_titles)): - # matches.add('title') - # - # # # episode - # # if isinstance(video, Episode): - # # # other properties - # # matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True) - # # # movie - # # elif isinstance(video, Movie): - # # # other properties - # # matches |= guess_matches(video, guessit(self.version, {'type': 'movie'}), partial=True) - # - # return matches + + # Remove the S00E00 from elements of names array + def _remove_season_episode_string(self, names): + result = names.copy() + + for i, name in enumerate(result): + cleaned_name = re.sub(r'S\d+E\d+', '', name, flags=re.IGNORECASE) + cleaned_name = cleaned_name.strip() + + result[i] = cleaned_name + + return result -class TitulkyProvider(Provider): - """Titulky Provider.""" +class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin): + """Titulky.com provider""" + languages = {Language(l) for l in ['ces', 'slk']} - - server_url = 'https://oldpremium.titulky.com' - sign_out_url = '?Logoff=true' - search_url_series = '?Fulltext={}' - search_url_movies = '?Searching=AdvancedResult&ARelease={}' - dn_url = 'https://oldpremium.titulky.com' - download_url = 'https://oldpremium.titulky.com/idown.php?titulky=' - - UserAgent = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)' - + video_types = (Episode, Movie) + hash_verifiable = False + hearing_impaired_verifiable = False + + server_url = 'https://premium.titulky.com' + 
login_url = server_url + logout_url = f"{server_url}?action=logout" + download_url = f"{server_url}/download.php?id=" + + timeout = 30 + max_threads = 5 + subtitle_class = TitulkySubtitle - - def __init__(self, username=None, password=None): - if any((username, password)) and not all((username, password)): - raise ConfigurationError('Username and password must be specified') - + + def __init__(self, + username=None, + password=None, + skip_wrong_fps=None, + approved_only=None, + multithreading=None): + if not all([username, password]): + raise ConfigurationError("Username and password must be specified!") + + if type(skip_wrong_fps) is not bool: + raise ConfigurationError( + f"Skip_wrong_fps {skip_wrong_fps} must be a boolean!") + + if type(approved_only) is not bool: + raise ConfigurationError( + f"Approved_only {approved_only} must be a boolean!") + + if type(multithreading) is not bool: + raise ConfigurationError( + f"Multithreading {multithreading} must be a boolean!") + self.username = username self.password = password - self.logged_in = False + self.skip_wrong_fps = skip_wrong_fps + self.approved_only = approved_only + self.multithreading = multithreading + self.session = None - + def initialize(self): self.session = Session() - self.session.headers['User-Agent'] = 'Subliminal/{}'.format(__short_version__) - - # login - if self.username and self.password: - logger.info('Logging in') - self.session.get(self.server_url) - data = {'Login': self.username, - 'Password': self.password} - r = self.session.post(self.server_url, data, allow_redirects=False, timeout=10) - - if 'BadLogin' in r.text: - raise AuthenticationError(self.username) - - logger.debug('Logged in') - self.logged_in = True - + # Set max pool size to the max number of threads we will use (i .e. the max number of search result rows) + # or set it to the default value if multithreading is disabled. 
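# Worked example (illustrative, not in the patch): max_threads = 5 keeps the
# default pool size of 10, while a hypothetical max_threads = 20 would give 23.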
+ pool_maxsize = self.max_threads + 3 if self.max_threads > 10 else 10 + self.session.mount('https://', HTTPAdapter(pool_maxsize=pool_maxsize)) + self.session.mount('http://', HTTPAdapter(pool_maxsize=pool_maxsize)) + + # Set headers + self.session.headers['User-Agent'] = AGENT_LIST[randint( + 0, + len(AGENT_LIST) - 1)] + self.session.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' + self.session.headers['Accept-Language'] = 'sk,cz,en;q=0.5' + self.session.headers['Accept-Encoding'] = 'gzip, deflate' + self.session.headers['DNT'] = '1' + self.session.headers['Connection'] = 'keep-alive' + self.session.headers['Upgrade-Insecure-Requests'] = '1' + self.session.headers['Cache-Control'] = 'max-age=0' + + self.login() + def terminate(self): - # logout - if self.logged_in: - logger.info('Logging out') - r = self.session.get(self.server_url + self.sign_out_url, timeout=10) - r.raise_for_status() - logger.debug('Logged out') - self.logged_in = False - + self.logout() self.session.close() - - def query(self, keyword, season=None, episode=None, year=None, video=None): - params = keyword - if season and episode: - params += ' S{season:02d}E{episode:02d}'.format(season=season, episode=episode) - elif year: - params += '&ARok={:4d}'.format(year) - - logger.debug('Searching subtitles %r', params) - subtitles = [] - if season and episode: - search_link = self.server_url + text_type(self.search_url_series).format(params) + + def login(self): + logger.info("Titulky.com: Logging in") + + self.session.get(self.server_url) + + data = {'LoginName': self.username, 'LoginPassword': self.password} + res = self.session.post(self.server_url, + data, + allow_redirects=False, + timeout=self.timeout) + + # If the response is a redirect and doesnt point to an error message page, then we are logged in + if res.status_code == 302 and 'msg_type=i' in res.headers['Location']: + return True + else: + raise AuthenticationError("Login failed") + + def logout(self): + logger.info("Titulky.com: Logging out") + + res = self.session.get(self.logout_url, + allow_redirects=False, + timeout=self.timeout) + + # If the response is a redirect and doesnt point to an error message page, then we are logged out + if res.status_code == 302 and 'msg_type=i' in res.headers['Location']: + return True + else: + raise AuthenticationError("Logout failed.") + + def fetch_page(self, url, ref=None): + logger.debug(f"Titulky.com: Fetching url: {url}") + + res = self.session.get( + url, + timeout=self.timeout, + headers={'Referer': ref if ref else self.server_url}) + + if res.status_code != 200: + raise HTTPError(f"Fetch failed with status code {res.status_code}") + if not res.text: + raise ProviderError("No response returned from the provider") + + return res.text + + def build_search_url(self, params): + result = f"{self.server_url}/?" 
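# Illustrative only (not from the patch): with params such as
# {'Fulltext': 'the expanse', 'Serial': 'S', 'Sezona': 1, 'Epizoda': 4},
# the loop below should produce roughly
# https://premium.titulky.com/?Fulltext=the+expanse&Serial=S&Sezona=1&Epizoda=4&action=search&fsf=1
# i.e. every key=value pair joined by '&', with spaces replaced by '+'.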
+ + params['action'] = 'search' + # Requires subtitle names to match full search keyword + params['fsf'] = 1 + + for key, value in params.items(): + result += f'{key}={value}&' + + # Remove the last & + result = result[:-1] + + # Remove spaces + result = result.replace(' ', '+') + + return result + + # Parse details of an individual subtitle: imdb_id, release, language, uploader, fps and year + def parse_details(self, details_url, search_url): + html_src = self.fetch_page(details_url, ref=search_url) + details_page_soup = ParserBeautifulSoup(html_src, + ['lxml', 'html.parser']) + + details_container = details_page_soup.find('div', class_='detail') + if not details_container: + # The subtitles could be removed and got redirected to a different page. Better treat this silently. + logger.info("Titulky.com: Could not find details div container. Skipping.") + return False + + ### IMDB ID + imdb_id = None + imdb_tag = details_container.find('a', attrs={'target': 'imdb'}) + + if imdb_tag: + imdb_url = imdb_tag.get('href') + imdb_id = re.findall(r'tt(\d+)', imdb_url)[0] + + if not imdb_id: + logger.debug("Titulky.com: No IMDB ID supplied on details page.") + + ### RELEASE + release = None + release_tag = details_container.find('div', class_='releas') + + if not release_tag: + raise ParseResponseError( + "Could not find release tag. Did the HTML source change?") + + release = release_tag.get_text(strip=True) + + if not release: + logger.debug("Titulky.com: No release information supplied on details page.") + + ### LANGUAGE + language = None + czech_flag = details_container.select('img[src*=\'flag-CZ\']') + slovak_flag = details_container.select('img[src*=\'flag-SK\']') + + if czech_flag and not slovak_flag: + language = Language('ces') + elif slovak_flag and not czech_flag: + language = Language('slk') + + if not language: + logger.debug("Titulky.com: No language information supplied on details page.") + + ### UPLOADER + uploader = None + uploader_tag = details_container.find('div', class_='ulozil') + + if not uploader_tag: + raise ParseResponseError( + "Could not find uploader tag. Did the HTML source change?") + + uploader_anchor_tag = uploader_tag.find('a') + + if not uploader_anchor_tag: + raise ParseResponseError( + "Could not find uploader anchor tag. Did the HTML source change?" + ) + + uploader = uploader_anchor_tag.string.strip( + ) if uploader_anchor_tag else None + + if not uploader: + logger.debug("Titulky.com: No uploader name supplied on details page.") + + ### FPS + fps = None + fps_icon_tag_selection = details_container.select( + 'img[src*=\'Movieroll\']') + + if not fps_icon_tag_selection and not hasattr(fps_icon_tag_selection[0], + 'parent'): + raise ParseResponseError( + "Could not find parent of the fps icon tag. Did the HTML source change?" + ) + + fps_icon_tag = fps_icon_tag_selection[0] + parent_text = fps_icon_tag.parent.get_text(strip=True) + match = re.findall(r'(\d+,\d+) fps', parent_text) + + # If the match is found, change the decimal separator to a dot and convert to float + fps = float(match[0].replace(',', '.')) if len(match) > 0 else None + + if not fps: + logger.debug("Titulky.com: No fps supplied on details page.") + + ### YEAR + year = None + h1_tag = details_container.find('h1', id='titulky') + + if not h1_tag: + raise ParseResponseError( + "Could not find h1 tag. 
Did the HTML source change?") + + # The h1 tag contains the name of the subtitle and a year + h1_texts = [text for text in h1_tag.stripped_strings] + year = int(h1_texts[1]) if len(h1_texts) > 1 else None + + if not year: + logger.debug("Titulky.com: No year supplied on details page.") + + # Clean up + details_page_soup.decompose() + details_page_soup = None + + # Return the subtitle details + return { + 'releases': [release], + 'language': language, + 'uploader': uploader, + 'fps': fps, + 'year': year, + 'imdb_id': imdb_id + } + + def process_row(self, + row, + video_names, + search_url, + thread_id=None, + threads_data=None): + try: + # The first anchor tag is an image preview, the second is the name + anchor_tag = row.find_all('a')[1] + # The details link is relative, so we need to remove the dot at the beginning + details_link = f"{self.server_url}{anchor_tag.get('href')[1:]}" + id_match = re.findall(r'id=(\d+)', details_link) + sub_id = id_match[0] if len(id_match) > 0 else None + download_link = f"{self.download_url}{sub_id}" + + # Approved subtitles have a pbl1 class for their row, others have a pbl0 class + approved = True if 'pbl1' in row.get('class') else False + + # Subtitle name + its alternative names + table_columns = row.findAll('td') + main_sub_name = anchor_tag.get_text(strip=True) + + alt_sub_names = [ + alt_sub_name.strip() + for alt_sub_name in table_columns[2].string.split('/') + ] if table_columns[2].string else [] + sub_names = [main_sub_name] + alt_sub_names + + # Does at least one subtitle name contain one of the video names? + # Skip subtitles that do not match + # Video names -> the main title and alternative titles of a movie or an episode and so on... + # Subtitle names -> the main name and alternative names of a subtitle displayed in search results. + # Could be handled in TitulkySubtitle class, however we want to keep the number of requests + # as low as possible and this prevents the from requesting the details page unnecessarily + if not _contains_element(_from=video_names, _in=sub_names): + logger.info( + f"Titulky.com: Skipping subtitle with names: {sub_names}, because there was no match with video names: {video_names}" + ) + if type(threads_data) is list and type(thread_id) is int: + threads_data[thread_id] = { + 'sub_info': None, + 'exception': None + } + + return None + + details = self.parse_details(details_link, search_url) + if not details: + # Details parsing was NOT successful, skipping + if type(threads_data) is list and type(thread_id) is int: + threads_data[thread_id] = { + 'sub_info': None, + 'exception': None + } + + return None + + # Combine all subtitle data into one dict + result = { + 'names': sub_names, + 'id': sub_id, + 'approved': approved, + 'details_link': details_link, + 'download_link': download_link + } + + result.update(details) + + if type(threads_data) is list and type(thread_id) is int: + threads_data[thread_id] = { + 'sub_info': result, + 'exception': None + } + + return details + except Exception as e: + if type(threads_data) is list and type(thread_id) is int: + threads_data[thread_id] = {'sub_info': None, 'exception': e} + + raise e + + # There are multiple ways to find subs from this provider: + # 1. SEARCH by sub title + # - parameter: .................. Fulltext= + # 2. SEARCH by imdb id + # - parameter: .................. IMDB= + # 3. SEARCH by season/episode + # - parameter: .................. Sezona= + # - parameter: .................. Epizoda= + # 4. SEARCH by year + # - parameter: .................. Rok= + # 5. 
SEARCH by video type + # - parameter: .................. Serial=<('S' for series | 'F' for movies | '' for all)> + # 6. SEARCH by language + # - parameter: .................. Jazyk=<('CZ' for czech | 'SK' for slovak | '' for all)> + # 7. SEARCH by status + # - parameter: .................. ASchvalene=<('1' for approved only | '-0' for subs awaiting approval | '' for all)> + # - redirects should NOT be allowed here + # + # 8. BROWSE subtitles by IMDB ID + # - Subtitles are here categorised by seasons and episodes + # - URL: https://premium.titulky.com/?action=serial&step=&id= + # - it seems that the url redirects to a page with their own internal ID, redirects should be allowed here + def query(self, + language, + video_names, + type, + keyword=None, + year=None, + season=None, + episode=None, + imdb_id=None): + ## Build the search URL + params = {} + + # Keyword + if keyword: + params['Fulltext'] = keyword + # Video type + if type == 'episode': + params['Serial'] = 'S' + else: + params['Serial'] = 'F' + # Season / Episode + if season: + params['Sezona'] = season + if episode: + params['Epizoda'] = episode + # IMDB ID + if imdb_id: + params['IMDB'] = imdb_id[2:] # Remove the tt from the imdb id + # Year + if year: + params['Rok'] = year + # Language + if language == Language('ces'): + params['Jazyk'] = 'CZ' + elif language == Language('slk'): + params['Jazyk'] = 'SK' + elif language == None: + params['Jazyk'] = '' else: - search_link = self.server_url + text_type(self.search_url_movies).format(params) - - - r = self.session.get(search_link, timeout=30) - r.raise_for_status() - - if not r.content: - logger.debug('No data returned from provider') return [] - - # soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser']) - - # for entity in soup.select('table .main_table > tbody > tr'): - # for entity in soup.find_all("table", class_="main_table"): - # moviename = entity.text - # entity_url = self.server_url + entity['href'] - # logger.debug(entity_url) - # r = self.session.get(entity_url, timeout=30) - # r.raise_for_status() - # logger.debug('looking into ' + entity_url) - - soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser']).find("table", - class_="main_table") - # loop over subtitles cells - if soup: - subs = soup.find_all("tr", class_="row1") - subs += soup.find_all("tr", class_="row2") - for sub in subs: - page_link = '%s%s' % (self.server_url, sub.a.get('href').encode('utf-8')) - title = sub.find_all('td')[0:1] - title = [x.text for x in title] - version = sub.find(class_="fixedTip") - if version is None: - version = "" - else: - version = version['title'] - try: - r = sub.find_all('td')[6:7] - # r2 = td.find("td", "img") - langs = [x.text.encode('utf-8') for x in r] - pass - except: - langs = 'CZ' - name = '%s (%s)' % (version, langs) - - if b'CZ' in langs: - language = Language('ces') - elif b'SK' in langs: - language = Language('slk') - # read the item - # subtitle = self.subtitle_class(language, page_link, year, version, page_link.replace("detail", "dld")) - download_link = sub.find('a', class_='titulkydownloadajax') - download_link = self.download_url + download_link.get('href') - - subtitle = self.subtitle_class(language, page_link, - season, episode, version, download_link, year, title, - asked_for_release_group=video.release_group, - asked_for_episode=episode) - - logger.debug('Found subtitle %r', subtitle) - subtitles.append(subtitle) - - soup.decompose() - soup = None - - return subtitles - - def 
list_subtitles(self, video, languages): - if isinstance(video, Episode): - titles = [video.series] + video.alternative_series - elif isinstance(video, Movie): - titles = [video.title] + video.alternative_titles + # Status + if self.approved_only: + logger.debug(f"Titulky.com: Searching only for approved subtitles") + params['ASchvalene'] = '1' else: - titles = [] - + params['ASchvalene'] = '' + + search_url = self.build_search_url(params) + + ## Search results page parsing + html_src = self.fetch_page(search_url) + search_page_soup = ParserBeautifulSoup(html_src, + ['lxml', 'html.parser']) + + # If there is a message containing "Žádny odpovídající záznam", it means that there are no results + # If that's the case, return an empty list + error_message = search_page_soup.select('.panel-body > strong') + if len( + error_message + ) > 0 and 'Žádný odpovídající záznam' in error_message[0].get_text( + strip=True): + logger.info("Titulky.com: No results found") + return [] + + # Get the table containing the search results + table = search_page_soup.find('table', class_='table') + if not table: + logger.debug("Titulky.com: Could not find table") + raise ParseResponseError( + "Could not find table. Did the HTML source change?") + + # Get table body containing rows of subtitles + table_body = table.find('tbody') + if not table_body: + logger.debug("Titulky.com: Could not find table body") + raise ParseResponseError( + "Could not find table body. Did the HTML source change?") + + ## Loop over all subtitles on the first page and put them in a list subtitles = [] - # query for subtitles with the show_id - for title in titles: - if isinstance(video, Episode): - subtitles += [s for s in self.query(title, season=video.season, episode=video.episode, - year=video.year, video=video) - if s.language in languages] - elif isinstance(video, Movie): - subtitles += [s for s in self.query(title, year=video.year, video=video) - if s.language in languages] - + rows = table_body.find_all('tr') + + if not self.multithreading: + # Process the rows sequentially + logger.info("Titulky.com: processing results in sequence") + for i, row in enumerate(rows): + sub_info = self.process_row(row, video_names, search_url) + + # If subtitle info was returned, then everything was okay + # and we can instationate it and add it to the list + if sub_info: + logger.debug( + f"Titulky.com: Sucessfully retrieved subtitle info, row: {i}" + ) + + # If we found the subtitle by IMDB ID, no need to get it from details page + sub_imdb_id = imdb_id or sub_info['imdb_id'] + + subtitle_instance = self.subtitle_class( + sub_info['id'], + sub_imdb_id, + sub_info['language'], + sub_info['names'], + season, + episode, + sub_info['year'], + sub_info['releases'], + sub_info['fps'], + sub_info['uploader'], + sub_info['approved'], + sub_info['details_link'], + sub_info['download_link'], + skip_wrong_fps=self.skip_wrong_fps, + asked_for_episode=(type == 'episode')) + subtitles.append(subtitle_instance) + else: + # No subtitle info was returned, i. e. something unexpected + # happend during subtitle details page fetching and processing. + logger.debug(f"Titulky.com: No subtitle info retrieved, row: {i}") + else: + # Process the rows in paralell + logger.info( + f"Titulky.com: processing results in parelell, {self.max_threads} rows at a time." + ) + + threads = [None] * len(rows) + threads_data = [None] * len(rows) + + # Process rows in parallel, self.max_threads at a time. 
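For illustration, the numbered parameter list above (Fulltext, Serial, Sezona, Epizoda, IMDB, Rok, Jazyk, ASchvalene) maps onto a query string the same way build_search_url assembles it. This is a minimal standalone sketch; the base URL is taken from the browse URL mentioned in the comment and stands in for whatever prefix the provider actually uses.

    def build_titulky_search_url(params, base='https://premium.titulky.com/'):
        # 'action=search' and 'fsf=1' (match the full search keyword) are always
        # appended, mirroring the provider's build_search_url above.
        params = dict(params, action='search', fsf=1)
        query = '&'.join(f'{key}={value}' for key, value in params.items())
        # The site expects spaces encoded as '+'.
        return (base + '?' + query).replace(' ', '+')

    # Approved-only Czech subtitles for S01E02 of a series, found by keyword:
    url = build_titulky_search_url({
        'Fulltext': 'Some Show',   # keyword search
        'Serial': 'S',             # 'S' = series, 'F' = movies, '' = both
        'Sezona': 1,               # season
        'Epizoda': 2,              # episode
        'Jazyk': 'CZ',             # 'CZ' = czech, 'SK' = slovak, '' = all
        'ASchvalene': '1',         # '1' = approved subtitles only
    })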
+ cycles = math.ceil(len(rows) / self.max_threads) + for i in range(cycles): + # Batch number i + starting_index = i * self.max_threads # Inclusive + ending_index = starting_index + self.max_threads # Non-inclusive + + # Create threads for all rows in this batch + for j in range(starting_index, ending_index): + # Check if j-th row exists + if j < len(rows): + # Row number j + logger.debug( + f"Titulky.com: Creating thread {j} (batch: {i})") + # Create a thread for row j and start it + threads[j] = Thread( + target=self.process_row, + args=[rows[j], video_names, search_url], + kwargs={ + 'thread_id': j, + 'threads_data': threads_data + }) + threads[j].start() + + # Wait for all created threads to finish before moving to another batch of rows + for j in range(starting_index, ending_index): + # Check if j-th row exists + if j < len(rows): + threads[j].join() + + # Process the resulting data from all threads + for i in range(len(threads_data)): + thread_data = threads_data[i] + + # If the thread returned didn't return anything, but expected a dict object + if not thread_data: + raise ProviderError(f"No data returned from thread ID: {i}") + + # If an exception was raised in a thread, raise it again here + if 'exception' in thread_data and thread_data['exception']: + logger.debug( + f"Titulky.com: An error occured while processing a row in the thread ID {i}" + ) + raise thread_data['exception'] + + # If the thread returned a subtitle info, great, instantiate it and add it to the list + if 'sub_info' in thread_data and thread_data['sub_info']: + # Instantiate the subtitle object + logger.debug( + f"Titulky.com: Sucessfully retrieved subtitle info, thread ID: {i}" + ) + sub_info = thread_data['sub_info'] + + # If we found the subtitle by IMDB ID, no need to get it from details page + sub_imdb_id = imdb_id or sub_info['imdb_id'] + + subtitle_instance = self.subtitle_class( + sub_info['id'], + sub_imdb_id, + sub_info['language'], + sub_info['names'], + season, + episode, + sub_info['year'], + sub_info['releases'], + sub_info['fps'], + sub_info['uploader'], + sub_info['approved'], + sub_info['details_link'], + sub_info['download_link'], + skip_wrong_fps=self.skip_wrong_fps, + asked_for_episode=(type == 'episode')) + subtitles.append(subtitle_instance) + else: + # The thread returned data, but it didn't contain a subtitle info, i. e. something unexpected + # happend during subtitle details page fetching and processing. 
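The batch-and-join threading above (start at most self.max_threads Thread objects, join them, then read results back out of threads_data) could equivalently be expressed with concurrent.futures. The sketch below is only an alternative illustration of that pattern, assuming process_row is safe to call concurrently as it is above; it is not the provider's implementation.

    from concurrent.futures import ThreadPoolExecutor

    def process_rows_concurrently(provider, rows, video_names, search_url, max_threads):
        # Fetch and parse the detail page of every result row, at most
        # max_threads at a time, keeping results in row order.
        with ThreadPoolExecutor(max_workers=max_threads) as pool:
            futures = [pool.submit(provider.process_row, row, video_names, search_url)
                       for row in rows]
            # result() re-raises any exception from the worker thread,
            # which plays the role of the 'exception' slot in threads_data.
            return [future.result() for future in futures]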
+ logger.debug( + f"Titulky.com: No subtitle info retrieved, thread ID: {i}" + ) + + # Clean up + search_page_soup.decompose() + search_page_soup = None + + logger.debug(f"Titulky.com: Found subtitles: {subtitles}") + return subtitles - + + def list_subtitles(self, video, languages): + subtitles = [] + + # Possible paths: + # (1) Search by IMDB ID [and season/episode for tv series] + # (2) Search by keyword: video (title|series) [and season/episode for tv series] + # (3) Search by keyword: video series + S00E00 (tv series only) + + for language in languages: + if isinstance(video, Episode): + video_names = [video.series, video.title + ] + video.alternative_series + + # (1) + logger.info( + "Titulky.com: Finding subtitles by IMDB ID, Season and Episode (1)" + ) + if video.series_imdb_id: + partial_subs = self.query(language, + video_names, + 'episode', + imdb_id=video.series_imdb_id, + season=video.season, + episode=video.episode) + if (len(partial_subs) > 0): + subtitles += partial_subs + continue + + # (2) + logger.info( + "Titulky.com: Finding subtitles by keyword, Season and Episode (2)" + ) + keyword = video.series + partial_subs = self.query(language, + video_names, + 'episode', + keyword=keyword, + season=video.season, + episode=video.episode) + if (len(partial_subs) > 0): + subtitles += partial_subs + continue + + # (3) + logger.info("Titulky.com: Finding subtitles by keyword only (3)") + keyword = f"{video.series} S{video.season:02d}E{video.episode:02d}" + partial_subs = self.query(language, + video_names, + 'episode', + keyword=keyword) + subtitles += partial_subs + elif isinstance(video, Movie): + video_names = [video.title] + video.alternative_titles + + # (1) + logger.info("Titulky.com: Finding subtitles by IMDB ID (1)") + if video.imdb_id: + partial_subs = self.query(language, + video_names, + 'movie', + imdb_id=video.imdb_id) + if (len(partial_subs) > 0): + subtitles += partial_subs + continue + + # (2) + logger.info("Titulky.com: Finding subtitles by keyword (2)") + keyword = video.title + partial_subs = self.query(language, + video_names, + 'movie', + keyword=keyword) + subtitles += partial_subs + + return subtitles + def download_subtitle(self, subtitle): - if isinstance(subtitle, TitulkySubtitle): - # download the subtitle - logger.info('Downloading subtitle %r', subtitle) - r = self.session.get(subtitle.download_link, headers={'Referer': subtitle.page_link}, - timeout=30) - r.raise_for_status() - - if not r.content: - logger.debug('Unable to download subtitle. 
No data returned from provider') - return - elif 'Limit vyčerpán' in r.text: - raise DownloadLimitExceeded - - soup = ParserBeautifulSoup(r.text, ['lxml', 'html.parser']) - # links = soup.find("a", {"id": "downlink"}).find_all('a') - link = soup.find(id="downlink") - # TODO: add settings for choice - - url = self.dn_url + link.get('href') - time.sleep(0.5) - r = self.session.get(url, headers={'Referer': subtitle.download_link}, - timeout=30) - r.raise_for_status() - - - archive_stream = io.BytesIO(r.content) + res = self.session.get(subtitle.download_link, + headers={'Referer': subtitle.page_link}, + timeout=self.timeout) + + try: + res.raise_for_status() + except: + raise HTTPError( + f"An error occured during the download request to {subtitle.download_link}" + ) + + archive_stream = io.BytesIO(res.content) archive = None if rarfile.is_rarfile(archive_stream): - logger.debug('Identified rar archive') + logger.debug("Titulky.com: Identified rar archive") archive = rarfile.RarFile(archive_stream) - subtitle_content = _get_subtitle_from_archive(archive) + subtitle_content = self.get_subtitle_from_archive(subtitle, archive) elif zipfile.is_zipfile(archive_stream): - logger.debug('Identified zip archive') + logger.debug("Titulky.com: Identified zip archive") archive = zipfile.ZipFile(archive_stream) - subtitle_content = _get_subtitle_from_archive(archive) + subtitle_content = self.get_subtitle_from_archive(subtitle, archive) else: - subtitle_content = r.content - - if subtitle_content: - subtitle.content = fix_line_ending(subtitle_content) - else: - logger.debug('Could not extract subtitle from %r', archive) + subtitle_content = fix_line_ending(res.content) + + if not subtitle_content: + logger.debug( + "Titulky.com: No subtitle content found. The downloading limit has been most likely exceeded." 
+ ) + raise DownloadLimitExceeded( + "Subtitles download limit has been exceeded") + + subtitle.content = subtitle_content -def _get_subtitle_from_archive(archive): - for name in archive.namelist(): - # discard hidden files - if os.path.split(name)[-1].startswith('.'): - continue - - # discard non-subtitle files - if not name.lower().endswith(SUBTITLE_EXTENSIONS): - continue - - return archive.read(name) - - return None +# Check if any element from source array is contained partially or exactly in any element from target array +# Returns on the first match +def _contains_element(_from=None, _in=None, exactly=False): + source_array = _from + target_array = _in + + for source in source_array: + for target in target_array: + if exactly: + if sanitize(source) == sanitize(target): + return True + else: + if sanitize(source) in sanitize(target): + return True + + return False diff --git a/libs/subliminal_patch/providers/tusubtitulo.py b/libs/subliminal_patch/providers/tusubtitulo.py index 8385ebeab..979c28b5f 100644 --- a/libs/subliminal_patch/providers/tusubtitulo.py +++ b/libs/subliminal_patch/providers/tusubtitulo.py @@ -141,7 +141,7 @@ class TuSubtituloProvider(Provider): completed = "%" not in content[5].text download_url = ( - content[6].find_all("a")[1].get("href").split("?sub=")[-1] + parse.unquote(content[6].find_all("a")[1].get("href").split("?sub=")[-1]) ) episode_id = download_url.split("/")[4] @@ -219,9 +219,9 @@ class TuSubtituloProvider(Provider): soup = bso(r.content, "lxml") for url, selected in zip(soup.select(_CSS1), soup.select(_CSS2)): - meta = ".".join( + meta = parse.unquote(".".join( selected.get("href").split(discriminator)[-1].split(".")[:-1] - ) + )) if meta in episode_dict["download_url"]: id_url = url.find_all("a")[0].get("href") @@ -255,7 +255,11 @@ class TuSubtituloProvider(Provider): return [] def list_subtitles(self, video, languages): - return self.query(video) + # return self.query(video) + + # returning no subtitles automatically to prevent requests to the provider who explicitly requested to be + # removed in https://github.com/morpheus65535/bazarr/issues/1591 + return [] @staticmethod def _check_response(response): diff --git a/libs/subliminal_patch/providers/tvsubtitles.py b/libs/subliminal_patch/providers/tvsubtitles.py index 9e0ec4216..cb7c43fda 100644 --- a/libs/subliminal_patch/providers/tvsubtitles.py +++ b/libs/subliminal_patch/providers/tvsubtitles.py @@ -10,6 +10,7 @@ from subliminal.cache import SHOW_EXPIRATION_TIME, region, EPISODE_EXPIRATION_TI from subliminal.providers.tvsubtitles import TVsubtitlesProvider as _TVsubtitlesProvider, \ TVsubtitlesSubtitle as _TVsubtitlesSubtitle, link_re, episode_id_re from subliminal.utils import sanitize +from subliminal.video import Episode logger = logging.getLogger(__name__) @@ -26,6 +27,7 @@ class TVsubtitlesProvider(_TVsubtitlesProvider): 'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por', 'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho' ]} + video_types = (Episode,) subtitle_class = TVsubtitlesSubtitle @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) diff --git a/libs/subliminal_patch/providers/wizdom.py b/libs/subliminal_patch/providers/wizdom.py index a68102203..f6ef2498e 100644 --- a/libs/subliminal_patch/providers/wizdom.py +++ b/libs/subliminal_patch/providers/wizdom.py @@ -79,6 +79,7 @@ class WizdomSubtitle(Subtitle): class WizdomProvider(Provider): """Wizdom Provider.""" languages = {Language(l) for l in ['heb']} + 
video_types = (Episode, Movie) server_url = 'wizdom.xyz' _tmdb_api_key = 'a51ee051bcd762543373903de296e0a3' diff --git a/libs/subliminal_patch/providers/yavkanet.py b/libs/subliminal_patch/providers/yavkanet.py index e8b67e827..7c6c3c37d 100644 --- a/libs/subliminal_patch/providers/yavkanet.py +++ b/libs/subliminal_patch/providers/yavkanet.py @@ -102,6 +102,7 @@ class YavkaNetProvider(Provider): languages = {Language(l) for l in [ 'bul', 'eng', 'rus', 'spa', 'ita' ]} + video_types = (Episode, Movie) def initialize(self): self.session = Session() diff --git a/libs/subliminal_patch/providers/zimuku.py b/libs/subliminal_patch/providers/zimuku.py index a405bf086..ec3fa0864 100644 --- a/libs/subliminal_patch/providers/zimuku.py +++ b/libs/subliminal_patch/providers/zimuku.py @@ -84,6 +84,7 @@ class ZimukuProvider(Provider): """Zimuku Provider.""" languages = {Language(*l) for l in supported_languages} + video_types = (Episode, Movie) logger.info(str(supported_languages)) server_url = "http://zimuku.org" diff --git a/libs/version.txt b/libs/version.txt index c3fc11bff..07101175f 100644 --- a/libs/version.txt +++ b/libs/version.txt @@ -1,5 +1,5 @@ apprise=0.8.8 -apscheduler=3.5.1 +apscheduler=3.8.0 babelfish=0.5.5 backports.functools-lru-cache=1.5 Beaker=1.10.0 @@ -14,7 +14,6 @@ enzyme=0.4.1 ffsubsync=0.4.11 Flask=1.1.1 flask-socketio=5.0.2dev -gevent-websocker=0.10.1 gitpython=2.1.9 guessit=3.3.1 guess_language-spirit=0.5.3 @@ -33,7 +32,7 @@ rarfile=3.0 rebulk=3.0.1 requests=2.18.4 semver=2.13.0 -signalr-client=0.0.7 <-- Modified to work with Sonarr and added exception handler +signalr-client-threads=0.0.12 <-- Modified to work with Sonarr signalrcore=0.9.2 <-- https://github.com/mandrewcito/signalrcore/pull/60 and 61 SimpleConfigParser=0.1.0 <-- modified version: do not update!!! 
six=1.11.0 @@ -44,6 +43,7 @@ subliminal=2.1.0dev tzlocal=2.1b1 twine=3.4.1 urllib3=1.23 +waitress=2.0.0 websocket-client=1.0.0 ## indirect dependencies diff --git a/libs/waitress/__init__.py b/libs/waitress/__init__.py new file mode 100644 index 000000000..bbb99da03 --- /dev/null +++ b/libs/waitress/__init__.py @@ -0,0 +1,46 @@ +import logging + +from waitress.server import create_server + + +def serve(app, **kw): + _server = kw.pop("_server", create_server) # test shim + _quiet = kw.pop("_quiet", False) # test shim + _profile = kw.pop("_profile", False) # test shim + if not _quiet: # pragma: no cover + # idempotent if logging has already been set up + logging.basicConfig() + server = _server(app, **kw) + if not _quiet: # pragma: no cover + server.print_listen("Serving on http://{}:{}") + if _profile: # pragma: no cover + profile("server.run()", globals(), locals(), (), False) + else: + server.run() + + +def serve_paste(app, global_conf, **kw): + serve(app, **kw) + return 0 + + +def profile(cmd, globals, locals, sort_order, callers): # pragma: no cover + # runs a command under the profiler and print profiling output at shutdown + import os + import profile + import pstats + import tempfile + + fd, fn = tempfile.mkstemp() + try: + profile.runctx(cmd, globals, locals, fn) + stats = pstats.Stats(fn) + stats.strip_dirs() + # calls,time,cumulative and cumulative,calls,time are useful + stats.sort_stats(*(sort_order or ("cumulative", "calls", "time"))) + if callers: + stats.print_callers(0.3) + else: + stats.print_stats(0.3) + finally: + os.remove(fn) diff --git a/libs/waitress/__main__.py b/libs/waitress/__main__.py new file mode 100644 index 000000000..9bcd07e59 --- /dev/null +++ b/libs/waitress/__main__.py @@ -0,0 +1,3 @@ +from waitress.runner import run # pragma nocover + +run() # pragma nocover diff --git a/libs/waitress/adjustments.py b/libs/waitress/adjustments.py new file mode 100644 index 000000000..466b5c4a9 --- /dev/null +++ b/libs/waitress/adjustments.py @@ -0,0 +1,523 @@ +############################################################################## +# +# Copyright (c) 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## +"""Adjustments are tunable parameters. +""" +import getopt +import socket +import warnings + +from .compat import HAS_IPV6, WIN +from .proxy_headers import PROXY_HEADERS + +truthy = frozenset(("t", "true", "y", "yes", "on", "1")) + +KNOWN_PROXY_HEADERS = frozenset( + header.lower().replace("_", "-") for header in PROXY_HEADERS +) + + +def asbool(s): + """Return the boolean value ``True`` if the case-lowered value of string + input ``s`` is any of ``t``, ``true``, ``y``, ``on``, or ``1``, otherwise + return the boolean value ``False``. If ``s`` is the value ``None``, + return ``False``. 
If ``s`` is already one of the boolean values ``True`` + or ``False``, return it.""" + if s is None: + return False + if isinstance(s, bool): + return s + s = str(s).strip() + return s.lower() in truthy + + +def asoctal(s): + """Convert the given octal string to an actual number.""" + return int(s, 8) + + +def aslist_cronly(value): + if isinstance(value, str): + value = filter(None, [x.strip() for x in value.splitlines()]) + return list(value) + + +def aslist(value): + """Return a list of strings, separating the input based on newlines + and, if flatten=True (the default), also split on spaces within + each line.""" + values = aslist_cronly(value) + result = [] + for value in values: + subvalues = value.split() + result.extend(subvalues) + return result + + +def asset(value): + return set(aslist(value)) + + +def slash_fixed_str(s): + s = s.strip() + if s: + # always have a leading slash, replace any number of leading slashes + # with a single slash, and strip any trailing slashes + s = "/" + s.lstrip("/").rstrip("/") + return s + + +def str_iftruthy(s): + return str(s) if s else None + + +def as_socket_list(sockets): + """Checks if the elements in the list are of type socket and + removes them if not.""" + return [sock for sock in sockets if isinstance(sock, socket.socket)] + + +class _str_marker(str): + pass + + +class _int_marker(int): + pass + + +class _bool_marker: + pass + + +class Adjustments: + """This class contains tunable parameters.""" + + _params = ( + ("host", str), + ("port", int), + ("ipv4", asbool), + ("ipv6", asbool), + ("listen", aslist), + ("threads", int), + ("trusted_proxy", str_iftruthy), + ("trusted_proxy_count", int), + ("trusted_proxy_headers", asset), + ("log_untrusted_proxy_headers", asbool), + ("clear_untrusted_proxy_headers", asbool), + ("url_scheme", str), + ("url_prefix", slash_fixed_str), + ("backlog", int), + ("recv_bytes", int), + ("send_bytes", int), + ("outbuf_overflow", int), + ("outbuf_high_watermark", int), + ("inbuf_overflow", int), + ("connection_limit", int), + ("cleanup_interval", int), + ("channel_timeout", int), + ("log_socket_errors", asbool), + ("max_request_header_size", int), + ("max_request_body_size", int), + ("expose_tracebacks", asbool), + ("ident", str_iftruthy), + ("asyncore_loop_timeout", int), + ("asyncore_use_poll", asbool), + ("unix_socket", str), + ("unix_socket_perms", asoctal), + ("sockets", as_socket_list), + ("channel_request_lookahead", int), + ("server_name", str), + ) + + _param_map = dict(_params) + + # hostname or IP address to listen on + host = _str_marker("0.0.0.0") + + # TCP port to listen on + port = _int_marker(8080) + + listen = ["{}:{}".format(host, port)] + + # number of threads available for tasks + threads = 4 + + # Host allowed to overrid ``wsgi.url_scheme`` via header + trusted_proxy = None + + # How many proxies we trust when chained + # + # X-Forwarded-For: 192.0.2.1, "[2001:db8::1]" + # + # or + # + # Forwarded: for=192.0.2.1, For="[2001:db8::1]" + # + # means there were (potentially), two proxies involved. If we know there is + # only 1 valid proxy, then that initial IP address "192.0.2.1" is not + # trusted and we completely ignore it. If there are two trusted proxies in + # the path, this value should be set to a higher number. + trusted_proxy_count = None + + # Which of the proxy headers should we trust, this is a set where you + # either specify forwarded or one or more of forwarded-host, forwarded-for, + # forwarded-proto, forwarded-port. 
+ trusted_proxy_headers = set() + + # Would you like waitress to log warnings about untrusted proxy headers + # that were encountered while processing the proxy headers? This only makes + # sense to set when you have a trusted_proxy, and you expect the upstream + # proxy server to filter invalid headers + log_untrusted_proxy_headers = False + + # Should waitress clear any proxy headers that are not deemed trusted from + # the environ? Change to True by default in 2.x + clear_untrusted_proxy_headers = _bool_marker + + # default ``wsgi.url_scheme`` value + url_scheme = "http" + + # default ``SCRIPT_NAME`` value, also helps reset ``PATH_INFO`` + # when nonempty + url_prefix = "" + + # server identity (sent in Server: header) + ident = "waitress" + + # backlog is the value waitress passes to pass to socket.listen() This is + # the maximum number of incoming TCP connections that will wait in an OS + # queue for an available channel. From listen(1): "If a connection + # request arrives when the queue is full, the client may receive an error + # with an indication of ECONNREFUSED or, if the underlying protocol + # supports retransmission, the request may be ignored so that a later + # reattempt at connection succeeds." + backlog = 1024 + + # recv_bytes is the argument to pass to socket.recv(). + recv_bytes = 8192 + + # deprecated setting controls how many bytes will be buffered before + # being flushed to the socket + send_bytes = 1 + + # A tempfile should be created if the pending output is larger than + # outbuf_overflow, which is measured in bytes. The default is 1MB. This + # is conservative. + outbuf_overflow = 1048576 + + # The app_iter will pause when pending output is larger than this value + # in bytes. + outbuf_high_watermark = 16777216 + + # A tempfile should be created if the pending input is larger than + # inbuf_overflow, which is measured in bytes. The default is 512K. This + # is conservative. + inbuf_overflow = 524288 + + # Stop creating new channels if too many are already active (integer). + # Each channel consumes at least one file descriptor, and, depending on + # the input and output body sizes, potentially up to three. The default + # is conservative, but you may need to increase the number of file + # descriptors available to the Waitress process on most platforms in + # order to safely change it (see ``ulimit -a`` "open files" setting). + # Note that this doesn't control the maximum number of TCP connections + # that can be waiting for processing; the ``backlog`` argument controls + # that. + connection_limit = 100 + + # Minimum seconds between cleaning up inactive channels. + cleanup_interval = 30 + + # Maximum seconds to leave an inactive connection open. + channel_timeout = 120 + + # Boolean: turn off to not log premature client disconnects. + log_socket_errors = True + + # maximum number of bytes of all request headers combined (256K default) + max_request_header_size = 262144 + + # maximum number of bytes in request body (1GB default) + max_request_body_size = 1073741824 + + # expose tracebacks of uncaught exceptions + expose_tracebacks = False + + # Path to a Unix domain socket to use. + unix_socket = None + + # Path to a Unix domain socket to use. + unix_socket_perms = 0o600 + + # The socket options to set on receiving a connection. It is a list of + # (level, optname, value) tuples. TCP_NODELAY disables the Nagle + # algorithm for writes (Waitress already buffers its writes). 
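As a usage sketch, these adjustments are normally supplied as keyword arguments to waitress.serve(), which routes them through the _params table above; the converter functions (asbool, aslist, asset, ...) also accept string values. The application and the values below are placeholders, not Bazarr's actual configuration.

    from waitress import serve

    def app(environ, start_response):
        # Placeholder WSGI application.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from waitress\n']

    serve(
        app,
        listen='0.0.0.0:6767',                 # mutually exclusive with host=/port=
        threads=4,                             # worker threads for WSGI tasks
        url_prefix='/bazarr',                  # becomes SCRIPT_NAME
        trusted_proxy='127.0.0.1',             # reverse proxy allowed to set forwarded headers
        trusted_proxy_headers={'x-forwarded-proto', 'x-forwarded-for'},
        clear_untrusted_proxy_headers=True,
        channel_timeout=120,                   # seconds before an idle connection is dropped
    )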
+ socket_options = [ + (socket.SOL_TCP, socket.TCP_NODELAY, 1), + ] + + # The asyncore.loop timeout value + asyncore_loop_timeout = 1 + + # The asyncore.loop flag to use poll() instead of the default select(). + asyncore_use_poll = False + + # Enable IPv4 by default + ipv4 = True + + # Enable IPv6 by default + ipv6 = True + + # A list of sockets that waitress will use to accept connections. They can + # be used for e.g. socket activation + sockets = [] + + # By setting this to a value larger than zero, each channel stays readable + # and continues to read requests from the client even if a request is still + # running, until the number of buffered requests exceeds this value. + # This allows detecting if a client closed the connection while its request + # is being processed. + channel_request_lookahead = 0 + + # This setting controls the SERVER_NAME of the WSGI environment, this is + # only ever used if the remote client sent a request without a Host header + # (or when using the Proxy settings, without forwarding a Host header) + server_name = "waitress.invalid" + + def __init__(self, **kw): + + if "listen" in kw and ("host" in kw or "port" in kw): + raise ValueError("host or port may not be set if listen is set.") + + if "listen" in kw and "sockets" in kw: + raise ValueError("socket may not be set if listen is set.") + + if "sockets" in kw and ("host" in kw or "port" in kw): + raise ValueError("host or port may not be set if sockets is set.") + + if "sockets" in kw and "unix_socket" in kw: + raise ValueError("unix_socket may not be set if sockets is set") + + if "unix_socket" in kw and ("host" in kw or "port" in kw): + raise ValueError("unix_socket may not be set if host or port is set") + + if "unix_socket" in kw and "listen" in kw: + raise ValueError("unix_socket may not be set if listen is set") + + if "send_bytes" in kw: + warnings.warn( + "send_bytes will be removed in a future release", DeprecationWarning + ) + + for k, v in kw.items(): + if k not in self._param_map: + raise ValueError("Unknown adjustment %r" % k) + setattr(self, k, self._param_map[k](v)) + + if not isinstance(self.host, _str_marker) or not isinstance( + self.port, _int_marker + ): + self.listen = ["{}:{}".format(self.host, self.port)] + + enabled_families = socket.AF_UNSPEC + + if not self.ipv4 and not HAS_IPV6: # pragma: no cover + raise ValueError( + "IPv4 is disabled but IPv6 is not available. Cowardly refusing to start." 
+ ) + + if self.ipv4 and not self.ipv6: + enabled_families = socket.AF_INET + + if not self.ipv4 and self.ipv6 and HAS_IPV6: + enabled_families = socket.AF_INET6 + + wanted_sockets = [] + hp_pairs = [] + for i in self.listen: + if ":" in i: + (host, port) = i.rsplit(":", 1) + + # IPv6 we need to make sure that we didn't split on the address + if "]" in port: # pragma: nocover + (host, port) = (i, str(self.port)) + else: + (host, port) = (i, str(self.port)) + + if WIN: # pragma: no cover + try: + # Try turning the port into an integer + port = int(port) + + except Exception: + raise ValueError( + "Windows does not support service names instead of port numbers" + ) + + try: + if "[" in host and "]" in host: # pragma: nocover + host = host.strip("[").rstrip("]") + + if host == "*": + host = None + + for s in socket.getaddrinfo( + host, + port, + enabled_families, + socket.SOCK_STREAM, + socket.IPPROTO_TCP, + socket.AI_PASSIVE, + ): + (family, socktype, proto, _, sockaddr) = s + + # It seems that getaddrinfo() may sometimes happily return + # the same result multiple times, this of course makes + # bind() very unhappy... + # + # Split on %, and drop the zone-index from the host in the + # sockaddr. Works around a bug in OS X whereby + # getaddrinfo() returns the same link-local interface with + # two different zone-indices (which makes no sense what so + # ever...) yet treats them equally when we attempt to bind(). + if ( + sockaddr[1] == 0 + or (sockaddr[0].split("%", 1)[0], sockaddr[1]) not in hp_pairs + ): + wanted_sockets.append((family, socktype, proto, sockaddr)) + hp_pairs.append((sockaddr[0].split("%", 1)[0], sockaddr[1])) + + except Exception: + raise ValueError("Invalid host/port specified.") + + if self.trusted_proxy_count is not None and self.trusted_proxy is None: + raise ValueError( + "trusted_proxy_count has no meaning without setting " "trusted_proxy" + ) + + elif self.trusted_proxy_count is None: + self.trusted_proxy_count = 1 + + if self.trusted_proxy_headers and self.trusted_proxy is None: + raise ValueError( + "trusted_proxy_headers has no meaning without setting " "trusted_proxy" + ) + + if self.trusted_proxy_headers: + self.trusted_proxy_headers = { + header.lower() for header in self.trusted_proxy_headers + } + + unknown_values = self.trusted_proxy_headers - KNOWN_PROXY_HEADERS + if unknown_values: + raise ValueError( + "Received unknown trusted_proxy_headers value (%s) expected one " + "of %s" + % (", ".join(unknown_values), ", ".join(KNOWN_PROXY_HEADERS)) + ) + + if ( + "forwarded" in self.trusted_proxy_headers + and self.trusted_proxy_headers - {"forwarded"} + ): + raise ValueError( + "The Forwarded proxy header and the " + "X-Forwarded-{By,Host,Proto,Port,For} headers are mutually " + "exclusive. Can't trust both!" + ) + + elif self.trusted_proxy is not None: + warnings.warn( + "No proxy headers were marked as trusted, but trusted_proxy was set. " + "Implicitly trusting X-Forwarded-Proto for backwards compatibility. " + "This will be removed in future versions of waitress.", + DeprecationWarning, + ) + self.trusted_proxy_headers = {"x-forwarded-proto"} + + if self.clear_untrusted_proxy_headers is _bool_marker: + warnings.warn( + "In future versions of Waitress clear_untrusted_proxy_headers will be " + "set to True by default. 
You may opt-out by setting this value to " + "False, or opt-in explicitly by setting this to True.", + DeprecationWarning, + ) + self.clear_untrusted_proxy_headers = False + + self.listen = wanted_sockets + + self.check_sockets(self.sockets) + + @classmethod + def parse_args(cls, argv): + """Pre-parse command line arguments for input into __init__. Note that + this does not cast values into adjustment types, it just creates a + dictionary suitable for passing into __init__, where __init__ does the + casting. + """ + long_opts = ["help", "call"] + for opt, cast in cls._params: + opt = opt.replace("_", "-") + if cast is asbool: + long_opts.append(opt) + long_opts.append("no-" + opt) + else: + long_opts.append(opt + "=") + + kw = { + "help": False, + "call": False, + } + + opts, args = getopt.getopt(argv, "", long_opts) + for opt, value in opts: + param = opt.lstrip("-").replace("-", "_") + + if param == "listen": + kw["listen"] = "{} {}".format(kw.get("listen", ""), value) + continue + + if param.startswith("no_"): + param = param[3:] + kw[param] = "false" + elif param in ("help", "call"): + kw[param] = True + elif cls._param_map[param] is asbool: + kw[param] = "true" + else: + kw[param] = value + + return kw, args + + @classmethod + def check_sockets(cls, sockets): + has_unix_socket = False + has_inet_socket = False + has_unsupported_socket = False + for sock in sockets: + if ( + sock.family == socket.AF_INET or sock.family == socket.AF_INET6 + ) and sock.type == socket.SOCK_STREAM: + has_inet_socket = True + elif ( + hasattr(socket, "AF_UNIX") + and sock.family == socket.AF_UNIX + and sock.type == socket.SOCK_STREAM + ): + has_unix_socket = True + else: + has_unsupported_socket = True + if has_unix_socket and has_inet_socket: + raise ValueError("Internet and UNIX sockets may not be mixed.") + if has_unsupported_socket: + raise ValueError("Only Internet or UNIX stream sockets may be used.") diff --git a/libs/waitress/buffers.py b/libs/waitress/buffers.py new file mode 100644 index 000000000..0086fe8f3 --- /dev/null +++ b/libs/waitress/buffers.py @@ -0,0 +1,308 @@ +############################################################################## +# +# Copyright (c) 2001-2004 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## +"""Buffers +""" +from io import BytesIO + +# copy_bytes controls the size of temp. strings for shuffling data around. +COPY_BYTES = 1 << 18 # 256K + +# The maximum number of bytes to buffer in a simple string. 
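Looking back at Adjustments.parse_args above: it only pre-parses command-line options into strings, and a later Adjustments(**kw) call performs the casting. A small sketch of that two-step flow, with made-up option values:

    from waitress.adjustments import Adjustments

    kw, args = Adjustments.parse_args([
        '--listen=127.0.0.1:8080',
        '--threads=4',
        '--no-ipv6',          # boolean options also get a 'no-' negative form
        'myapp:application',  # anything left over ends up in args
    ])
    # kw values are still strings ('4', 'false', ...); drop the runner-only
    # flags and let Adjustments.__init__ do the casting.
    adj = Adjustments(**{k: v for k, v in kw.items() if k not in ('help', 'call')})
    print(adj.threads, adj.ipv6)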
+STRBUF_LIMIT = 8192 + + +class FileBasedBuffer: + + remain = 0 + + def __init__(self, file, from_buffer=None): + self.file = file + if from_buffer is not None: + from_file = from_buffer.getfile() + read_pos = from_file.tell() + from_file.seek(0) + while True: + data = from_file.read(COPY_BYTES) + if not data: + break + file.write(data) + self.remain = int(file.tell() - read_pos) + from_file.seek(read_pos) + file.seek(read_pos) + + def __len__(self): + return self.remain + + def __nonzero__(self): + return True + + __bool__ = __nonzero__ # py3 + + def append(self, s): + file = self.file + read_pos = file.tell() + file.seek(0, 2) + file.write(s) + file.seek(read_pos) + self.remain = self.remain + len(s) + + def get(self, numbytes=-1, skip=False): + file = self.file + if not skip: + read_pos = file.tell() + if numbytes < 0: + # Read all + res = file.read() + else: + res = file.read(numbytes) + if skip: + self.remain -= len(res) + else: + file.seek(read_pos) + return res + + def skip(self, numbytes, allow_prune=0): + if self.remain < numbytes: + raise ValueError( + "Can't skip %d bytes in buffer of %d bytes" % (numbytes, self.remain) + ) + self.file.seek(numbytes, 1) + self.remain = self.remain - numbytes + + def newfile(self): + raise NotImplementedError() + + def prune(self): + file = self.file + if self.remain == 0: + read_pos = file.tell() + file.seek(0, 2) + sz = file.tell() + file.seek(read_pos) + if sz == 0: + # Nothing to prune. + return + nf = self.newfile() + while True: + data = file.read(COPY_BYTES) + if not data: + break + nf.write(data) + self.file = nf + + def getfile(self): + return self.file + + def close(self): + if hasattr(self.file, "close"): + self.file.close() + self.remain = 0 + + +class TempfileBasedBuffer(FileBasedBuffer): + def __init__(self, from_buffer=None): + FileBasedBuffer.__init__(self, self.newfile(), from_buffer) + + def newfile(self): + from tempfile import TemporaryFile + + return TemporaryFile("w+b") + + +class BytesIOBasedBuffer(FileBasedBuffer): + def __init__(self, from_buffer=None): + if from_buffer is not None: + FileBasedBuffer.__init__(self, BytesIO(), from_buffer) + else: + # Shortcut. 
:-) + self.file = BytesIO() + + def newfile(self): + return BytesIO() + + +def _is_seekable(fp): + if hasattr(fp, "seekable"): + return fp.seekable() + return hasattr(fp, "seek") and hasattr(fp, "tell") + + +class ReadOnlyFileBasedBuffer(FileBasedBuffer): + # used as wsgi.file_wrapper + + def __init__(self, file, block_size=32768): + self.file = file + self.block_size = block_size # for __iter__ + + def prepare(self, size=None): + if _is_seekable(self.file): + start_pos = self.file.tell() + self.file.seek(0, 2) + end_pos = self.file.tell() + self.file.seek(start_pos) + fsize = end_pos - start_pos + if size is None: + self.remain = fsize + else: + self.remain = min(fsize, size) + return self.remain + + def get(self, numbytes=-1, skip=False): + # never read more than self.remain (it can be user-specified) + if numbytes == -1 or numbytes > self.remain: + numbytes = self.remain + file = self.file + if not skip: + read_pos = file.tell() + res = file.read(numbytes) + if skip: + self.remain -= len(res) + else: + file.seek(read_pos) + return res + + def __iter__(self): # called by task if self.filelike has no seek/tell + return self + + def next(self): + val = self.file.read(self.block_size) + if not val: + raise StopIteration + return val + + __next__ = next # py3 + + def append(self, s): + raise NotImplementedError + + +class OverflowableBuffer: + """ + This buffer implementation has four stages: + - No data + - Bytes-based buffer + - BytesIO-based buffer + - Temporary file storage + The first two stages are fastest for simple transfers. + """ + + overflowed = False + buf = None + strbuf = b"" # Bytes-based buffer. + + def __init__(self, overflow): + # overflow is the maximum to be stored in a StringIO buffer. + self.overflow = overflow + + def __len__(self): + buf = self.buf + if buf is not None: + # use buf.__len__ rather than len(buf) FBO of not getting + # OverflowError on Python 2 + return buf.__len__() + else: + return self.strbuf.__len__() + + def __nonzero__(self): + # use self.__len__ rather than len(self) FBO of not getting + # OverflowError on Python 2 + return self.__len__() > 0 + + __bool__ = __nonzero__ # py3 + + def _create_buffer(self): + strbuf = self.strbuf + if len(strbuf) >= self.overflow: + self._set_large_buffer() + else: + self._set_small_buffer() + buf = self.buf + if strbuf: + buf.append(self.strbuf) + self.strbuf = b"" + return buf + + def _set_small_buffer(self): + self.buf = BytesIOBasedBuffer(self.buf) + self.overflowed = False + + def _set_large_buffer(self): + self.buf = TempfileBasedBuffer(self.buf) + self.overflowed = True + + def append(self, s): + buf = self.buf + if buf is None: + strbuf = self.strbuf + if len(strbuf) + len(s) < STRBUF_LIMIT: + self.strbuf = strbuf + s + return + buf = self._create_buffer() + buf.append(s) + # use buf.__len__ rather than len(buf) FBO of not getting + # OverflowError on Python 2 + sz = buf.__len__() + if not self.overflowed: + if sz >= self.overflow: + self._set_large_buffer() + + def get(self, numbytes=-1, skip=False): + buf = self.buf + if buf is None: + strbuf = self.strbuf + if not skip: + return strbuf + buf = self._create_buffer() + return buf.get(numbytes, skip) + + def skip(self, numbytes, allow_prune=False): + buf = self.buf + if buf is None: + if allow_prune and numbytes == len(self.strbuf): + # We could slice instead of converting to + # a buffer, but that would eat up memory in + # large transfers. 
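A short usage sketch of OverflowableBuffer's staging behaviour as defined here: small appends stay in the plain bytes string, larger ones move to a BytesIO-backed buffer, and anything past the overflow threshold spills into a temporary file. The sizes are illustrative.

    from waitress.buffers import OverflowableBuffer

    buf = OverflowableBuffer(overflow=1048576)   # 1MB, like outbuf_overflow's default

    buf.append(b'x' * 100)            # < STRBUF_LIMIT: kept in the bytes strbuf
    assert not buf.overflowed

    buf.append(b'y' * (2 * 1048576))  # past the overflow: spills to a temporary file
    assert buf.overflowed

    head = buf.get(10)                # peek at the first 10 bytes without consuming
    buf.skip(10)                      # now actually consume them
    print(len(head), len(buf))
    buf.close()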
+ self.strbuf = b"" + return + buf = self._create_buffer() + buf.skip(numbytes, allow_prune) + + def prune(self): + """ + A potentially expensive operation that removes all data + already retrieved from the buffer. + """ + buf = self.buf + if buf is None: + self.strbuf = b"" + return + buf.prune() + if self.overflowed: + # use buf.__len__ rather than len(buf) FBO of not getting + # OverflowError on Python 2 + sz = buf.__len__() + if sz < self.overflow: + # Revert to a faster buffer. + self._set_small_buffer() + + def getfile(self): + buf = self.buf + if buf is None: + buf = self._create_buffer() + return buf.getfile() + + def close(self): + buf = self.buf + if buf is not None: + buf.close() diff --git a/libs/waitress/channel.py b/libs/waitress/channel.py new file mode 100644 index 000000000..296a16aaf --- /dev/null +++ b/libs/waitress/channel.py @@ -0,0 +1,487 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## +import socket +import threading +import time +import traceback + +from waitress.buffers import OverflowableBuffer, ReadOnlyFileBasedBuffer +from waitress.parser import HTTPRequestParser +from waitress.task import ErrorTask, WSGITask +from waitress.utilities import InternalServerError + +from . import wasyncore + + +class ClientDisconnected(Exception): + """ Raised when attempting to write to a closed socket.""" + + +class HTTPChannel(wasyncore.dispatcher): + """ + Setting self.requests = [somerequest] prevents more requests from being + received until the out buffers have been flushed. + + Setting self.requests = [] allows more requests to be received. + """ + + task_class = WSGITask + error_task_class = ErrorTask + parser_class = HTTPRequestParser + + # A request that has not been received yet completely is stored here + request = None + last_activity = 0 # Time of last activity + will_close = False # set to True to close the socket. + close_when_flushed = False # set to True to close the socket when flushed + sent_continue = False # used as a latch after sending 100 continue + total_outbufs_len = 0 # total bytes ready to send + current_outbuf_count = 0 # total bytes written to current outbuf + + # + # ASYNCHRONOUS METHODS (including __init__) + # + + def __init__(self, server, sock, addr, adj, map=None): + self.server = server + self.adj = adj + self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)] + self.creation_time = self.last_activity = time.time() + self.sendbuf_len = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) + + # requests_lock used to push/pop requests and modify the request that is + # currently being created + self.requests_lock = threading.Lock() + # outbuf_lock used to access any outbuf (expected to use an RLock) + self.outbuf_lock = threading.Condition() + + wasyncore.dispatcher.__init__(self, sock, map=map) + + # Don't let wasyncore.dispatcher throttle self.addr on us. 
+ self.addr = addr + self.requests = [] + + def check_client_disconnected(self): + """ + This method is inserted into the environment of any created task so it + may occasionally check if the client has disconnected and interrupt + execution. + """ + return not self.connected + + def writable(self): + # if there's data in the out buffer or we've been instructed to close + # the channel (possibly by our server maintenance logic), run + # handle_write + + return self.total_outbufs_len or self.will_close or self.close_when_flushed + + def handle_write(self): + # Precondition: there's data in the out buffer to be sent, or + # there's a pending will_close request + + if not self.connected: + # we dont want to close the channel twice + + return + + # try to flush any pending output + + if not self.requests: + # 1. There are no running tasks, so we don't need to try to lock + # the outbuf before sending + # 2. The data in the out buffer should be sent as soon as possible + # because it's either data left over from task output + # or a 100 Continue line sent within "received". + flush = self._flush_some + elif self.total_outbufs_len >= self.adj.send_bytes: + # 1. There's a running task, so we need to try to lock + # the outbuf before sending + # 2. Only try to send if the data in the out buffer is larger + # than self.adj_bytes to avoid TCP fragmentation + flush = self._flush_some_if_lockable + else: + # 1. There's not enough data in the out buffer to bother to send + # right now. + flush = None + + if flush: + try: + flush() + except OSError: + if self.adj.log_socket_errors: + self.logger.exception("Socket error") + self.will_close = True + except Exception: # pragma: nocover + self.logger.exception("Unexpected exception when flushing") + self.will_close = True + + if self.close_when_flushed and not self.total_outbufs_len: + self.close_when_flushed = False + self.will_close = True + + if self.will_close: + self.handle_close() + + def readable(self): + # We might want to read more requests. We can only do this if: + # 1. We're not already about to close the connection. + # 2. We're not waiting to flush remaining data before closing the + # connection + # 3. There are not too many tasks already queued + # 4. There's no data in the output buffer that needs to be sent + # before we potentially create a new task. + + return not ( + self.will_close + or self.close_when_flushed + or len(self.requests) > self.adj.channel_request_lookahead + or self.total_outbufs_len + ) + + def handle_read(self): + try: + data = self.recv(self.adj.recv_bytes) + except OSError: + if self.adj.log_socket_errors: + self.logger.exception("Socket error") + self.handle_close() + + return + + if data: + self.last_activity = time.time() + self.received(data) + else: + # Client disconnected. + self.connected = False + + def send_continue(self): + """ + Send a 100-Continue header to the client. This is either called from + receive (if no requests are running and the client expects it) or at + the end of service (if no more requests are queued and a request has + been read partially that expects it). 
+ """ + self.request.expect_continue = False + outbuf_payload = b"HTTP/1.1 100 Continue\r\n\r\n" + num_bytes = len(outbuf_payload) + with self.outbuf_lock: + self.outbufs[-1].append(outbuf_payload) + self.current_outbuf_count += num_bytes + self.total_outbufs_len += num_bytes + self.sent_continue = True + self._flush_some() + self.request.completed = False + + def received(self, data): + """ + Receives input asynchronously and assigns one or more requests to the + channel. + """ + if not data: + return False + + with self.requests_lock: + while data: + if self.request is None: + self.request = self.parser_class(self.adj) + n = self.request.received(data) + + # if there are requests queued, we can not send the continue + # header yet since the responses need to be kept in order + if ( + self.request.expect_continue + and self.request.headers_finished + and not self.requests + and not self.sent_continue + ): + self.send_continue() + + if self.request.completed: + # The request (with the body) is ready to use. + self.sent_continue = False + + if not self.request.empty: + self.requests.append(self.request) + if len(self.requests) == 1: + # self.requests was empty before so the main thread + # is in charge of starting the task. Otherwise, + # service() will add a new task after each request + # has been processed + self.server.add_task(self) + self.request = None + + if n >= len(data): + break + data = data[n:] + + return True + + def _flush_some_if_lockable(self): + # Since our task may be appending to the outbuf, we try to acquire + # the lock, but we don't block if we can't. + + if self.outbuf_lock.acquire(False): + try: + self._flush_some() + + if self.total_outbufs_len < self.adj.outbuf_high_watermark: + self.outbuf_lock.notify() + finally: + self.outbuf_lock.release() + + def _flush_some(self): + # Send as much data as possible to our client + + sent = 0 + dobreak = False + + while True: + outbuf = self.outbufs[0] + # use outbuf.__len__ rather than len(outbuf) FBO of not getting + # OverflowError on 32-bit Python + outbuflen = outbuf.__len__() + + while outbuflen > 0: + chunk = outbuf.get(self.sendbuf_len) + num_sent = self.send(chunk) + + if num_sent: + outbuf.skip(num_sent, True) + outbuflen -= num_sent + sent += num_sent + self.total_outbufs_len -= num_sent + else: + # failed to write anything, break out entirely + dobreak = True + + break + else: + # self.outbufs[-1] must always be a writable outbuf + + if len(self.outbufs) > 1: + toclose = self.outbufs.pop(0) + try: + toclose.close() + except Exception: + self.logger.exception("Unexpected error when closing an outbuf") + else: + # caught up, done flushing for now + dobreak = True + + if dobreak: + break + + if sent: + self.last_activity = time.time() + + return True + + return False + + def handle_close(self): + with self.outbuf_lock: + for outbuf in self.outbufs: + try: + outbuf.close() + except Exception: + self.logger.exception( + "Unknown exception while trying to close outbuf" + ) + self.total_outbufs_len = 0 + self.connected = False + self.outbuf_lock.notify() + wasyncore.dispatcher.close(self) + + def add_channel(self, map=None): + """See wasyncore.dispatcher + + This hook keeps track of opened channels. + """ + wasyncore.dispatcher.add_channel(self, map) + self.server.active_channels[self._fileno] = self + + def del_channel(self, map=None): + """See wasyncore.dispatcher + + This hook keeps track of closed channels. 
+ """ + fd = self._fileno # next line sets this to None + wasyncore.dispatcher.del_channel(self, map) + ac = self.server.active_channels + + if fd in ac: + del ac[fd] + + # + # SYNCHRONOUS METHODS + # + + def write_soon(self, data): + if not self.connected: + # if the socket is closed then interrupt the task so that it + # can cleanup possibly before the app_iter is exhausted + raise ClientDisconnected + + if data: + # the async mainloop might be popping data off outbuf; we can + # block here waiting for it because we're in a task thread + with self.outbuf_lock: + self._flush_outbufs_below_high_watermark() + + if not self.connected: + raise ClientDisconnected + num_bytes = len(data) + + if data.__class__ is ReadOnlyFileBasedBuffer: + # they used wsgi.file_wrapper + self.outbufs.append(data) + nextbuf = OverflowableBuffer(self.adj.outbuf_overflow) + self.outbufs.append(nextbuf) + self.current_outbuf_count = 0 + else: + if self.current_outbuf_count >= self.adj.outbuf_high_watermark: + # rotate to a new buffer if the current buffer has hit + # the watermark to avoid it growing unbounded + nextbuf = OverflowableBuffer(self.adj.outbuf_overflow) + self.outbufs.append(nextbuf) + self.current_outbuf_count = 0 + self.outbufs[-1].append(data) + self.current_outbuf_count += num_bytes + self.total_outbufs_len += num_bytes + + if self.total_outbufs_len >= self.adj.send_bytes: + self.server.pull_trigger() + + return num_bytes + + return 0 + + def _flush_outbufs_below_high_watermark(self): + # check first to avoid locking if possible + + if self.total_outbufs_len > self.adj.outbuf_high_watermark: + with self.outbuf_lock: + while ( + self.connected + and self.total_outbufs_len > self.adj.outbuf_high_watermark + ): + self.server.pull_trigger() + self.outbuf_lock.wait() + + def service(self): + """Execute one request. 
If there are more, we add another task to the + server at the end.""" + + request = self.requests[0] + + if request.error: + task = self.error_task_class(self, request) + else: + task = self.task_class(self, request) + + try: + if self.connected: + task.service() + else: + task.close_on_finish = True + except ClientDisconnected: + self.logger.info("Client disconnected while serving %s" % task.request.path) + task.close_on_finish = True + except Exception: + self.logger.exception("Exception while serving %s" % task.request.path) + + if not task.wrote_header: + if self.adj.expose_tracebacks: + body = traceback.format_exc() + else: + body = "The server encountered an unexpected internal server error" + req_version = request.version + req_headers = request.headers + err_request = self.parser_class(self.adj) + err_request.error = InternalServerError(body) + # copy some original request attributes to fulfill + # HTTP 1.1 requirements + err_request.version = req_version + try: + err_request.headers["CONNECTION"] = req_headers["CONNECTION"] + except KeyError: + pass + task = self.error_task_class(self, err_request) + try: + task.service() # must not fail + except ClientDisconnected: + task.close_on_finish = True + else: + task.close_on_finish = True + + if task.close_on_finish: + with self.requests_lock: + self.close_when_flushed = True + + for request in self.requests: + request.close() + self.requests = [] + else: + # before processing a new request, ensure there is not too + # much data in the outbufs waiting to be flushed + # NB: currently readable() returns False while we are + # flushing data so we know no new requests will come in + # that we need to account for, otherwise it'd be better + # to do this check at the start of the request instead of + # at the end to account for consecutive service() calls + + if len(self.requests) > 1: + self._flush_outbufs_below_high_watermark() + + # this is a little hacky but basically it's forcing the + # next request to create a new outbuf to avoid sharing + # outbufs across requests which can cause outbufs to + # not be deallocated regularly when a connection is open + # for a long time + + if self.current_outbuf_count > 0: + self.current_outbuf_count = self.adj.outbuf_high_watermark + + request.close() + + # Add new task to process the next request + with self.requests_lock: + self.requests.pop(0) + if self.connected and self.requests: + self.server.add_task(self) + elif ( + self.connected + and self.request is not None + and self.request.expect_continue + and self.request.headers_finished + and not self.sent_continue + ): + # A request waits for a signal to continue, but we could + # not send it until now because requests were being + # processed and the output needs to be kept in order + self.send_continue() + + if self.connected: + self.server.pull_trigger() + + self.last_activity = time.time() + + def cancel(self): + """ Cancels all pending / active requests """ + self.will_close = True + self.connected = False + self.last_activity = time.time() + self.requests = [] diff --git a/libs/waitress/compat.py b/libs/waitress/compat.py new file mode 100644 index 000000000..67543b9ca --- /dev/null +++ b/libs/waitress/compat.py @@ -0,0 +1,29 @@ +import platform + +# Fix for issue reported in https://github.com/Pylons/waitress/issues/138, +# Python on Windows may not define IPPROTO_IPV6 in socket. 
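For context, the two socket constants re-exported (or hard-coded on Windows) here are what a server needs when it binds an IPv6 listening socket. The snippet below is a generic illustration of their use, not an excerpt of waitress's own socket setup.

    import socket
    from waitress.compat import HAS_IPV6, IPPROTO_IPV6, IPV6_V6ONLY

    if HAS_IPV6:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        # Restrict this socket to IPv6 so a separate IPv4 socket can be
        # bound on the same port without the two conflicting.
        sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)
        sock.bind(('::1', 8080))
        sock.listen(5)
        sock.close()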
+import socket +import sys +import warnings + +# True if we are running on Windows +WIN = platform.system() == "Windows" + +MAXINT = sys.maxsize +HAS_IPV6 = socket.has_ipv6 + +if hasattr(socket, "IPPROTO_IPV6") and hasattr(socket, "IPV6_V6ONLY"): + IPPROTO_IPV6 = socket.IPPROTO_IPV6 + IPV6_V6ONLY = socket.IPV6_V6ONLY +else: # pragma: no cover + if WIN: + IPPROTO_IPV6 = 41 + IPV6_V6ONLY = 27 + else: + warnings.warn( + "OS does not support required IPv6 socket flags. This is requirement " + "for Waitress. Please open an issue at https://github.com/Pylons/waitress. " + "IPv6 support has been disabled.", + RuntimeWarning, + ) + HAS_IPV6 = False diff --git a/libs/waitress/parser.py b/libs/waitress/parser.py new file mode 100644 index 000000000..3b99921b0 --- /dev/null +++ b/libs/waitress/parser.py @@ -0,0 +1,439 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## +"""HTTP Request Parser + +This server uses asyncore to accept connections and do initial +processing but threads to do work. +""" +from io import BytesIO +import re +from urllib import parse +from urllib.parse import unquote_to_bytes + +from waitress.buffers import OverflowableBuffer +from waitress.receiver import ChunkedReceiver, FixedStreamReceiver +from waitress.utilities import ( + BadRequest, + RequestEntityTooLarge, + RequestHeaderFieldsTooLarge, + ServerNotImplemented, + find_double_newline, +) + +from .rfc7230 import HEADER_FIELD + + +def unquote_bytes_to_wsgi(bytestring): + return unquote_to_bytes(bytestring).decode("latin-1") + + +class ParsingError(Exception): + pass + + +class TransferEncodingNotImplemented(Exception): + pass + + +class HTTPRequestParser: + """A structure that collects the HTTP request. + + Once the stream is completed, the instance is passed to + a server task constructor. + """ + + completed = False # Set once request is completed. + empty = False # Set if no request was made. + expect_continue = False # client sent "Expect: 100-continue" header + headers_finished = False # True when headers have been read + header_plus = b"" + chunked = False + content_length = 0 + header_bytes_received = 0 + body_bytes_received = 0 + body_rcv = None + version = "1.0" + error = None + connection_close = False + + # Other attributes: first_line, header, headers, command, uri, version, + # path, query, fragment + + def __init__(self, adj): + """ + adj is an Adjustments object. + """ + # headers is a mapping containing keys translated to uppercase + # with dashes turned into underscores. + self.headers = {} + self.adj = adj + + def received(self, data): + """ + Receives the HTTP stream for one request. Returns the number of + bytes consumed. Sets the completed flag once both the header and the + body have been received. + """ + + if self.completed: + return 0 # Can't consume any more. + + datalen = len(data) + br = self.body_rcv + + if br is None: + # In header. 
+ max_header = self.adj.max_request_header_size + + s = self.header_plus + data + index = find_double_newline(s) + consumed = 0 + + if index >= 0: + # If the headers have ended, and we also have part of the body + # message in data we still want to validate we aren't going + # over our limit for received headers. + self.header_bytes_received += index + consumed = datalen - (len(s) - index) + else: + self.header_bytes_received += datalen + consumed = datalen + + # If the first line + headers is over the max length, we return a + # RequestHeaderFieldsTooLarge error rather than continuing to + # attempt to parse the headers. + + if self.header_bytes_received >= max_header: + self.parse_header(b"GET / HTTP/1.0\r\n") + self.error = RequestHeaderFieldsTooLarge( + "exceeds max_header of %s" % max_header + ) + self.completed = True + + return consumed + + if index >= 0: + # Header finished. + header_plus = s[:index] + + # Remove preceeding blank lines. This is suggested by + # https://tools.ietf.org/html/rfc7230#section-3.5 to support + # clients sending an extra CR LF after another request when + # using HTTP pipelining + header_plus = header_plus.lstrip() + + if not header_plus: + self.empty = True + self.completed = True + else: + try: + self.parse_header(header_plus) + except ParsingError as e: + self.error = BadRequest(e.args[0]) + self.completed = True + except TransferEncodingNotImplemented as e: + self.error = ServerNotImplemented(e.args[0]) + self.completed = True + else: + if self.body_rcv is None: + # no content-length header and not a t-e: chunked + # request + self.completed = True + + if self.content_length > 0: + max_body = self.adj.max_request_body_size + # we won't accept this request if the content-length + # is too large + + if self.content_length >= max_body: + self.error = RequestEntityTooLarge( + "exceeds max_body of %s" % max_body + ) + self.completed = True + self.headers_finished = True + + return consumed + + # Header not finished yet. + self.header_plus = s + + return datalen + else: + # In body. + consumed = br.received(data) + self.body_bytes_received += consumed + max_body = self.adj.max_request_body_size + + if self.body_bytes_received >= max_body: + # this will only be raised during t-e: chunked requests + self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body) + self.completed = True + elif br.error: + # garbage in chunked encoding input probably + self.error = br.error + self.completed = True + elif br.completed: + # The request (with the body) is ready to use. + self.completed = True + + if self.chunked: + # We've converted the chunked transfer encoding request + # body into a normal request body, so we know its content + # length; set the header here. We already popped the + # TRANSFER_ENCODING header in parse_header, so this will + # appear to the client to be an entirely non-chunked HTTP + # request with a valid content-length. + self.headers["CONTENT_LENGTH"] = str(br.__len__()) + + return consumed + + def parse_header(self, header_plus): + """ + Parses the header_plus block of text (the headers plus the + first line of the request). 
+ """ + index = header_plus.find(b"\r\n") + + if index >= 0: + first_line = header_plus[:index].rstrip() + header = header_plus[index + 2 :] + else: + raise ParsingError("HTTP message header invalid") + + if b"\r" in first_line or b"\n" in first_line: + raise ParsingError("Bare CR or LF found in HTTP message") + + self.first_line = first_line # for testing + + lines = get_header_lines(header) + + headers = self.headers + + for line in lines: + header = HEADER_FIELD.match(line) + + if not header: + raise ParsingError("Invalid header") + + key, value = header.group("name", "value") + + if b"_" in key: + # TODO(xistence): Should we drop this request instead? + + continue + + # Only strip off whitespace that is considered valid whitespace by + # RFC7230, don't strip the rest + value = value.strip(b" \t") + key1 = key.upper().replace(b"-", b"_").decode("latin-1") + # If a header already exists, we append subsequent values + # separated by a comma. Applications already need to handle + # the comma separated values, as HTTP front ends might do + # the concatenation for you (behavior specified in RFC2616). + try: + headers[key1] += (b", " + value).decode("latin-1") + except KeyError: + headers[key1] = value.decode("latin-1") + + # command, uri, version will be bytes + command, uri, version = crack_first_line(first_line) + version = version.decode("latin-1") + command = command.decode("latin-1") + self.command = command + self.version = version + ( + self.proxy_scheme, + self.proxy_netloc, + self.path, + self.query, + self.fragment, + ) = split_uri(uri) + self.url_scheme = self.adj.url_scheme + connection = headers.get("CONNECTION", "") + + if version == "1.0": + if connection.lower() != "keep-alive": + self.connection_close = True + + if version == "1.1": + # since the server buffers data from chunked transfers and clients + # never need to deal with chunked requests, downstream clients + # should not see the HTTP_TRANSFER_ENCODING header; we pop it + # here + te = headers.pop("TRANSFER_ENCODING", "") + + # NB: We can not just call bare strip() here because it will also + # remove other non-printable characters that we explicitly do not + # want removed so that if someone attempts to smuggle a request + # with these characters we don't fall prey to it. + # + # For example \x85 is stripped by default, but it is not considered + # valid whitespace to be stripped by RFC7230. + encodings = [ + encoding.strip(" \t").lower() for encoding in te.split(",") if encoding + ] + + for encoding in encodings: + # Out of the transfer-codings listed in + # https://tools.ietf.org/html/rfc7230#section-4 we only support + # chunked at this time. + + # Note: the identity transfer-coding was removed in RFC7230: + # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus + # not supported + + if encoding not in {"chunked"}: + raise TransferEncodingNotImplemented( + "Transfer-Encoding requested is not supported." + ) + + if encodings and encodings[-1] == "chunked": + self.chunked = True + buf = OverflowableBuffer(self.adj.inbuf_overflow) + self.body_rcv = ChunkedReceiver(buf) + elif encodings: # pragma: nocover + raise TransferEncodingNotImplemented( + "Transfer-Encoding requested is not supported." 
+ ) + + expect = headers.get("EXPECT", "").lower() + self.expect_continue = expect == "100-continue" + + if connection.lower() == "close": + self.connection_close = True + + if not self.chunked: + try: + cl = int(headers.get("CONTENT_LENGTH", 0)) + except ValueError: + raise ParsingError("Content-Length is invalid") + + self.content_length = cl + + if cl > 0: + buf = OverflowableBuffer(self.adj.inbuf_overflow) + self.body_rcv = FixedStreamReceiver(cl, buf) + + def get_body_stream(self): + body_rcv = self.body_rcv + + if body_rcv is not None: + return body_rcv.getfile() + else: + return BytesIO() + + def close(self): + body_rcv = self.body_rcv + + if body_rcv is not None: + body_rcv.getbuf().close() + + +def split_uri(uri): + # urlsplit handles byte input by returning bytes on py3, so + # scheme, netloc, path, query, and fragment are bytes + + scheme = netloc = path = query = fragment = b"" + + # urlsplit below will treat this as a scheme-less netloc, thereby losing + # the original intent of the request. Here we shamelessly stole 4 lines of + # code from the CPython stdlib to parse out the fragment and query but + # leave the path alone. See + # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468 + # and https://github.com/Pylons/waitress/issues/260 + + if uri[:2] == b"//": + path = uri + + if b"#" in path: + path, fragment = path.split(b"#", 1) + + if b"?" in path: + path, query = path.split(b"?", 1) + else: + try: + scheme, netloc, path, query, fragment = parse.urlsplit(uri) + except UnicodeError: + raise ParsingError("Bad URI") + + return ( + scheme.decode("latin-1"), + netloc.decode("latin-1"), + unquote_bytes_to_wsgi(path), + query.decode("latin-1"), + fragment.decode("latin-1"), + ) + + +def get_header_lines(header): + """ + Splits the header into lines, putting multi-line headers together. + """ + r = [] + lines = header.split(b"\r\n") + + for line in lines: + if not line: + continue + + if b"\r" in line or b"\n" in line: + raise ParsingError( + 'Bare CR or LF found in header line "%s"' % str(line, "latin-1") + ) + + if line.startswith((b" ", b"\t")): + if not r: + # https://corte.si/posts/code/pathod/pythonservers/index.html + raise ParsingError('Malformed header line "%s"' % str(line, "latin-1")) + r[-1] += line + else: + r.append(line) + + return r + + +first_line_re = re.compile( + b"([^ ]+) " + b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)" + b"(( HTTP/([0-9.]+))$|$)" +) + + +def crack_first_line(line): + m = first_line_re.match(line) + + if m is not None and m.end() == len(line): + if m.group(3): + version = m.group(5) + else: + version = b"" + method = m.group(1) + + # the request methods that are currently defined are all uppercase: + # https://www.iana.org/assignments/http-methods/http-methods.xhtml and + # the request method is case sensitive according to + # https://tools.ietf.org/html/rfc7231#section-4.1 + + # By disallowing anything but uppercase methods we save poor + # unsuspecting souls from sending lowercase HTTP methods to waitress + # and having the request complete, while servers like nginx drop the + # request onto the floor. 
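As a quick standalone check of the request-line pattern defined just above (a sketch for illustration, not part of the patch):

import re

first_line_re = re.compile(
    b"([^ ]+) "
    b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)"
    b"(( HTTP/([0-9.]+))$|$)"
)

m = first_line_re.match(b"GET /status?verbose=1 HTTP/1.1")
# Group 1 is the method, group 2 the request target, group 5 the HTTP version.
print(m.group(1), m.group(2), m.group(5))  # b'GET' b'/status?verbose=1' b'1.1'

crack_first_line() then rejects any method that is not already uppercase, which is the check that follows.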
+ + if method != method.upper(): + raise ParsingError('Malformed HTTP method "%s"' % str(method, "latin-1")) + uri = m.group(2) + + return method, uri, version + else: + return b"", b"", b"" diff --git a/libs/waitress/proxy_headers.py b/libs/waitress/proxy_headers.py new file mode 100644 index 000000000..5d6164670 --- /dev/null +++ b/libs/waitress/proxy_headers.py @@ -0,0 +1,330 @@ +from collections import namedtuple + +from .utilities import BadRequest, logger, undquote + +PROXY_HEADERS = frozenset( + { + "X_FORWARDED_FOR", + "X_FORWARDED_HOST", + "X_FORWARDED_PROTO", + "X_FORWARDED_PORT", + "X_FORWARDED_BY", + "FORWARDED", + } +) + +Forwarded = namedtuple("Forwarded", ["by", "for_", "host", "proto"]) + + +class MalformedProxyHeader(Exception): + def __init__(self, header, reason, value): + self.header = header + self.reason = reason + self.value = value + super().__init__(header, reason, value) + + +def proxy_headers_middleware( + app, + trusted_proxy=None, + trusted_proxy_count=1, + trusted_proxy_headers=None, + clear_untrusted=True, + log_untrusted=False, + logger=logger, +): + def translate_proxy_headers(environ, start_response): + untrusted_headers = PROXY_HEADERS + remote_peer = environ["REMOTE_ADDR"] + if trusted_proxy == "*" or remote_peer == trusted_proxy: + try: + untrusted_headers = parse_proxy_headers( + environ, + trusted_proxy_count=trusted_proxy_count, + trusted_proxy_headers=trusted_proxy_headers, + logger=logger, + ) + except MalformedProxyHeader as ex: + logger.warning( + 'Malformed proxy header "%s" from "%s": %s value: %s', + ex.header, + remote_peer, + ex.reason, + ex.value, + ) + error = BadRequest('Header "{}" malformed.'.format(ex.header)) + return error.wsgi_response(environ, start_response) + + # Clear out the untrusted proxy headers + if clear_untrusted: + clear_untrusted_headers( + environ, untrusted_headers, log_warning=log_untrusted, logger=logger + ) + + return app(environ, start_response) + + return translate_proxy_headers + + +def parse_proxy_headers( + environ, trusted_proxy_count, trusted_proxy_headers, logger=logger +): + if trusted_proxy_headers is None: + trusted_proxy_headers = set() + + forwarded_for = [] + forwarded_host = forwarded_proto = forwarded_port = forwarded = "" + client_addr = None + untrusted_headers = set(PROXY_HEADERS) + + def raise_for_multiple_values(): + raise ValueError("Unspecified behavior for multiple values found in header") + + if "x-forwarded-for" in trusted_proxy_headers and "HTTP_X_FORWARDED_FOR" in environ: + try: + forwarded_for = [] + + for forward_hop in environ["HTTP_X_FORWARDED_FOR"].split(","): + forward_hop = forward_hop.strip() + forward_hop = undquote(forward_hop) + + # Make sure that all IPv6 addresses are surrounded by brackets, + # this is assuming that the IPv6 representation here does not + # include a port number. + + if "." 
not in forward_hop and ( + ":" in forward_hop and forward_hop[-1] != "]" + ): + forwarded_for.append("[{}]".format(forward_hop)) + else: + forwarded_for.append(forward_hop) + + forwarded_for = forwarded_for[-trusted_proxy_count:] + client_addr = forwarded_for[0] + + untrusted_headers.remove("X_FORWARDED_FOR") + except Exception as ex: + raise MalformedProxyHeader( + "X-Forwarded-For", str(ex), environ["HTTP_X_FORWARDED_FOR"] + ) + + if ( + "x-forwarded-host" in trusted_proxy_headers + and "HTTP_X_FORWARDED_HOST" in environ + ): + try: + forwarded_host_multiple = [] + + for forward_host in environ["HTTP_X_FORWARDED_HOST"].split(","): + forward_host = forward_host.strip() + forward_host = undquote(forward_host) + forwarded_host_multiple.append(forward_host) + + forwarded_host_multiple = forwarded_host_multiple[-trusted_proxy_count:] + forwarded_host = forwarded_host_multiple[0] + + untrusted_headers.remove("X_FORWARDED_HOST") + except Exception as ex: + raise MalformedProxyHeader( + "X-Forwarded-Host", str(ex), environ["HTTP_X_FORWARDED_HOST"] + ) + + if "x-forwarded-proto" in trusted_proxy_headers: + try: + forwarded_proto = undquote(environ.get("HTTP_X_FORWARDED_PROTO", "")) + if "," in forwarded_proto: + raise_for_multiple_values() + untrusted_headers.remove("X_FORWARDED_PROTO") + except Exception as ex: + raise MalformedProxyHeader( + "X-Forwarded-Proto", str(ex), environ["HTTP_X_FORWARDED_PROTO"] + ) + + if "x-forwarded-port" in trusted_proxy_headers: + try: + forwarded_port = undquote(environ.get("HTTP_X_FORWARDED_PORT", "")) + if "," in forwarded_port: + raise_for_multiple_values() + untrusted_headers.remove("X_FORWARDED_PORT") + except Exception as ex: + raise MalformedProxyHeader( + "X-Forwarded-Port", str(ex), environ["HTTP_X_FORWARDED_PORT"] + ) + + if "x-forwarded-by" in trusted_proxy_headers: + # Waitress itself does not use X-Forwarded-By, but we can not + # remove it so it can get set in the environ + untrusted_headers.remove("X_FORWARDED_BY") + + if "forwarded" in trusted_proxy_headers: + forwarded = environ.get("HTTP_FORWARDED", None) + untrusted_headers = PROXY_HEADERS - {"FORWARDED"} + + # If the Forwarded header exists, it gets priority + if forwarded: + proxies = [] + try: + for forwarded_element in forwarded.split(","): + # Remove whitespace that may have been introduced when + # appending a new entry + forwarded_element = forwarded_element.strip() + + forwarded_for = forwarded_host = forwarded_proto = "" + forwarded_port = forwarded_by = "" + + for pair in forwarded_element.split(";"): + pair = pair.lower() + + if not pair: + continue + + token, equals, value = pair.partition("=") + + if equals != "=": + raise ValueError('Invalid forwarded-pair missing "="') + + if token.strip() != token: + raise ValueError("Token may not be surrounded by whitespace") + + if value.strip() != value: + raise ValueError("Value may not be surrounded by whitespace") + + if token == "by": + forwarded_by = undquote(value) + + elif token == "for": + forwarded_for = undquote(value) + + elif token == "host": + forwarded_host = undquote(value) + + elif token == "proto": + forwarded_proto = undquote(value) + + else: + logger.warning("Unknown Forwarded token: %s" % token) + + proxies.append( + Forwarded( + forwarded_by, forwarded_for, forwarded_host, forwarded_proto + ) + ) + except Exception as ex: + raise MalformedProxyHeader("Forwarded", str(ex), environ["HTTP_FORWARDED"]) + + proxies = proxies[-trusted_proxy_count:] + + # Iterate backwards and fill in some values, the oldest entry that + # 
contains the information we expect is the one we use. We expect + # that intermediate proxies may re-write the host header or proto, + # but the oldest entry is the one that contains the information the + # client expects when generating URL's + # + # Forwarded: for="[2001:db8::1]";host="example.com:8443";proto="https" + # Forwarded: for=192.0.2.1;host="example.internal:8080" + # + # (After HTTPS header folding) should mean that we use as values: + # + # Host: example.com + # Protocol: https + # Port: 8443 + + for proxy in proxies[::-1]: + client_addr = proxy.for_ or client_addr + forwarded_host = proxy.host or forwarded_host + forwarded_proto = proxy.proto or forwarded_proto + + if forwarded_proto: + forwarded_proto = forwarded_proto.lower() + + if forwarded_proto not in {"http", "https"}: + raise MalformedProxyHeader( + "Forwarded Proto=" if forwarded else "X-Forwarded-Proto", + "unsupported proto value", + forwarded_proto, + ) + + # Set the URL scheme to the proxy provided proto + environ["wsgi.url_scheme"] = forwarded_proto + + if not forwarded_port: + if forwarded_proto == "http": + forwarded_port = "80" + + if forwarded_proto == "https": + forwarded_port = "443" + + if forwarded_host: + if ":" in forwarded_host and forwarded_host[-1] != "]": + host, port = forwarded_host.rsplit(":", 1) + host, port = host.strip(), str(port) + + # We trust the port in the Forwarded Host/X-Forwarded-Host over + # X-Forwarded-Port, or whatever we got from Forwarded + # Proto/X-Forwarded-Proto. + + if forwarded_port != port: + forwarded_port = port + + # We trust the proxy server's forwarded Host + environ["SERVER_NAME"] = host + environ["HTTP_HOST"] = forwarded_host + else: + # We trust the proxy server's forwarded Host + environ["SERVER_NAME"] = forwarded_host + environ["HTTP_HOST"] = forwarded_host + + if forwarded_port: + if forwarded_port not in {"443", "80"}: + environ["HTTP_HOST"] = "{}:{}".format( + forwarded_host, forwarded_port + ) + elif forwarded_port == "80" and environ["wsgi.url_scheme"] != "http": + environ["HTTP_HOST"] = "{}:{}".format( + forwarded_host, forwarded_port + ) + elif forwarded_port == "443" and environ["wsgi.url_scheme"] != "https": + environ["HTTP_HOST"] = "{}:{}".format( + forwarded_host, forwarded_port + ) + + if forwarded_port: + environ["SERVER_PORT"] = str(forwarded_port) + + if client_addr: + if ":" in client_addr and client_addr[-1] != "]": + addr, port = client_addr.rsplit(":", 1) + environ["REMOTE_ADDR"] = strip_brackets(addr.strip()) + environ["REMOTE_PORT"] = port.strip() + else: + environ["REMOTE_ADDR"] = strip_brackets(client_addr.strip()) + environ["REMOTE_HOST"] = environ["REMOTE_ADDR"] + + return untrusted_headers + + +def strip_brackets(addr): + if addr[0] == "[" and addr[-1] == "]": + return addr[1:-1] + return addr + + +def clear_untrusted_headers( + environ, untrusted_headers, log_warning=False, logger=logger +): + untrusted_headers_removed = [ + header + for header in untrusted_headers + if environ.pop("HTTP_" + header, False) is not False + ] + + if log_warning and untrusted_headers_removed: + untrusted_headers_removed = [ + "-".join(x.capitalize() for x in header.split("_")) + for header in untrusted_headers_removed + ] + logger.warning( + "Removed untrusted headers (%s). 
Waitress recommends these be " + "removed upstream.", + ", ".join(untrusted_headers_removed), + ) diff --git a/libs/waitress/receiver.py b/libs/waitress/receiver.py new file mode 100644 index 000000000..878528087 --- /dev/null +++ b/libs/waitress/receiver.py @@ -0,0 +1,186 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## +"""Data Chunk Receiver +""" + +from waitress.utilities import BadRequest, find_double_newline + + +class FixedStreamReceiver: + + # See IStreamConsumer + completed = False + error = None + + def __init__(self, cl, buf): + self.remain = cl + self.buf = buf + + def __len__(self): + return self.buf.__len__() + + def received(self, data): + "See IStreamConsumer" + rm = self.remain + + if rm < 1: + self.completed = True # Avoid any chance of spinning + + return 0 + datalen = len(data) + + if rm <= datalen: + self.buf.append(data[:rm]) + self.remain = 0 + self.completed = True + + return rm + else: + self.buf.append(data) + self.remain -= datalen + + return datalen + + def getfile(self): + return self.buf.getfile() + + def getbuf(self): + return self.buf + + +class ChunkedReceiver: + + chunk_remainder = 0 + validate_chunk_end = False + control_line = b"" + chunk_end = b"" + all_chunks_received = False + trailer = b"" + completed = False + error = None + + # max_control_line = 1024 + # max_trailer = 65536 + + def __init__(self, buf): + self.buf = buf + + def __len__(self): + return self.buf.__len__() + + def received(self, s): + # Returns the number of bytes consumed. + + if self.completed: + return 0 + orig_size = len(s) + + while s: + rm = self.chunk_remainder + + if rm > 0: + # Receive the remainder of a chunk. + to_write = s[:rm] + self.buf.append(to_write) + written = len(to_write) + s = s[written:] + + self.chunk_remainder -= written + + if self.chunk_remainder == 0: + self.validate_chunk_end = True + elif self.validate_chunk_end: + s = self.chunk_end + s + + pos = s.find(b"\r\n") + + if pos < 0 and len(s) < 2: + self.chunk_end = s + s = b"" + else: + self.chunk_end = b"" + if pos == 0: + # Chop off the terminating CR LF from the chunk + s = s[2:] + else: + self.error = BadRequest("Chunk not properly terminated") + self.all_chunks_received = True + + # Always exit this loop + self.validate_chunk_end = False + elif not self.all_chunks_received: + # Receive a control line. + s = self.control_line + s + pos = s.find(b"\r\n") + + if pos < 0: + # Control line not finished. + self.control_line = s + s = b"" + else: + # Control line finished. + line = s[:pos] + s = s[pos + 2 :] + self.control_line = b"" + line = line.strip() + + if line: + # Begin a new chunk. + semi = line.find(b";") + + if semi >= 0: + # discard extension info. + line = line[:semi] + try: + sz = int(line.strip(), 16) # hexadecimal + except ValueError: # garbage in input + self.error = BadRequest("garbage in chunked encoding input") + sz = 0 + + if sz > 0: + # Start a new chunk. 
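To see the chunked state machine above end to end, here is a minimal driver (a sketch only; the list-backed buffer is a hypothetical stand-in for OverflowableBuffer, and it assumes the vendored package is importable as waitress):

from io import BytesIO
from waitress.receiver import ChunkedReceiver

class ListBuffer:
    # Bare-bones stand-in offering the append/len/getfile surface used here.
    def __init__(self):
        self.chunks = []
    def append(self, data):
        self.chunks.append(data)
    def __len__(self):
        return sum(len(c) for c in self.chunks)
    def getfile(self):
        return BytesIO(b"".join(self.chunks))

rcv = ChunkedReceiver(ListBuffer())
consumed = rcv.received(b"5\r\nhello\r\n0\r\n\r\n")
print(consumed, rcv.completed, rcv.getfile().read())  # 15 True b'hello'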
+ self.chunk_remainder = sz + else: + # Finished chunks. + self.all_chunks_received = True + # else expect a control line. + else: + # Receive the trailer. + trailer = self.trailer + s + + if trailer.startswith(b"\r\n"): + # No trailer. + self.completed = True + + return orig_size - (len(trailer) - 2) + pos = find_double_newline(trailer) + + if pos < 0: + # Trailer not finished. + self.trailer = trailer + s = b"" + else: + # Finished the trailer. + self.completed = True + self.trailer = trailer[:pos] + + return orig_size - (len(trailer) - pos) + + return orig_size + + def getfile(self): + return self.buf.getfile() + + def getbuf(self): + return self.buf diff --git a/libs/waitress/rfc7230.py b/libs/waitress/rfc7230.py new file mode 100644 index 000000000..9b25fbd9a --- /dev/null +++ b/libs/waitress/rfc7230.py @@ -0,0 +1,50 @@ +""" +This contains a bunch of RFC7230 definitions and regular expressions that are +needed to properly parse HTTP messages. +""" + +import re + +WS = "[ \t]" +OWS = WS + "{0,}?" +RWS = WS + "{1,}?" +BWS = OWS + +# RFC 7230 Section 3.2.6 "Field Value Components": +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# obs-text = %x80-FF +TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]" +OBS_TEXT = r"\x80-\xff" + +TOKEN = TCHAR + "{1,}" + +# RFC 5234 Appendix B.1 "Core Rules": +# VCHAR = %x21-7E +# ; visible (printing) characters +VCHAR = r"\x21-\x7e" + +# header-field = field-name ":" OWS field-value OWS +# field-name = token +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text + +# Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# changes field-content to: +# +# field-content = field-vchar [ 1*( SP / HTAB / field-vchar ) +# field-vchar ] + +FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]" +# Field content is more greedy than the ABNF, in that it will match the whole value +FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*" +# Which allows the field value here to just see if there is even a value in the first place +FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?" + +HEADER_FIELD = re.compile( + ( + "^(?P" + TOKEN + "):" + OWS + "(?P" + FIELD_VALUE + ")" + OWS + "$" + ).encode("latin-1") +) diff --git a/libs/waitress/runner.py b/libs/waitress/runner.py new file mode 100644 index 000000000..949fdb9e9 --- /dev/null +++ b/libs/waitress/runner.py @@ -0,0 +1,299 @@ +############################################################################## +# +# Copyright (c) 2013 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## +"""Command line runner. +""" + + +import getopt +import logging +import os +import os.path +import re +import sys + +from waitress import serve +from waitress.adjustments import Adjustments +from waitress.utilities import logger + +HELP = """\ +Usage: + + {0} [OPTS] MODULE:OBJECT + +Standard options: + + --help + Show this information. + + --call + Call the given object to get the WSGI application. 
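Stepping back to the rfc7230 helpers above: parse_header() reads the two named groups of HEADER_FIELD via group("name", "value"). A standalone sketch of that pattern with the named groups written out (an assumed reconstruction for illustration, not part of the patch):

import re

TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]"
OBS_TEXT = r"\x80-\xff"
TOKEN = TCHAR + "{1,}"
VCHAR = r"\x21-\x7e"
OWS = "[ \t]{0,}?"
FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]"
FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*"
FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?"

HEADER_FIELD = re.compile(
    ("^(?P<name>" + TOKEN + "):" + OWS + "(?P<value>" + FIELD_VALUE + ")" + OWS + "$").encode("latin-1")
)

m = HEADER_FIELD.match(b"Content-Length: 42")
print(m.group("name"), m.group("value"))  # b'Content-Length' b'42'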
+ + --host=ADDR + Hostname or IP address on which to listen, default is '0.0.0.0', + which means "all IP addresses on this host". + + Note: May not be used together with --listen + + --port=PORT + TCP port on which to listen, default is '8080' + + Note: May not be used together with --listen + + --listen=ip:port + Tell waitress to listen on an ip port combination. + + Example: + + --listen=127.0.0.1:8080 + --listen=[::1]:8080 + --listen=*:8080 + + This option may be used multiple times to listen on multiple sockets. + A wildcard for the hostname is also supported and will bind to both + IPv4/IPv6 depending on whether they are enabled or disabled. + + --[no-]ipv4 + Toggle on/off IPv4 support. + + Example: + + --no-ipv4 + + This will disable IPv4 socket support. This affects wildcard matching + when generating the list of sockets. + + --[no-]ipv6 + Toggle on/off IPv6 support. + + Example: + + --no-ipv6 + + This will turn on IPv6 socket support. This affects wildcard matching + when generating a list of sockets. + + --unix-socket=PATH + Path of Unix socket. If a socket path is specified, a Unix domain + socket is made instead of the usual inet domain socket. + + Not available on Windows. + + --unix-socket-perms=PERMS + Octal permissions to use for the Unix domain socket, default is + '600'. + + --url-scheme=STR + Default wsgi.url_scheme value, default is 'http'. + + --url-prefix=STR + The ``SCRIPT_NAME`` WSGI environment value. Setting this to anything + except the empty string will cause the WSGI ``SCRIPT_NAME`` value to be + the value passed minus any trailing slashes you add, and it will cause + the ``PATH_INFO`` of any request which is prefixed with this value to + be stripped of the prefix. Default is the empty string. + + --ident=STR + Server identity used in the 'Server' header in responses. Default + is 'waitress'. + +Tuning options: + + --threads=INT + Number of threads used to process application logic, default is 4. + + --backlog=INT + Connection backlog for the server. Default is 1024. + + --recv-bytes=INT + Number of bytes to request when calling socket.recv(). Default is + 8192. + + --send-bytes=INT + Number of bytes to send to socket.send(). Default is 18000. + Multiples of 9000 should avoid partly-filled TCP packets. + + --outbuf-overflow=INT + A temporary file should be created if the pending output is larger + than this. Default is 1048576 (1MB). + + --outbuf-high-watermark=INT + The app_iter will pause when pending output is larger than this value + and will resume once enough data is written to the socket to fall below + this threshold. Default is 16777216 (16MB). + + --inbuf-overflow=INT + A temporary file should be created if the pending input is larger + than this. Default is 524288 (512KB). + + --connection-limit=INT + Stop creating new channels if too many are already active. + Default is 100. + + --cleanup-interval=INT + Minimum seconds between cleaning up inactive channels. Default + is 30. See '--channel-timeout'. + + --channel-timeout=INT + Maximum number of seconds to leave inactive connections open. + Default is 120. 'Inactive' is defined as 'has received no data + from the client and has sent no data to the client'. + + --[no-]log-socket-errors + Toggle whether premature client disconnect tracebacks ought to be + logged. On by default. + + --max-request-header-size=INT + Maximum size of all request headers combined. Default is 262144 + (256KB). + + --max-request-body-size=INT + Maximum size of request body. Default is 1073741824 (1GB). 
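All of these options are parsed by Adjustments.parse_args() and handed to serve(); a hypothetical programmatic invocation of the runner (a sketch, and the module path is made up):

from waitress.runner import run

exit_code = run(argv=[
    "waitress-serve",              # argv[0], the program name
    "--host=127.0.0.1",
    "--port=8080",
    "--threads=4",
    "myproject.wsgi:application",  # hypothetical MODULE:OBJECT target
])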
+ + --[no-]expose-tracebacks + Toggle whether to expose tracebacks of unhandled exceptions to the + client. Off by default. + + --asyncore-loop-timeout=INT + The timeout value in seconds passed to asyncore.loop(). Default is 1. + + --asyncore-use-poll + The use_poll argument passed to ``asyncore.loop()``. Helps overcome + open file descriptors limit. Default is False. + + --channel-request-lookahead=INT + Allows channels to stay readable and buffer more requests up to the + given maximum even if a request is already being processed. This allows + detecting if a client closed the connection while its request is being + processed. Default is 0. + +""" + +RUNNER_PATTERN = re.compile( + r""" + ^ + (?P + [a-z_][a-z0-9_]*(?:\.[a-z_][a-z0-9_]*)* + ) + : + (?P + [a-z_][a-z0-9_]*(?:\.[a-z_][a-z0-9_]*)* + ) + $ + """, + re.I | re.X, +) + + +def match(obj_name): + matches = RUNNER_PATTERN.match(obj_name) + if not matches: + raise ValueError("Malformed application '{}'".format(obj_name)) + return matches.group("module"), matches.group("object") + + +def resolve(module_name, object_name): + """Resolve a named object in a module.""" + # We cast each segments due to an issue that has been found to manifest + # in Python 2.6.6, but not 2.6.8, and may affect other revisions of Python + # 2.6 and 2.7, whereby ``__import__`` chokes if the list passed in the + # ``fromlist`` argument are unicode strings rather than 8-bit strings. + # The error triggered is "TypeError: Item in ``fromlist '' not a string". + # My guess is that this was fixed by checking against ``basestring`` + # rather than ``str`` sometime between the release of 2.6.6 and 2.6.8, + # but I've yet to go over the commits. I know, however, that the NEWS + # file makes no mention of such a change to the behaviour of + # ``__import__``. + segments = [str(segment) for segment in object_name.split(".")] + obj = __import__(module_name, fromlist=segments[:1]) + for segment in segments: + obj = getattr(obj, segment) + return obj + + +def show_help(stream, name, error=None): # pragma: no cover + if error is not None: + print("Error: {}\n".format(error), file=stream) + print(HELP.format(name), file=stream) + + +def show_exception(stream): + exc_type, exc_value = sys.exc_info()[:2] + args = getattr(exc_value, "args", None) + print( + ("There was an exception ({}) importing your module.\n").format( + exc_type.__name__, + ), + file=stream, + ) + if args: + print("It had these arguments: ", file=stream) + for idx, arg in enumerate(args, start=1): + print("{}. 
{}\n".format(idx, arg), file=stream) + else: + print("It had no arguments.", file=stream) + + +def run(argv=sys.argv, _serve=serve): + """Command line runner.""" + name = os.path.basename(argv[0]) + + try: + kw, args = Adjustments.parse_args(argv[1:]) + except getopt.GetoptError as exc: + show_help(sys.stderr, name, str(exc)) + return 1 + + if kw["help"]: + show_help(sys.stdout, name) + return 0 + + if len(args) != 1: + show_help(sys.stderr, name, "Specify one application only") + return 1 + + # set a default level for the logger only if it hasn't been set explicitly + # note that this level does not override any parent logger levels, + # handlers, etc but without it no log messages are emitted by default + if logger.level == logging.NOTSET: + logger.setLevel(logging.INFO) + + try: + module, obj_name = match(args[0]) + except ValueError as exc: + show_help(sys.stderr, name, str(exc)) + show_exception(sys.stderr) + return 1 + + # Add the current directory onto sys.path + sys.path.append(os.getcwd()) + + # Get the WSGI function. + try: + app = resolve(module, obj_name) + except ImportError: + show_help(sys.stderr, name, "Bad module '{}'".format(module)) + show_exception(sys.stderr) + return 1 + except AttributeError: + show_help(sys.stderr, name, "Bad object name '{}'".format(obj_name)) + show_exception(sys.stderr) + return 1 + if kw["call"]: + app = app() + + # These arguments are specific to the runner, not waitress itself. + del kw["call"], kw["help"] + + _serve(app, **kw) + return 0 diff --git a/libs/waitress/server.py b/libs/waitress/server.py new file mode 100644 index 000000000..55cffe9ba --- /dev/null +++ b/libs/waitress/server.py @@ -0,0 +1,417 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## + +import os +import os.path +import socket +import time + +from waitress import trigger +from waitress.adjustments import Adjustments +from waitress.channel import HTTPChannel +from waitress.compat import IPPROTO_IPV6, IPV6_V6ONLY +from waitress.task import ThreadedTaskDispatcher +from waitress.utilities import cleanup_unix_socket + +from . import wasyncore +from .proxy_headers import proxy_headers_middleware + + +def create_server( + application, + map=None, + _start=True, # test shim + _sock=None, # test shim + _dispatcher=None, # test shim + **kw # adjustments +): + """ + if __name__ == '__main__': + server = create_server(app) + server.run() + """ + if application is None: + raise ValueError( + 'The "app" passed to ``create_server`` was ``None``. You forgot ' + "to return a WSGI app within your application." 
+ ) + adj = Adjustments(**kw) + + if map is None: # pragma: nocover + map = {} + + dispatcher = _dispatcher + if dispatcher is None: + dispatcher = ThreadedTaskDispatcher() + dispatcher.set_thread_count(adj.threads) + + if adj.unix_socket and hasattr(socket, "AF_UNIX"): + sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None) + return UnixWSGIServer( + application, + map, + _start, + _sock, + dispatcher=dispatcher, + adj=adj, + sockinfo=sockinfo, + ) + + effective_listen = [] + last_serv = None + if not adj.sockets: + for sockinfo in adj.listen: + # When TcpWSGIServer is called, it registers itself in the map. This + # side-effect is all we need it for, so we don't store a reference to + # or return it to the user. + last_serv = TcpWSGIServer( + application, + map, + _start, + _sock, + dispatcher=dispatcher, + adj=adj, + sockinfo=sockinfo, + ) + effective_listen.append( + (last_serv.effective_host, last_serv.effective_port) + ) + + for sock in adj.sockets: + sockinfo = (sock.family, sock.type, sock.proto, sock.getsockname()) + if sock.family == socket.AF_INET or sock.family == socket.AF_INET6: + last_serv = TcpWSGIServer( + application, + map, + _start, + sock, + dispatcher=dispatcher, + adj=adj, + bind_socket=False, + sockinfo=sockinfo, + ) + effective_listen.append( + (last_serv.effective_host, last_serv.effective_port) + ) + elif hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: + last_serv = UnixWSGIServer( + application, + map, + _start, + sock, + dispatcher=dispatcher, + adj=adj, + bind_socket=False, + sockinfo=sockinfo, + ) + effective_listen.append( + (last_serv.effective_host, last_serv.effective_port) + ) + + # We are running a single server, so we can just return the last server, + # saves us from having to create one more object + if len(effective_listen) == 1: + # In this case we have no need to use a MultiSocketServer + return last_serv + + log_info = last_serv.log_info + # Return a class that has a utility function to print out the sockets it's + # listening on, and has a .run() function. All of the TcpWSGIServers + # registered themselves in the map above. + return MultiSocketServer(map, adj, effective_listen, dispatcher, log_info) + + +# This class is only ever used if we have multiple listen sockets. It allows +# the serve() API to call .run() which starts the wasyncore loop, and catches +# SystemExit/KeyboardInterrupt so that it can atempt to cleanly shut down. 
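Before the multi-socket wrapper below, a minimal sketch of the common single-listener path through create_server() (illustrative only; host, port and threads are passed through to Adjustments):

from waitress.server import create_server

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello from waitress"]

server = create_server(app, host="127.0.0.1", port=8080, threads=4)
# server.run()  # blocks in the wasyncore loop until interrupted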
+class MultiSocketServer: + asyncore = wasyncore # test shim + + def __init__( + self, + map=None, + adj=None, + effective_listen=None, + dispatcher=None, + log_info=None, + ): + self.adj = adj + self.map = map + self.effective_listen = effective_listen + self.task_dispatcher = dispatcher + self.log_info = log_info + + def print_listen(self, format_str): # pragma: nocover + for l in self.effective_listen: + l = list(l) + + if ":" in l[0]: + l[0] = "[{}]".format(l[0]) + + self.log_info(format_str.format(*l)) + + def run(self): + try: + self.asyncore.loop( + timeout=self.adj.asyncore_loop_timeout, + map=self.map, + use_poll=self.adj.asyncore_use_poll, + ) + except (SystemExit, KeyboardInterrupt): + self.close() + + def close(self): + self.task_dispatcher.shutdown() + wasyncore.close_all(self.map) + + +class BaseWSGIServer(wasyncore.dispatcher): + + channel_class = HTTPChannel + next_channel_cleanup = 0 + socketmod = socket # test shim + asyncore = wasyncore # test shim + in_connection_overflow = False + + def __init__( + self, + application, + map=None, + _start=True, # test shim + _sock=None, # test shim + dispatcher=None, # dispatcher + adj=None, # adjustments + sockinfo=None, # opaque object + bind_socket=True, + **kw + ): + if adj is None: + adj = Adjustments(**kw) + + if adj.trusted_proxy or adj.clear_untrusted_proxy_headers: + # wrap the application to deal with proxy headers + # we wrap it here because webtest subclasses the TcpWSGIServer + # directly and thus doesn't run any code that's in create_server + application = proxy_headers_middleware( + application, + trusted_proxy=adj.trusted_proxy, + trusted_proxy_count=adj.trusted_proxy_count, + trusted_proxy_headers=adj.trusted_proxy_headers, + clear_untrusted=adj.clear_untrusted_proxy_headers, + log_untrusted=adj.log_untrusted_proxy_headers, + logger=self.logger, + ) + + if map is None: + # use a nonglobal socket map by default to hopefully prevent + # conflicts with apps and libs that use the wasyncore global socket + # map ala https://github.com/Pylons/waitress/issues/63 + map = {} + if sockinfo is None: + sockinfo = adj.listen[0] + + self.sockinfo = sockinfo + self.family = sockinfo[0] + self.socktype = sockinfo[1] + self.application = application + self.adj = adj + self.trigger = trigger.trigger(map) + if dispatcher is None: + dispatcher = ThreadedTaskDispatcher() + dispatcher.set_thread_count(self.adj.threads) + + self.task_dispatcher = dispatcher + self.asyncore.dispatcher.__init__(self, _sock, map=map) + if _sock is None: + self.create_socket(self.family, self.socktype) + if self.family == socket.AF_INET6: # pragma: nocover + self.socket.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) + + self.set_reuse_addr() + + if bind_socket: + self.bind_server_socket() + + self.effective_host, self.effective_port = self.getsockname() + self.server_name = adj.server_name + self.active_channels = {} + if _start: + self.accept_connections() + + def bind_server_socket(self): + raise NotImplementedError # pragma: no cover + + def getsockname(self): + raise NotImplementedError # pragma: no cover + + def accept_connections(self): + self.accepting = True + self.socket.listen(self.adj.backlog) # Get around asyncore NT limit + + def add_task(self, task): + self.task_dispatcher.add_task(task) + + def readable(self): + now = time.time() + if now >= self.next_channel_cleanup: + self.next_channel_cleanup = now + self.adj.cleanup_interval + self.maintenance(now) + + if self.accepting: + if ( + not self.in_connection_overflow + and len(self._map) >= 
self.adj.connection_limit + ): + self.in_connection_overflow = True + self.logger.warning( + "total open connections reached the connection limit, " + "no longer accepting new connections" + ) + elif ( + self.in_connection_overflow + and len(self._map) < self.adj.connection_limit + ): + self.in_connection_overflow = False + self.logger.info( + "total open connections dropped below the connection limit, " + "listening again" + ) + return not self.in_connection_overflow + return False + + def writable(self): + return False + + def handle_read(self): + pass + + def handle_connect(self): + pass + + def handle_accept(self): + try: + v = self.accept() + if v is None: + return + conn, addr = v + except OSError: + # Linux: On rare occasions we get a bogus socket back from + # accept. socketmodule.c:makesockaddr complains that the + # address family is unknown. We don't want the whole server + # to shut down because of this. + if self.adj.log_socket_errors: + self.logger.warning("server accept() threw an exception", exc_info=True) + return + self.set_socket_options(conn) + addr = self.fix_addr(addr) + self.channel_class(self, conn, addr, self.adj, map=self._map) + + def run(self): + try: + self.asyncore.loop( + timeout=self.adj.asyncore_loop_timeout, + map=self._map, + use_poll=self.adj.asyncore_use_poll, + ) + except (SystemExit, KeyboardInterrupt): + self.task_dispatcher.shutdown() + + def pull_trigger(self): + self.trigger.pull_trigger() + + def set_socket_options(self, conn): + pass + + def fix_addr(self, addr): + return addr + + def maintenance(self, now): + """ + Closes channels that have not had any activity in a while. + + The timeout is configured through adj.channel_timeout (seconds). + """ + cutoff = now - self.adj.channel_timeout + for channel in self.active_channels.values(): + if (not channel.requests) and channel.last_activity < cutoff: + channel.will_close = True + + def print_listen(self, format_str): # pragma: no cover + self.log_info(format_str.format(self.effective_host, self.effective_port)) + + def close(self): + self.trigger.close() + return wasyncore.dispatcher.close(self) + + +class TcpWSGIServer(BaseWSGIServer): + def bind_server_socket(self): + (_, _, _, sockaddr) = self.sockinfo + self.bind(sockaddr) + + def getsockname(self): + # Return the IP address, port as numeric + return self.socketmod.getnameinfo( + self.socket.getsockname(), + self.socketmod.NI_NUMERICHOST | self.socketmod.NI_NUMERICSERV, + ) + + def set_socket_options(self, conn): + for (level, optname, value) in self.adj.socket_options: + conn.setsockopt(level, optname, value) + + +if hasattr(socket, "AF_UNIX"): + + class UnixWSGIServer(BaseWSGIServer): + def __init__( + self, + application, + map=None, + _start=True, # test shim + _sock=None, # test shim + dispatcher=None, # dispatcher + adj=None, # adjustments + sockinfo=None, # opaque object + **kw + ): + if sockinfo is None: + sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None) + + super().__init__( + application, + map=map, + _start=_start, + _sock=_sock, + dispatcher=dispatcher, + adj=adj, + sockinfo=sockinfo, + **kw, + ) + + def bind_server_socket(self): + cleanup_unix_socket(self.adj.unix_socket) + self.bind(self.adj.unix_socket) + if os.path.exists(self.adj.unix_socket): + os.chmod(self.adj.unix_socket, self.adj.unix_socket_perms) + + def getsockname(self): + return ("unix", self.socket.getsockname()) + + def fix_addr(self, addr): + return ("localhost", None) + + +# Compatibility alias. 
+WSGIServer = TcpWSGIServer diff --git a/libs/waitress/task.py b/libs/waitress/task.py new file mode 100644 index 000000000..2ac8f4c81 --- /dev/null +++ b/libs/waitress/task.py @@ -0,0 +1,570 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## + +from collections import deque +import socket +import sys +import threading +import time + +from .buffers import ReadOnlyFileBasedBuffer +from .utilities import build_http_date, logger, queue_logger + +rename_headers = { # or keep them without the HTTP_ prefix added + "CONTENT_LENGTH": "CONTENT_LENGTH", + "CONTENT_TYPE": "CONTENT_TYPE", +} + +hop_by_hop = frozenset( + ( + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailers", + "transfer-encoding", + "upgrade", + ) +) + + +class ThreadedTaskDispatcher: + """A Task Dispatcher that creates a thread for each task.""" + + stop_count = 0 # Number of threads that will stop soon. + active_count = 0 # Number of currently active threads + logger = logger + queue_logger = queue_logger + + def __init__(self): + self.threads = set() + self.queue = deque() + self.lock = threading.Lock() + self.queue_cv = threading.Condition(self.lock) + self.thread_exit_cv = threading.Condition(self.lock) + + def start_new_thread(self, target, thread_no): + t = threading.Thread( + target=target, name="waitress-{}".format(thread_no), args=(thread_no,) + ) + t.daemon = True + t.start() + + def handler_thread(self, thread_no): + while True: + with self.lock: + while not self.queue and self.stop_count == 0: + # Mark ourselves as idle before waiting to be + # woken up, then we will once again be active + self.active_count -= 1 + self.queue_cv.wait() + self.active_count += 1 + + if self.stop_count > 0: + self.active_count -= 1 + self.stop_count -= 1 + self.threads.discard(thread_no) + self.thread_exit_cv.notify() + break + + task = self.queue.popleft() + try: + task.service() + except BaseException: + self.logger.exception("Exception when servicing %r", task) + + def set_thread_count(self, count): + with self.lock: + threads = self.threads + thread_no = 0 + running = len(threads) - self.stop_count + while running < count: + # Start threads. + while thread_no in threads: + thread_no = thread_no + 1 + threads.add(thread_no) + running += 1 + self.start_new_thread(self.handler_thread, thread_no) + self.active_count += 1 + thread_no = thread_no + 1 + if running > count: + # Stop threads. + self.stop_count += running - count + self.queue_cv.notify_all() + + def add_task(self, task): + with self.lock: + self.queue.append(task) + self.queue_cv.notify() + queue_size = len(self.queue) + idle_threads = len(self.threads) - self.stop_count - self.active_count + if queue_size > idle_threads: + self.queue_logger.warning( + "Task queue depth is %d", queue_size - idle_threads + ) + + def shutdown(self, cancel_pending=True, timeout=5): + self.set_thread_count(0) + # Ensure the threads shut down. 
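The dispatcher above hands queued tasks to a fixed pool of daemon worker threads; a minimal sketch of driving it directly (illustrative only; the toy task class is hypothetical):

import time
from waitress.task import ThreadedTaskDispatcher

class PrintTask:
    # Anything exposing service()/cancel() can be queued.
    def service(self):
        print("handled on a worker thread")
    def cancel(self):
        pass

dispatcher = ThreadedTaskDispatcher()
dispatcher.set_thread_count(2)
dispatcher.add_task(PrintTask())
time.sleep(0.2)                 # give a worker a chance to pick the task up
dispatcher.shutdown(timeout=1)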
+ threads = self.threads + expiration = time.time() + timeout + with self.lock: + while threads: + if time.time() >= expiration: + self.logger.warning("%d thread(s) still running", len(threads)) + break + self.thread_exit_cv.wait(0.1) + if cancel_pending: + # Cancel remaining tasks. + queue = self.queue + if len(queue) > 0: + self.logger.warning("Canceling %d pending task(s)", len(queue)) + while queue: + task = queue.popleft() + task.cancel() + self.queue_cv.notify_all() + return True + return False + + +class Task: + close_on_finish = False + status = "200 OK" + wrote_header = False + start_time = 0 + content_length = None + content_bytes_written = 0 + logged_write_excess = False + logged_write_no_body = False + complete = False + chunked_response = False + logger = logger + + def __init__(self, channel, request): + self.channel = channel + self.request = request + self.response_headers = [] + version = request.version + if version not in ("1.0", "1.1"): + # fall back to a version we support. + version = "1.0" + self.version = version + + def service(self): + try: + self.start() + self.execute() + self.finish() + except OSError: + self.close_on_finish = True + if self.channel.adj.log_socket_errors: + raise + + @property + def has_body(self): + return not ( + self.status.startswith("1") + or self.status.startswith("204") + or self.status.startswith("304") + ) + + def build_response_header(self): + version = self.version + # Figure out whether the connection should be closed. + connection = self.request.headers.get("CONNECTION", "").lower() + response_headers = [] + content_length_header = None + date_header = None + server_header = None + connection_close_header = None + + for (headername, headerval) in self.response_headers: + headername = "-".join([x.capitalize() for x in headername.split("-")]) + + if headername == "Content-Length": + if self.has_body: + content_length_header = headerval + else: + continue # pragma: no cover + + if headername == "Date": + date_header = headerval + + if headername == "Server": + server_header = headerval + + if headername == "Connection": + connection_close_header = headerval.lower() + # replace with properly capitalized version + response_headers.append((headername, headerval)) + + if ( + content_length_header is None + and self.content_length is not None + and self.has_body + ): + content_length_header = str(self.content_length) + response_headers.append(("Content-Length", content_length_header)) + + def close_on_finish(): + if connection_close_header is None: + response_headers.append(("Connection", "close")) + self.close_on_finish = True + + if version == "1.0": + if connection == "keep-alive": + if not content_length_header: + close_on_finish() + else: + response_headers.append(("Connection", "Keep-Alive")) + else: + close_on_finish() + + elif version == "1.1": + if connection == "close": + close_on_finish() + + if not content_length_header: + # RFC 7230: MUST NOT send Transfer-Encoding or Content-Length + # for any response with a status code of 1xx, 204 or 304. + + if self.has_body: + response_headers.append(("Transfer-Encoding", "chunked")) + self.chunked_response = True + + if not self.close_on_finish: + close_on_finish() + + # under HTTP 1.1 keep-alive is default, no need to set the header + else: + raise AssertionError("neither HTTP/1.0 or HTTP/1.1") + + # Set the Server and Date field, if not yet specified. This is needed + # if the server is used as a proxy. 
+ ident = self.channel.server.adj.ident + + if not server_header: + if ident: + response_headers.append(("Server", ident)) + else: + response_headers.append(("Via", ident or "waitress")) + + if not date_header: + response_headers.append(("Date", build_http_date(self.start_time))) + + self.response_headers = response_headers + + first_line = "HTTP/%s %s" % (self.version, self.status) + # NB: sorting headers needs to preserve same-named-header order + # as per RFC 2616 section 4.2; thus the key=lambda x: x[0] here; + # rely on stable sort to keep relative position of same-named headers + next_lines = [ + "%s: %s" % hv for hv in sorted(self.response_headers, key=lambda x: x[0]) + ] + lines = [first_line] + next_lines + res = "%s\r\n\r\n" % "\r\n".join(lines) + + return res.encode("latin-1") + + def remove_content_length_header(self): + response_headers = [] + + for header_name, header_value in self.response_headers: + if header_name.lower() == "content-length": + continue # pragma: nocover + response_headers.append((header_name, header_value)) + + self.response_headers = response_headers + + def start(self): + self.start_time = time.time() + + def finish(self): + if not self.wrote_header: + self.write(b"") + if self.chunked_response: + # not self.write, it will chunk it! + self.channel.write_soon(b"0\r\n\r\n") + + def write(self, data): + if not self.complete: + raise RuntimeError("start_response was not called before body written") + channel = self.channel + if not self.wrote_header: + rh = self.build_response_header() + channel.write_soon(rh) + self.wrote_header = True + + if data and self.has_body: + towrite = data + cl = self.content_length + if self.chunked_response: + # use chunked encoding response + towrite = hex(len(data))[2:].upper().encode("latin-1") + b"\r\n" + towrite += data + b"\r\n" + elif cl is not None: + towrite = data[: cl - self.content_bytes_written] + self.content_bytes_written += len(towrite) + if towrite != data and not self.logged_write_excess: + self.logger.warning( + "application-written content exceeded the number of " + "bytes specified by Content-Length header (%s)" % cl + ) + self.logged_write_excess = True + if towrite: + channel.write_soon(towrite) + elif data: + # Cheat, and tell the application we have written all of the bytes, + # even though the response shouldn't have a body and we are + # ignoring it entirely. + self.content_bytes_written += len(data) + + if not self.logged_write_no_body: + self.logger.warning( + "application-written content was ignored due to HTTP " + "response that may not contain a message-body: (%s)" % self.status + ) + self.logged_write_no_body = True + + +class ErrorTask(Task): + """An error task produces an error response""" + + complete = True + + def execute(self): + e = self.request.error + status, headers, body = e.to_response() + self.status = status + self.response_headers.extend(headers) + # We need to explicitly tell the remote client we are closing the + # connection, because self.close_on_finish is set, and we are going to + # slam the door in the clients face. 
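For reference, the chunked framing that write() applies when no Content-Length is known boils down to the following (a sketch mirroring the hex-length framing above; finish() emits the final 0\r\n\r\n terminator):

def frame_chunk(data):
    # hex length in uppercase, CRLF, payload, CRLF: one HTTP/1.1 chunk
    return hex(len(data))[2:].upper().encode("latin-1") + b"\r\n" + data + b"\r\n"

print(frame_chunk(b"hello world"))  # b'B\r\nhello world\r\n'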
+ self.response_headers.append(("Connection", "close")) + self.close_on_finish = True + self.content_length = len(body) + self.write(body.encode("latin-1")) + + +class WSGITask(Task): + """A WSGI task produces a response from a WSGI application.""" + + environ = None + + def execute(self): + environ = self.get_environment() + + def start_response(status, headers, exc_info=None): + if self.complete and not exc_info: + raise AssertionError( + "start_response called a second time without providing exc_info." + ) + if exc_info: + try: + if self.wrote_header: + # higher levels will catch and handle raised exception: + # 1. "service" method in task.py + # 2. "service" method in channel.py + # 3. "handler_thread" method in task.py + raise exc_info[1] + else: + # As per WSGI spec existing headers must be cleared + self.response_headers = [] + finally: + exc_info = None + + self.complete = True + + if not status.__class__ is str: + raise AssertionError("status %s is not a string" % status) + if "\n" in status or "\r" in status: + raise ValueError( + "carriage return/line feed character present in status" + ) + + self.status = status + + # Prepare the headers for output + for k, v in headers: + if not k.__class__ is str: + raise AssertionError( + "Header name %r is not a string in %r" % (k, (k, v)) + ) + if not v.__class__ is str: + raise AssertionError( + "Header value %r is not a string in %r" % (v, (k, v)) + ) + + if "\n" in v or "\r" in v: + raise ValueError( + "carriage return/line feed character present in header value" + ) + if "\n" in k or "\r" in k: + raise ValueError( + "carriage return/line feed character present in header name" + ) + + kl = k.lower() + if kl == "content-length": + self.content_length = int(v) + elif kl in hop_by_hop: + raise AssertionError( + '%s is a "hop-by-hop" header; it cannot be used by ' + "a WSGI application (see PEP 3333)" % k + ) + + self.response_headers.extend(headers) + + # Return a method used to write the response data. + return self.write + + # Call the application to handle the request and write a response + app_iter = self.channel.server.application(environ, start_response) + + can_close_app_iter = True + try: + if app_iter.__class__ is ReadOnlyFileBasedBuffer: + cl = self.content_length + size = app_iter.prepare(cl) + if size: + if cl != size: + if cl is not None: + self.remove_content_length_header() + self.content_length = size + self.write(b"") # generate headers + # if the write_soon below succeeds then the channel will + # take over closing the underlying file via the channel's + # _flush_some or handle_close so we intentionally avoid + # calling close in the finally block + self.channel.write_soon(app_iter) + can_close_app_iter = False + return + + first_chunk_len = None + for chunk in app_iter: + if first_chunk_len is None: + first_chunk_len = len(chunk) + # Set a Content-Length header if one is not supplied. 
+ # start_response may not have been called until first + # iteration as per PEP, so we must reinterrogate + # self.content_length here + if self.content_length is None: + app_iter_len = None + if hasattr(app_iter, "__len__"): + app_iter_len = len(app_iter) + if app_iter_len == 1: + self.content_length = first_chunk_len + # transmit headers only after first iteration of the iterable + # that returns a non-empty bytestring (PEP 3333) + if chunk: + self.write(chunk) + + cl = self.content_length + if cl is not None: + if self.content_bytes_written != cl: + # close the connection so the client isn't sitting around + # waiting for more data when there are too few bytes + # to service content-length + self.close_on_finish = True + if self.request.command != "HEAD": + self.logger.warning( + "application returned too few bytes (%s) " + "for specified Content-Length (%s) via app_iter" + % (self.content_bytes_written, cl), + ) + finally: + if can_close_app_iter and hasattr(app_iter, "close"): + app_iter.close() + + def get_environment(self): + """Returns a WSGI environment.""" + environ = self.environ + if environ is not None: + # Return the cached copy. + return environ + + request = self.request + path = request.path + channel = self.channel + server = channel.server + url_prefix = server.adj.url_prefix + + if path.startswith("/"): + # strip extra slashes at the beginning of a path that starts + # with any number of slashes + path = "/" + path.lstrip("/") + + if url_prefix: + # NB: url_prefix is guaranteed by the configuration machinery to + # be either the empty string or a string that starts with a single + # slash and ends without any slashes + if path == url_prefix: + # if the path is the same as the url prefix, the SCRIPT_NAME + # should be the url_prefix and PATH_INFO should be empty + path = "" + else: + # if the path starts with the url prefix plus a slash, + # the SCRIPT_NAME should be the url_prefix and PATH_INFO should + # the value of path from the slash until its end + url_prefix_with_trailing_slash = url_prefix + "/" + if path.startswith(url_prefix_with_trailing_slash): + path = path[len(url_prefix) :] + + environ = { + "REMOTE_ADDR": channel.addr[0], + # Nah, we aren't actually going to look up the reverse DNS for + # REMOTE_ADDR, but we will happily set this environment variable + # for the WSGI application. Spec says we can just set this to + # REMOTE_ADDR, so we do. 
+ "REMOTE_HOST": channel.addr[0], + # try and set the REMOTE_PORT to something useful, but maybe None + "REMOTE_PORT": str(channel.addr[1]), + "REQUEST_METHOD": request.command.upper(), + "SERVER_PORT": str(server.effective_port), + "SERVER_NAME": server.server_name, + "SERVER_SOFTWARE": server.adj.ident, + "SERVER_PROTOCOL": "HTTP/%s" % self.version, + "SCRIPT_NAME": url_prefix, + "PATH_INFO": path, + "QUERY_STRING": request.query, + "wsgi.url_scheme": request.url_scheme, + # the following environment variables are required by the WSGI spec + "wsgi.version": (1, 0), + # apps should use the logging module + "wsgi.errors": sys.stderr, + "wsgi.multithread": True, + "wsgi.multiprocess": False, + "wsgi.run_once": False, + "wsgi.input": request.get_body_stream(), + "wsgi.file_wrapper": ReadOnlyFileBasedBuffer, + "wsgi.input_terminated": True, # wsgi.input is EOF terminated + } + + for key, value in dict(request.headers).items(): + value = value.strip() + mykey = rename_headers.get(key, None) + if mykey is None: + mykey = "HTTP_" + key + if mykey not in environ: + environ[mykey] = value + + # Insert a callable into the environment that allows the application to + # check if the client disconnected. Only works with + # channel_request_lookahead larger than 0. + environ["waitress.client_disconnected"] = self.channel.check_client_disconnected + + # cache the environ for this request + self.environ = environ + return environ diff --git a/libs/waitress/trigger.py b/libs/waitress/trigger.py new file mode 100644 index 000000000..24c4d0d6b --- /dev/null +++ b/libs/waitress/trigger.py @@ -0,0 +1,203 @@ +############################################################################## +# +# Copyright (c) 2001-2005 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE +# +############################################################################## + +import errno +import os +import socket +import threading + +from . import wasyncore + +# Wake up a call to select() running in the main thread. +# +# This is useful in a context where you are using Medusa's I/O +# subsystem to deliver data, but the data is generated by another +# thread. Normally, if Medusa is in the middle of a call to +# select(), new output data generated by another thread will have +# to sit until the call to select() either times out or returns. +# If the trigger is 'pulled' by another thread, it should immediately +# generate a READ event on the trigger object, which will force the +# select() invocation to return. +# +# A common use for this facility: letting Medusa manage I/O for a +# large number of connections; but routing each request through a +# thread chosen from a fixed-size thread pool. When a thread is +# acquired, a transaction is performed, but output data is +# accumulated into buffers that will be emptied more efficiently +# by Medusa. 
[picture a server that can process database queries +# rapidly, but doesn't want to tie up threads waiting to send data +# to low-bandwidth connections] +# +# The other major feature provided by this class is the ability to +# move work back into the main thread: if you call pull_trigger() +# with a thunk argument, when select() wakes up and receives the +# event it will call your thunk from within that thread. The main +# purpose of this is to remove the need to wrap thread locks around +# Medusa's data structures, which normally do not need them. [To see +# why this is true, imagine this scenario: A thread tries to push some +# new data onto a channel's outgoing data queue at the same time that +# the main thread is trying to remove some] + + +class _triggerbase: + """OS-independent base class for OS-dependent trigger class.""" + + kind = None # subclass must set to "pipe" or "loopback"; used by repr + + def __init__(self): + self._closed = False + + # `lock` protects the `thunks` list from being traversed and + # appended to simultaneously. + self.lock = threading.Lock() + + # List of no-argument callbacks to invoke when the trigger is + # pulled. These run in the thread running the wasyncore mainloop, + # regardless of which thread pulls the trigger. + self.thunks = [] + + def readable(self): + return True + + def writable(self): + return False + + def handle_connect(self): + pass + + def handle_close(self): + self.close() + + # Override the wasyncore close() method, because it doesn't know about + # (so can't close) all the gimmicks we have open. Subclass must + # supply a _close() method to do platform-specific closing work. _close() + # will be called iff we're not already closed. + def close(self): + if not self._closed: + self._closed = True + self.del_channel() + self._close() # subclass does OS-specific stuff + + def pull_trigger(self, thunk=None): + if thunk: + with self.lock: + self.thunks.append(thunk) + self._physical_pull() + + def handle_read(self): + try: + self.recv(8192) + except OSError: + return + with self.lock: + for thunk in self.thunks: + try: + thunk() + except: + nil, t, v, tbinfo = wasyncore.compact_traceback() + self.log_info( + "exception in trigger thunk: (%s:%s %s)" % (t, v, tbinfo) + ) + self.thunks = [] + + +if os.name == "posix": + + class trigger(_triggerbase, wasyncore.file_dispatcher): + kind = "pipe" + + def __init__(self, map): + _triggerbase.__init__(self) + r, self.trigger = self._fds = os.pipe() + wasyncore.file_dispatcher.__init__(self, r, map=map) + + def _close(self): + for fd in self._fds: + os.close(fd) + self._fds = [] + wasyncore.file_dispatcher.close(self) + + def _physical_pull(self): + os.write(self.trigger, b"x") + + +else: # pragma: no cover + # Windows version; uses just sockets, because a pipe isn't select'able + # on Windows. + + class trigger(_triggerbase, wasyncore.dispatcher): + kind = "loopback" + + def __init__(self, map): + _triggerbase.__init__(self) + + # Get a pair of connected sockets. The trigger is the 'w' + # end of the pair, which is connected to 'r'. 'r' is put + # in the wasyncore socket map. "pulling the trigger" then + # means writing something on w, which will wake up r. + + w = socket.socket() + # Disable buffering -- pulling the trigger sends 1 byte, + # and we want that sent immediately, to wake up wasyncore's + # select() ASAP. + w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + count = 0 + while True: + count += 1 + # Bind to a local port; for efficiency, let the OS pick + # a free port for us. 
+ # Unfortunately, stress tests showed that we may not + # be able to connect to that port ("Address already in + # use") despite that the OS picked it. This appears + # to be a race bug in the Windows socket implementation. + # So we loop until a connect() succeeds (almost always + # on the first try). See the long thread at + # http://mail.zope.org/pipermail/zope/2005-July/160433.html + # for hideous details. + a = socket.socket() + a.bind(("127.0.0.1", 0)) + connect_address = a.getsockname() # assigned (host, port) pair + a.listen(1) + try: + w.connect(connect_address) + break # success + except OSError as detail: + if detail[0] != errno.WSAEADDRINUSE: + # "Address already in use" is the only error + # I've seen on two WinXP Pro SP2 boxes, under + # Pythons 2.3.5 and 2.4.1. + raise + # (10048, 'Address already in use') + # assert count <= 2 # never triggered in Tim's tests + if count >= 10: # I've never seen it go above 2 + a.close() + w.close() + raise RuntimeError("Cannot bind trigger!") + # Close `a` and try again. Note: I originally put a short + # sleep() here, but it didn't appear to help or hurt. + a.close() + + r, addr = a.accept() # r becomes wasyncore's (self.)socket + a.close() + self.trigger = w + wasyncore.dispatcher.__init__(self, r, map=map) + + def _close(self): + # self.socket is r, and self.trigger is w, from __init__ + self.socket.close() + self.trigger.close() + + def _physical_pull(self): + self.trigger.send(b"x") diff --git a/libs/waitress/utilities.py b/libs/waitress/utilities.py new file mode 100644 index 000000000..3caaa336f --- /dev/null +++ b/libs/waitress/utilities.py @@ -0,0 +1,320 @@ +############################################################################## +# +# Copyright (c) 2004 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. 
+# +############################################################################## +"""Utility functions +""" + +import calendar +import errno +import logging +import os +import re +import stat +import time + +from .rfc7230 import OBS_TEXT, VCHAR + +logger = logging.getLogger("waitress") +queue_logger = logging.getLogger("waitress.queue") + + +def find_double_newline(s): + """Returns the position just after a double newline in the given string.""" + pos = s.find(b"\r\n\r\n") + + if pos >= 0: + pos += 4 + + return pos + + +def concat(*args): + return "".join(args) + + +def join(seq, field=" "): + return field.join(seq) + + +def group(s): + return "(" + s + ")" + + +short_days = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"] +long_days = [ + "sunday", + "monday", + "tuesday", + "wednesday", + "thursday", + "friday", + "saturday", +] + +short_day_reg = group(join(short_days, "|")) +long_day_reg = group(join(long_days, "|")) + +daymap = {} + +for i in range(7): + daymap[short_days[i]] = i + daymap[long_days[i]] = i + +hms_reg = join(3 * [group("[0-9][0-9]")], ":") + +months = [ + "jan", + "feb", + "mar", + "apr", + "may", + "jun", + "jul", + "aug", + "sep", + "oct", + "nov", + "dec", +] + +monmap = {} + +for i in range(12): + monmap[months[i]] = i + 1 + +months_reg = group(join(months, "|")) + +# From draft-ietf-http-v11-spec-07.txt/3.3.1 +# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 +# Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 +# Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format + +# rfc822 format +rfc822_date = join( + [ + concat(short_day_reg, ","), # day + group("[0-9][0-9]?"), # date + months_reg, # month + group("[0-9]+"), # year + hms_reg, # hour minute second + "gmt", + ], + " ", +) + +rfc822_reg = re.compile(rfc822_date) + + +def unpack_rfc822(m): + g = m.group + + return ( + int(g(4)), # year + monmap[g(3)], # month + int(g(2)), # day + int(g(5)), # hour + int(g(6)), # minute + int(g(7)), # second + 0, + 0, + 0, + ) + + +# rfc850 format +rfc850_date = join( + [ + concat(long_day_reg, ","), + join([group("[0-9][0-9]?"), months_reg, group("[0-9]+")], "-"), + hms_reg, + "gmt", + ], + " ", +) + +rfc850_reg = re.compile(rfc850_date) +# they actually unpack the same way +def unpack_rfc850(m): + g = m.group + yr = g(4) + + if len(yr) == 2: + yr = "19" + yr + + return ( + int(yr), # year + monmap[g(3)], # month + int(g(2)), # day + int(g(5)), # hour + int(g(6)), # minute + int(g(7)), # second + 0, + 0, + 0, + ) + + +# parsdate.parsedate - ~700/sec. +# parse_http_date - ~1333/sec. 
+ +weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] +monthname = [ + None, + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", +] + + +def build_http_date(when): + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when) + + return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + weekdayname[wd], + day, + monthname[month], + year, + hh, + mm, + ss, + ) + + +def parse_http_date(d): + d = d.lower() + m = rfc850_reg.match(d) + + if m and m.end() == len(d): + retval = int(calendar.timegm(unpack_rfc850(m))) + else: + m = rfc822_reg.match(d) + + if m and m.end() == len(d): + retval = int(calendar.timegm(unpack_rfc822(m))) + else: + return 0 + + return retval + + +# RFC 5234 Appendix B.1 "Core Rules": +# VCHAR = %x21-7E +# ; visible (printing) characters +vchar_re = VCHAR + +# RFC 7230 Section 3.2.6 "Field Value Components": +# quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE +# qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text +# obs-text = %x80-FF +# quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) +obs_text_re = OBS_TEXT + +# The '\\' between \x5b and \x5d is needed to escape \x5d (']') +qdtext_re = "[\t \x21\x23-\x5b\\\x5d-\x7e" + obs_text_re + "]" + +quoted_pair_re = r"\\" + "([\t " + vchar_re + obs_text_re + "])" +quoted_string_re = '"(?:(?:' + qdtext_re + ")|(?:" + quoted_pair_re + '))*"' + +quoted_string = re.compile(quoted_string_re) +quoted_pair = re.compile(quoted_pair_re) + + +def undquote(value): + if value.startswith('"') and value.endswith('"'): + # So it claims to be DQUOTE'ed, let's validate that + matches = quoted_string.match(value) + + if matches and matches.end() == len(value): + # Remove the DQUOTE's from the value + value = value[1:-1] + + # Remove all backslashes that are followed by a valid vchar or + # obs-text + value = quoted_pair.sub(r"\1", value) + + return value + elif not value.startswith('"') and not value.endswith('"'): + return value + + raise ValueError("Invalid quoting in value") + + +def cleanup_unix_socket(path): + try: + st = os.stat(path) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise # pragma: no cover + else: + if stat.S_ISSOCK(st.st_mode): + try: + os.remove(path) + except OSError: # pragma: no cover + # avoid race condition error during tests + pass + + +class Error: + code = 500 + reason = "Internal Server Error" + + def __init__(self, body): + self.body = body + + def to_response(self): + status = "%s %s" % (self.code, self.reason) + body = "%s\r\n\r\n%s" % (self.reason, self.body) + tag = "\r\n\r\n(generated by waitress)" + body = body + tag + headers = [("Content-Type", "text/plain")] + + return status, headers, body + + def wsgi_response(self, environ, start_response): + status, headers, body = self.to_response() + start_response(status, headers) + yield body + + +class BadRequest(Error): + code = 400 + reason = "Bad Request" + + +class RequestHeaderFieldsTooLarge(BadRequest): + code = 431 + reason = "Request Header Fields Too Large" + + +class RequestEntityTooLarge(BadRequest): + code = 413 + reason = "Request Entity Too Large" + + +class InternalServerError(Error): + code = 500 + reason = "Internal Server Error" + + +class ServerNotImplemented(Error): + code = 501 + reason = "Not Implemented" diff --git a/libs/waitress/wasyncore.py b/libs/waitress/wasyncore.py new file mode 100644 index 000000000..9a68c5171 --- /dev/null +++ b/libs/waitress/wasyncore.py @@ -0,0 +1,691 @@ +# -*- Mode: Python -*- +# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 
rushing Exp +# Author: Sam Rushing + +# ====================================================================== +# Copyright 1996 by Sam Rushing +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of Sam +# Rushing not be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior +# permission. +# +# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN +# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# ====================================================================== + +"""Basic infrastructure for asynchronous socket service clients and servers. + +There are only two ways to have a program on a single processor do "more +than one thing at a time". Multi-threaded programming is the simplest and +most popular way to do it, but there is another very different technique, +that lets you have nearly all the advantages of multi-threading, without +actually using multiple threads. it's really only practical if your program +is largely I/O bound. If your program is CPU bound, then pre-emptive +scheduled threads are probably what you really need. Network servers are +rarely CPU-bound, however. + +If your operating system supports the select() system call in its I/O +library (and nearly all do), then you can use it to juggle multiple +communication channels at once; doing other work while your I/O is taking +place in the "background." Although this strategy can seem strange and +complex, especially at first, it is in many ways easier to understand and +control than multi-threaded programming. The module documented here solves +many of the difficult problems for you, making the task of building +sophisticated high-performance network servers and clients a snap. + +NB: this is a fork of asyncore from the stdlib that we've (the waitress +developers) named 'wasyncore' to ensure forward compatibility, as asyncore +in the stdlib will be dropped soon. It is neither a copy of the 2.7 asyncore +nor the 3.X asyncore; it is a version compatible with either 2.7 or 3.X. +""" + +from errno import ( + EAGAIN, + EALREADY, + EBADF, + ECONNABORTED, + ECONNRESET, + EINPROGRESS, + EINTR, + EINVAL, + EISCONN, + ENOTCONN, + EPIPE, + ESHUTDOWN, + EWOULDBLOCK, + errorcode, +) +import logging +import os +import select +import socket +import sys +import time +import warnings + +from . 
import compat, utilities + +_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, EBADF}) + +try: + socket_map +except NameError: + socket_map = {} + + +def _strerror(err): + try: + return os.strerror(err) + except (TypeError, ValueError, OverflowError, NameError): + return "Unknown error %s" % err + + +class ExitNow(Exception): + pass + + +_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit) + + +def read(obj): + try: + obj.handle_read_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + + +def write(obj): + try: + obj.handle_write_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + + +def _exception(obj): + try: + obj.handle_expt_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + + +def readwrite(obj, flags): + try: + if flags & select.POLLIN: + obj.handle_read_event() + if flags & select.POLLOUT: + obj.handle_write_event() + if flags & select.POLLPRI: + obj.handle_expt_event() + if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL): + obj.handle_close() + except OSError as e: + if e.args[0] not in _DISCONNECTED: + obj.handle_error() + else: + obj.handle_close() + except _reraised_exceptions: + raise + except: + obj.handle_error() + + +def poll(timeout=0.0, map=None): + if map is None: # pragma: no cover + map = socket_map + if map: + r = [] + w = [] + e = [] + for fd, obj in list(map.items()): # list() call FBO py3 + is_r = obj.readable() + is_w = obj.writable() + if is_r: + r.append(fd) + # accepting sockets should not be writable + if is_w and not obj.accepting: + w.append(fd) + if is_r or is_w: + e.append(fd) + if [] == r == w == e: + time.sleep(timeout) + return + + try: + r, w, e = select.select(r, w, e, timeout) + except OSError as err: + if err.args[0] != EINTR: + raise + else: + return + + for fd in r: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + read(obj) + + for fd in w: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + write(obj) + + for fd in e: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + _exception(obj) + + +def poll2(timeout=0.0, map=None): + # Use the poll() support added to the select module in Python 2.0 + if map is None: # pragma: no cover + map = socket_map + if timeout is not None: + # timeout is in milliseconds + timeout = int(timeout * 1000) + pollster = select.poll() + if map: + for fd, obj in list(map.items()): + flags = 0 + if obj.readable(): + flags |= select.POLLIN | select.POLLPRI + # accepting sockets should not be writable + if obj.writable() and not obj.accepting: + flags |= select.POLLOUT + if flags: + pollster.register(fd, flags) + + try: + r = pollster.poll(timeout) + except OSError as err: + if err.args[0] != EINTR: + raise + r = [] + + for fd, flags in r: + obj = map.get(fd) + if obj is None: # pragma: no cover + continue + readwrite(obj, flags) + + +poll3 = poll2 # Alias for backward compatibility + + +def loop(timeout=30.0, use_poll=False, map=None, count=None): + if map is None: # pragma: no cover + map = socket_map + + if use_poll and hasattr(select, "poll"): + poll_fun = poll2 + else: + poll_fun = poll + + if count is None: # pragma: no cover + while map: + poll_fun(timeout, map) + + else: + while map and count > 0: + poll_fun(timeout, map) + count = count - 1 + + +def compact_traceback(): + t, v, tb = sys.exc_info() + tbinfo = [] + if not tb: # pragma: no cover + raise AssertionError("traceback does not exist") + while tb: + tbinfo.append( + ( + 
tb.tb_frame.f_code.co_filename, + tb.tb_frame.f_code.co_name, + str(tb.tb_lineno), + ) + ) + tb = tb.tb_next + + # just to be safe + del tb + + file, function, line = tbinfo[-1] + info = " ".join(["[%s|%s|%s]" % x for x in tbinfo]) + return (file, function, line), t, v, info + + +class dispatcher: + + debug = False + connected = False + accepting = False + connecting = False + closing = False + addr = None + ignore_log_types = frozenset({"warning"}) + logger = utilities.logger + compact_traceback = staticmethod(compact_traceback) # for testing + + def __init__(self, sock=None, map=None): + if map is None: # pragma: no cover + self._map = socket_map + else: + self._map = map + + self._fileno = None + + if sock: + # Set to nonblocking just to make sure for cases where we + # get a socket from a blocking source. + sock.setblocking(0) + self.set_socket(sock, map) + self.connected = True + # The constructor no longer requires that the socket + # passed be connected. + try: + self.addr = sock.getpeername() + except OSError as err: + if err.args[0] in (ENOTCONN, EINVAL): + # To handle the case where we got an unconnected + # socket. + self.connected = False + else: + # The socket is broken in some unknown way, alert + # the user and remove it from the map (to prevent + # polling of broken sockets). + self.del_channel(map) + raise + else: + self.socket = None + + def __repr__(self): + status = [self.__class__.__module__ + "." + self.__class__.__qualname__] + if self.accepting and self.addr: + status.append("listening") + elif self.connected: + status.append("connected") + if self.addr is not None: + try: + status.append("%s:%d" % self.addr) + except TypeError: # pragma: no cover + status.append(repr(self.addr)) + return "<%s at %#x>" % (" ".join(status), id(self)) + + __str__ = __repr__ + + def add_channel(self, map=None): + # self.log_info('adding channel %s' % self) + if map is None: + map = self._map + map[self._fileno] = self + + def del_channel(self, map=None): + fd = self._fileno + if map is None: + map = self._map + if fd in map: + # self.log_info('closing channel %d:%s' % (fd, self)) + del map[fd] + self._fileno = None + + def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM): + self.family_and_type = family, type + sock = socket.socket(family, type) + sock.setblocking(0) + self.set_socket(sock) + + def set_socket(self, sock, map=None): + self.socket = sock + self._fileno = sock.fileno() + self.add_channel(map) + + def set_reuse_addr(self): + # try to re-use a server port if possible + try: + self.socket.setsockopt( + socket.SOL_SOCKET, + socket.SO_REUSEADDR, + self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1, + ) + except OSError: + pass + + # ================================================== + # predicates for select() + # these are used as filters for the lists of sockets + # to pass to select(). + # ================================================== + + def readable(self): + return True + + def writable(self): + return True + + # ================================================== + # socket object methods. 
+ # ================================================== + + def listen(self, num): + self.accepting = True + if os.name == "nt" and num > 5: # pragma: no cover + num = 5 + return self.socket.listen(num) + + def bind(self, addr): + self.addr = addr + return self.socket.bind(addr) + + def connect(self, address): + self.connected = False + self.connecting = True + err = self.socket.connect_ex(address) + if ( + err in (EINPROGRESS, EALREADY, EWOULDBLOCK) + or err == EINVAL + and os.name == "nt" + ): # pragma: no cover + self.addr = address + return + if err in (0, EISCONN): + self.addr = address + self.handle_connect_event() + else: + raise OSError(err, errorcode[err]) + + def accept(self): + # XXX can return either an address pair or None + try: + conn, addr = self.socket.accept() + except TypeError: + return None + except OSError as why: + if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN): + return None + else: + raise + else: + return conn, addr + + def send(self, data): + try: + result = self.socket.send(data) + return result + except OSError as why: + if why.args[0] == EWOULDBLOCK: + return 0 + elif why.args[0] in _DISCONNECTED: + self.handle_close() + return 0 + else: + raise + + def recv(self, buffer_size): + try: + data = self.socket.recv(buffer_size) + if not data: + # a closed connection is indicated by signaling + # a read condition, and having recv() return 0. + self.handle_close() + return b"" + else: + return data + except OSError as why: + # winsock sometimes raises ENOTCONN + if why.args[0] in _DISCONNECTED: + self.handle_close() + return b"" + else: + raise + + def close(self): + self.connected = False + self.accepting = False + self.connecting = False + self.del_channel() + if self.socket is not None: + try: + self.socket.close() + except OSError as why: + if why.args[0] not in (ENOTCONN, EBADF): + raise + + # log and log_info may be overridden to provide more sophisticated + # logging and warning methods. In general, log is for 'hit' logging + # and 'log_info' is for informational, warning and error logging. + + def log(self, message): + self.logger.log(logging.DEBUG, message) + + def log_info(self, message, type="info"): + severity = { + "info": logging.INFO, + "warning": logging.WARN, + "error": logging.ERROR, + } + self.logger.log(severity.get(type, logging.INFO), message) + + def handle_read_event(self): + if self.accepting: + # accepting sockets are never connected, they "spawn" new + # sockets that are connected + self.handle_accept() + elif not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_read() + else: + self.handle_read() + + def handle_connect_event(self): + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + raise OSError(err, _strerror(err)) + self.handle_connect() + self.connected = True + self.connecting = False + + def handle_write_event(self): + if self.accepting: + # Accepting sockets shouldn't get a write event. + # We will pretend it didn't happen. 
+ return + + if not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_write() + + def handle_expt_event(self): + # handle_expt_event() is called if there might be an error on the + # socket, or if there is OOB data + # check for the error condition first + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # we can get here when select.select() says that there is an + # exceptional condition on the socket + # since there is an error, we'll go ahead and close the socket + # like we would in a subclassed handle_read() that received no + # data + self.handle_close() + else: + self.handle_expt() + + def handle_error(self): + nil, t, v, tbinfo = self.compact_traceback() + + # sometimes a user repr method will crash. + try: + self_repr = repr(self) + except: # pragma: no cover + self_repr = "<__repr__(self) failed for object at %0x>" % id(self) + + self.log_info( + "uncaptured python exception, closing channel %s (%s:%s %s)" + % (self_repr, t, v, tbinfo), + "error", + ) + self.handle_close() + + def handle_expt(self): + self.log_info("unhandled incoming priority event", "warning") + + def handle_read(self): + self.log_info("unhandled read event", "warning") + + def handle_write(self): + self.log_info("unhandled write event", "warning") + + def handle_connect(self): + self.log_info("unhandled connect event", "warning") + + def handle_accept(self): + pair = self.accept() + if pair is not None: + self.handle_accepted(*pair) + + def handle_accepted(self, sock, addr): + sock.close() + self.log_info("unhandled accepted event", "warning") + + def handle_close(self): + self.log_info("unhandled close event", "warning") + self.close() + + +# --------------------------------------------------------------------------- +# adds simple buffered output capability, useful for simple clients. +# [for more sophisticated usage use asynchat.async_chat] +# --------------------------------------------------------------------------- + + +class dispatcher_with_send(dispatcher): + def __init__(self, sock=None, map=None): + dispatcher.__init__(self, sock, map) + self.out_buffer = b"" + + def initiate_send(self): + num_sent = 0 + num_sent = dispatcher.send(self, self.out_buffer[:65536]) + self.out_buffer = self.out_buffer[num_sent:] + + handle_write = initiate_send + + def writable(self): + return (not self.connected) or len(self.out_buffer) + + def send(self, data): + if self.debug: # pragma: no cover + self.log_info("sending %s" % repr(data)) + self.out_buffer = self.out_buffer + data + self.initiate_send() + + +def close_all(map=None, ignore_all=False): + if map is None: # pragma: no cover + map = socket_map + for x in list(map.values()): # list() FBO py3 + try: + x.close() + except OSError as x: + if x.args[0] == EBADF: + pass + elif not ignore_all: + raise + except _reraised_exceptions: + raise + except: + if not ignore_all: + raise + map.clear() + + +# Asynchronous File I/O: +# +# After a little research (reading man pages on various unixen, and +# digging through the linux kernel), I've determined that select() +# isn't meant for doing asynchronous file i/o. +# Heartening, though - reading linux/mm/filemap.c shows that linux +# supports asynchronous read-ahead. So _MOST_ of the time, the data +# will be sitting in memory for us already when we go to read it. +# +# What other OS's (besides NT) support async file i/o? [VMS?] +# +# Regardless, this is useful for pipes, and stdin/stdout... 
+ +if os.name == "posix": + + class file_wrapper: + # Here we override just enough to make a file + # look like a socket for the purposes of asyncore. + # The passed fd is automatically os.dup()'d + + def __init__(self, fd): + self.fd = os.dup(fd) + + def __del__(self): + if self.fd >= 0: + warnings.warn("unclosed file %r" % self, ResourceWarning) + self.close() + + def recv(self, *args): + return os.read(self.fd, *args) + + def send(self, *args): + return os.write(self.fd, *args) + + def getsockopt(self, level, optname, buflen=None): # pragma: no cover + if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen: + return 0 + raise NotImplementedError( + "Only asyncore specific behaviour " "implemented." + ) + + read = recv + write = send + + def close(self): + if self.fd < 0: + return + fd = self.fd + self.fd = -1 + os.close(fd) + + def fileno(self): + return self.fd + + class file_dispatcher(dispatcher): + def __init__(self, fd, map=None): + dispatcher.__init__(self, None, map) + self.connected = True + try: + fd = fd.fileno() + except AttributeError: + pass + self.set_file(fd) + # set it to non-blocking mode + os.set_blocking(fd, False) + + def set_file(self, fd): + self.socket = file_wrapper(fd) + self._fileno = self.socket.fileno() + self.add_channel() diff --git a/requirements.txt b/requirements.txt index 40076f2d4..c061d866e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,4 @@ setuptools -gevent>=21 -gevent-websocket>=0.10.1 lxml>=4.3.0 numpy>=1.12.0 webrtcvad-wheels>=2.0.10 \ No newline at end of file
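
For reference, a minimal sketch (not part of the patch) of how the vendored waitress serves a WSGI callable end to end: the environ dict comes from WSGITask.get_environment(), start_response() feeds the status and headers into Task.build_response_header(), and the returned iterable is emitted through Task.write(). The `app` callable and the host/port/url_prefix values below are illustrative placeholders, not Bazarr's actual wiring.

    # Minimal sketch, assuming waitress is importable as vendored in libs/waitress.
    # The app and the host/port/url_prefix values are placeholders for illustration.
    from waitress import serve


    def app(environ, start_response):
        # WSGITask.get_environment() builds `environ`; start_response() records
        # the status and headers that Task.write() later turns into the response.
        body = b"waitress is alive"
        start_response(
            "200 OK",
            [("Content-Type", "text/plain"), ("Content-Length", str(len(body)))],
        )
        return [body]


    if __name__ == "__main__":
        # `threads` sizes the worker pool; `url_prefix` becomes SCRIPT_NAME in
        # WSGITask.get_environment(), with PATH_INFO stripped accordingly.
        serve(app, host="127.0.0.1", port=8080, threads=4, url_prefix="/bazarr")

Note that `wsgi.multithread` is set to True in the environ built above because waitress dispatches each request to a pool thread rather than a greenlet, which is consistent with the thread-based dispatcher and trigger machinery added in this diff.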