mirror of https://github.com/morpheus65535/bazarr
Merge development into master
commit e1f836dfea
@@ -23,6 +23,7 @@ from bs4 import BeautifulSoup as bso
 from get_args import args
 from config import settings, base_url, save_settings, get_settings
 from logger import empty_log
+from init import startTime

 from init import *
 import logging

@@ -606,6 +607,7 @@ class SystemStatus(Resource):
 system_status.update({'python_version': platform.python_version()})
 system_status.update({'bazarr_directory': os.path.dirname(os.path.dirname(__file__))})
 system_status.update({'bazarr_config_directory': args.config_dir})
+system_status.update({'start_time': startTime})
 return jsonify(data=system_status)

@@ -28,7 +28,7 @@ def create_app():
 else:
 app.config["DEBUG"] = False

-socketio.init_app(app, path=base_url.rstrip('/')+'/api/socket.io', cors_allowed_origins='*', async_mode='gevent')
+socketio.init_app(app, path=base_url.rstrip('/')+'/api/socket.io', cors_allowed_origins='*', async_mode='threading')
 return app
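The async_mode switch above moves Flask-SocketIO off gevent. A minimal sketch of the threading mode, assuming flask_socketio is installed (the app wiring here is illustrative, not Bazarr's actual factory):

    from flask import Flask
    from flask_socketio import SocketIO

    app = Flask(__name__)
    socketio = SocketIO()
    # 'threading' serves Socket.IO from plain worker threads, so the hosting
    # WSGI server no longer needs gevent monkey-patching.
    socketio.init_app(app, path='/api/socket.io', cors_allowed_origins='*',
                      async_mode='threading')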
@@ -185,7 +185,10 @@ defaults = {
 },
 'titulky': {
 'username': '',
-'password': ''
+'password': '',
+'skip_wrong_fps': 'False',
+'approved_only': 'False',
+'multithreading': 'True'
 },
 'subsync': {
 'use_subsync': 'False',
@@ -3,7 +3,7 @@ import atexit
 import json
 import ast
 import logging
-import gevent
+import time
 from peewee import *
 from playhouse.sqliteq import SqliteQueueDatabase
 from playhouse.shortcuts import model_to_dict

@@ -15,7 +15,7 @@ from config import settings, get_array_from
 from get_args import args

 database = SqliteQueueDatabase(os.path.join(args.config_dir, 'db', 'bazarr.db'),
-use_gevent=True,
+use_gevent=False,
 autostart=True,
 queue_max_size=256)
 migrator = SqliteMigrator(database)
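With use_gevent=False, SqliteQueueDatabase drains its write queue from a regular background thread instead of a greenlet, matching the move away from gevent. A minimal sketch, assuming peewee's playhouse extensions are available (the database path is a placeholder):

    from playhouse.sqliteq import SqliteQueueDatabase

    database = SqliteQueueDatabase('bazarr.db',
                                   use_gevent=False,    # writer runs on a threading.Thread
                                   autostart=True,      # start the writer immediately
                                   queue_max_size=256)  # bound the pending-write queue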
@ -284,7 +284,7 @@ def init_db():
|
||||||
if not System.select().count():
|
if not System.select().count():
|
||||||
System.insert({System.configured: '0', System.updated: '0'}).execute()
|
System.insert({System.configured: '0', System.updated: '0'}).execute()
|
||||||
except:
|
except:
|
||||||
gevent.sleep(0.1)
|
time.sleep(0.1)
|
||||||
else:
|
else:
|
||||||
tables_created = True
|
tables_created = True
|
||||||
|
|
||||||
|
|
|
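The retry above simply backs off until the writer thread is ready. A self-contained sketch of the same loop, with a stub standing in for the real peewee DDL calls:

    import time

    def create_tables():
        """Hypothetical stand-in for the table creation done by init_db()."""
        ...

    tables_created = False
    while not tables_created:
        try:
            create_tables()
        except Exception:
            time.sleep(0.1)  # back off briefly, then retry
        else:
            tables_created = True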
@@ -3,7 +3,6 @@
 import os
 import requests
 import logging
-from gevent import sleep
 from peewee import DoesNotExist

 from database import get_exclusion_clause, TableEpisodes, TableShows

@@ -45,7 +44,6 @@ def sync_episodes(series_id=None, send_event=True):

 series_count = len(seriesIdList)
 for i, seriesId in enumerate(seriesIdList):
-sleep()
 if send_event:
 show_progress(id='episodes_progress',
 header='Syncing episodes...',

@@ -70,7 +68,6 @@ def sync_episodes(series_id=None, send_event=True):
 episode['episodeFile'] = item[0]

 for episode in episodes:
-sleep()
 if 'hasFile' in episode:
 if episode['hasFile'] is True:
 if 'episodeFile' in episode:

@@ -91,7 +88,6 @@ def sync_episodes(series_id=None, send_event=True):
 removed_episodes = list(set(current_episodes_db_list) - set(current_episodes_sonarr))

 for removed_episode in removed_episodes:
-sleep()
 episode_to_delete = TableEpisodes.select(TableEpisodes.sonarrSeriesId, TableEpisodes.sonarrEpisodeId)\
 .where(TableEpisodes.sonarrEpisodeId == removed_episode)\
 .dicts()\

@@ -124,7 +120,6 @@ def sync_episodes(series_id=None, send_event=True):
 episodes_to_update_list = [i for i in episodes_to_update if i not in episode_in_db_list]

 for updated_episode in episodes_to_update_list:
-sleep()
 TableEpisodes.update(updated_episode).where(TableEpisodes.sonarrEpisodeId ==
 updated_episode['sonarrEpisodeId']).execute()
 altered_episodes.append([updated_episode['sonarrEpisodeId'],

@@ -133,7 +128,6 @@ def sync_episodes(series_id=None, send_event=True):

 # Insert new episodes in DB
 for added_episode in episodes_to_add:
-sleep()
 result = TableEpisodes.insert(added_episode).on_conflict(action='IGNORE').execute()
 if result > 0:
 altered_episodes.append([added_episode['sonarrEpisodeId'],

@@ -147,7 +141,6 @@ def sync_episodes(series_id=None, send_event=True):

 # Store subtitles for added or modified episodes
 for i, altered_episode in enumerate(altered_episodes, 1):
-sleep()
 store_subtitles(altered_episode[1], path_mappings.path_replace(altered_episode[1]))

 logging.debug('BAZARR All episodes synced from Sonarr into database.')
@@ -5,7 +5,6 @@ import requests
 import logging
 import operator
 from functools import reduce
-from gevent import sleep
 from peewee import DoesNotExist

 from config import settings, url_radarr

@@ -17,6 +16,7 @@ from get_rootfolder import check_radarr_rootfolder
 from get_subtitle import movies_download_subtitles
 from database import get_exclusion_clause, TableMovies
 from event_handler import event_stream, show_progress, hide_progress
+from get_languages import language_from_alpha2

 headers = {"User-Agent": os.environ["SZ_USER_AGENT"]}

@@ -64,7 +64,6 @@ def update_movies(send_event=True):
 # Build new and updated movies
 movies_count = len(movies)
 for i, movie in enumerate(movies):
-sleep()
 if send_event:
 show_progress(id='movies_progress',
 header='Syncing movies...',

@@ -96,7 +95,6 @@ def update_movies(send_event=True):
 removed_movies = list(set(current_movies_db_list) - set(current_movies_radarr))

 for removed_movie in removed_movies:
-sleep()
 TableMovies.delete().where(TableMovies.tmdbId == removed_movie).execute()

 # Update movies in DB

@@ -129,7 +127,6 @@ def update_movies(send_event=True):
 movies_to_update_list = [i for i in movies_to_update if i not in movies_in_db_list]

 for updated_movie in movies_to_update_list:
-sleep()
 TableMovies.update(updated_movie).where(TableMovies.tmdbId == updated_movie['tmdbId']).execute()
 altered_movies.append([updated_movie['tmdbId'],
 updated_movie['path'],

@@ -138,7 +135,6 @@ def update_movies(send_event=True):

 # Insert new movies in DB
 for added_movie in movies_to_add:
-sleep()
 result = TableMovies.insert(added_movie).on_conflict(action='IGNORE').execute()
 if result > 0:
 altered_movies.append([added_movie['tmdbId'],

@@ -153,7 +149,6 @@ def update_movies(send_event=True):

 # Store subtitles for added or modified movies
 for i, altered_movie in enumerate(altered_movies, 1):
-sleep()
 store_subtitles_movie(altered_movie[1], path_mappings.path_replace_movie(altered_movie[1]))

 logging.debug('BAZARR All movies synced from Radarr into database.')

@@ -456,7 +451,10 @@ def movieParser(movie, action, tags_dict, movie_default_profile, audio_profiles)
 for item in movie['movieFile']['languages']:
 if isinstance(item, dict):
 if 'name' in item:
-audio_language.append(item['name'])
+language = item['name']
+if item['name'] == 'Portuguese (Brazil)':
+language = language_from_alpha2('pb')
+audio_language.append(language)

 tags = [d['label'] for d in tags_dict if d['id'] in movie['tags']]
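The movieParser change above normalizes Radarr's "Portuguese (Brazil)" label through Bazarr's language table. A sketch with a hypothetical stand-in for language_from_alpha2 (the real mapping lives in get_languages, and the returned name is assumed here):

    def language_from_alpha2(code):
        # hypothetical stand-in; Bazarr resolves this from its language table
        return {'pb': 'Brazilian Portuguese'}.get(code, code)

    audio_language = []
    for item in [{'name': 'Portuguese (Brazil)'}, {'name': 'English'}]:
        language = item['name']
        if item['name'] == 'Portuguese (Brazil)':
            language = language_from_alpha2('pb')  # normalize Radarr's label
        audio_language.append(language)
    print(audio_language)  # ['Brazilian Portuguese', 'English']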
@@ -1,6 +1,7 @@
 # coding=utf-8
 import os
 import datetime
+import pytz
 import logging
 import subliminal_patch
 import pretty

@@ -29,6 +30,9 @@ def time_until_end_of_day(dt=None):
 tomorrow = dt + datetime.timedelta(days=1)
 return datetime.datetime.combine(tomorrow, datetime.time.min) - dt

+# Titulky resets its download limits at the start of a new day from its perspective - the Europe/Prague timezone
+titulky_server_local_time = datetime.datetime.now(tz=pytz.timezone('Europe/Prague')).replace(tzinfo=None)  # Needs to convert to offset-naive dt
+titulky_limit_reset_datetime = time_until_end_of_day(dt=titulky_server_local_time)

 hours_until_end_of_day = time_until_end_of_day().seconds // 3600 + 1

@@ -65,8 +69,7 @@ PROVIDER_THROTTLE_MAP = {
 IPAddressBlocked : (datetime.timedelta(hours=1), "1 hours"),
 },
 "titulky" : {
-DownloadLimitExceeded: (
-datetime.timedelta(hours=hours_until_end_of_day), "{} hours".format(str(hours_until_end_of_day)))
+DownloadLimitExceeded: (titulky_limit_reset_datetime, f"{titulky_limit_reset_datetime.seconds // 3600 + 1} hours")
 },
 "legendasdivx" : {
 TooManyRequests : (datetime.timedelta(hours=3), "3 hours"),
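The throttle change anchors the Titulky reset window to that provider's own midnight rather than the local one. A worked example of the computation, assuming pytz is installed (it reuses the helper defined in the hunk above):

    import datetime
    import pytz

    def time_until_end_of_day(dt=None):
        dt = dt or datetime.datetime.now()
        tomorrow = dt + datetime.timedelta(days=1)
        return datetime.datetime.combine(tomorrow, datetime.time.min) - dt

    # Titulky resets limits at midnight Europe/Prague, so measure from there.
    prague_now = datetime.datetime.now(tz=pytz.timezone('Europe/Prague')).replace(tzinfo=None)
    reset_delta = time_until_end_of_day(dt=prague_now)
    print(f"{reset_delta.seconds // 3600 + 1} hours")  # human-readable throttle length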
@@ -183,6 +186,9 @@ def get_providers_auth():
 'titulky' : {
 'username': settings.titulky.username,
 'password': settings.titulky.password,
+'skip_wrong_fps': settings.titulky.getboolean('skip_wrong_fps'),
+'approved_only': settings.titulky.getboolean('approved_only'),
+'multithreading': settings.titulky.getboolean('multithreading'),
 },
 'titlovi' : {
 'username': settings.titlovi.username,
@@ -3,7 +3,6 @@
 import os
 import requests
 import logging
-from gevent import sleep
 from peewee import DoesNotExist

 from config import settings, url_sonarr

@@ -51,7 +50,6 @@ def update_series(send_event=True):

 series_count = len(series)
 for i, show in enumerate(series):
-sleep()
 if send_event:
 show_progress(id='series_progress',
 header='Syncing series...',

@@ -78,7 +76,6 @@ def update_series(send_event=True):
 removed_series = list(set(current_shows_db_list) - set(current_shows_sonarr))

 for series in removed_series:
-sleep()
 TableShows.delete().where(TableShows.sonarrSeriesId == series).execute()
 if send_event:
 event_stream(type='series', action='delete', payload=series)

@@ -106,7 +103,6 @@ def update_series(send_event=True):
 series_to_update_list = [i for i in series_to_update if i not in series_in_db_list]

 for updated_series in series_to_update_list:
-sleep()
 TableShows.update(updated_series).where(TableShows.sonarrSeriesId ==
 updated_series['sonarrSeriesId']).execute()
 if send_event:

@@ -114,7 +110,6 @@ def update_series(send_event=True):

 # Insert new series in DB
 for added_series in series_to_add:
-sleep()
 result = TableShows.insert(added_series).on_conflict(action='IGNORE').execute()
 if result:
 list_missing_subtitles(no=added_series['sonarrSeriesId'])
@@ -271,7 +271,9 @@ def download_subtitle(path, language, audio_language, hi, forced, providers, pro
 reversed_path = path_mappings.path_replace_reverse(path)
 reversed_subtitles_path = path_mappings.path_replace_reverse(downloaded_path)
 notify_sonarr(episode_metadata['sonarrSeriesId'])
-event_stream(type='episode-wanted', action='delete', payload=episode_metadata['sonarrEpisodeId'])
+event_stream(type='series', action='update', payload=episode_metadata['sonarrSeriesId'])
+event_stream(type='episode-wanted', action='delete',
+payload=episode_metadata['sonarrEpisodeId'])

 else:
 reversed_path = path_mappings.path_replace_reverse_movie(path)

@@ -717,10 +719,14 @@ def manual_upload_subtitle(path, language, forced, hi, title, scene_name, media_
 reversed_path = path_mappings.path_replace_reverse(path)
 reversed_subtitles_path = path_mappings.path_replace_reverse(subtitle_path)
 notify_sonarr(episode_metadata['sonarrSeriesId'])
+event_stream(type='series', action='update', payload=episode_metadata['sonarrSeriesId'])
+event_stream(type='episode-wanted', action='delete', payload=episode_metadata['sonarrEpisodeId'])
 else:
 reversed_path = path_mappings.path_replace_reverse_movie(path)
 reversed_subtitles_path = path_mappings.path_replace_reverse_movie(subtitle_path)
 notify_radarr(movie_metadata['radarrId'])
+event_stream(type='movie', action='update', payload=movie_metadata['radarrId'])
+event_stream(type='movie-wanted', action='delete', payload=movie_metadata['radarrId'])

 return message, reversed_path, reversed_subtitles_path

@@ -1066,6 +1072,7 @@ def wanted_download_subtitles(sonarr_episode_id):
 store_subtitles(episode['path'], path_mappings.path_replace(episode['path']))
 history_log(1, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message, path,
 language_code, provider, score, subs_id, subs_path)
+event_stream(type='series', action='update', payload=episode['sonarrSeriesId'])
 event_stream(type='episode-wanted', action='delete', payload=episode['sonarrEpisodeId'])
 send_notifications(episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message)
 else:
@@ -14,6 +14,11 @@ from helper import path_mappings
 from dogpile.cache.region import register_backend as register_cache_backend
 import subliminal
 import datetime
+import time

+# set start time global variable as epoch
+global startTime
+startTime = time.time()
+
 # set subliminal_patch user agent
 os.environ["SZ_USER_AGENT"] = "Bazarr/{}".format(os.environ["BAZARR_VERSION"])
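startTime is plain epoch seconds; the SystemStatus hunk earlier exposes it as start_time, and the frontend turns it into an uptime string. A standard-library sketch of the same computation:

    import time

    startTime = time.time()  # epoch seconds, as set in init.py

    def uptime(start):
        seconds = int(time.time() - start)
        days, seconds = divmod(seconds, 86400)
        hours, seconds = divmod(seconds, 3600)
        minutes, seconds = divmod(seconds, 60)
        return f"{days}d {hours:02}:{minutes:02}:{seconds:02}"

    print(uptime(startTime))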
@ -54,7 +59,7 @@ def is_virtualenv():
|
||||||
# deploy requirements.txt
|
# deploy requirements.txt
|
||||||
if not args.no_update:
|
if not args.no_update:
|
||||||
try:
|
try:
|
||||||
import lxml, numpy, webrtcvad, gevent, geventwebsocket, setuptools
|
import lxml, numpy, webrtcvad, setuptools
|
||||||
except ImportError:
|
except ImportError:
|
||||||
try:
|
try:
|
||||||
import pip
|
import pip
|
||||||
|
|
|
@@ -8,7 +8,6 @@ import re
 from guess_language import guess_language
 from subliminal_patch import core, search_external_subtitles
 from subzero.language import Language
-from gevent import sleep

 from custom_lang import CustomLanguage
 from database import get_profiles_list, get_profile_cutoff, TableEpisodes, TableShows, TableMovies

@@ -19,6 +18,7 @@ from helper import path_mappings, get_subtitle_destination_folder
 from embedded_subs_reader import embedded_subs_reader
 from event_handler import event_stream, show_progress, hide_progress
 from charamel import Detector
+from peewee import DoesNotExist

 gc.enable()

@@ -37,33 +37,39 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
 .where(TableEpisodes.path == original_path)\
 .dicts()\
 .get()
-subtitle_languages = embedded_subs_reader(reversed_path,
-file_size=item['file_size'],
-episode_file_id=item['episode_file_id'],
-use_cache=use_cache)
-for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
-try:
-if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
-(settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() ==
-"vobsub") or \
-(settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() ==
-"ass"):
-logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language)))
-continue
+except DoesNotExist:
+logging.exception(f"BAZARR error when trying to select this episode from database: {reversed_path}")
+else:
+try:
+subtitle_languages = embedded_subs_reader(reversed_path,
+file_size=item['file_size'],
+episode_file_id=item['episode_file_id'],
+use_cache=use_cache)
+for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
+try:
+if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
+(settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() ==
+"vobsub") or \
+(settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() ==
+"ass"):
+logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language)))
+continue

 if alpha2_from_alpha3(subtitle_language) is not None:
 lang = str(alpha2_from_alpha3(subtitle_language))
 if subtitle_forced:
 lang = lang + ":forced"
 if subtitle_hi:
 lang = lang + ":hi"
 logging.debug("BAZARR embedded subtitles detected: " + lang)
 actual_subtitles.append([lang, None])
 except Exception as error:
 logging.debug("BAZARR unable to index this unrecognized language: %s (%s)", subtitle_language, error)
 except Exception as e:
 logging.exception(
-"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path))
+"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1],
+reversed_path))
+pass
 try:
 dest_folder = get_subtitle_destination_folder()
 core.CUSTOM_PATHS = [dest_folder] if dest_folder else []
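The restructure above wraps the database lookup in try/except/else so a missing row is reported once and the embedded-subtitle scan only runs when the episode exists. A minimal sketch of that pattern with an in-memory peewee model:

    import logging
    from peewee import SqliteDatabase, Model, TextField, DoesNotExist

    db = SqliteDatabase(':memory:')

    class TableEpisodes(Model):
        path = TextField()
        class Meta:
            database = db

    db.create_tables([TableEpisodes])

    try:
        item = TableEpisodes.select().where(TableEpisodes.path == '/missing.mkv').dicts().get()
    except DoesNotExist:
        logging.error("episode not found in database")
    else:
        ...  # only here does the heavy embedded-subtitle work run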
@@ -131,35 +137,40 @@ def store_subtitles_movie(original_path, reversed_path, use_cache=True):
 .where(TableMovies.path == original_path)\
 .dicts()\
 .get()
-subtitle_languages = embedded_subs_reader(reversed_path,
-file_size=item['file_size'],
-movie_file_id=item['movie_file_id'],
-use_cache=use_cache)
-for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
-try:
-if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
-(settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() ==
-"vobsub") or \
-(settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() ==
-"ass"):
-logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language)))
-continue
+except DoesNotExist:
+logging.exception(f"BAZARR error when trying to select this movie from database: {reversed_path}")
+else:
+try:
+subtitle_languages = embedded_subs_reader(reversed_path,
+file_size=item['file_size'],
+movie_file_id=item['movie_file_id'],
+use_cache=use_cache)
+for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
+try:
+if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
+(settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() ==
+"vobsub") or \
+(settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() ==
+"ass"):
+logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language)))
+continue

 if alpha2_from_alpha3(subtitle_language) is not None:
 lang = str(alpha2_from_alpha3(subtitle_language))
 if subtitle_forced:
 lang = lang + ':forced'
 if subtitle_hi:
 lang = lang + ':hi'
 logging.debug("BAZARR embedded subtitles detected: " + lang)
 actual_subtitles.append([lang, None])
 except:
 logging.debug("BAZARR unable to index this unrecognized language: " + subtitle_language)
 pass
 except Exception:
 logging.exception(
-"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path))
-pass
+"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1],
+reversed_path))
+pass

 try:
 dest_folder = get_subtitle_destination_folder() or ''
@@ -237,7 +248,6 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
 use_embedded_subs = settings.general.getboolean('use_embedded_subs')

 for episode_subtitles in episodes_subtitles:
-sleep()
 missing_subtitles_text = '[]'
 if episode_subtitles['profileId']:
 # get desired subtitles

@@ -348,7 +358,6 @@ def list_missing_subtitles_movies(no=None, send_event=True):
 use_embedded_subs = settings.general.getboolean('use_embedded_subs')

 for movie_subtitles in movies_subtitles:
-sleep()
 missing_subtitles_text = '[]'
 if movie_subtitles['profileId']:
 # get desired subtitles

@@ -416,7 +425,7 @@ def list_missing_subtitles_movies(no=None, send_event=True):

 # remove missing that have forced or hi subtitles for this language in existing
 for item in actual_subtitles_list:
-if item[1] == 'True' or item[2] == 'True':
+if item[2] == 'True':
 try:
 missing_subtitles_list.remove([item[0], 'False', 'False'])
 except ValueError:

@@ -450,7 +459,6 @@ def series_full_scan_subtitles():

 count_episodes = len(episodes)
 for i, episode in enumerate(episodes):
-sleep()
 show_progress(id='episodes_disk_scan',
 header='Full disk scan...',
 name='Episodes subtitles',

@@ -470,7 +478,6 @@ def movies_full_scan_subtitles():

 count_movies = len(movies)
 for i, movie in enumerate(movies):
-sleep()
 show_progress(id='movies_disk_scan',
 header='Full disk scan...',
 name='Movies subtitles',

@@ -491,7 +498,6 @@ def series_scan_subtitles(no):
 .dicts()

 for episode in episodes:
-sleep()
 store_subtitles(episode['path'], path_mappings.path_replace(episode['path']), use_cache=False)

@@ -502,7 +508,6 @@ def movies_scan_subtitles(no):
 .dicts()

 for movie in movies:
-sleep()
 store_subtitles_movie(movie['path'], path_mappings.path_replace_movie(movie['path']), use_cache=False)
@@ -117,10 +117,8 @@ def configure_logging(debug=False):
 logging.getLogger("srt").setLevel(logging.ERROR)
 logging.getLogger("SignalRCoreClient").setLevel(logging.CRITICAL)
 logging.getLogger("websocket").setLevel(logging.CRITICAL)
-logging.getLogger("geventwebsocket.handler").setLevel(logging.WARNING)

-logging.getLogger("geventwebsocket.handler").setLevel(logging.WARNING)
-logging.getLogger("engineio.server").setLevel(logging.WARNING)
+logging.getLogger("waitress").setLevel(logging.ERROR)
 logging.getLogger("knowit").setLevel(logging.CRITICAL)
 logging.getLogger("enzyme").setLevel(logging.CRITICAL)
 logging.getLogger("guessit").setLevel(logging.WARNING)
@@ -1,13 +1,5 @@
 # coding=utf-8

-# Gevent monkey patch if gevent available. If not, it will be installed on during the init process.
-try:
-from gevent import monkey, Greenlet, joinall
-except ImportError:
-pass
-else:
-monkey.patch_all()
-
 import os

 bazarr_version = 'unknown'

@@ -34,6 +26,7 @@ from urllib.parse import unquote
 from get_languages import load_language_in_db
 from flask import make_response, request, redirect, abort, render_template, Response, session, flash, url_for, \
 send_file, stream_with_context
+from threading import Thread

 from get_series import *
 from get_episodes import *

@@ -202,11 +195,10 @@ def proxy(protocol, url):
 return dict(status=False, error=result.raise_for_status())

-greenlets = []
 if settings.general.getboolean('use_sonarr'):
-greenlets.append(Greenlet.spawn(sonarr_signalr_client.start))
+Thread(target=sonarr_signalr_client.start).start()
 if settings.general.getboolean('use_radarr'):
-greenlets.append(Greenlet.spawn(radarr_signalr_client.start))
+Thread(target=radarr_signalr_client.start).start()

 if __name__ == "__main__":
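With the monkey-patching block removed, the SignalR clients run on ordinary threads. A sketch of the swap, assuming a client object whose start() blocks while it listens (the stub class is illustrative):

    from threading import Thread

    class SignalrClientStub:
        def start(self):
            ...  # connect and block, reconnecting on errors

    sonarr_signalr_client = SignalrClientStub()
    Thread(target=sonarr_signalr_client.start).start()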
@@ -12,7 +12,7 @@ if not args.no_update:
 from check_update import check_if_new_update, check_releases
 else:
 from check_update import check_releases
-from apscheduler.schedulers.gevent import GeventScheduler
+from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.triggers.interval import IntervalTrigger
 from apscheduler.triggers.cron import CronTrigger
 from apscheduler.triggers.date import DateTrigger

@@ -30,7 +30,7 @@ class Scheduler:
 def __init__(self):
 self.__running_tasks = []

-self.aps_scheduler = GeventScheduler()
+self.aps_scheduler = BackgroundScheduler()

 # task listener
 def task_listener_add(event):
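BackgroundScheduler runs jobs on a thread pool and needs no gevent event loop. A minimal sketch, assuming APScheduler 3.x (the job itself is a placeholder):

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.triggers.interval import IntervalTrigger

    aps_scheduler = BackgroundScheduler()
    aps_scheduler.add_job(lambda: print("tick"), IntervalTrigger(seconds=60), id='tick')
    aps_scheduler.start()  # returns immediately; jobs fire on daemon threads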
@@ -4,8 +4,7 @@ import warnings
 import logging
 import os
 import io
-from gevent import pywsgi
-from geventwebsocket.handler import WebSocketHandler
+from waitress.server import create_server

 from get_args import args
 from config import settings, base_url

@@ -27,23 +26,23 @@ class Server:
 # Mute Python3 BrokenPipeError
 warnings.simplefilter("ignore", BrokenPipeError)

-self.server = pywsgi.WSGIServer((str(settings.general.ip),
-int(args.port) if args.port else int(settings.general.port)),
-app,
-handler_class=WebSocketHandler)
+self.server = create_server(app,
+host=str(settings.general.ip),
+port=int(args.port) if args.port else int(settings.general.port),
+threads=100)

 def start(self):
 try:
 logging.info(
 'BAZARR is started and waiting for request on http://' + str(settings.general.ip) + ':' + (str(
 args.port) if args.port else str(settings.general.port)) + str(base_url))
-self.server.serve_forever()
+self.server.run()
 except KeyboardInterrupt:
 self.shutdown()

 def shutdown(self):
 try:
-self.server.stop()
+self.server.close()
 except Exception as e:
 logging.error('BAZARR Cannot stop Waitress: ' + repr(e))
 else:
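Waitress replaces gevent's pywsgi server above. A sketch of the lifecycle, with a trivial WSGI app in place of Bazarr's: create_server() builds the listener without serving, run() blocks, close() releases the socket.

    from waitress.server import create_server

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    server = create_server(app, host='127.0.0.1', port=6767, threads=100)
    try:
        server.run()
    except KeyboardInterrupt:
        server.close()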
@@ -60,7 +59,7 @@ class Server:

 def restart(self):
 try:
-self.server.stop()
+self.server.close()
 except Exception as e:
 logging.error('BAZARR Cannot stop Waitress: ' + repr(e))
 else:
@@ -2,9 +2,9 @@

 import logging

-import gevent
 import json
 import os
+import time
 from requests import Session
 from signalr import Connection
 from requests.exceptions import ConnectionError

@@ -36,7 +36,6 @@ class SonarrSignalrClient:
 if get_sonarr_info.is_legacy():
 logging.warning('BAZARR can only sync from Sonarr v3 SignalR feed to get real-time update. You should '
 'consider upgrading your version({}).'.format(get_sonarr_info.version()))
-raise gevent.GreenletExit
 else:
 logging.info('BAZARR trying to connect to Sonarr SignalR feed...')
 self.configure()

@@ -44,14 +43,13 @@ class SonarrSignalrClient:
 try:
 self.connection.start()
 except ConnectionError:
-gevent.sleep(5)
+time.sleep(5)
 except json.decoder.JSONDecodeError:
 logging.error("BAZARR cannot parse JSON returned by SignalR feed. This is caused by a permissions "
 "issue when Sonarr try to access its /config/.config directory. You should fix "
 "permissions on that directory and restart Sonarr. Also, if you're a Docker image "
 "user, you should make sure you properly defined PUID/PGID environment variables. "
 "Otherwise, please contact Sonarr support.")
-raise gevent.GreenletExit
 else:
 logging.info('BAZARR SignalR client for Sonarr is connected and waiting for events.')
 finally:

@@ -107,7 +105,7 @@ class RadarrSignalrClient:
 try:
 self.connection.start()
 except ConnectionError:
-gevent.sleep(5)
+time.sleep(5)

 def stop(self):
 logging.info('BAZARR SignalR client for Radarr is now disconnected.')
@@ -315,8 +315,10 @@ class GetRadarrInfo:
 if 'version' in radarr_json:
 radarr_version = radarr_json['version']
 else:
-rv = url_radarr() + "/api/v3/system/status?apikey=" + settings.radarr.apikey
-radarr_version = requests.get(rv, timeout=60, verify=False, headers=headers).json()['version']
+raise json.decoder.JSONDecodeError
+except json.decoder.JSONDecodeError:
+rv = url_radarr() + "/api/v3/system/status?apikey=" + settings.radarr.apikey
+radarr_version = requests.get(rv, timeout=60, verify=False, headers=headers).json()['version']
 except Exception as e:
 logging.debug('BAZARR cannot get Radarr version')
 radarr_version = 'unknown'
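Raising JSONDecodeError when the legacy status payload lacks a version reuses the existing except branch as a fallback to the v3 endpoint. A sketch of that flow (URL and key are placeholders, and the exception is given the arguments its constructor requires):

    import json
    import requests

    def radarr_version(base_url, apikey):
        try:
            data = requests.get(f"{base_url}/api/system/status?apikey={apikey}", timeout=60).json()
            if 'version' not in data:
                raise json.decoder.JSONDecodeError('no version', '', 0)
            return data['version']
        except json.decoder.JSONDecodeError:
            # legacy endpoint had no version; retry against the v3 API path
            rv = f"{base_url}/api/v3/system/status?apikey={apikey}"
            return requests.get(rv, timeout=60).json()['version']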
@@ -384,6 +386,7 @@ def delete_subtitles(media_type, language, forced, hi, media_path, subtitles_pat
 subtitles_path=path_mappings.path_replace_reverse(subtitles_path))
 store_subtitles(path_mappings.path_replace_reverse(media_path), media_path)
 notify_sonarr(sonarr_series_id)
+event_stream(type='series', action='update', payload=sonarr_series_id)
 event_stream(type='episode-wanted', action='update', payload=sonarr_episode_id)
 return True
 else:

(File diff suppressed because it is too large.)
@@ -14,16 +14,18 @@
 "private": true,
 "homepage": "./",
 "dependencies": {
-"@fontsource/roboto": "^4.2.2",
+"@fontsource/roboto": "^4.5.1",
 "@fortawesome/fontawesome-svg-core": "^1.2",
 "@fortawesome/free-brands-svg-icons": "^5.15",
 "@fortawesome/free-regular-svg-icons": "^5.15",
 "@fortawesome/free-solid-svg-icons": "^5.15",
 "@fortawesome/react-fontawesome": "^0.1.11",
 "@reduxjs/toolkit": "^1.6",
-"axios": "^0.21",
+"axios": "^0.23",
 "bootstrap": "^4",
 "lodash": "^4",
+"moment": "^2.29.1",
+"package.json": "^2.0.1",
 "rc-slider": "^9.7",
 "react": "^17",
 "react-bootstrap": "^1",

@@ -32,7 +34,7 @@
 "react-redux": "^7.2",
 "react-router-dom": "^5.3",
 "react-scripts": "^4",
-"react-select": "^4",
+"react-select": "^5.0.1",
 "react-table": "^7",
 "recharts": "^2.0.8",
 "rooks": "^5.7.1",

@@ -49,7 +51,7 @@
 "@types/react-dom": "^17",
 "@types/react-helmet": "^6.1",
 "@types/react-router-dom": "^5",
-"@types/react-select": "^4.0.3",
+"@types/react-select": "^5.0.1",
 "@types/react-table": "^7",
 "http-proxy-middleware": "^2",
 "husky": "^7",
@@ -16,6 +16,7 @@ declare namespace System {
 python_version: string;
 radarr_version: string;
 sonarr_version: string;
+start_time: number;
 }

 interface Health {
@@ -94,7 +94,7 @@ const MovieDetailView: FunctionComponent<Props> = ({ match }) => {
 MoviesApi.action.bind(MoviesApi),
 { action: "scan-disk", radarrid: id }
 );
-dispatchTask("Scaning Disk...", [task], "Scaning...");
+dispatchTask("Scanning Disk...", [task], "Scanning...");
 }}
 >
 Scan Disk

@@ -92,8 +92,7 @@ const NotificationModal: FunctionComponent<ModalProps & BaseModalProps> = ({
 variant="danger"
 onClick={() => {
 if (current) {
-current.enabled = false;
-update(current);
+update({ ...current, enabled: false });
 }
 closeModal();
 }}
@@ -2,12 +2,13 @@ import { capitalize, isArray, isBoolean } from "lodash";
 import React, {
 FunctionComponent,
 useCallback,
+useEffect,
 useMemo,
 useState,
 } from "react";
 import { Button, Col, Container, Row } from "react-bootstrap";
 import { components } from "react-select";
-import { SelectComponents } from "react-select/src/components";
+import { SelectComponents } from "react-select/dist/declarations/src/components";
 import {
 BaseModal,
 Selector,

@@ -81,6 +82,10 @@ export const ProviderModal: FunctionComponent = () => {

 const [staged, setChange] = useState<LooseObject>({});

+useEffect(() => {
+setInfo(payload);
+}, [payload]);
+
 const [info, setInfo] = useState<Nullable<ProviderInfo>>(payload);

 useOnModalShow<ProviderInfo>((p) => setInfo(p), ModalKey);

@@ -213,11 +218,12 @@ export const ProviderModal: FunctionComponent = () => {
 }, [info]);

 const selectorComponents = useMemo<
-Partial<SelectComponents<ProviderInfo, false>>
+Partial<SelectComponents<ProviderInfo, false, any>>
 >(
 () => ({
 Option: ({ data, ...other }) => {
-const { label, value } = data as SelectorOption<ProviderInfo>;
+const { label, value } =
+data as unknown as SelectorOption<ProviderInfo>;
 return (
 <components.Option data={data} {...other}>
 {label}

@@ -220,7 +220,8 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
 key: "tusubtitulo",
 name: "Tusubtitulo.com",
 description:
-"LATAM Spanish / Spanish / English Subtitles Provider for TV Shows",
+"Provider requested to be removed from Bazarr so it will always return no subtitles. Could potentially come back in the future with an upcoming premium account.",
+// "LATAM Spanish / Spanish / English Subtitles Provider for TV Shows",
 },
 {
 key: "titulky",

@@ -229,6 +230,14 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
 defaultKey: {
 username: "",
 password: "",
+skip_wrong_fps: false,
+approved_only: false,
+multithreading: true,
+},
+keyNameOverride: {
+skip_wrong_fps: "Skip mismatching FPS",
+approved_only: "Skip unapproved subtitles",
+multithreading: "Enable multithreading",
 },
 },
 { key: "tvsubtitles", name: "TVSubtitles" },
@@ -6,9 +6,11 @@ import {
 } from "@fortawesome/free-brands-svg-icons";
 import { faPaperPlane } from "@fortawesome/free-solid-svg-icons";
 import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
-import React, { FunctionComponent } from "react";
+import moment from "moment";
+import React, { FunctionComponent, useState } from "react";
 import { Col, Container, Row } from "react-bootstrap";
 import { Helmet } from "react-helmet";
+import { useIntervalWhen } from "rooks";
 import { useSystemHealth, useSystemStatus } from "../../@redux/hooks";
 import { AsyncOverlay } from "../../components";
 import { GithubRepoRoot } from "../../constants";

@@ -69,6 +71,28 @@ const SystemStatusView: FunctionComponent<Props> = () => {
 const health = useSystemHealth();
 const status = useSystemStatus();

+const [uptime, setState] = useState<string>();
+const [intervalWhenState] = useState(true);
+
+useIntervalWhen(
+() => {
+if (status) {
+let duration = moment.duration(
+moment().utc().unix() - status.start_time,
+"seconds"
+),
+days = duration.days(),
+hours = duration.hours().toString().padStart(2, "0"),
+minutes = duration.minutes().toString().padStart(2, "0"),
+seconds = duration.seconds().toString().padStart(2, "0");
+setState(days + "d " + hours + ":" + minutes + ":" + seconds);
+}
+},
+1000,
+intervalWhenState,
+true
+);
+
 return (
 <Container className="p-5">
 <Helmet>

@@ -106,6 +130,9 @@ const SystemStatusView: FunctionComponent<Props> = () => {
 <CRow title="Bazarr Config Directory">
 <span>{status?.bazarr_config_directory}</span>
 </CRow>
+<CRow title="Uptime">
+<span>{uptime}</span>
+</CRow>
 </InfoContainer>
 </Row>
 <Row>
@@ -1,7 +1,7 @@
 import { isArray } from "lodash";
 import React, { useCallback, useMemo } from "react";
-import ReactSelect from "react-select";
-import { SelectComponents } from "react-select/src/components";
+import Select from "react-select";
+import { SelectComponents } from "react-select/dist/declarations/src/components";
 import "./selector.scss";

 export interface SelectorProps<T, M extends boolean> {

@@ -17,7 +17,7 @@ export interface SelectorProps<T, M extends boolean> {
 label?: (item: T) => string;
 defaultValue?: SelectorValueType<T, M>;
 value?: SelectorValueType<T, M>;
-components?: Partial<SelectComponents<T, M>>;
+components?: Partial<SelectComponents<T, M, any>>;
 }

 export function Selector<T = string, M extends boolean = false>(

@@ -69,15 +69,15 @@ export function Selector<T = string, M extends boolean = false>(
 [label, multiple, nameFromItems]
 );

-const defaultWrapper = useMemo(() => wrapper(defaultValue), [
-defaultValue,
-wrapper,
-]);
+const defaultWrapper = useMemo(
+() => wrapper(defaultValue),
+[defaultValue, wrapper]
+);

 const valueWrapper = useMemo(() => wrapper(value), [wrapper, value]);

 return (
-<ReactSelect
+<Select
 isLoading={loading}
 placeholder={placeholder}
 isSearchable={options.length >= 10}

@@ -92,7 +92,7 @@ export function Selector<T = string, M extends boolean = false>(
 className={`custom-selector w-100 ${className ?? ""}`}
 classNamePrefix="selector"
 onFocus={onFocus}
-onChange={(v) => {
+onChange={(v: SelectorOption<T>[]) => {
 if (onChange) {
 let res: T | T[] | null = null;
 if (isArray(v)) {

@@ -106,6 +106,6 @@ export function Selector<T = string, M extends boolean = false>(
 onChange(res as any);
 }
 }}
-></ReactSelect>
+></Select>
 );
 }
@@ -3,7 +3,7 @@ __all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDUL
            'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED',
            'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED',
            'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES',
-           'SchedulerEvent', 'JobEvent', 'JobExecutionEvent')
+           'SchedulerEvent', 'JobEvent', 'JobExecutionEvent', 'JobSubmissionEvent')


 EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0

@@ -3,12 +3,11 @@ from __future__ import absolute_import
 import sys

 from apscheduler.executors.base import BaseExecutor, run_job
+from apscheduler.util import iscoroutinefunction_partial

 try:
-    from asyncio import iscoroutinefunction
     from apscheduler.executors.base_py3 import run_coroutine_job
 except ImportError:
-    from trollius import iscoroutinefunction
     run_coroutine_job = None


@@ -46,7 +45,7 @@ class AsyncIOExecutor(BaseExecutor):
         else:
             self._run_job_success(job.id, events)

-        if iscoroutinefunction(job.func):
+        if iscoroutinefunction_partial(job.func):
             if run_coroutine_job is not None:
                 coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
                 f = self._eventloop.create_task(coro)

@@ -1,5 +1,6 @@
 import logging
 import sys
+import traceback
 from datetime import datetime, timedelta
 from traceback import format_tb

@@ -33,6 +34,7 @@ async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
             events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
                                             exception=exc, traceback=formatted_tb))
             logger.exception('Job "%s" raised an exception', job)
+            traceback.clear_frames(tb)
         else:
             events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
                                             retval=retval))

@@ -3,6 +3,11 @@ import concurrent.futures

 from apscheduler.executors.base import BaseExecutor, run_job

+try:
+    from concurrent.futures.process import BrokenProcessPool
+except ImportError:
+    BrokenProcessPool = None


 class BasePoolExecutor(BaseExecutor):
     @abstractmethod
@@ -19,7 +24,13 @@ class BasePoolExecutor(BaseExecutor):
         else:
             self._run_job_success(job.id, f.result())

-        f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
+        try:
+            f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
+        except BrokenProcessPool:
+            self._logger.warning('Process pool is broken; replacing pool with a fresh instance')
+            self._pool = self._pool.__class__(self._pool._max_workers)
+            f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
+
         f.add_done_callback(callback)

     def shutdown(self, wait=True):
@@ -33,10 +44,13 @@ class ThreadPoolExecutor(BasePoolExecutor):
     Plugin alias: ``threadpool``

     :param max_workers: the maximum number of spawned threads.
+    :param pool_kwargs: dict of keyword arguments to pass to the underlying
+        ThreadPoolExecutor constructor
     """

-    def __init__(self, max_workers=10):
-        pool = concurrent.futures.ThreadPoolExecutor(int(max_workers))
+    def __init__(self, max_workers=10, pool_kwargs=None):
+        pool_kwargs = pool_kwargs or {}
+        pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs)
         super(ThreadPoolExecutor, self).__init__(pool)


@@ -47,8 +61,11 @@ class ProcessPoolExecutor(BasePoolExecutor):
     Plugin alias: ``processpool``

     :param max_workers: the maximum number of spawned processes.
+    :param pool_kwargs: dict of keyword arguments to pass to the underlying
+        ProcessPoolExecutor constructor
     """

-    def __init__(self, max_workers=10):
-        pool = concurrent.futures.ProcessPoolExecutor(int(max_workers))
+    def __init__(self, max_workers=10, pool_kwargs=None):
+        pool_kwargs = pool_kwargs or {}
+        pool = concurrent.futures.ProcessPoolExecutor(int(max_workers), **pool_kwargs)
         super(ProcessPoolExecutor, self).__init__(pool)

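The pool hunk above guards against a worker process dying and poisoning the whole pool: it catches `BrokenProcessPool`, rebuilds the pool at the same size, and retries the submit once. A standalone sketch of that recover-and-retry pattern, stdlib only; the helper name is illustrative:

    from concurrent.futures import ProcessPoolExecutor
    from concurrent.futures.process import BrokenProcessPool

    def submit_with_recovery(pool, fn, *args):
        """Submit fn; if a dead worker broke the pool, replace it and retry once."""
        try:
            return pool, pool.submit(fn, *args)
        except BrokenProcessPool:
            pool = ProcessPoolExecutor(pool._max_workers)  # same size, fresh workers
            return pool, pool.submit(fn, *args)
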
@@ -8,10 +8,10 @@ from tornado.gen import convert_yielded
 from apscheduler.executors.base import BaseExecutor, run_job

 try:
-    from inspect import iscoroutinefunction
     from apscheduler.executors.base_py3 import run_coroutine_job
+    from apscheduler.util import iscoroutinefunction_partial
 except ImportError:
-    def iscoroutinefunction(func):
+    def iscoroutinefunction_partial(func):
         return False


@@ -44,7 +44,7 @@ class TornadoExecutor(BaseExecutor):
         else:
             self._run_job_success(job.id, events)

-        if iscoroutinefunction(job.func):
+        if iscoroutinefunction_partial(job.func):
             f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
         else:
             f = self.executor.submit(run_job, job, job._jobstore_alias, run_times,

@@ -1,4 +1,3 @@
-from collections import Iterable, Mapping
 from inspect import ismethod, isclass
 from uuid import uuid4

@@ -9,6 +8,11 @@ from apscheduler.util import (
     ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args,
     convert_to_datetime)

+try:
+    from collections.abc import Iterable, Mapping
+except ImportError:
+    from collections import Iterable, Mapping
+

 class Job(object):
     """
@@ -24,7 +28,7 @@ class Job(object):
     :var trigger: the trigger object that controls the schedule of this job
     :var str executor: the name of the executor that will run this job
     :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to
-        be late
+        be late (``None`` means "allow the job to run no matter how late it is")
     :var int max_instances: the maximum number of concurrently executing instances allowed for this
         job
     :var datetime.datetime next_run_time: the next scheduled run time of this job
@@ -36,7 +40,7 @@ class Job(object):

     __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref',
                  'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances',
-                 'next_run_time')
+                 'next_run_time', '__weakref__')

     def __init__(self, scheduler, id=None, **kwargs):
         super(Job, self).__init__()
@@ -238,8 +242,9 @@ class Job(object):

         # Instance methods cannot survive serialization as-is, so store the "self" argument
         # explicitly
-        if ismethod(self.func) and not isclass(self.func.__self__):
-            args = (self.func.__self__,) + tuple(self.args)
+        func = self.func
+        if ismethod(func) and not isclass(func.__self__) and obj_to_ref(func) == self.func_ref:
+            args = (func.__self__,) + tuple(self.args)
         else:
             args = self.args

@@ -54,7 +54,7 @@ class MongoDBJobStore(BaseJobStore):

     def start(self, scheduler, alias):
         super(MongoDBJobStore, self).start(scheduler, alias)
-        self.collection.ensure_index('next_run_time', sparse=True)
+        self.collection.create_index('next_run_time', sparse=True)

     @property
     def connection(self):
@@ -83,7 +83,7 @@ class MongoDBJobStore(BaseJobStore):

     def add_job(self, job):
         try:
-            self.collection.insert({
+            self.collection.insert_one({
                 '_id': job.id,
                 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
                 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
@@ -96,13 +96,13 @@ class MongoDBJobStore(BaseJobStore):
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
         }
-        result = self.collection.update({'_id': job.id}, {'$set': changes})
-        if result and result['n'] == 0:
+        result = self.collection.update_one({'_id': job.id}, {'$set': changes})
+        if result and result.matched_count == 0:
             raise JobLookupError(job.id)

     def remove_job(self, job_id):
-        result = self.collection.remove(job_id)
-        if result and result['n'] == 0:
+        result = self.collection.delete_one({'_id': job_id})
+        if result and result.deleted_count == 0:
             raise JobLookupError(job_id)

     def remove_all_jobs(self):

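These MongoDB hunks move from the pymongo 2.x methods (`insert`, `update`, `remove`, which returned plain dicts carrying an `'n'` count) to the 3.x CRUD API, whose result objects expose `matched_count` and `deleted_count`. A short sketch of the new result handling, assuming a reachable local MongoDB and illustrative database/collection names:

    from pymongo import MongoClient

    coll = MongoClient().apscheduler.jobs
    result = coll.update_one({'_id': 'job1'}, {'$set': {'next_run_time': None}})
    if result.matched_count == 0:  # pymongo 2.x checked result['n'] == 0
        print('no such job')
    result = coll.delete_one({'_id': 'job1'})
    if result.deleted_count == 0:
        print('nothing deleted')
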
@@ -14,7 +14,7 @@ except ImportError:  # pragma: nocover
     import pickle

 try:
-    from redis import StrictRedis
+    from redis import Redis
 except ImportError:  # pragma: nocover
     raise ImportError('RedisJobStore requires redis installed')

@@ -47,7 +47,7 @@ class RedisJobStore(BaseJobStore):
         self.pickle_protocol = pickle_protocol
         self.jobs_key = jobs_key
         self.run_times_key = run_times_key
-        self.redis = StrictRedis(db=int(db), **connect_args)
+        self.redis = Redis(db=int(db), **connect_args)

     def lookup_job(self, job_id):
         job_state = self.redis.hget(self.jobs_key, job_id)
@@ -81,7 +81,9 @@ class RedisJobStore(BaseJobStore):
             pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
                                                           self.pickle_protocol))
             if job.next_run_time:
-                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
+                pipe.zadd(self.run_times_key,
+                          {job.id: datetime_to_utc_timestamp(job.next_run_time)})

             pipe.execute()

     def update_job(self, job):
@@ -92,9 +94,11 @@ class RedisJobStore(BaseJobStore):
             pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
                                                           self.pickle_protocol))
             if job.next_run_time:
-                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
+                pipe.zadd(self.run_times_key,
+                          {job.id: datetime_to_utc_timestamp(job.next_run_time)})
             else:
                 pipe.zrem(self.run_times_key, job.id)

             pipe.execute()

     def remove_job(self, job_id):

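The `zadd` change tracks redis-py 3.x, where the command takes a single `{member: score}` mapping instead of positional score/member arguments. Sketch, with an illustrative key and timestamp:

    from redis import Redis

    r = Redis()
    r.zadd('apscheduler.run_times', {'job1': 1620000000.0})
    # redis-py 2.x spelling was: r.zadd('apscheduler.run_times', 1620000000.0, 'job1')
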
@@ -10,7 +10,7 @@ except ImportError:  # pragma: nocover
     import pickle

 try:
-    import rethinkdb as r
+    from rethinkdb import RethinkDB
 except ImportError:  # pragma: nocover
     raise ImportError('RethinkDBJobStore requires rethinkdb installed')

@@ -40,10 +40,12 @@ class RethinkDBJobStore(BaseJobStore):
             raise ValueError('The "table" parameter must not be empty')

         self.database = database
-        self.table = table
+        self.table_name = table
+        self.table = None
         self.client = client
         self.pickle_protocol = pickle_protocol
         self.connect_args = connect_args
+        self.r = RethinkDB()
         self.conn = None

     def start(self, scheduler, alias):
@@ -52,31 +54,31 @@ class RethinkDBJobStore(BaseJobStore):
         if self.client:
             self.conn = maybe_ref(self.client)
         else:
-            self.conn = r.connect(db=self.database, **self.connect_args)
+            self.conn = self.r.connect(db=self.database, **self.connect_args)

-        if self.database not in r.db_list().run(self.conn):
-            r.db_create(self.database).run(self.conn)
+        if self.database not in self.r.db_list().run(self.conn):
+            self.r.db_create(self.database).run(self.conn)

-        if self.table not in r.table_list().run(self.conn):
-            r.table_create(self.table).run(self.conn)
+        if self.table_name not in self.r.table_list().run(self.conn):
+            self.r.table_create(self.table_name).run(self.conn)

-        if 'next_run_time' not in r.table(self.table).index_list().run(self.conn):
-            r.table(self.table).index_create('next_run_time').run(self.conn)
+        if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn):
+            self.r.table(self.table_name).index_create('next_run_time').run(self.conn)

-        self.table = r.db(self.database).table(self.table)
+        self.table = self.r.db(self.database).table(self.table_name)

     def lookup_job(self, job_id):
         results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn))
         return self._reconstitute_job(results[0]['job_state']) if results else None

     def get_due_jobs(self, now):
-        return self._get_jobs(r.row['next_run_time'] <= datetime_to_utc_timestamp(now))
+        return self._get_jobs(self.r.row['next_run_time'] <= datetime_to_utc_timestamp(now))

     def get_next_run_time(self):
         results = list(
             self.table
-            .filter(r.row['next_run_time'] != None)  # flake8: noqa
-            .order_by(r.asc('next_run_time'))
+            .filter(self.r.row['next_run_time'] != None)  # noqa
+            .order_by(self.r.asc('next_run_time'))
             .map(lambda x: x['next_run_time'])
             .limit(1)
             .run(self.conn)
@@ -92,7 +94,7 @@ class RethinkDBJobStore(BaseJobStore):
         job_dict = {
             'id': job.id,
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
-            'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
+            'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
         }
         results = self.table.insert(job_dict).run(self.conn)
         if results['errors'] > 0:
@@ -101,7 +103,7 @@ class RethinkDBJobStore(BaseJobStore):
     def update_job(self, job):
         changes = {
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
-            'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
+            'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
         }
         results = self.table.get_all(job.id).update(changes).run(self.conn)
         skipped = False in map(lambda x: results[x] == 0, results.keys())
@@ -130,20 +132,20 @@ class RethinkDBJobStore(BaseJobStore):
     def _get_jobs(self, predicate=None):
         jobs = []
         failed_job_ids = []
-        query = (self.table.filter(r.row['next_run_time'] != None).filter(predicate) if
-                 predicate else self.table)
+        query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate)  # noqa
+                 if predicate else self.table)
         query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')

         for document in query.run(self.conn):
             try:
                 jobs.append(self._reconstitute_job(document['job_state']))
-            except:
+            except Exception:
                 self._logger.exception('Unable to restore job "%s" -- removing it', document['id'])
                 failed_job_ids.append(document['id'])

         # Remove all the jobs we failed to restore
         if failed_job_ids:
-            r.expr(failed_job_ids).for_each(
+            self.r.expr(failed_job_ids).for_each(
                 lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)

         return jobs

@@ -11,7 +11,7 @@ except ImportError:  # pragma: nocover

 try:
     from sqlalchemy import (
-        create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select)
+        create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select, and_)
     from sqlalchemy.exc import IntegrityError
     from sqlalchemy.sql.expression import null
 except ImportError:  # pragma: nocover
@@ -106,7 +106,7 @@ class SQLAlchemyJobStore(BaseJobStore):
         }).where(self.jobs_t.c.id == job.id)
         result = self.engine.execute(update)
         if result.rowcount == 0:
-            raise JobLookupError(id)
+            raise JobLookupError(job.id)

     def remove_job(self, job_id):
         delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
@@ -134,7 +134,7 @@ class SQLAlchemyJobStore(BaseJobStore):
         jobs = []
         selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\
             order_by(self.jobs_t.c.next_run_time)
-        selectable = selectable.where(*conditions) if conditions else selectable
+        selectable = selectable.where(and_(*conditions)) if conditions else selectable
         failed_job_ids = set()
         for row in self.engine.execute(selectable):
             try:

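Passing bare `*conditions` to `.where()` stopped being portable across SQLAlchemy versions, so the hunk wraps the condition list in an explicit `and_()`. A sketch of the same construction against an illustrative table definition (column types are assumptions, not taken from the commit):

    from sqlalchemy import MetaData, Table, Column, Unicode, Float, select, and_

    jobs_t = Table('apscheduler_jobs', MetaData(),
                   Column('id', Unicode(191), primary_key=True),
                   Column('next_run_time', Float(25), index=True))
    conditions = [jobs_t.c.next_run_time != None,  # noqa: E711
                  jobs_t.c.next_run_time <= 1620000000.0]
    stmt = select([jobs_t.c.id]).where(and_(*conditions))  # explicit conjunction
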
@@ -1,6 +1,5 @@
 from __future__ import absolute_import

-import os
 from datetime import datetime

 from pytz import utc
@@ -65,7 +64,7 @@ class ZooKeeperJobStore(BaseJobStore):

     def lookup_job(self, job_id):
         self._ensure_paths()
-        node_path = os.path.join(self.path, job_id)
+        node_path = self.path + "/" + str(job_id)
         try:
             content, _ = self.client.get(node_path)
             doc = pickle.loads(content)
@@ -92,7 +91,7 @@ class ZooKeeperJobStore(BaseJobStore):

     def add_job(self, job):
         self._ensure_paths()
-        node_path = os.path.join(self.path, str(job.id))
+        node_path = self.path + "/" + str(job.id)
         value = {
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': job.__getstate__()
@@ -105,7 +104,7 @@ class ZooKeeperJobStore(BaseJobStore):

     def update_job(self, job):
         self._ensure_paths()
-        node_path = os.path.join(self.path, str(job.id))
+        node_path = self.path + "/" + str(job.id)
         changes = {
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': job.__getstate__()
@@ -118,7 +117,7 @@ class ZooKeeperJobStore(BaseJobStore):

     def remove_job(self, job_id):
         self._ensure_paths()
-        node_path = os.path.join(self.path, str(job_id))
+        node_path = self.path + "/" + str(job_id)
         try:
             self.client.delete(node_path)
         except NoNodeError:
@@ -151,7 +150,7 @@ class ZooKeeperJobStore(BaseJobStore):
         all_ids = self.client.get_children(self.path)
         for node_name in all_ids:
             try:
-                node_path = os.path.join(self.path, node_name)
+                node_path = self.path + "/" + node_name
                 content, _ = self.client.get(node_path)
                 doc = pickle.loads(content)
                 job_def = {

@@ -38,13 +38,19 @@ class AsyncIOScheduler(BaseScheduler):
     _eventloop = None
     _timeout = None

+    def start(self, paused=False):
+        if not self._eventloop:
+            self._eventloop = asyncio.get_event_loop()
+
+        super(AsyncIOScheduler, self).start(paused)
+
     @run_in_event_loop
     def shutdown(self, wait=True):
         super(AsyncIOScheduler, self).shutdown(wait)
         self._stop_timer()

     def _configure(self, config):
-        self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
+        self._eventloop = maybe_ref(config.pop('event_loop', None))
         super(AsyncIOScheduler, self)._configure(config)

     def _start_timer(self, wait_seconds):

@@ -29,7 +29,9 @@ class BackgroundScheduler(BlockingScheduler):
         super(BackgroundScheduler, self)._configure(config)

     def start(self, *args, **kwargs):
-        self._event = Event()
+        if self._event is None or self._event.is_set():
+            self._event = Event()
+
         BaseScheduler.start(self, *args, **kwargs)
         self._thread = Thread(target=self._main_loop, name='APScheduler')
         self._thread.daemon = self._daemon

@@ -1,7 +1,6 @@
 from __future__ import print_function

 from abc import ABCMeta, abstractmethod
-from collections import MutableMapping
 from threading import RLock
 from datetime import datetime, timedelta
 from logging import getLogger
@@ -27,6 +26,11 @@ from apscheduler.events import (
     EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED,
     EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED)

+try:
+    from collections.abc import MutableMapping
+except ImportError:
+    from collections import MutableMapping
+
 #: constant indicating a scheduler's stopped state
 STATE_STOPPED = 0
 #: constant indicating a scheduler's running state (started and processing jobs)
@@ -82,6 +86,11 @@ class BaseScheduler(six.with_metaclass(ABCMeta)):
         self.state = STATE_STOPPED
         self.configure(gconfig, **options)

+    def __getstate__(self):
+        raise TypeError("Schedulers cannot be serialized. Ensure that you are not passing a "
+                        "scheduler instance as an argument to a job, or scheduling an instance "
+                        "method where the instance contains a scheduler as an attribute.")
+
     def configure(self, gconfig={}, prefix='apscheduler.', **options):
         """
         Reconfigures the scheduler with the given options.
@@ -398,7 +407,7 @@ class BaseScheduler(six.with_metaclass(ABCMeta)):
         :param str|unicode id: explicit identifier for the job (for modifying it later)
         :param str|unicode name: textual description of the job
         :param int misfire_grace_time: seconds after the designated runtime that the job is still
-            allowed to be run
+            allowed to be run (or ``None`` to allow the job to run no matter how late it is)
         :param bool coalesce: run once instead of many times if the scheduler determines that the
             job should be run more than once in succession
         :param int max_instances: maximum number of concurrently running instances allowed for this
@@ -594,14 +603,13 @@ class BaseScheduler(six.with_metaclass(ABCMeta)):
         """
         jobstore_alias = None
         with self._jobstores_lock:
+            # Check if the job is among the pending jobs
             if self.state == STATE_STOPPED:
-                # Check if the job is among the pending jobs
-                if self.state == STATE_STOPPED:
-                    for i, (job, alias, replace_existing) in enumerate(self._pending_jobs):
-                        if job.id == job_id and jobstore in (None, alias):
-                            del self._pending_jobs[i]
-                            jobstore_alias = alias
-                            break
+                for i, (job, alias, replace_existing) in enumerate(self._pending_jobs):
+                    if job.id == job_id and jobstore in (None, alias):
+                        del self._pending_jobs[i]
+                        jobstore_alias = alias
+                        break
             else:
                 # Otherwise, try to remove it from each store until it succeeds or we run out of
                 # stores to check

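The new `__getstate__` makes scheduler pickling fail loudly instead of silently serializing a live scheduler into a job store (which happens, for example, when a job is an instance method of an object that keeps the scheduler as an attribute). A toy demonstration of the mechanism, with an illustrative class name:

    import pickle

    class Unpicklable:
        def __getstate__(self):
            raise TypeError('Schedulers cannot be serialized.')

    try:
        pickle.dumps(Unpicklable())
    except TypeError as exc:
        print(exc)  # the error surfaces when the object is stored, with a clear message
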
@@ -14,7 +14,9 @@ class BlockingScheduler(BaseScheduler):
     _event = None

     def start(self, *args, **kwargs):
-        self._event = Event()
+        if self._event is None or self._event.is_set():
+            self._event = Event()
+
         super(BlockingScheduler, self).start(*args, **kwargs)
         self._main_loop()

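Both scheduler `start()` hunks (here and in `BackgroundScheduler` above) stop unconditionally replacing the shutdown `Event`: a fresh one is only created on first start or after a shutdown has set the old one, which lets a scheduler be restarted cleanly. The guard in isolation, as a sketch with illustrative module-level state:

    from threading import Event

    _event = None

    def start():
        global _event
        if _event is None or _event.is_set():  # first start, or restart after shutdown
            _event = Event()

    def shutdown():
        _event.set()  # wakes the main loop

    start(); shutdown(); start()
    assert not _event.is_set()  # the restart got a fresh, unset Event
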
@@ -9,9 +9,13 @@ except (ImportError, RuntimeError):  # pragma: nocover
         from PyQt4.QtCore import QObject, QTimer
     except ImportError:
         try:
-            from PySide.QtCore import QObject, QTimer  # flake8: noqa
+            from PySide2.QtCore import QObject, QTimer  # noqa
         except ImportError:
-            raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed')
+            try:
+                from PySide.QtCore import QObject, QTimer  # noqa
+            except ImportError:
+                raise ImportError('QtScheduler requires either PyQt5, PyQt4, PySide2 '
+                                  'or PySide installed')


 class QtScheduler(BaseScheduler):
@@ -26,7 +30,8 @@ class QtScheduler(BaseScheduler):
     def _start_timer(self, wait_seconds):
         self._stop_timer()
         if wait_seconds is not None:
-            self._timer = QTimer.singleShot(wait_seconds * 1000, self._process_jobs)
+            wait_time = min(wait_seconds * 1000, 2147483647)
+            self._timer = QTimer.singleShot(wait_time, self._process_jobs)

     def _stop_timer(self):
         if self._timer:

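`QTimer.singleShot` takes a signed 32-bit millisecond interval, so the hunk clamps the wait to 2147483647 ms (about 24.8 days) to keep far-future jobs from overflowing it. The arithmetic as a sketch:

    QT_MAX_MS = 2147483647  # 2**31 - 1

    def qt_wait_ms(wait_seconds):
        return min(wait_seconds * 1000, QT_MAX_MS)

    assert qt_wait_ms(60) == 60000
    assert qt_wait_ms(10 ** 8) == QT_MAX_MS  # ~3.2 years clamps to ~24.8 days
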
@@ -22,27 +22,16 @@ class BaseTrigger(six.with_metaclass(ABCMeta)):

     def _apply_jitter(self, next_fire_time, jitter, now):
         """
-        Randomize ``next_fire_time`` by adding or subtracting a random value (the jitter). If the
-        resulting datetime is in the past, returns the initial ``next_fire_time`` without jitter.
-
-        ``next_fire_time - jitter <= result <= next_fire_time + jitter``
+        Randomize ``next_fire_time`` by adding a random value (the jitter).

         :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
             ``None``, returns ``None``.
-        :param int|None jitter: maximum number of seconds to add or subtract to
-            ``next_fire_time``. If ``None`` or ``0``, returns ``next_fire_time``
+        :param int|None jitter: maximum number of seconds to add to ``next_fire_time``
+            (if ``None`` or ``0``, returns ``next_fire_time``)
         :param datetime.datetime now: current datetime
         :return datetime.datetime|None: next fire time with a jitter.
         """
         if next_fire_time is None or not jitter:
             return next_fire_time

-        next_fire_time_with_jitter = next_fire_time + timedelta(
-            seconds=random.uniform(-jitter, jitter))
-
-        if next_fire_time_with_jitter < now:
-            # Next fire time with jitter is in the past.
-            # Ignore jitter to avoid false misfire.
-            return next_fire_time
-
-        return next_fire_time_with_jitter
+        return next_fire_time + timedelta(seconds=random.uniform(0, jitter))

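The rewritten `_apply_jitter` draws from `uniform(0, jitter)` instead of `uniform(-jitter, jitter)`, so jitter can only delay a fire time; the old in-the-past special case (and its false-misfire risk) disappears with it. A standalone sketch:

    import random
    from datetime import datetime, timedelta

    def apply_jitter(next_fire_time, jitter):
        if next_fire_time is None or not jitter:
            return next_fire_time
        # forward-only: the result can never precede the scheduled fire time
        return next_fire_time + timedelta(seconds=random.uniform(0, jitter))

    fire = datetime(2021, 5, 1, 12, 0, 0)
    assert fire <= apply_jitter(fire, 30) <= fire + timedelta(seconds=30)
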
@@ -45,7 +45,7 @@ class AndTrigger(BaseCombiningTrigger):
     Trigger alias: ``and``

     :param list triggers: triggers to combine
-    :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+    :param int|None jitter: delay the job execution by ``jitter`` seconds at most
     """

     __slots__ = ()
@@ -73,7 +73,7 @@ class OrTrigger(BaseCombiningTrigger):
     Trigger alias: ``or``

     :param list triggers: triggers to combine
-    :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+    :param int|None jitter: delay the job execution by ``jitter`` seconds at most

     .. note:: Triggers that depends on the previous fire time, such as the interval trigger, may
        seem to behave strangely since they are always passed the previous fire time produced by

@@ -16,7 +16,7 @@ class CronTrigger(BaseTrigger):

     :param int|str year: 4-digit year
     :param int|str month: month (1-12)
-    :param int|str day: day of the (1-31)
+    :param int|str day: day of month (1-31)
     :param int|str week: ISO week (1-53)
     :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
     :param int|str hour: hour (0-23)
@@ -26,7 +26,7 @@ class CronTrigger(BaseTrigger):
     :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
     :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
         to scheduler timezone)
-    :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+    :param int|None jitter: delay the job execution by ``jitter`` seconds at most

     .. note:: The first weekday is always **monday**.
     """

@@ -20,7 +20,7 @@ class IntervalTrigger(BaseTrigger):
     :param datetime|str start_date: starting point for the interval calculation
     :param datetime|str end_date: latest possible date/time to trigger on
     :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
-    :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+    :param int|None jitter: delay the job execution by ``jitter`` seconds at most
     """

     __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter'

@@ -5,8 +5,9 @@ from __future__ import division
 from datetime import date, datetime, time, timedelta, tzinfo
 from calendar import timegm
 from functools import partial
-from inspect import isclass
+from inspect import isclass, ismethod
 import re
+import sys

 from pytz import timezone, utc, FixedOffset
 import six
@@ -21,6 +22,15 @@ try:
 except ImportError:
     TIMEOUT_MAX = 4294967  # Maximum value accepted by Event.wait() on Windows

+try:
+    from asyncio import iscoroutinefunction
+except ImportError:
+    try:
+        from trollius import iscoroutinefunction
+    except ImportError:
+        def iscoroutinefunction(func):
+            return False
+
 __all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp',
            'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name',
            'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args',
@@ -263,7 +273,18 @@ def obj_to_ref(obj):
     if '<locals>' in name:
         raise ValueError('Cannot create a reference to a nested function')

-    return '%s:%s' % (obj.__module__, name)
+    if ismethod(obj):
+        if hasattr(obj, 'im_self') and obj.im_self:
+            # bound method
+            module = obj.im_self.__module__
+        elif hasattr(obj, 'im_class') and obj.im_class:
+            # unbound method
+            module = obj.im_class.__module__
+        else:
+            module = obj.__module__
+    else:
+        module = obj.__module__
+    return '%s:%s' % (module, name)


 def ref_to_obj(ref):
@@ -332,7 +353,10 @@ def check_callable_args(func, args, kwargs):
     has_varargs = has_var_kwargs = False

     try:
-        sig = signature(func)
+        if sys.version_info >= (3, 5):
+            sig = signature(func, follow_wrapped=False)
+        else:
+            sig = signature(func)
     except ValueError:
         # signature() doesn't work against every kind of callable
         return
@@ -398,3 +422,12 @@ def check_callable_args(func, args, kwargs):
         raise ValueError(
             'The target callable does not accept the following keyword arguments: %s' %
             ', '.join(unmatched_kwargs))
+
+
+def iscoroutinefunction_partial(f):
+    while isinstance(f, partial):
+        f = f.func
+
+    # The asyncio version of iscoroutinefunction includes testing for @coroutine
+    # decorations vs. the inspect version which does not.
+    return iscoroutinefunction(f)

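`functools.partial` used to hide the wrapped coroutine function from `iscoroutinefunction` on the Python versions this vendored copy targets, which is why the new helper peels partials off before testing. Sketch with an illustrative coroutine and URL:

    import asyncio
    from functools import partial

    async def fetch(url, timeout):
        await asyncio.sleep(0)

    def iscoroutinefunction_partial(f):
        while isinstance(f, partial):  # unwrap nested partials to the real callable
            f = f.func
        return asyncio.iscoroutinefunction(f)

    job_func = partial(partial(fetch, timeout=5), 'http://example.org')
    assert iscoroutinefunction_partial(job_func)
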
@@ -1,8 +1,3 @@
-from gevent import monkey
-
-monkey.patch_socket()
-monkey.patch_ssl()
-
 from ._connection import Connection

-__version__ = '0.0.7'
+__version__ = '0.0.12'

@@ -1,6 +1,6 @@
 import json
-import gevent
 import sys
+from threading import Thread
 from signalr.events import EventHook
 from signalr.hubs import Hub
 from signalr.transports import AutoTransport
@@ -15,14 +15,16 @@ class Connection:
         self.qs = {}
         self.__send_counter = -1
         self.token = None
+        self.id = None
         self.data = None
         self.received = EventHook()
         self.error = EventHook()
         self.starting = EventHook()
         self.stopping = EventHook()
         self.exception = EventHook()
+        self.is_open = False
         self.__transport = AutoTransport(session, self)
-        self.__greenlet = None
+        self.__listener_thread = None
         self.started = False

     def handle_error(**kwargs):
@@ -48,27 +50,32 @@ class Connection:

         negotiate_data = self.__transport.negotiate()
         self.token = negotiate_data['ConnectionToken']
+        self.id = negotiate_data['ConnectionId']

         listener = self.__transport.start()

         def wrapped_listener():
-            try:
-                listener()
-                gevent.sleep()
-            except:
-                self.exception.fire(*sys.exc_info())
+            while self.is_open:
+                try:
+                    listener()
+                except:
+                    self.exception.fire(*sys.exc_info())
+                    self.is_open = False

-        self.__greenlet = gevent.spawn(wrapped_listener)
+        self.is_open = True
+        self.__listener_thread = Thread(target=wrapped_listener)
+        self.__listener_thread.start()
         self.started = True

     def wait(self, timeout=30):
-        gevent.joinall([self.__greenlet], timeout)
+        Thread.join(self.__listener_thread, timeout)

     def send(self, data):
         self.__transport.send(data)

     def close(self):
-        gevent.kill(self.__greenlet)
+        self.is_open = False
+        self.__listener_thread.join()
         self.__transport.close()

     def register_hub(self, name):

@@ -12,11 +12,16 @@ class ServerSentEventsTransport(Transport):
         return 'serverSentEvents'

     def start(self):
-        self.__response = sseclient.SSEClient(self._get_url('connect'), session=self._session)
+        connect_url = self._get_url('connect')
+        self.__response = iter(sseclient.SSEClient(connect_url, session=self._session))
         self._session.get(self._get_url('start'))

         def _receive():
-            for notification in self.__response:
+            try:
+                notification = next(self.__response)
+            except StopIteration:
+                return
+            else:
                 if notification.data != 'initialized':
                     self._handle_notification(notification.data)

@@ -1,13 +1,12 @@
 from abc import abstractmethod
 import json
 import sys
+import threading
 if sys.version_info[0] < 3:
     from urllib import quote_plus
 else:
     from urllib.parse import quote_plus

-import gevent
-

 class Transport:
@@ -48,7 +47,7 @@ class Transport:
         if len(message) > 0:
             data = json.loads(message)
             self._connection.received.fire(**data)
-        gevent.sleep()
+        #thread.sleep() #TODO: investigate if we should sleep here

     def _get_url(self, action, **kwargs):
         args = kwargs.copy()

@@ -1,7 +1,6 @@
 import json
 import sys

-import gevent
-
 if sys.version_info[0] < 3:
     from urlparse import urlparse, urlunparse
@@ -39,14 +38,14 @@ class WebSocketsTransport(Transport):
         self._session.get(self._get_url('start'))

         def _receive():
-            for notification in self.ws:
-                self._handle_notification(notification)
+            notification = self.ws.recv()
+            self._handle_notification(notification)

         return _receive

     def send(self, data):
         self.ws.send(json.dumps(data))
-        gevent.sleep()
+        #thread.sleep() #TODO: inveistage if we should sleep here or not

     def close(self):
         self.ws.close()

@@ -45,7 +45,7 @@ movie_scores = {'hash': 119, 'title': 60, 'year': 30, 'release_group': 15,
                 'source': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1}

 #: Equivalent release groups
-equivalent_release_groups = ({'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'})
+equivalent_release_groups = ({'FraMeSToR', 'W4NK3R', 'BHDStudio'}, {'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'})


 def get_equivalent_release_groups(release_group):

@ -21,6 +21,7 @@ from bs4 import UnicodeDammit
|
||||||
from babelfish import LanguageReverseError
|
from babelfish import LanguageReverseError
|
||||||
from guessit.jsonutils import GuessitEncoder
|
from guessit.jsonutils import GuessitEncoder
|
||||||
from subliminal import ProviderError, refiner_manager
|
from subliminal import ProviderError, refiner_manager
|
||||||
|
from concurrent.futures import as_completed
|
||||||
|
|
||||||
from .extensions import provider_registry
|
from .extensions import provider_registry
|
||||||
from subliminal.exceptions import ServiceUnavailable, DownloadLimitExceeded
|
from subliminal.exceptions import ServiceUnavailable, DownloadLimitExceeded
|
||||||
|
@ -427,6 +428,58 @@ class SZProviderPool(ProviderPool):
|
||||||
|
|
||||||
return downloaded_subtitles
|
return downloaded_subtitles
|
||||||
|
|
||||||
|
def list_supported_languages(self):
|
||||||
|
"""List supported languages.
|
||||||
|
|
||||||
|
:return: languages supported by the providers.
|
||||||
|
:rtype: list of dicts
|
||||||
|
|
||||||
|
"""
|
||||||
|
languages = []
|
||||||
|
|
||||||
|
for name in self.providers:
|
||||||
|
# list supported languages for a single provider
|
||||||
|
try:
|
||||||
|
provider_languages = self[name].languages
|
||||||
|
except AttributeError:
|
||||||
|
logger.exception(f"{name} provider doesn't have a languages attribute")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if provider_languages is None:
|
||||||
|
logger.info(f"Skipping provider {name} because it doesn't support any languages.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# add the languages for this provider
|
||||||
|
languages.append({'provider': name, 'languages': provider_languages})
|
||||||
|
|
||||||
|
return languages
|
||||||
|
|
||||||
|
def list_supported_video_types(self):
|
||||||
|
"""List supported video types.
|
||||||
|
|
||||||
|
:return: video types supported by the providers.
|
||||||
|
:rtype: tuple of video types
|
||||||
|
|
||||||
|
"""
|
||||||
|
video_types = []
|
||||||
|
|
||||||
|
for name in self.providers:
|
||||||
|
# list supported video types for a single provider
|
||||||
|
try:
|
||||||
|
provider_video_type = self[name].video_types
|
||||||
|
except AttributeError:
|
||||||
|
logger.exception(f"{name} provider doesn't have a video_types method")
|
||||||
|
continue
|
||||||
|
|
||||||
|
if provider_video_type is None:
|
||||||
|
logger.info(f"Skipping provider {name} because it doesn't support any video type.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# add the video types for this provider
|
||||||
|
video_types.append({'provider': name, 'video_types': provider_video_type})
|
||||||
|
|
||||||
|
return video_types
|
||||||
|
|
||||||
|
|
||||||
 class SZAsyncProviderPool(SZProviderPool):
     """Subclass of :class:`ProviderPool` with asynchronous support for :meth:`~ProviderPool.list_subtitles`.
@@ -474,6 +527,65 @@ class SZAsyncProviderPool(SZProviderPool):
 
         return subtitles
 
+    def list_supported_languages(self):
+        """List supported languages asynchronously.
+
+        :return: languages supported by the providers.
+        :rtype: list of dicts
+
+        """
+        languages = []
+
+        def get_providers_languages(provider_name):
+            provider_languages = None
+            try:
+                provider_languages = {'provider': provider_name, 'languages': self[provider_name].languages}
+            except AttributeError:
+                logger.exception(f"{provider_name} provider doesn't have a languages attribute")
+
+            return provider_languages
+
+        with ThreadPoolExecutor(self.max_workers) as executor:
+            for future in as_completed([executor.submit(get_providers_languages, x) for x in self.providers]):
+                provider_languages = future.result()
+                if provider_languages is None:
+                    continue
+
+                # add the languages for this provider
+                languages.append(provider_languages)
+
+        return languages
+
+    def list_supported_video_types(self):
+        """List supported video types asynchronously.
+
+        :return: video types supported by the providers.
+        :rtype: tuple of video types
+
+        """
+        video_types = []
+
+        def get_providers_video_types(provider_name):
+            provider_video_types = None
+            try:
+                provider_video_types = {'provider': provider_name,
+                                        'video_types': self[provider_name].video_types}
+            except AttributeError:
+                logger.exception(f"{provider_name} provider doesn't have a video_types attribute")
+
+            return provider_video_types
+
+        with ThreadPoolExecutor(self.max_workers) as executor:
+            for future in as_completed([executor.submit(get_providers_video_types, x) for x in self.providers]):
+                provider_video_types = future.result()
+                if provider_video_types is None:
+                    continue
+
+                # add the video types for this provider
+                video_types.append(provider_video_types)
+
+        return video_types
 
 if is_windows_special_path:
     SZAsyncProviderPool = SZProviderPool
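
The async variant fans the per-provider lookups out over a thread pool and collects results as they finish; a self-contained sketch of that standard-library pattern (the `work` function and its inputs are placeholders):

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def work(name):
        # stand-in for a per-provider attribute lookup
        return {'provider': name}

    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(work, n) for n in ('a', 'b', 'c')]
        for future in as_completed(futures):
            print(future.result())  # arrives in completion order, not submit order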
@@ -758,6 +870,16 @@ def list_all_subtitles(videos, languages, **kwargs):
 
     return listed_subtitles
 
+
+def list_supported_languages(pool_class, **kwargs):
+    with pool_class(**kwargs) as pool:
+        return pool.list_supported_languages()
+
+
+def list_supported_video_types(pool_class, **kwargs):
+    with pool_class(**kwargs) as pool:
+        return pool.list_supported_video_types()
+
 
 def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
     """Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
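
A sketch of how these module-level wrappers might be called (the keyword arguments depend on Bazarr's provider configuration and are illustrative only):

    # Hypothetical: **kwargs are forwarded to the pool constructor; the pool
    # is opened as a context manager and the method of the same name delegates.
    supported = list_supported_languages(SZProviderPool, providers=['podnapisi'])
    types = list_supported_video_types(SZProviderPool, providers=['podnapisi'])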
@@ -109,6 +109,7 @@ class AssrtSubtitle(Subtitle):
 class AssrtProvider(Provider):
     """Assrt Provider."""
     languages = {Language(*l) for l in supported_languages}
+    video_types = (Episode, Movie)
 
     def __init__(self, token=None):
         if not token:
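
The same one-line `video_types = (Episode, Movie)` attribute is added to most of the providers below; a consumer can then gate a provider on the video at hand. A hedged sketch of such a check (`providers_supporting` is a hypothetical helper, not part of this commit):

    def providers_supporting(video, pool):
        # isinstance accepts a tuple of types, so the declared video_types
        # tuple can be passed directly as the second argument
        return [name for name in pool.providers
                if isinstance(video, pool[name].video_types)]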
@@ -72,6 +72,7 @@ class BSPlayerProvider(Provider):
         'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por',
         'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho'
     ]}
+    video_types = (Episode, Movie)
     SEARCH_THROTTLE = 8
     hash_verifiable = True
     # fmt: on
@@ -54,6 +54,7 @@ class GreekSubsSubtitle(Subtitle):
 class GreekSubsProvider(Provider):
     """GreekSubs Provider."""
     languages = {Language('ell')}
+    video_types = (Episode, Movie)
     server_url = 'https://greeksubs.net/'
     subtitle_class = GreekSubsSubtitle
@@ -52,6 +52,7 @@ class GreekSubtitlesSubtitle(Subtitle):
 class GreekSubtitlesProvider(Provider):
     """GreekSubtitles Provider."""
     languages = {Language(l) for l in ['ell', 'eng']}
+    video_types = (Episode, Movie)
     server_url = 'http://gr.greek-subtitles.com/'
     search_url = 'search.php?name={}'
     download_url = 'http://www.greeksubtitles.info/getp.php?id={:d}'
@@ -110,6 +110,7 @@ class KtuvitProvider(Provider):
     """Ktuvit Provider."""
 
     languages = {Language(l) for l in ["heb"]}
+    video_types = (Episode, Movie)
     server_url = "https://www.ktuvit.me/"
     sign_in_url = "Services/MembershipService.svc/Login"
     search_url = "Services/ContentProvider.svc/SearchPage_search"
@@ -121,6 +121,7 @@ class LegendasdivxSubtitle(Subtitle):
 class LegendasdivxProvider(Provider):
     """Legendasdivx Provider."""
     languages = {Language('por', 'BR')} | {Language('por')}
+    video_types = (Episode, Movie)
     SEARCH_THROTTLE = 8
     site = 'https://www.legendasdivx.pt'
     headers = {
@@ -272,7 +273,7 @@ class LegendasdivxProvider(Provider):
         querytext = video.imdb_id if video.imdb_id else video.title
 
         if isinstance(video, Episode):
-            querytext = '{} S{:02d}E{:02d}'.format(video.series, video.season, video.episode)
+            querytext = '%22{}%20S{:02d}E{:02d}%22'.format(video.series, video.season, video.episode)
         querytext = quote(querytext.lower())
 
         # language query filter
@@ -430,13 +431,16 @@ class LegendasdivxProvider(Provider):
 
             _guess = guessit(name)
             if isinstance(subtitle.video, Episode):
-                logger.debug("Legendasdivx.pt :: guessing %s", name)
-                logger.debug("Legendasdivx.pt :: subtitle S%sE%s video S%sE%s", _guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode)
-                if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']:
-                    logger.debug('Legendasdivx.pt :: subtitle does not match video, skipping')
-                    continue
+                if all(key in _guess for key in ('season', 'episode')):
+                    logger.debug("Legendasdivx.pt :: guessing %s", name)
+                    logger.debug("Legendasdivx.pt :: subtitle S%sE%s video S%sE%s", _guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode)
+                    if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']:
+                        logger.debug('Legendasdivx.pt :: subtitle does not match video, skipping')
+                        continue
+                else:
+                    logger.debug('Legendasdivx.pt :: no "season" and/or "episode" on "_guess", skipping')
+                    continue
 
             matches = set()
             matches |= guess_matches(subtitle.video, _guess)
             logger.debug('Legendasdivx.pt :: sub matches: %s', matches)
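
For reference, `guessit` is what populates `_guess`; the season and episode keys only appear when the release name carries them, which is exactly what the new guard checks:

    from guessit import guessit

    _guess = guessit('Some.Show.S01E02.720p.WEB-DL.mkv')
    # _guess['season'] == 1 and _guess['episode'] == 2
    # a movie-style name yields neither key, so the new else branch skips it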
@@ -68,6 +68,7 @@ class LegendasTVSubtitle(_LegendasTVSubtitle):
 
 class LegendasTVProvider(_LegendasTVProvider):
     languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()}
+    video_types = (Episode, Movie)
     subtitle_class = LegendasTVSubtitle
 
     def __init__(self, username=None, password=None, featured_only=False):
@@ -5,6 +5,7 @@ import logging
 from subliminal.providers.napiprojekt import NapiProjektProvider as _NapiProjektProvider, \
     NapiProjektSubtitle as _NapiProjektSubtitle, get_subhash
 from subzero.language import Language
+from subliminal.video import Episode, Movie
 
 logger = logging.getLogger(__name__)
 
@@ -21,6 +22,7 @@ class NapiProjektSubtitle(_NapiProjektSubtitle):
 
 class NapiProjektProvider(_NapiProjektProvider):
     languages = {Language.fromalpha2(l) for l in ['pl']}
+    video_types = (Episode, Movie)
     subtitle_class = NapiProjektSubtitle
 
     def query(self, language, hash):
@@ -12,6 +12,7 @@ from subliminal import __short_version__
 from subliminal.exceptions import AuthenticationError, ConfigurationError
 from subliminal.subtitle import fix_line_ending
 from subzero.language import Language
+from subliminal.video import Episode, Movie
 
 logger = logging.getLogger(__name__)
 
@@ -47,6 +48,7 @@ class Napisy24Subtitle(Subtitle):
 class Napisy24Provider(Provider):
     '''Napisy24 Provider.'''
     languages = {Language(l) for l in ['pol']}
+    video_types = (Episode, Movie)
     required_hash = 'napisy24'
     api_url = 'http://napisy24.pl/run/CheckSubAgent.php'
@@ -104,6 +104,7 @@ class NekurProvider(Provider, ProviderSubtitleArchiveMixin):
     """Nekur Provider."""
     subtitle_class = NekurSubtitle
     languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']}
+    video_types = (Movie,)
     server_url = 'http://subtitri.nekur.net/'
     search_url = server_url + 'modules/Subtitles.php'
@@ -140,6 +140,8 @@ class OpenSubtitlesProvider(ProviderRetryMixin, _OpenSubtitlesProvider):
     languages.update(set(Language.rebuild(l, forced=True) for l in languages))
     languages.update(set(Language.rebuild(l, hi=True) for l in languages))
 
+    video_types = (Episode, Movie)
+
     def __init__(self, username=None, password=None, use_tag_search=False, only_foreign=False, also_foreign=False,
                  skip_wrong_fps=True, is_vip=False, use_ssl=True, timeout=15):
         if any((username, password)) and not all((username, password)):
@@ -51,7 +51,7 @@ class OpenSubtitlesComSubtitle(Subtitle):
     hash_verifiable = False
 
     def __init__(self, language, forced, hearing_impaired, page_link, file_id, releases, uploader, title, year,
-                 hash_matched, hash=None, season=None, episode=None):
+                 hash_matched, file_hash=None, season=None, episode=None):
         language = Language.rebuild(language, hi=hearing_impaired, forced=forced)
 
         self.title = title
@@ -68,7 +68,7 @@ class OpenSubtitlesComSubtitle(Subtitle):
         self.download_link = None
         self.uploader = uploader
         self.matches = None
-        self.hash = hash
+        self.hash = file_hash
         self.encoding = 'utf-8'
         self.hash_matched = hash_matched
@@ -123,8 +123,10 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
     """OpenSubtitlesCom Provider"""
     server_url = 'https://api.opensubtitles.com/api/v1/'
 
-    languages = {Language.fromopensubtitles(l) for l in language_converters['szopensubtitles'].codes}
-    languages.update(set(Language.rebuild(l, forced=True) for l in languages))
+    languages = {Language.fromopensubtitles(lang) for lang in language_converters['szopensubtitles'].codes}
+    languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
+
+    video_types = (Episode, Movie)
 
     def __init__(self, username=None, password=None, use_hash=True, api_key=None):
         if not all((username, password)):
@@ -183,26 +185,16 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
     @staticmethod
     def sanitize_external_ids(external_id):
         if isinstance(external_id, str):
-            external_id = external_id.lower().lstrip('tt')
+            external_id = external_id.lower().lstrip('tt').lstrip('0')
         sanitized_id = external_id[:-1].lstrip('0') + external_id[-1]
         return int(sanitized_id)
 
     @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
     def search_titles(self, title):
         title_id = None
-        imdb_id = None
-
-        if isinstance(self.video, Episode) and self.video.series_imdb_id:
-            imdb_id = self.sanitize_external_ids(self.video.series_imdb_id)
-        elif isinstance(self.video, Movie) and self.video.imdb_id:
-            imdb_id = self.sanitize_external_ids(self.video.imdb_id)
-
-        if imdb_id:
-            parameters = {'imdb_id': imdb_id}
-            logging.debug('Searching using this IMDB id: {}'.format(imdb_id))
-        else:
-            parameters = {'query': title.lower()}
-            logging.debug('Searching using this title: {}'.format(title))
+
+        parameters = {'query': title.lower()}
+        logging.debug('Searching using this title: {}'.format(title))
 
         results = self.session.get(self.server_url + 'features', params=parameters, timeout=30)
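
A worked example of the updated sanitizer; the values follow directly from the string operations above:

    # 'tt0060666' -> lower() -> lstrip('tt') drops the leading letters
    # -> '0060666' -> lstrip('0') -> '60666' -> int('60666')
    OpenSubtitlesComProvider.sanitize_external_ids('tt0060666')  # returns 60666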
@@ -230,10 +222,19 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
         else:
             # loop over results
             for result in results_dict:
-                if fix_tv_naming(title).lower() == result['attributes']['title'].lower() and \
-                        (not self.video.year or self.video.year == int(result['attributes']['year'])):
-                    title_id = result['id']
-                    break
+                if 'title' in result['attributes']:
+                    if isinstance(self.video, Episode):
+                        if fix_tv_naming(title).lower() == result['attributes']['title'].lower() and \
+                                (not self.video.year or self.video.year == int(result['attributes']['year'])):
+                            title_id = result['id']
+                            break
+                    else:
+                        if fix_movie_naming(title).lower() == result['attributes']['title'].lower() and \
+                                (not self.video.year or self.video.year == int(result['attributes']['year'])):
+                            title_id = result['id']
+                            break
+                else:
+                    continue
 
         if title_id:
             logging.debug('Found this title ID: {}'.format(title_id))
@@ -245,19 +246,28 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
     def query(self, languages, video):
         self.video = video
         if self.use_hash:
-            hash = self.video.hashes.get('opensubtitlescom')
-            logging.debug('Searching using this hash: {}'.format(hash))
+            file_hash = self.video.hashes.get('opensubtitlescom')
+            logging.debug('Searching using this hash: {}'.format(file_hash))
         else:
-            hash = None
+            file_hash = None
 
         if isinstance(self.video, Episode):
             title = self.video.series
         else:
             title = self.video.title
 
-        title_id = self.search_titles(title)
-        if not title_id:
-            return []
+        imdb_id = None
+        if isinstance(self.video, Episode) and self.video.series_imdb_id:
+            imdb_id = self.sanitize_external_ids(self.video.series_imdb_id)
+        elif isinstance(self.video, Movie) and self.video.imdb_id:
+            imdb_id = self.sanitize_external_ids(self.video.imdb_id)
+
+        title_id = None
+        if not imdb_id:
+            title_id = self.search_titles(title)
+            if not title_id:
+                return []
 
         lang_strings = [str(lang.basename) for lang in languages]
         only_foreign = all([lang.forced for lang in languages])
         also_foreign = any([lang.forced for lang in languages])
@@ -277,17 +287,17 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
                               params=(('episode_number', self.video.episode),
                                       ('foreign_parts_only', forced),
                                       ('languages', langs.lower()),
-                                      ('moviehash', hash),
-                                      ('parent_feature_id', title_id),
+                                      ('moviehash', file_hash),
+                                      ('parent_feature_id', title_id) if title_id else ('imdb_id', imdb_id),
                                       ('season_number', self.video.season),
                                       ('query', os.path.basename(self.video.name))),
                               timeout=30)
         else:
             res = self.session.get(self.server_url + 'subtitles',
                                    params=(('foreign_parts_only', forced),
-                                           ('id', title_id),
+                                           ('id', title_id) if title_id else ('imdb_id', imdb_id),
                                            ('languages', langs.lower()),
-                                           ('moviehash', hash),
+                                           ('moviehash', file_hash),
                                            ('query', os.path.basename(self.video.name))),
                                    timeout=30)
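
The conditional pair keeps each request to a single identifier; the ternary picks one (key, value) tuple for the params sequence. A small illustration with placeholder values:

    title_id, imdb_id = None, 60666
    param = ('parent_feature_id', title_id) if title_id else ('imdb_id', imdb_id)
    # param == ('imdb_id', 60666)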
@@ -27,8 +27,7 @@ except ImportError:
 except ImportError:
     import xml.etree.ElementTree as etree
 from babelfish import language_converters
-from subliminal import Episode
-from subliminal import Movie
+from subliminal.video import Episode, Movie
 from subliminal.providers.podnapisi import PodnapisiProvider as _PodnapisiProvider, \
     PodnapisiSubtitle as _PodnapisiSubtitle
 from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming
@@ -130,6 +129,8 @@ class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin):
     languages.update(set(Language.rebuild(l, forced=True) for l in languages))
     languages.update(set(Language.rebuild(l, hi=True) for l in languages))
 
+    video_types = (Episode, Movie)
+
     server_url = 'https://podnapisi.net/subtitles/'
     only_foreign = False
     also_foreign = False
@@ -65,6 +65,7 @@ class RegieLiveProvider(Provider):
     """RegieLive Provider."""
     languages = {Language(l) for l in ['ron']}
     language = list(languages)[0]
+    video_types = (Episode, Movie)
     SEARCH_THROTTLE = 8
 
     def __init__(self):
@@ -2,6 +2,7 @@
 
 from __future__ import absolute_import
 from subliminal.providers.shooter import ShooterProvider as _ShooterProvider, ShooterSubtitle as _ShooterSubtitle
+from subliminal.video import Episode, Movie
 
 
 class ShooterSubtitle(_ShooterSubtitle):
@@ -13,4 +14,4 @@ class ShooterSubtitle(_ShooterSubtitle):
 
 class ShooterProvider(_ShooterProvider):
     subtitle_class = ShooterSubtitle
+    video_types = (Episode, Movie)
@@ -102,6 +102,7 @@ class SoustitreseuProvider(Provider, ProviderSubtitleArchiveMixin):
     """Sous-Titres.eu Provider."""
     subtitle_class = SoustitreseuSubtitle
     languages = {Language(l) for l in ['fra', 'eng']}
+    video_types = (Episode, Movie)
     server_url = 'https://www.sous-titres.eu/'
     search_url = server_url + 'search.html'
@@ -83,6 +83,7 @@ class SubdivxSubtitlesProvider(Provider):
     provider_name = "subdivx"
     hash_verifiable = False
     languages = {Language("spa", "MX")} | {Language.fromalpha2("es")}
+    video_types = (Episode, Movie)
     subtitle_class = SubdivxSubtitle
 
     server_url = "https://www.subdivx.com/"
@@ -21,6 +21,7 @@ from babelfish import language_converters
 from guessit import guessit
 from dogpile.cache.api import NO_VALUE
 from subliminal import Episode, ProviderError
+from subliminal.video import Episode, Movie
 from subliminal.exceptions import ConfigurationError, ServiceUnavailable
 from subliminal.utils import sanitize_release_group
 from subliminal.cache import region
@@ -124,7 +125,7 @@ class SubsceneProvider(Provider, ProviderSubtitleArchiveMixin):
     languages = supported_languages
     languages.update(set(Language.rebuild(l, forced=True) for l in languages))
     languages.update(set(Language.rebuild(l, hi=True) for l in languages))
+    video_types = (Episode, Movie)
     session = None
     skip_wrong_fps = False
     hearing_impaired_verifiable = True
@@ -3,7 +3,7 @@
 
 from __future__ import absolute_import
 
 from guessit import guessit
-from subliminal.video import Episode
+from subliminal.video import Episode, Movie
 from subliminal.providers.subscenter import SubsCenterProvider as _SubsCenterProvider, \
     SubsCenterSubtitle as _SubsCenterSubtitle
 from subzero.language import Language
@@ -37,7 +37,7 @@ class SubsCenterSubtitle(_SubsCenterSubtitle):
 
 class SubsCenterProvider(_SubsCenterProvider):
     languages = {Language.fromalpha2(l) for l in ['he']}
+    video_types = (Episode, Movie)
     subtitle_class = SubsCenterSubtitle
     hearing_impaired_verifiable = True
     server_url = 'http://www.subscenter.info/he/'
@@ -122,6 +122,7 @@ class SubsSabBzProvider(Provider):
     languages = {Language(l) for l in [
         'bul', 'eng'
     ]}
+    video_types = (Episode, Movie)
 
     def initialize(self):
         self.session = Session()
@@ -120,6 +120,7 @@ class SubsUnacsProvider(Provider):
     languages = {Language(l) for l in [
         'bul', 'eng'
     ]}
+    video_types = (Episode, Movie)
 
     def initialize(self):
         self.session = Session()
@@ -124,6 +124,7 @@ class SubtitrarinoiProvider(Provider, ProviderSubtitleArchiveMixin):
     subtitle_class = SubtitrarinoiSubtitle
     languages = {Language(lang) for lang in ['ron']}
     languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
+    video_types = (Episode, Movie)
     server_url = 'https://www.subtitrari-noi.ro/'
     api_url = server_url + 'paginare_filme.php'
@@ -94,6 +94,7 @@ class SubtitriIdProvider(Provider, ProviderSubtitleArchiveMixin):
     """subtitri.id.lv Provider."""
     subtitle_class = SubtitriIdSubtitle
     languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']}
+    video_types = (Movie,)
     server_url = 'http://subtitri.id.lv'
     search_url = server_url + '/search/'
@@ -135,6 +135,7 @@ class TitloviSubtitle(Subtitle):
 class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
     subtitle_class = TitloviSubtitle
     languages = {Language.fromtitlovi(l) for l in language_converters['titlovi'].codes} | {Language.fromietf('sr-Latn')}
+    video_types = (Episode, Movie)
     api_url = 'https://kodi.titlovi.com/api/subtitles'
     api_gettoken_url = api_url + '/gettoken'
     api_search_url = api_url + '/search'
@@ -125,6 +125,7 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
     subtitle_class = TitrariSubtitle
     languages = {Language(lang) for lang in ['ron', 'eng']}
     languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
+    video_types = (Episode, Movie)
     api_url = 'https://www.titrari.ro/'
     query_advanced_search = 'cautarepreaavansata'

(File diff suppressed because it is too large.)
@@ -141,7 +141,7 @@ class TuSubtituloProvider(Provider):
 
         completed = "%" not in content[5].text
         download_url = (
-            content[6].find_all("a")[1].get("href").split("?sub=")[-1]
+            parse.unquote(content[6].find_all("a")[1].get("href").split("?sub=")[-1])
         )
         episode_id = download_url.split("/")[4]
 
@@ -219,9 +219,9 @@ class TuSubtituloProvider(Provider):
         soup = bso(r.content, "lxml")
 
         for url, selected in zip(soup.select(_CSS1), soup.select(_CSS2)):
-            meta = ".".join(
+            meta = parse.unquote(".".join(
                 selected.get("href").split(discriminator)[-1].split(".")[:-1]
-            )
+            ))
             if meta in episode_dict["download_url"]:
 
                 id_url = url.find_all("a")[0].get("href")
@@ -255,7 +255,11 @@ class TuSubtituloProvider(Provider):
             return []
 
     def list_subtitles(self, video, languages):
-        return self.query(video)
+        # return self.query(video)
+
+        # returning no subtitles automatically to prevent requests to the provider who explicitly requested to be
+        # removed in https://github.com/morpheus65535/bazarr/issues/1591
+        return []
 
     @staticmethod
     def _check_response(response):
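
`parse.unquote` reverses the percent-encoding in the scraped hrefs so the later substring comparison works on plain text (standard urllib behavior):

    from urllib import parse

    parse.unquote('the%20walking%20dead')  # 'the walking dead'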
@@ -10,6 +10,7 @@ from subliminal.cache import SHOW_EXPIRATION_TIME, region, EPISODE_EXPIRATION_TIME
 from subliminal.providers.tvsubtitles import TVsubtitlesProvider as _TVsubtitlesProvider, \
     TVsubtitlesSubtitle as _TVsubtitlesSubtitle, link_re, episode_id_re
 from subliminal.utils import sanitize
+from subliminal.video import Episode
 
 logger = logging.getLogger(__name__)
 
@@ -26,6 +27,7 @@ class TVsubtitlesProvider(_TVsubtitlesProvider):
         'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por',
         'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho'
     ]}
+    video_types = (Episode,)
     subtitle_class = TVsubtitlesSubtitle
 
     @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
@@ -79,6 +79,7 @@ class WizdomSubtitle(Subtitle):
 class WizdomProvider(Provider):
     """Wizdom Provider."""
     languages = {Language(l) for l in ['heb']}
+    video_types = (Episode, Movie)
     server_url = 'wizdom.xyz'
 
     _tmdb_api_key = 'a51ee051bcd762543373903de296e0a3'
@@ -102,6 +102,7 @@ class YavkaNetProvider(Provider):
     languages = {Language(l) for l in [
         'bul', 'eng', 'rus', 'spa', 'ita'
     ]}
+    video_types = (Episode, Movie)
 
     def initialize(self):
         self.session = Session()
@@ -84,6 +84,7 @@ class ZimukuProvider(Provider):
     """Zimuku Provider."""
 
     languages = {Language(*l) for l in supported_languages}
+    video_types = (Episode, Movie)
     logger.info(str(supported_languages))
 
     server_url = "http://zimuku.org"
@@ -1,5 +1,5 @@
 apprise=0.8.8
-apscheduler=3.5.1
+apscheduler=3.8.0
 babelfish=0.5.5
 backports.functools-lru-cache=1.5
 Beaker=1.10.0
@@ -14,7 +14,6 @@ enzyme=0.4.1
 ffsubsync=0.4.11
 Flask=1.1.1
 flask-socketio=5.0.2dev
-gevent-websocker=0.10.1
 gitpython=2.1.9
 guessit=3.3.1
 guess_language-spirit=0.5.3
@@ -33,7 +32,7 @@ rarfile=3.0
 rebulk=3.0.1
 requests=2.18.4
 semver=2.13.0
-signalr-client=0.0.7 <-- Modified to work with Sonarr and added exception handler
+signalr-client-threads=0.0.12 <-- Modified to work with Sonarr
 signalrcore=0.9.2 <-- https://github.com/mandrewcito/signalrcore/pull/60 and 61
 SimpleConfigParser=0.1.0 <-- modified version: do not update!!!
 six=1.11.0
@@ -44,6 +43,7 @@ subliminal=2.1.0dev
 tzlocal=2.1b1
 twine=3.4.1
 urllib3=1.23
+waitress=2.0.0
 websocket-client=1.0.0
 
 ## indirect dependencies
@@ -0,0 +1,46 @@
+import logging
+
+from waitress.server import create_server
+
+
+def serve(app, **kw):
+    _server = kw.pop("_server", create_server)  # test shim
+    _quiet = kw.pop("_quiet", False)  # test shim
+    _profile = kw.pop("_profile", False)  # test shim
+    if not _quiet:  # pragma: no cover
+        # idempotent if logging has already been set up
+        logging.basicConfig()
+    server = _server(app, **kw)
+    if not _quiet:  # pragma: no cover
+        server.print_listen("Serving on http://{}:{}")
+    if _profile:  # pragma: no cover
+        profile("server.run()", globals(), locals(), (), False)
+    else:
+        server.run()
+
+
+def serve_paste(app, global_conf, **kw):
+    serve(app, **kw)
+    return 0
+
+
+def profile(cmd, globals, locals, sort_order, callers):  # pragma: no cover
+    # runs a command under the profiler and print profiling output at shutdown
+    import os
+    import profile
+    import pstats
+    import tempfile
+
+    fd, fn = tempfile.mkstemp()
+    try:
+        profile.runctx(cmd, globals, locals, fn)
+        stats = pstats.Stats(fn)
+        stats.strip_dirs()
+        # calls,time,cumulative and cumulative,calls,time are useful
+        stats.sort_stats(*(sort_order or ("cumulative", "calls", "time")))
+        if callers:
+            stats.print_callers(0.3)
+        else:
+            stats.print_stats(0.3)
+    finally:
+        os.remove(fn)
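
A minimal sketch of serving a WSGI app through this entry point, assuming the vendored package is importable as `waitress`; the host and port values are illustrative, not Bazarr's actual settings:

    from waitress import serve

    def app(environ, start_response):
        # trivial WSGI app used only to demonstrate the call
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    serve(app, host='127.0.0.1', port=8080)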
@@ -0,0 +1,3 @@
+from waitress.runner import run  # pragma nocover
+
+run()  # pragma nocover
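
Because the module calls `run()` at import time, the package can be invoked as a command-line entry point; a hypothetical invocation (the module path and options are illustrative, following waitress's runner syntax):

    python -m waitress --host=127.0.0.1 --port=8080 mypackage.wsgi:app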
@ -0,0 +1,523 @@
|
||||||
|
##############################################################################
|
||||||
|
#
|
||||||
|
# Copyright (c) 2002 Zope Foundation and Contributors.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# This software is subject to the provisions of the Zope Public License,
|
||||||
|
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
|
||||||
|
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
|
||||||
|
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
|
||||||
|
# FOR A PARTICULAR PURPOSE.
|
||||||
|
#
|
||||||
|
##############################################################################
|
||||||
|
"""Adjustments are tunable parameters.
|
||||||
|
"""
|
||||||
|
import getopt
|
||||||
|
import socket
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from .compat import HAS_IPV6, WIN
|
||||||
|
from .proxy_headers import PROXY_HEADERS
|
||||||
|
|
||||||
|
truthy = frozenset(("t", "true", "y", "yes", "on", "1"))
|
||||||
|
|
||||||
|
KNOWN_PROXY_HEADERS = frozenset(
|
||||||
|
header.lower().replace("_", "-") for header in PROXY_HEADERS
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def asbool(s):
|
||||||
|
"""Return the boolean value ``True`` if the case-lowered value of string
|
||||||
|
input ``s`` is any of ``t``, ``true``, ``y``, ``on``, or ``1``, otherwise
|
||||||
|
return the boolean value ``False``. If ``s`` is the value ``None``,
|
||||||
|
return ``False``. If ``s`` is already one of the boolean values ``True``
|
||||||
|
or ``False``, return it."""
|
||||||
|
if s is None:
|
||||||
|
return False
|
||||||
|
if isinstance(s, bool):
|
||||||
|
return s
|
||||||
|
s = str(s).strip()
|
||||||
|
return s.lower() in truthy
|
||||||
|
|
||||||
|
|
||||||
|
def asoctal(s):
|
||||||
|
"""Convert the given octal string to an actual number."""
|
||||||
|
return int(s, 8)
|
||||||
|
|
||||||
|
|
||||||
|
def aslist_cronly(value):
|
||||||
|
if isinstance(value, str):
|
||||||
|
value = filter(None, [x.strip() for x in value.splitlines()])
|
||||||
|
return list(value)
|
||||||
|
|
||||||
|
|
||||||
|
def aslist(value):
|
||||||
|
"""Return a list of strings, separating the input based on newlines
|
||||||
|
and, if flatten=True (the default), also split on spaces within
|
||||||
|
each line."""
|
||||||
|
values = aslist_cronly(value)
|
||||||
|
result = []
|
||||||
|
for value in values:
|
||||||
|
subvalues = value.split()
|
||||||
|
result.extend(subvalues)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def asset(value):
|
||||||
|
return set(aslist(value))
|
||||||
|
|
||||||
|
|
||||||
|
def slash_fixed_str(s):
|
||||||
|
s = s.strip()
|
||||||
|
if s:
|
||||||
|
# always have a leading slash, replace any number of leading slashes
|
||||||
|
# with a single slash, and strip any trailing slashes
|
||||||
|
s = "/" + s.lstrip("/").rstrip("/")
|
||||||
|
return s
|
||||||
|
|
||||||
|
|
||||||
|
def str_iftruthy(s):
|
||||||
|
return str(s) if s else None
|
||||||
|
|
||||||
|
|
||||||
|
def as_socket_list(sockets):
|
||||||
|
"""Checks if the elements in the list are of type socket and
|
||||||
|
removes them if not."""
|
||||||
|
return [sock for sock in sockets if isinstance(sock, socket.socket)]
|
||||||
|
|
||||||
|
|
||||||
|
class _str_marker(str):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class _int_marker(int):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class _bool_marker:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class Adjustments:
|
||||||
|
"""This class contains tunable parameters."""
|
||||||
|
|
||||||
|
_params = (
|
||||||
|
("host", str),
|
||||||
|
("port", int),
|
||||||
|
("ipv4", asbool),
|
||||||
|
("ipv6", asbool),
|
||||||
|
("listen", aslist),
|
||||||
|
("threads", int),
|
||||||
|
("trusted_proxy", str_iftruthy),
|
||||||
|
("trusted_proxy_count", int),
|
||||||
|
("trusted_proxy_headers", asset),
|
||||||
|
("log_untrusted_proxy_headers", asbool),
|
||||||
|
("clear_untrusted_proxy_headers", asbool),
|
||||||
|
("url_scheme", str),
|
||||||
|
("url_prefix", slash_fixed_str),
|
||||||
|
("backlog", int),
|
||||||
|
("recv_bytes", int),
|
||||||
|
("send_bytes", int),
|
||||||
|
("outbuf_overflow", int),
|
||||||
|
("outbuf_high_watermark", int),
|
||||||
|
("inbuf_overflow", int),
|
||||||
|
("connection_limit", int),
|
||||||
|
("cleanup_interval", int),
|
||||||
|
("channel_timeout", int),
|
||||||
|
("log_socket_errors", asbool),
|
||||||
|
("max_request_header_size", int),
|
||||||
|
("max_request_body_size", int),
|
||||||
|
("expose_tracebacks", asbool),
|
||||||
|
("ident", str_iftruthy),
|
||||||
|
("asyncore_loop_timeout", int),
|
||||||
|
("asyncore_use_poll", asbool),
|
||||||
|
("unix_socket", str),
|
||||||
|
("unix_socket_perms", asoctal),
|
||||||
|
("sockets", as_socket_list),
|
||||||
|
("channel_request_lookahead", int),
|
||||||
|
("server_name", str),
|
||||||
|
)
|
||||||
|
|
||||||
|
_param_map = dict(_params)
|
||||||
|
|
||||||
|
# hostname or IP address to listen on
|
||||||
|
host = _str_marker("0.0.0.0")
|
||||||
|
|
||||||
|
# TCP port to listen on
|
||||||
|
port = _int_marker(8080)
|
||||||
|
|
||||||
|
listen = ["{}:{}".format(host, port)]
|
||||||
|
|
||||||
|
# number of threads available for tasks
|
||||||
|
threads = 4
|
||||||
|
|
||||||
|
# Host allowed to overrid ``wsgi.url_scheme`` via header
|
||||||
|
trusted_proxy = None
|
||||||
|
|
||||||
|
# How many proxies we trust when chained
|
||||||
|
#
|
||||||
|
# X-Forwarded-For: 192.0.2.1, "[2001:db8::1]"
|
||||||
|
#
|
||||||
|
# or
|
||||||
|
#
|
||||||
|
# Forwarded: for=192.0.2.1, For="[2001:db8::1]"
|
||||||
|
#
|
||||||
|
# means there were (potentially), two proxies involved. If we know there is
|
||||||
|
# only 1 valid proxy, then that initial IP address "192.0.2.1" is not
|
||||||
|
# trusted and we completely ignore it. If there are two trusted proxies in
|
||||||
|
# the path, this value should be set to a higher number.
|
||||||
|
trusted_proxy_count = None
|
||||||
|
|
||||||
|
# Which of the proxy headers should we trust, this is a set where you
|
||||||
|
# either specify forwarded or one or more of forwarded-host, forwarded-for,
|
||||||
|
# forwarded-proto, forwarded-port.
|
||||||
|
trusted_proxy_headers = set()
|
||||||
|
|
||||||
|
# Would you like waitress to log warnings about untrusted proxy headers
|
||||||
|
# that were encountered while processing the proxy headers? This only makes
|
||||||
|
# sense to set when you have a trusted_proxy, and you expect the upstream
|
||||||
|
# proxy server to filter invalid headers
|
||||||
|
log_untrusted_proxy_headers = False
|
||||||
|
|
||||||
|
# Should waitress clear any proxy headers that are not deemed trusted from
|
||||||
|
# the environ? Change to True by default in 2.x
|
||||||
|
clear_untrusted_proxy_headers = _bool_marker
|
||||||
|
|
||||||
|
# default ``wsgi.url_scheme`` value
|
||||||
|
url_scheme = "http"
|
||||||
|
|
||||||
|
# default ``SCRIPT_NAME`` value, also helps reset ``PATH_INFO``
|
||||||
|
# when nonempty
|
||||||
|
url_prefix = ""
|
||||||
|
|
||||||
|
# server identity (sent in Server: header)
|
||||||
|
ident = "waitress"
|
||||||
|
|
||||||
|
# backlog is the value waitress passes to pass to socket.listen() This is
|
||||||
|
# the maximum number of incoming TCP connections that will wait in an OS
|
||||||
|
# queue for an available channel. From listen(1): "If a connection
|
||||||
|
# request arrives when the queue is full, the client may receive an error
|
||||||
|
# with an indication of ECONNREFUSED or, if the underlying protocol
|
||||||
|
# supports retransmission, the request may be ignored so that a later
|
||||||
|
# reattempt at connection succeeds."
|
||||||
|
backlog = 1024
|
||||||
|
|
||||||
|
# recv_bytes is the argument to pass to socket.recv().
|
||||||
|
recv_bytes = 8192
|
||||||
|
|
||||||
|
# deprecated setting controls how many bytes will be buffered before
|
||||||
|
# being flushed to the socket
|
||||||
|
send_bytes = 1
|
||||||
|
|
||||||
|
# A tempfile should be created if the pending output is larger than
|
||||||
|
# outbuf_overflow, which is measured in bytes. The default is 1MB. This
|
||||||
|
# is conservative.
|
||||||
|
outbuf_overflow = 1048576
|
||||||
|
|
||||||
|
# The app_iter will pause when pending output is larger than this value
|
||||||
|
# in bytes.
|
||||||
|
outbuf_high_watermark = 16777216
|
||||||
|
|
||||||
|
# A tempfile should be created if the pending input is larger than
|
||||||
|
# inbuf_overflow, which is measured in bytes. The default is 512K. This
|
||||||
|
# is conservative.
|
||||||
|
inbuf_overflow = 524288
|
||||||
|
|
||||||
|
# Stop creating new channels if too many are already active (integer).
|
||||||
|
# Each channel consumes at least one file descriptor, and, depending on
|
||||||
|
# the input and output body sizes, potentially up to three. The default
|
||||||
|
# is conservative, but you may need to increase the number of file
|
||||||
|
# descriptors available to the Waitress process on most platforms in
|
||||||
|
# order to safely change it (see ``ulimit -a`` "open files" setting).
|
||||||
|
# Note that this doesn't control the maximum number of TCP connections
|
||||||
|
# that can be waiting for processing; the ``backlog`` argument controls
|
||||||
|
# that.
|
||||||
|
connection_limit = 100
|
||||||
|
|
||||||
|
# Minimum seconds between cleaning up inactive channels.
|
||||||
|
cleanup_interval = 30
|
||||||
|
|
||||||
|
# Maximum seconds to leave an inactive connection open.
|
||||||
|
channel_timeout = 120
|
||||||
|
|
||||||
|
# Boolean: turn off to not log premature client disconnects.
|
||||||
|
log_socket_errors = True
|
||||||
|
|
||||||
|
# maximum number of bytes of all request headers combined (256K default)
|
||||||
|
max_request_header_size = 262144
|
||||||
|
|
||||||
|
# maximum number of bytes in request body (1GB default)
|
||||||
|
max_request_body_size = 1073741824
|
||||||
|
|
||||||
|
# expose tracebacks of uncaught exceptions
|
||||||
|
expose_tracebacks = False
|
||||||
|
|
||||||
|
# Path to a Unix domain socket to use.
|
||||||
|
unix_socket = None
|
||||||
|
|
||||||
|
# Path to a Unix domain socket to use.
|
||||||
|
unix_socket_perms = 0o600
|
||||||
|
|
||||||
|
# The socket options to set on receiving a connection. It is a list of
|
||||||
|
# (level, optname, value) tuples. TCP_NODELAY disables the Nagle
|
||||||
|
# algorithm for writes (Waitress already buffers its writes).
|
||||||
|
socket_options = [
|
||||||
|
(socket.SOL_TCP, socket.TCP_NODELAY, 1),
|
||||||
|
]
|
||||||
|
|
||||||
|
# The asyncore.loop timeout value
|
||||||
|
asyncore_loop_timeout = 1
|
||||||
|
|
||||||
|
# The asyncore.loop flag to use poll() instead of the default select().
|
||||||
|
asyncore_use_poll = False
|
||||||
|
|
||||||
|
# Enable IPv4 by default
|
||||||
|
ipv4 = True
|
||||||
|
|
||||||
|
# Enable IPv6 by default
|
||||||
|
ipv6 = True
|
||||||
|
|
||||||
|
# A list of sockets that waitress will use to accept connections. They can
|
||||||
|
# be used for e.g. socket activation
|
||||||
|
sockets = []
|
||||||
|
|
||||||
|
# By setting this to a value larger than zero, each channel stays readable
|
||||||
|
# and continues to read requests from the client even if a request is still
|
||||||
|
# running, until the number of buffered requests exceeds this value.
|
||||||
|
# This allows detecting if a client closed the connection while its request
|
||||||
|
# is being processed.
|
||||||
|
channel_request_lookahead = 0
|
||||||
|
|
||||||
|
# This setting controls the SERVER_NAME of the WSGI environment, this is
|
||||||
|
# only ever used if the remote client sent a request without a Host header
|
||||||
|
# (or when using the Proxy settings, without forwarding a Host header)
|
||||||
|
server_name = "waitress.invalid"
|
||||||
|
|
||||||
|
def __init__(self, **kw):
|
||||||
|
|
||||||
|
if "listen" in kw and ("host" in kw or "port" in kw):
|
||||||
|
raise ValueError("host or port may not be set if listen is set.")
|
||||||
|
|
||||||
|
if "listen" in kw and "sockets" in kw:
|
||||||
|
raise ValueError("socket may not be set if listen is set.")
|
||||||
|
|
||||||
|
if "sockets" in kw and ("host" in kw or "port" in kw):
|
||||||
|
raise ValueError("host or port may not be set if sockets is set.")
|
||||||
|
|
||||||
|
if "sockets" in kw and "unix_socket" in kw:
|
||||||
|
raise ValueError("unix_socket may not be set if sockets is set")
|
||||||
|
|
||||||
|
if "unix_socket" in kw and ("host" in kw or "port" in kw):
|
||||||
|
raise ValueError("unix_socket may not be set if host or port is set")
|
||||||
|
|
||||||
|
if "unix_socket" in kw and "listen" in kw:
|
||||||
|
raise ValueError("unix_socket may not be set if listen is set")
|
||||||
|
|
||||||
|
if "send_bytes" in kw:
|
||||||
|
warnings.warn(
|
||||||
|
"send_bytes will be removed in a future release", DeprecationWarning
|
||||||
|
)
|
||||||
|
|
||||||
|
for k, v in kw.items():
|
||||||
|
if k not in self._param_map:
|
||||||
|
raise ValueError("Unknown adjustment %r" % k)
|
||||||
|
setattr(self, k, self._param_map[k](v))
|
||||||
|
|
||||||
|
if not isinstance(self.host, _str_marker) or not isinstance(
|
||||||
|
self.port, _int_marker
|
||||||
|
):
|
||||||
|
self.listen = ["{}:{}".format(self.host, self.port)]
|
||||||
|
|
||||||
|
enabled_families = socket.AF_UNSPEC
|
||||||
|
|
||||||
|
if not self.ipv4 and not HAS_IPV6: # pragma: no cover
|
||||||
|
raise ValueError(
|
||||||
|
"IPv4 is disabled but IPv6 is not available. Cowardly refusing to start."
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.ipv4 and not self.ipv6:
|
||||||
|
enabled_families = socket.AF_INET
|
||||||
|
|
||||||
|
        if not self.ipv4 and self.ipv6 and HAS_IPV6:
            enabled_families = socket.AF_INET6

        wanted_sockets = []
        hp_pairs = []
        for i in self.listen:
            if ":" in i:
                (host, port) = i.rsplit(":", 1)

                # IPv6 we need to make sure that we didn't split on the address
                if "]" in port:  # pragma: nocover
                    (host, port) = (i, str(self.port))
            else:
                (host, port) = (i, str(self.port))

            if WIN:  # pragma: no cover
                try:
                    # Try turning the port into an integer
                    port = int(port)
                except Exception:
                    raise ValueError(
                        "Windows does not support service names instead of port numbers"
                    )

            try:
                if "[" in host and "]" in host:  # pragma: nocover
                    host = host.strip("[").rstrip("]")

                if host == "*":
                    host = None

                for s in socket.getaddrinfo(
                    host,
                    port,
                    enabled_families,
                    socket.SOCK_STREAM,
                    socket.IPPROTO_TCP,
                    socket.AI_PASSIVE,
                ):
                    (family, socktype, proto, _, sockaddr) = s

                    # It seems that getaddrinfo() may sometimes happily return
                    # the same result multiple times, this of course makes
                    # bind() very unhappy...
                    #
                    # Split on %, and drop the zone-index from the host in the
                    # sockaddr. Works around a bug in OS X whereby
                    # getaddrinfo() returns the same link-local interface with
                    # two different zone-indices (which makes no sense whatsoever)
                    # yet treats them equally when we attempt to bind().
                    if (
                        sockaddr[1] == 0
                        or (sockaddr[0].split("%", 1)[0], sockaddr[1]) not in hp_pairs
                    ):
                        wanted_sockets.append((family, socktype, proto, sockaddr))
                        hp_pairs.append((sockaddr[0].split("%", 1)[0], sockaddr[1]))

            except Exception:
                raise ValueError("Invalid host/port specified.")

        if self.trusted_proxy_count is not None and self.trusted_proxy is None:
            raise ValueError(
                "trusted_proxy_count has no meaning without setting trusted_proxy"
            )

        elif self.trusted_proxy_count is None:
            self.trusted_proxy_count = 1

        if self.trusted_proxy_headers and self.trusted_proxy is None:
            raise ValueError(
                "trusted_proxy_headers has no meaning without setting trusted_proxy"
            )

        if self.trusted_proxy_headers:
            self.trusted_proxy_headers = {
                header.lower() for header in self.trusted_proxy_headers
            }

            unknown_values = self.trusted_proxy_headers - KNOWN_PROXY_HEADERS
            if unknown_values:
                raise ValueError(
                    "Received unknown trusted_proxy_headers value (%s) expected one "
                    "of %s"
                    % (", ".join(unknown_values), ", ".join(KNOWN_PROXY_HEADERS))
                )

            if (
                "forwarded" in self.trusted_proxy_headers
                and self.trusted_proxy_headers - {"forwarded"}
            ):
                raise ValueError(
                    "The Forwarded proxy header and the "
                    "X-Forwarded-{By,Host,Proto,Port,For} headers are mutually "
                    "exclusive. Can't trust both!"
                )

        elif self.trusted_proxy is not None:
            warnings.warn(
                "No proxy headers were marked as trusted, but trusted_proxy was set. "
                "Implicitly trusting X-Forwarded-Proto for backwards compatibility. "
                "This will be removed in future versions of waitress.",
                DeprecationWarning,
            )
            self.trusted_proxy_headers = {"x-forwarded-proto"}

        if self.clear_untrusted_proxy_headers is _bool_marker:
            warnings.warn(
                "In future versions of Waitress clear_untrusted_proxy_headers will be "
                "set to True by default. You may opt-out by setting this value to "
                "False, or opt-in explicitly by setting this to True.",
                DeprecationWarning,
            )
            self.clear_untrusted_proxy_headers = False

        self.listen = wanted_sockets

        self.check_sockets(self.sockets)

    @classmethod
    def parse_args(cls, argv):
        """Pre-parse command line arguments for input into __init__. Note that
        this does not cast values into adjustment types, it just creates a
        dictionary suitable for passing into __init__, where __init__ does the
        casting.
        """
        long_opts = ["help", "call"]
        for opt, cast in cls._params:
            opt = opt.replace("_", "-")
            if cast is asbool:
                long_opts.append(opt)
                long_opts.append("no-" + opt)
            else:
                long_opts.append(opt + "=")

        kw = {
            "help": False,
            "call": False,
        }

        opts, args = getopt.getopt(argv, "", long_opts)
        for opt, value in opts:
            param = opt.lstrip("-").replace("-", "_")

            if param == "listen":
                kw["listen"] = "{} {}".format(kw.get("listen", ""), value)
                continue

            if param.startswith("no_"):
                param = param[3:]
                kw[param] = "false"
            elif param in ("help", "call"):
                kw[param] = True
            elif cls._param_map[param] is asbool:
                kw[param] = "true"
            else:
                kw[param] = value

        return kw, args

    @classmethod
    def check_sockets(cls, sockets):
        has_unix_socket = False
        has_inet_socket = False
        has_unsupported_socket = False
        for sock in sockets:
            if (
                sock.family == socket.AF_INET or sock.family == socket.AF_INET6
            ) and sock.type == socket.SOCK_STREAM:
                has_inet_socket = True
            elif (
                hasattr(socket, "AF_UNIX")
                and sock.family == socket.AF_UNIX
                and sock.type == socket.SOCK_STREAM
            ):
                has_unix_socket = True
            else:
                has_unsupported_socket = True
        if has_unix_socket and has_inet_socket:
            raise ValueError("Internet and UNIX sockets may not be mixed.")
        if has_unsupported_socket:
            raise ValueError("Only Internet or UNIX stream sockets may be used.")
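As a usage sketch (not taken from this diff; the argv values and the keyword filtering are illustrative assumptions), the two-step flow the parse_args docstring describes looks like:

# parse_args() only splits argv into a {param: raw-string} mapping;
# Adjustments(**kw) is what casts "false"/"4" into real types.
kw, positional = Adjustments.parse_args(
    ["--listen=127.0.0.1:8080", "--no-ipv6", "--threads=4"]
)
# kw now holds {"help": False, "call": False, "listen": " 127.0.0.1:8080",
#               "ipv6": "false", "threads": "4"}
adj = Adjustments(**{k: v for k, v in kw.items() if k not in ("help", "call")})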
@@ -0,0 +1,308 @@
##############################################################################
#
# Copyright (c) 2001-2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Buffers
"""
from io import BytesIO

# copy_bytes controls the size of temp. strings for shuffling data around.
COPY_BYTES = 1 << 18  # 256K

# The maximum number of bytes to buffer in a simple string.
STRBUF_LIMIT = 8192


class FileBasedBuffer:

    remain = 0

    def __init__(self, file, from_buffer=None):
        self.file = file
        if from_buffer is not None:
            from_file = from_buffer.getfile()
            read_pos = from_file.tell()
            from_file.seek(0)
            while True:
                data = from_file.read(COPY_BYTES)
                if not data:
                    break
                file.write(data)
            self.remain = int(file.tell() - read_pos)
            from_file.seek(read_pos)
            file.seek(read_pos)

    def __len__(self):
        return self.remain

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__  # py3

    def append(self, s):
        file = self.file
        read_pos = file.tell()
        file.seek(0, 2)
        file.write(s)
        file.seek(read_pos)
        self.remain = self.remain + len(s)

    def get(self, numbytes=-1, skip=False):
        file = self.file
        if not skip:
            read_pos = file.tell()
        if numbytes < 0:
            # Read all
            res = file.read()
        else:
            res = file.read(numbytes)
        if skip:
            self.remain -= len(res)
        else:
            file.seek(read_pos)
        return res

    def skip(self, numbytes, allow_prune=0):
        if self.remain < numbytes:
            raise ValueError(
                "Can't skip %d bytes in buffer of %d bytes" % (numbytes, self.remain)
            )
        self.file.seek(numbytes, 1)
        self.remain = self.remain - numbytes

    def newfile(self):
        raise NotImplementedError()

    def prune(self):
        file = self.file
        if self.remain == 0:
            read_pos = file.tell()
            file.seek(0, 2)
            sz = file.tell()
            file.seek(read_pos)
            if sz == 0:
                # Nothing to prune.
                return
        nf = self.newfile()
        while True:
            data = file.read(COPY_BYTES)
            if not data:
                break
            nf.write(data)
        self.file = nf

    def getfile(self):
        return self.file

    def close(self):
        if hasattr(self.file, "close"):
            self.file.close()
        self.remain = 0


class TempfileBasedBuffer(FileBasedBuffer):
    def __init__(self, from_buffer=None):
        FileBasedBuffer.__init__(self, self.newfile(), from_buffer)

    def newfile(self):
        from tempfile import TemporaryFile

        return TemporaryFile("w+b")


class BytesIOBasedBuffer(FileBasedBuffer):
    def __init__(self, from_buffer=None):
        if from_buffer is not None:
            FileBasedBuffer.__init__(self, BytesIO(), from_buffer)
        else:
            # Shortcut. :-)
            self.file = BytesIO()

    def newfile(self):
        return BytesIO()


def _is_seekable(fp):
    if hasattr(fp, "seekable"):
        return fp.seekable()
    return hasattr(fp, "seek") and hasattr(fp, "tell")


class ReadOnlyFileBasedBuffer(FileBasedBuffer):
    # used as wsgi.file_wrapper

    def __init__(self, file, block_size=32768):
        self.file = file
        self.block_size = block_size  # for __iter__

    def prepare(self, size=None):
        if _is_seekable(self.file):
            start_pos = self.file.tell()
            self.file.seek(0, 2)
            end_pos = self.file.tell()
            self.file.seek(start_pos)
            fsize = end_pos - start_pos
            if size is None:
                self.remain = fsize
            else:
                self.remain = min(fsize, size)
        return self.remain

    def get(self, numbytes=-1, skip=False):
        # never read more than self.remain (it can be user-specified)
        if numbytes == -1 or numbytes > self.remain:
            numbytes = self.remain
        file = self.file
        if not skip:
            read_pos = file.tell()
        res = file.read(numbytes)
        if skip:
            self.remain -= len(res)
        else:
            file.seek(read_pos)
        return res

    def __iter__(self):  # called by task if self.filelike has no seek/tell
        return self

    def next(self):
        val = self.file.read(self.block_size)
        if not val:
            raise StopIteration
        return val

    __next__ = next  # py3

    def append(self, s):
        raise NotImplementedError


class OverflowableBuffer:
    """
    This buffer implementation has four stages:
    - No data
    - Bytes-based buffer
    - BytesIO-based buffer
    - Temporary file storage
    The first two stages are fastest for simple transfers.
    """

    overflowed = False
    buf = None
    strbuf = b""  # Bytes-based buffer.

    def __init__(self, overflow):
        # overflow is the maximum to be stored in a StringIO buffer.
        self.overflow = overflow

    def __len__(self):
        buf = self.buf
        if buf is not None:
            # use buf.__len__ rather than len(buf) FBO of not getting
            # OverflowError on Python 2
            return buf.__len__()
        else:
            return self.strbuf.__len__()

    def __nonzero__(self):
        # use self.__len__ rather than len(self) FBO of not getting
        # OverflowError on Python 2
        return self.__len__() > 0

    __bool__ = __nonzero__  # py3

    def _create_buffer(self):
        strbuf = self.strbuf
        if len(strbuf) >= self.overflow:
            self._set_large_buffer()
        else:
            self._set_small_buffer()
        buf = self.buf
        if strbuf:
            buf.append(self.strbuf)
            self.strbuf = b""
        return buf

    def _set_small_buffer(self):
        self.buf = BytesIOBasedBuffer(self.buf)
        self.overflowed = False

    def _set_large_buffer(self):
        self.buf = TempfileBasedBuffer(self.buf)
        self.overflowed = True

    def append(self, s):
        buf = self.buf
        if buf is None:
            strbuf = self.strbuf
            if len(strbuf) + len(s) < STRBUF_LIMIT:
                self.strbuf = strbuf + s
                return
            buf = self._create_buffer()
        buf.append(s)
        # use buf.__len__ rather than len(buf) FBO of not getting
        # OverflowError on Python 2
        sz = buf.__len__()
        if not self.overflowed:
            if sz >= self.overflow:
                self._set_large_buffer()

    def get(self, numbytes=-1, skip=False):
        buf = self.buf
        if buf is None:
            strbuf = self.strbuf
            if not skip:
                return strbuf
            buf = self._create_buffer()
        return buf.get(numbytes, skip)

    def skip(self, numbytes, allow_prune=False):
        buf = self.buf
        if buf is None:
            if allow_prune and numbytes == len(self.strbuf):
                # We could slice instead of converting to
                # a buffer, but that would eat up memory in
                # large transfers.
                self.strbuf = b""
                return
            buf = self._create_buffer()
        buf.skip(numbytes, allow_prune)

    def prune(self):
        """
        A potentially expensive operation that removes all data
        already retrieved from the buffer.
        """
        buf = self.buf
        if buf is None:
            self.strbuf = b""
            return
        buf.prune()
        if self.overflowed:
            # use buf.__len__ rather than len(buf) FBO of not getting
            # OverflowError on Python 2
            sz = buf.__len__()
            if sz < self.overflow:
                # Revert to a faster buffer.
                self._set_small_buffer()

    def getfile(self):
        buf = self.buf
        if buf is None:
            buf = self._create_buffer()
        return buf.getfile()

    def close(self):
        buf = self.buf
        if buf is not None:
            buf.close()
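A sketch of the four-stage behavior described in the OverflowableBuffer docstring (the 1024-byte overflow threshold is an arbitrary choice for illustration):

buf = OverflowableBuffer(1024)  # overflow: max bytes before tempfile storage
buf.append(b"x" * 100)          # small append stays in the plain strbuf stage
assert buf.buf is None and len(buf) == 100
buf.append(b"x" * 8200)         # crosses STRBUF_LIMIT: spills into a BytesIO
                                # buffer, and the post-append size check then
                                # upgrades it to a TempfileBasedBuffer
assert buf.overflowed and isinstance(buf.buf, TempfileBasedBuffer)
assert buf.get(10, skip=True) == b"x" * 10  # consume and advance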
@@ -0,0 +1,487 @@
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import socket
import threading
import time
import traceback

from waitress.buffers import OverflowableBuffer, ReadOnlyFileBasedBuffer
from waitress.parser import HTTPRequestParser
from waitress.task import ErrorTask, WSGITask
from waitress.utilities import InternalServerError

from . import wasyncore


class ClientDisconnected(Exception):
    """Raised when attempting to write to a closed socket."""


class HTTPChannel(wasyncore.dispatcher):
    """
    Setting self.requests = [somerequest] prevents more requests from being
    received until the out buffers have been flushed.

    Setting self.requests = [] allows more requests to be received.
    """

    task_class = WSGITask
    error_task_class = ErrorTask
    parser_class = HTTPRequestParser

    # A request that has not been received yet completely is stored here
    request = None
    last_activity = 0  # Time of last activity
    will_close = False  # set to True to close the socket.
    close_when_flushed = False  # set to True to close the socket when flushed
    sent_continue = False  # used as a latch after sending 100 continue
    total_outbufs_len = 0  # total bytes ready to send
    current_outbuf_count = 0  # total bytes written to current outbuf

    #
    # ASYNCHRONOUS METHODS (including __init__)
    #

    def __init__(self, server, sock, addr, adj, map=None):
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()
        self.sendbuf_len = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)

        # requests_lock used to push/pop requests and modify the request that is
        # currently being created
        self.requests_lock = threading.Lock()
        # outbuf_lock used to access any outbuf (expected to use an RLock)
        self.outbuf_lock = threading.Condition()

        wasyncore.dispatcher.__init__(self, sock, map=map)

        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr
        self.requests = []

    def check_client_disconnected(self):
        """
        This method is inserted into the environment of any created task so it
        may occasionally check if the client has disconnected and interrupt
        execution.
        """
        return not self.connected

    def writable(self):
        # if there's data in the out buffer or we've been instructed to close
        # the channel (possibly by our server maintenance logic), run
        # handle_write

        return self.total_outbufs_len or self.will_close or self.close_when_flushed

    def handle_write(self):
        # Precondition: there's data in the out buffer to be sent, or
        # there's a pending will_close request

        if not self.connected:
            # we don't want to close the channel twice

            return

        # try to flush any pending output

        if not self.requests:
            # 1. There are no running tasks, so we don't need to try to lock
            #    the outbuf before sending
            # 2. The data in the out buffer should be sent as soon as possible
            #    because it's either data left over from task output
            #    or a 100 Continue line sent within "received".
            flush = self._flush_some
        elif self.total_outbufs_len >= self.adj.send_bytes:
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. Only try to send if the data in the out buffer is larger
            #    than self.adj.send_bytes to avoid TCP fragmentation
            flush = self._flush_some_if_lockable
        else:
            # 1. There's not enough data in the out buffer to bother to send
            #    right now.
            flush = None

        if flush:
            try:
                flush()
            except OSError:
                if self.adj.log_socket_errors:
                    self.logger.exception("Socket error")
                self.will_close = True
            except Exception:  # pragma: nocover
                self.logger.exception("Unexpected exception when flushing")
                self.will_close = True

        if self.close_when_flushed and not self.total_outbufs_len:
            self.close_when_flushed = False
            self.will_close = True

        if self.will_close:
            self.handle_close()

    def readable(self):
        # We might want to read more requests. We can only do this if:
        # 1. We're not already about to close the connection.
        # 2. We're not waiting to flush remaining data before closing the
        #    connection
        # 3. There are not too many tasks already queued
        # 4. There's no data in the output buffer that needs to be sent
        #    before we potentially create a new task.

        return not (
            self.will_close
            or self.close_when_flushed
            or len(self.requests) > self.adj.channel_request_lookahead
            or self.total_outbufs_len
        )

    def handle_read(self):
        try:
            data = self.recv(self.adj.recv_bytes)
        except OSError:
            if self.adj.log_socket_errors:
                self.logger.exception("Socket error")
            self.handle_close()

            return

        if data:
            self.last_activity = time.time()
            self.received(data)
        else:
            # Client disconnected.
            self.connected = False

    def send_continue(self):
        """
        Send a 100-Continue header to the client. This is either called from
        receive (if no requests are running and the client expects it) or at
        the end of service (if no more requests are queued and a request has
        been read partially that expects it).
        """
        self.request.expect_continue = False
        outbuf_payload = b"HTTP/1.1 100 Continue\r\n\r\n"
        num_bytes = len(outbuf_payload)
        with self.outbuf_lock:
            self.outbufs[-1].append(outbuf_payload)
            self.current_outbuf_count += num_bytes
            self.total_outbufs_len += num_bytes
            self.sent_continue = True
            self._flush_some()
        self.request.completed = False

    def received(self, data):
        """
        Receives input asynchronously and assigns one or more requests to the
        channel.
        """
        if not data:
            return False

        with self.requests_lock:
            while data:
                if self.request is None:
                    self.request = self.parser_class(self.adj)
                n = self.request.received(data)

                # if there are requests queued, we can not send the continue
                # header yet since the responses need to be kept in order
                if (
                    self.request.expect_continue
                    and self.request.headers_finished
                    and not self.requests
                    and not self.sent_continue
                ):
                    self.send_continue()

                if self.request.completed:
                    # The request (with the body) is ready to use.
                    self.sent_continue = False

                    if not self.request.empty:
                        self.requests.append(self.request)
                        if len(self.requests) == 1:
                            # self.requests was empty before so the main thread
                            # is in charge of starting the task. Otherwise,
                            # service() will add a new task after each request
                            # has been processed
                            self.server.add_task(self)
                    self.request = None

                if n >= len(data):
                    break
                data = data[n:]

        return True

    def _flush_some_if_lockable(self):
        # Since our task may be appending to the outbuf, we try to acquire
        # the lock, but we don't block if we can't.

        if self.outbuf_lock.acquire(False):
            try:
                self._flush_some()

                if self.total_outbufs_len < self.adj.outbuf_high_watermark:
                    self.outbuf_lock.notify()
            finally:
                self.outbuf_lock.release()

    def _flush_some(self):
        # Send as much data as possible to our client

        sent = 0
        dobreak = False

        while True:
            outbuf = self.outbufs[0]
            # use outbuf.__len__ rather than len(outbuf) FBO of not getting
            # OverflowError on 32-bit Python
            outbuflen = outbuf.__len__()

            while outbuflen > 0:
                chunk = outbuf.get(self.sendbuf_len)
                num_sent = self.send(chunk)

                if num_sent:
                    outbuf.skip(num_sent, True)
                    outbuflen -= num_sent
                    sent += num_sent
                    self.total_outbufs_len -= num_sent
                else:
                    # failed to write anything, break out entirely
                    dobreak = True

                    break
            else:
                # self.outbufs[-1] must always be a writable outbuf

                if len(self.outbufs) > 1:
                    toclose = self.outbufs.pop(0)
                    try:
                        toclose.close()
                    except Exception:
                        self.logger.exception("Unexpected error when closing an outbuf")
                else:
                    # caught up, done flushing for now
                    dobreak = True

            if dobreak:
                break

        if sent:
            self.last_activity = time.time()

            return True

        return False

    def handle_close(self):
        with self.outbuf_lock:
            for outbuf in self.outbufs:
                try:
                    outbuf.close()
                except Exception:
                    self.logger.exception(
                        "Unknown exception while trying to close outbuf"
                    )
            self.total_outbufs_len = 0
            self.connected = False
            self.outbuf_lock.notify()
        wasyncore.dispatcher.close(self)

    def add_channel(self, map=None):
        """See wasyncore.dispatcher

        This hook keeps track of opened channels.
        """
        wasyncore.dispatcher.add_channel(self, map)
        self.server.active_channels[self._fileno] = self

    def del_channel(self, map=None):
        """See wasyncore.dispatcher

        This hook keeps track of closed channels.
        """
        fd = self._fileno  # next line sets this to None
        wasyncore.dispatcher.del_channel(self, map)
        ac = self.server.active_channels

        if fd in ac:
            del ac[fd]

    #
    # SYNCHRONOUS METHODS
    #

    def write_soon(self, data):
        if not self.connected:
            # if the socket is closed then interrupt the task so that it
            # can cleanup possibly before the app_iter is exhausted
            raise ClientDisconnected

        if data:
            # the async mainloop might be popping data off outbuf; we can
            # block here waiting for it because we're in a task thread
            with self.outbuf_lock:
                self._flush_outbufs_below_high_watermark()

                if not self.connected:
                    raise ClientDisconnected
                num_bytes = len(data)

                if data.__class__ is ReadOnlyFileBasedBuffer:
                    # they used wsgi.file_wrapper
                    self.outbufs.append(data)
                    nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                    self.outbufs.append(nextbuf)
                    self.current_outbuf_count = 0
                else:
                    if self.current_outbuf_count >= self.adj.outbuf_high_watermark:
                        # rotate to a new buffer if the current buffer has hit
                        # the watermark to avoid it growing unbounded
                        nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                        self.outbufs.append(nextbuf)
                        self.current_outbuf_count = 0
                    self.outbufs[-1].append(data)
                    self.current_outbuf_count += num_bytes
                self.total_outbufs_len += num_bytes

                if self.total_outbufs_len >= self.adj.send_bytes:
                    self.server.pull_trigger()

            return num_bytes

        return 0

    def _flush_outbufs_below_high_watermark(self):
        # check first to avoid locking if possible

        if self.total_outbufs_len > self.adj.outbuf_high_watermark:
            with self.outbuf_lock:
                while (
                    self.connected
                    and self.total_outbufs_len > self.adj.outbuf_high_watermark
                ):
                    self.server.pull_trigger()
                    self.outbuf_lock.wait()

    def service(self):
        """Execute one request. If there are more, we add another task to the
        server at the end."""

        request = self.requests[0]

        if request.error:
            task = self.error_task_class(self, request)
        else:
            task = self.task_class(self, request)

        try:
            if self.connected:
                task.service()
            else:
                task.close_on_finish = True
        except ClientDisconnected:
            self.logger.info("Client disconnected while serving %s" % task.request.path)
            task.close_on_finish = True
        except Exception:
            self.logger.exception("Exception while serving %s" % task.request.path)

            if not task.wrote_header:
                if self.adj.expose_tracebacks:
                    body = traceback.format_exc()
                else:
                    body = "The server encountered an unexpected internal server error"
                req_version = request.version
                req_headers = request.headers
                err_request = self.parser_class(self.adj)
                err_request.error = InternalServerError(body)
                # copy some original request attributes to fulfill
                # HTTP 1.1 requirements
                err_request.version = req_version
                try:
                    err_request.headers["CONNECTION"] = req_headers["CONNECTION"]
                except KeyError:
                    pass
                task = self.error_task_class(self, err_request)
                try:
                    task.service()  # must not fail
                except ClientDisconnected:
                    task.close_on_finish = True
            else:
                task.close_on_finish = True

        if task.close_on_finish:
            with self.requests_lock:
                self.close_when_flushed = True

                for request in self.requests:
                    request.close()
                self.requests = []
        else:
            # before processing a new request, ensure there is not too
            # much data in the outbufs waiting to be flushed
            # NB: currently readable() returns False while we are
            # flushing data so we know no new requests will come in
            # that we need to account for, otherwise it'd be better
            # to do this check at the start of the request instead of
            # at the end to account for consecutive service() calls

            if len(self.requests) > 1:
                self._flush_outbufs_below_high_watermark()

            # this is a little hacky but basically it's forcing the
            # next request to create a new outbuf to avoid sharing
            # outbufs across requests which can cause outbufs to
            # not be deallocated regularly when a connection is open
            # for a long time

            if self.current_outbuf_count > 0:
                self.current_outbuf_count = self.adj.outbuf_high_watermark

            request.close()

            # Add new task to process the next request
            with self.requests_lock:
                self.requests.pop(0)
                if self.connected and self.requests:
                    self.server.add_task(self)
                elif (
                    self.connected
                    and self.request is not None
                    and self.request.expect_continue
                    and self.request.headers_finished
                    and not self.sent_continue
                ):
                    # A request waits for a signal to continue, but we could
                    # not send it until now because requests were being
                    # processed and the output needs to be kept in order
                    self.send_continue()

        if self.connected:
            self.server.pull_trigger()

        self.last_activity = time.time()

    def cancel(self):
        """Cancels all pending / active requests"""
        self.will_close = True
        self.connected = False
        self.last_activity = time.time()
        self.requests = []
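The `data.__class__ is ReadOnlyFileBasedBuffer` check in write_soon() above is the wsgi.file_wrapper fast path: the buffer object is appended to outbufs as-is instead of being copied. An application opts in roughly like this (a sketch; the file path and block size are illustrative):

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "application/octet-stream")])
    f = open("/tmp/example.bin", "rb")  # illustrative file
    # waitress exposes ReadOnlyFileBasedBuffer as environ["wsgi.file_wrapper"]
    return environ["wsgi.file_wrapper"](f, 32768)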
@@ -0,0 +1,29 @@
import platform

# Fix for issue reported in https://github.com/Pylons/waitress/issues/138,
# Python on Windows may not define IPPROTO_IPV6 in socket.
import socket
import sys
import warnings

# True if we are running on Windows
WIN = platform.system() == "Windows"

MAXINT = sys.maxsize
HAS_IPV6 = socket.has_ipv6

if hasattr(socket, "IPPROTO_IPV6") and hasattr(socket, "IPV6_V6ONLY"):
    IPPROTO_IPV6 = socket.IPPROTO_IPV6
    IPV6_V6ONLY = socket.IPV6_V6ONLY
else:  # pragma: no cover
    if WIN:
        IPPROTO_IPV6 = 41
        IPV6_V6ONLY = 27
    else:
        warnings.warn(
            "OS does not support required IPv6 socket flags. This is a requirement "
            "for Waitress. Please open an issue at https://github.com/Pylons/waitress. "
            "IPv6 support has been disabled.",
            RuntimeWarning,
        )
        HAS_IPV6 = False
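These two constants exist so the server can toggle dual-stack behavior on IPv6 sockets even where the socket module lacks the names; the usual application is (a sketch, not taken from this diff):

sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
# 1 = IPv6-only socket; 0 = also accept IPv4-mapped connections
sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)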
@@ -0,0 +1,439 @@
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP Request Parser

This server uses asyncore to accept connections and do initial
processing but threads to do work.
"""
from io import BytesIO
import re
from urllib import parse
from urllib.parse import unquote_to_bytes

from waitress.buffers import OverflowableBuffer
from waitress.receiver import ChunkedReceiver, FixedStreamReceiver
from waitress.utilities import (
    BadRequest,
    RequestEntityTooLarge,
    RequestHeaderFieldsTooLarge,
    ServerNotImplemented,
    find_double_newline,
)

from .rfc7230 import HEADER_FIELD


def unquote_bytes_to_wsgi(bytestring):
    return unquote_to_bytes(bytestring).decode("latin-1")


class ParsingError(Exception):
    pass


class TransferEncodingNotImplemented(Exception):
    pass


class HTTPRequestParser:
    """A structure that collects the HTTP request.

    Once the stream is completed, the instance is passed to
    a server task constructor.
    """

    completed = False  # Set once request is completed.
    empty = False  # Set if no request was made.
    expect_continue = False  # client sent "Expect: 100-continue" header
    headers_finished = False  # True when headers have been read
    header_plus = b""
    chunked = False
    content_length = 0
    header_bytes_received = 0
    body_bytes_received = 0
    body_rcv = None
    version = "1.0"
    error = None
    connection_close = False

    # Other attributes: first_line, header, headers, command, uri, version,
    # path, query, fragment

    def __init__(self, adj):
        """
        adj is an Adjustments object.
        """
        # headers is a mapping containing keys translated to uppercase
        # with dashes turned into underscores.
        self.headers = {}
        self.adj = adj

    def received(self, data):
        """
        Receives the HTTP stream for one request. Returns the number of
        bytes consumed. Sets the completed flag once both the header and the
        body have been received.
        """

        if self.completed:
            return 0  # Can't consume any more.

        datalen = len(data)
        br = self.body_rcv

        if br is None:
            # In header.
            max_header = self.adj.max_request_header_size

            s = self.header_plus + data
            index = find_double_newline(s)
            consumed = 0

            if index >= 0:
                # If the headers have ended, and we also have part of the body
                # message in data we still want to validate we aren't going
                # over our limit for received headers.
                self.header_bytes_received += index
                consumed = datalen - (len(s) - index)
            else:
                self.header_bytes_received += datalen
                consumed = datalen

            # If the first line + headers is over the max length, we return a
            # RequestHeaderFieldsTooLarge error rather than continuing to
            # attempt to parse the headers.

            if self.header_bytes_received >= max_header:
                self.parse_header(b"GET / HTTP/1.0\r\n")
                self.error = RequestHeaderFieldsTooLarge(
                    "exceeds max_header of %s" % max_header
                )
                self.completed = True

                return consumed

            if index >= 0:
                # Header finished.
                header_plus = s[:index]

                # Remove preceding blank lines. This is suggested by
                # https://tools.ietf.org/html/rfc7230#section-3.5 to support
                # clients sending an extra CR LF after another request when
                # using HTTP pipelining
                header_plus = header_plus.lstrip()

                if not header_plus:
                    self.empty = True
                    self.completed = True
                else:
                    try:
                        self.parse_header(header_plus)
                    except ParsingError as e:
                        self.error = BadRequest(e.args[0])
                        self.completed = True
                    except TransferEncodingNotImplemented as e:
                        self.error = ServerNotImplemented(e.args[0])
                        self.completed = True
                    else:
                        if self.body_rcv is None:
                            # no content-length header and not a t-e: chunked
                            # request
                            self.completed = True

                        if self.content_length > 0:
                            max_body = self.adj.max_request_body_size
                            # we won't accept this request if the content-length
                            # is too large

                            if self.content_length >= max_body:
                                self.error = RequestEntityTooLarge(
                                    "exceeds max_body of %s" % max_body
                                )
                                self.completed = True
                self.headers_finished = True

                return consumed

            # Header not finished yet.
            self.header_plus = s

            return datalen
        else:
            # In body.
            consumed = br.received(data)
            self.body_bytes_received += consumed
            max_body = self.adj.max_request_body_size

            if self.body_bytes_received >= max_body:
                # this will only be raised during t-e: chunked requests
                self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body)
                self.completed = True
            elif br.error:
                # garbage in chunked encoding input probably
                self.error = br.error
                self.completed = True
            elif br.completed:
                # The request (with the body) is ready to use.
                self.completed = True

                if self.chunked:
                    # We've converted the chunked transfer encoding request
                    # body into a normal request body, so we know its content
                    # length; set the header here. We already popped the
                    # TRANSFER_ENCODING header in parse_header, so this will
                    # appear to the client to be an entirely non-chunked HTTP
                    # request with a valid content-length.
                    self.headers["CONTENT_LENGTH"] = str(br.__len__())

            return consumed

    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        """
        index = header_plus.find(b"\r\n")

        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 2 :]
        else:
            raise ParsingError("HTTP message header invalid")

        if b"\r" in first_line or b"\n" in first_line:
            raise ParsingError("Bare CR or LF found in HTTP message")

        self.first_line = first_line  # for testing

        lines = get_header_lines(header)

        headers = self.headers

        for line in lines:
            header = HEADER_FIELD.match(line)

            if not header:
                raise ParsingError("Invalid header")

            key, value = header.group("name", "value")

            if b"_" in key:
                # TODO(xistence): Should we drop this request instead?

                continue

            # Only strip off whitespace that is considered valid whitespace by
            # RFC7230, don't strip the rest
            value = value.strip(b" \t")
            key1 = key.upper().replace(b"-", b"_").decode("latin-1")
            # If a header already exists, we append subsequent values
            # separated by a comma. Applications already need to handle
            # the comma separated values, as HTTP front ends might do
            # the concatenation for you (behavior specified in RFC2616).
            try:
                headers[key1] += (b", " + value).decode("latin-1")
            except KeyError:
                headers[key1] = value.decode("latin-1")

        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = version.decode("latin-1")
        command = command.decode("latin-1")
        self.command = command
        self.version = version
        (
            self.proxy_scheme,
            self.proxy_netloc,
            self.path,
            self.query,
            self.fragment,
        ) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get("CONNECTION", "")

        if version == "1.0":
            if connection.lower() != "keep-alive":
                self.connection_close = True

        if version == "1.1":
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop("TRANSFER_ENCODING", "")

            # NB: We can not just call bare strip() here because it will also
            # remove other non-printable characters that we explicitly do not
            # want removed so that if someone attempts to smuggle a request
            # with these characters we don't fall prey to it.
            #
            # For example \x85 is stripped by default, but it is not considered
            # valid whitespace to be stripped by RFC7230.
            encodings = [
                encoding.strip(" \t").lower() for encoding in te.split(",") if encoding
            ]

            for encoding in encodings:
                # Out of the transfer-codings listed in
                # https://tools.ietf.org/html/rfc7230#section-4 we only support
                # chunked at this time.

                # Note: the identity transfer-coding was removed in RFC7230:
                # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus
                # not supported

                if encoding not in {"chunked"}:
                    raise TransferEncodingNotImplemented(
                        "Transfer-Encoding requested is not supported."
                    )

            if encodings and encodings[-1] == "chunked":
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            elif encodings:  # pragma: nocover
                raise TransferEncodingNotImplemented(
                    "Transfer-Encoding requested is not supported."
                )

        expect = headers.get("EXPECT", "").lower()
        self.expect_continue = expect == "100-continue"

        if connection.lower() == "close":
            self.connection_close = True

        if not self.chunked:
            try:
                cl = int(headers.get("CONTENT_LENGTH", 0))
            except ValueError:
                raise ParsingError("Content-Length is invalid")

            self.content_length = cl

            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)

    def get_body_stream(self):
        body_rcv = self.body_rcv

        if body_rcv is not None:
            return body_rcv.getfile()
        else:
            return BytesIO()

    def close(self):
        body_rcv = self.body_rcv

        if body_rcv is not None:
            body_rcv.getbuf().close()


def split_uri(uri):
    # urlsplit handles byte input by returning bytes on py3, so
    # scheme, netloc, path, query, and fragment are bytes

    scheme = netloc = path = query = fragment = b""

    # urlsplit below will treat this as a scheme-less netloc, thereby losing
    # the original intent of the request. Here we shamelessly stole 4 lines of
    # code from the CPython stdlib to parse out the fragment and query but
    # leave the path alone. See
    # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468
    # and https://github.com/Pylons/waitress/issues/260

    if uri[:2] == b"//":
        path = uri

        if b"#" in path:
            path, fragment = path.split(b"#", 1)

        if b"?" in path:
            path, query = path.split(b"?", 1)
    else:
        try:
            scheme, netloc, path, query, fragment = parse.urlsplit(uri)
        except UnicodeError:
            raise ParsingError("Bad URI")

    return (
        scheme.decode("latin-1"),
        netloc.decode("latin-1"),
        unquote_bytes_to_wsgi(path),
        query.decode("latin-1"),
        fragment.decode("latin-1"),
    )


def get_header_lines(header):
    """
    Splits the header into lines, putting multi-line headers together.
    """
    r = []
    lines = header.split(b"\r\n")

    for line in lines:
        if not line:
            continue

        if b"\r" in line or b"\n" in line:
            raise ParsingError(
                'Bare CR or LF found in header line "%s"' % str(line, "latin-1")
            )

        if line.startswith((b" ", b"\t")):
            if not r:
                # https://corte.si/posts/code/pathod/pythonservers/index.html
                raise ParsingError('Malformed header line "%s"' % str(line, "latin-1"))
            r[-1] += line
        else:
            r.append(line)

    return r


first_line_re = re.compile(
    b"([^ ]+) "
    b"((?:[^ :?#]+://[^ ?#/]*(?::[0-9]{1,5})?)?[^ ]+)"
    b"(( HTTP/([0-9.]+))$|$)"
)


def crack_first_line(line):
    m = first_line_re.match(line)

    if m is not None and m.end() == len(line):
        if m.group(3):
            version = m.group(5)
        else:
            version = b""
        method = m.group(1)

        # the request methods that are currently defined are all uppercase:
        # https://www.iana.org/assignments/http-methods/http-methods.xhtml and
        # the request method is case sensitive according to
        # https://tools.ietf.org/html/rfc7231#section-4.1

        # By disallowing anything but uppercase methods we save poor
        # unsuspecting souls from sending lowercase HTTP methods to waitress
        # and having the request complete, while servers like nginx drop the
        # request onto the floor.

        if method != method.upper():
            raise ParsingError('Malformed HTTP method "%s"' % str(method, "latin-1"))
        uri = m.group(2)

        return method, uri, version
    else:
        return b"", b"", b""
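A minimal check of the parser contract above (received() returns the number of bytes consumed; parse_header() fills in command/path/query); a default Adjustments() is assumed:

from waitress.adjustments import Adjustments

adj = Adjustments()
p = HTTPRequestParser(adj)
raw = b"GET /search?q=1 HTTP/1.1\r\nHost: example.com\r\n\r\n"
consumed = p.received(raw)
assert consumed == len(raw) and p.completed
assert (p.command, p.path, p.query) == ("GET", "/search", "q=1")
assert p.headers["HOST"] == "example.com"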
@@ -0,0 +1,330 @@
from collections import namedtuple

from .utilities import BadRequest, logger, undquote

PROXY_HEADERS = frozenset(
    {
        "X_FORWARDED_FOR",
        "X_FORWARDED_HOST",
        "X_FORWARDED_PROTO",
        "X_FORWARDED_PORT",
        "X_FORWARDED_BY",
        "FORWARDED",
    }
)

Forwarded = namedtuple("Forwarded", ["by", "for_", "host", "proto"])


class MalformedProxyHeader(Exception):
    def __init__(self, header, reason, value):
        self.header = header
        self.reason = reason
        self.value = value
        super().__init__(header, reason, value)


def proxy_headers_middleware(
    app,
    trusted_proxy=None,
    trusted_proxy_count=1,
    trusted_proxy_headers=None,
    clear_untrusted=True,
    log_untrusted=False,
    logger=logger,
):
    def translate_proxy_headers(environ, start_response):
        untrusted_headers = PROXY_HEADERS
        remote_peer = environ["REMOTE_ADDR"]
        if trusted_proxy == "*" or remote_peer == trusted_proxy:
            try:
                untrusted_headers = parse_proxy_headers(
                    environ,
                    trusted_proxy_count=trusted_proxy_count,
                    trusted_proxy_headers=trusted_proxy_headers,
                    logger=logger,
                )
            except MalformedProxyHeader as ex:
                logger.warning(
                    'Malformed proxy header "%s" from "%s": %s value: %s',
                    ex.header,
                    remote_peer,
                    ex.reason,
                    ex.value,
                )
                error = BadRequest('Header "{}" malformed.'.format(ex.header))
                return error.wsgi_response(environ, start_response)

        # Clear out the untrusted proxy headers
        if clear_untrusted:
            clear_untrusted_headers(
                environ, untrusted_headers, log_warning=log_untrusted, logger=logger
            )

        return app(environ, start_response)

    return translate_proxy_headers
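Typical wiring for the middleware above (a sketch; the inner_app name and proxy address are placeholders):

wrapped = proxy_headers_middleware(
    inner_app,                        # any WSGI callable
    trusted_proxy="127.0.0.1",        # only this peer's headers are honored
    trusted_proxy_count=1,
    trusted_proxy_headers={"x-forwarded-for", "x-forwarded-proto"},
    clear_untrusted=True,             # scrub proxy headers from other peers
)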

def parse_proxy_headers(
    environ, trusted_proxy_count, trusted_proxy_headers, logger=logger
):
    if trusted_proxy_headers is None:
        trusted_proxy_headers = set()

    forwarded_for = []
    forwarded_host = forwarded_proto = forwarded_port = forwarded = ""
    client_addr = None
    untrusted_headers = set(PROXY_HEADERS)

    def raise_for_multiple_values():
        raise ValueError("Unspecified behavior for multiple values found in header")

    if "x-forwarded-for" in trusted_proxy_headers and "HTTP_X_FORWARDED_FOR" in environ:
        try:
            forwarded_for = []

            for forward_hop in environ["HTTP_X_FORWARDED_FOR"].split(","):
                forward_hop = forward_hop.strip()
                forward_hop = undquote(forward_hop)

                # Make sure that all IPv6 addresses are surrounded by brackets,
                # this is assuming that the IPv6 representation here does not
                # include a port number.

                if "." not in forward_hop and (
                    ":" in forward_hop and forward_hop[-1] != "]"
                ):
                    forwarded_for.append("[{}]".format(forward_hop))
                else:
                    forwarded_for.append(forward_hop)

            forwarded_for = forwarded_for[-trusted_proxy_count:]
            client_addr = forwarded_for[0]

            untrusted_headers.remove("X_FORWARDED_FOR")
        except Exception as ex:
            raise MalformedProxyHeader(
                "X-Forwarded-For", str(ex), environ["HTTP_X_FORWARDED_FOR"]
            )

    if (
        "x-forwarded-host" in trusted_proxy_headers
        and "HTTP_X_FORWARDED_HOST" in environ
    ):
        try:
            forwarded_host_multiple = []

            for forward_host in environ["HTTP_X_FORWARDED_HOST"].split(","):
                forward_host = forward_host.strip()
                forward_host = undquote(forward_host)
                forwarded_host_multiple.append(forward_host)

            forwarded_host_multiple = forwarded_host_multiple[-trusted_proxy_count:]
            forwarded_host = forwarded_host_multiple[0]

            untrusted_headers.remove("X_FORWARDED_HOST")
        except Exception as ex:
            raise MalformedProxyHeader(
                "X-Forwarded-Host", str(ex), environ["HTTP_X_FORWARDED_HOST"]
            )

    if "x-forwarded-proto" in trusted_proxy_headers:
        try:
            forwarded_proto = undquote(environ.get("HTTP_X_FORWARDED_PROTO", ""))
            if "," in forwarded_proto:
                raise_for_multiple_values()
            untrusted_headers.remove("X_FORWARDED_PROTO")
        except Exception as ex:
            raise MalformedProxyHeader(
                "X-Forwarded-Proto", str(ex), environ["HTTP_X_FORWARDED_PROTO"]
            )

    if "x-forwarded-port" in trusted_proxy_headers:
        try:
            forwarded_port = undquote(environ.get("HTTP_X_FORWARDED_PORT", ""))
            if "," in forwarded_port:
                raise_for_multiple_values()
            untrusted_headers.remove("X_FORWARDED_PORT")
        except Exception as ex:
            raise MalformedProxyHeader(
                "X-Forwarded-Port", str(ex), environ["HTTP_X_FORWARDED_PORT"]
            )

    if "x-forwarded-by" in trusted_proxy_headers:
        # Waitress itself does not use X-Forwarded-By, but we can not
        # remove it so it can get set in the environ
        untrusted_headers.remove("X_FORWARDED_BY")

    if "forwarded" in trusted_proxy_headers:
        forwarded = environ.get("HTTP_FORWARDED", None)
        untrusted_headers = PROXY_HEADERS - {"FORWARDED"}

    # If the Forwarded header exists, it gets priority
    if forwarded:
        proxies = []
        try:
            for forwarded_element in forwarded.split(","):
                # Remove whitespace that may have been introduced when
                # appending a new entry
                forwarded_element = forwarded_element.strip()

                forwarded_for = forwarded_host = forwarded_proto = ""
                forwarded_port = forwarded_by = ""

                for pair in forwarded_element.split(";"):
                    pair = pair.lower()

                    if not pair:
                        continue

                    token, equals, value = pair.partition("=")

                    if equals != "=":
                        raise ValueError('Invalid forwarded-pair missing "="')

                    if token.strip() != token:
                        raise ValueError("Token may not be surrounded by whitespace")

                    if value.strip() != value:
                        raise ValueError("Value may not be surrounded by whitespace")

                    if token == "by":
                        forwarded_by = undquote(value)

                    elif token == "for":
                        forwarded_for = undquote(value)

                    elif token == "host":
                        forwarded_host = undquote(value)

                    elif token == "proto":
                        forwarded_proto = undquote(value)

                    else:
                        logger.warning("Unknown Forwarded token: %s" % token)

                proxies.append(
                    Forwarded(
                        forwarded_by, forwarded_for, forwarded_host, forwarded_proto
                    )
                )
        except Exception as ex:
            raise MalformedProxyHeader("Forwarded", str(ex), environ["HTTP_FORWARDED"])

        proxies = proxies[-trusted_proxy_count:]

        # Iterate backwards and fill in some values, the oldest entry that
        # contains the information we expect is the one we use. We expect
        # that intermediate proxies may re-write the host header or proto,
        # but the oldest entry is the one that contains the information the
        # client expects when generating URLs
        #
        # Forwarded: for="[2001:db8::1]";host="example.com:8443";proto="https"
        # Forwarded: for=192.0.2.1;host="example.internal:8080"
        #
        # (After HTTPS header folding) should mean that we use as values:
        #
        # Host: example.com
        # Protocol: https
        # Port: 8443

        for proxy in proxies[::-1]:
            client_addr = proxy.for_ or client_addr
            forwarded_host = proxy.host or forwarded_host
            forwarded_proto = proxy.proto or forwarded_proto

    if forwarded_proto:
        forwarded_proto = forwarded_proto.lower()

        if forwarded_proto not in {"http", "https"}:
            raise MalformedProxyHeader(
                "Forwarded Proto=" if forwarded else "X-Forwarded-Proto",
                "unsupported proto value",
                forwarded_proto,
            )

        # Set the URL scheme to the proxy provided proto
        environ["wsgi.url_scheme"] = forwarded_proto

        if not forwarded_port:
            if forwarded_proto == "http":
                forwarded_port = "80"

            if forwarded_proto == "https":
                forwarded_port = "443"

    if forwarded_host:
        if ":" in forwarded_host and forwarded_host[-1] != "]":
            host, port = forwarded_host.rsplit(":", 1)
            host, port = host.strip(), str(port)

            # We trust the port in the Forwarded Host/X-Forwarded-Host over
            # X-Forwarded-Port, or whatever we got from Forwarded
            # Proto/X-Forwarded-Proto.

            if forwarded_port != port:
                forwarded_port = port

            # We trust the proxy server's forwarded Host
            environ["SERVER_NAME"] = host
            environ["HTTP_HOST"] = forwarded_host
        else:
            # We trust the proxy server's forwarded Host
            environ["SERVER_NAME"] = forwarded_host
            environ["HTTP_HOST"] = forwarded_host

            if forwarded_port:
                if forwarded_port not in {"443", "80"}:
                    environ["HTTP_HOST"] = "{}:{}".format(
                        forwarded_host, forwarded_port
                    )
                elif forwarded_port == "80" and environ["wsgi.url_scheme"] != "http":
                    environ["HTTP_HOST"] = "{}:{}".format(
                        forwarded_host, forwarded_port
                    )
                elif forwarded_port == "443" and environ["wsgi.url_scheme"] != "https":
                    environ["HTTP_HOST"] = "{}:{}".format(
                        forwarded_host, forwarded_port
                    )

    if forwarded_port:
        environ["SERVER_PORT"] = str(forwarded_port)

    if client_addr:
        if ":" in client_addr and client_addr[-1] != "]":
            addr, port = client_addr.rsplit(":", 1)
            environ["REMOTE_ADDR"] = strip_brackets(addr.strip())
            environ["REMOTE_PORT"] = port.strip()
        else:
|
||||||
|
environ["REMOTE_ADDR"] = strip_brackets(client_addr.strip())
|
||||||
|
environ["REMOTE_HOST"] = environ["REMOTE_ADDR"]
|
||||||
|
|
||||||
|
return untrusted_headers
|
||||||
|
|
||||||
|
|
||||||
|
def strip_brackets(addr):
|
||||||
|
if addr[0] == "[" and addr[-1] == "]":
|
||||||
|
return addr[1:-1]
|
||||||
|
return addr
|
||||||
|
|
||||||
|
|
||||||
|
def clear_untrusted_headers(
|
||||||
|
environ, untrusted_headers, log_warning=False, logger=logger
|
||||||
|
):
|
||||||
|
untrusted_headers_removed = [
|
||||||
|
header
|
||||||
|
for header in untrusted_headers
|
||||||
|
if environ.pop("HTTP_" + header, False) is not False
|
||||||
|
]
|
||||||
|
|
||||||
|
if log_warning and untrusted_headers_removed:
|
||||||
|
untrusted_headers_removed = [
|
||||||
|
"-".join(x.capitalize() for x in header.split("_"))
|
||||||
|
for header in untrusted_headers_removed
|
||||||
|
]
|
||||||
|
logger.warning(
|
||||||
|
"Removed untrusted headers (%s). Waitress recommends these be "
|
||||||
|
"removed upstream.",
|
||||||
|
", ".join(untrusted_headers_removed),
|
||||||
|
)
|
|
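
As a usage illustration for the middleware whose internals appear above: a minimal sketch that wraps a trivial WSGI app with proxy_headers_middleware and feeds it a hand-built environ. The demo app, peer address, and header values are hypothetical; the middleware name, keyword arguments, and rewritten environ keys come from the vendored code itself.

# Minimal sketch, assuming the vendored package is importable as `waitress`.
from waitress.proxy_headers import proxy_headers_middleware

def hello_app(environ, start_response):  # hypothetical demo app
    start_response("200 OK", [("Content-Type", "text/plain")])
    body = "%s://%s" % (environ["wsgi.url_scheme"], environ["HTTP_HOST"])
    return [body.encode("latin-1")]

wrapped = proxy_headers_middleware(
    hello_app,
    trusted_proxy="*",  # trust any peer; demo only, not a safe production value
    trusted_proxy_count=1,
    trusted_proxy_headers={"x-forwarded-proto", "x-forwarded-host"},
)

environ = {
    "REMOTE_ADDR": "10.0.0.1",  # hypothetical upstream proxy address
    "wsgi.url_scheme": "http",
    "HTTP_HOST": "internal:8080",
    "HTTP_X_FORWARDED_PROTO": "https",
    "HTTP_X_FORWARDED_HOST": "example.com",
}
# After translation the app sees wsgi.url_scheme == "https" and
# HTTP_HOST == "example.com", per the rewriting logic above.
print(wrapped(environ, lambda status, headers, exc_info=None: None))
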
@@ -0,0 +1,186 @@
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Data Chunk Receiver
"""

from waitress.utilities import BadRequest, find_double_newline


class FixedStreamReceiver:

    # See IStreamConsumer
    completed = False
    error = None

    def __init__(self, cl, buf):
        self.remain = cl
        self.buf = buf

    def __len__(self):
        return self.buf.__len__()

    def received(self, data):
        "See IStreamConsumer"
        rm = self.remain

        if rm < 1:
            self.completed = True  # Avoid any chance of spinning

            return 0
        datalen = len(data)

        if rm <= datalen:
            self.buf.append(data[:rm])
            self.remain = 0
            self.completed = True

            return rm
        else:
            self.buf.append(data)
            self.remain -= datalen

            return datalen

    def getfile(self):
        return self.buf.getfile()

    def getbuf(self):
        return self.buf


class ChunkedReceiver:

    chunk_remainder = 0
    validate_chunk_end = False
    control_line = b""
    chunk_end = b""
    all_chunks_received = False
    trailer = b""
    completed = False
    error = None

    # max_control_line = 1024
    # max_trailer = 65536

    def __init__(self, buf):
        self.buf = buf

    def __len__(self):
        return self.buf.__len__()

    def received(self, s):
        # Returns the number of bytes consumed.

        if self.completed:
            return 0
        orig_size = len(s)

        while s:
            rm = self.chunk_remainder

            if rm > 0:
                # Receive the remainder of a chunk.
                to_write = s[:rm]
                self.buf.append(to_write)
                written = len(to_write)
                s = s[written:]

                self.chunk_remainder -= written

                if self.chunk_remainder == 0:
                    self.validate_chunk_end = True
            elif self.validate_chunk_end:
                s = self.chunk_end + s

                pos = s.find(b"\r\n")

                if pos < 0 and len(s) < 2:
                    self.chunk_end = s
                    s = b""
                else:
                    self.chunk_end = b""
                    if pos == 0:
                        # Chop off the terminating CR LF from the chunk
                        s = s[2:]
                    else:
                        self.error = BadRequest("Chunk not properly terminated")
                        self.all_chunks_received = True

                # Always exit this loop
                self.validate_chunk_end = False
            elif not self.all_chunks_received:
                # Receive a control line.
                s = self.control_line + s
                pos = s.find(b"\r\n")

                if pos < 0:
                    # Control line not finished.
                    self.control_line = s
                    s = b""
                else:
                    # Control line finished.
                    line = s[:pos]
                    s = s[pos + 2 :]
                    self.control_line = b""
                    line = line.strip()

                    if line:
                        # Begin a new chunk.
                        semi = line.find(b";")

                        if semi >= 0:
                            # discard extension info.
                            line = line[:semi]
                        try:
                            sz = int(line.strip(), 16)  # hexadecimal
                        except ValueError:  # garbage in input
                            self.error = BadRequest("garbage in chunked encoding input")
                            sz = 0

                        if sz > 0:
                            # Start a new chunk.
                            self.chunk_remainder = sz
                        else:
                            # Finished chunks.
                            self.all_chunks_received = True
                    # else expect a control line.
            else:
                # Receive the trailer.
                trailer = self.trailer + s

                if trailer.startswith(b"\r\n"):
                    # No trailer.
                    self.completed = True

                    return orig_size - (len(trailer) - 2)
                pos = find_double_newline(trailer)

                if pos < 0:
                    # Trailer not finished.
                    self.trailer = trailer
                    s = b""
                else:
                    # Finished the trailer.
                    self.completed = True
                    self.trailer = trailer[:pos]

                    return orig_size - (len(trailer) - pos)

        return orig_size

    def getfile(self):
        return self.buf.getfile()

    def getbuf(self):
        return self.buf
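
The chunk parser above is driven incrementally through received(); a minimal sketch of one complete chunked body, assuming this hunk lands at waitress/receiver.py as in upstream waitress and pairing it with a buffer from waitress.buffers, as the channel code does:

from waitress.buffers import OverflowableBuffer
from waitress.receiver import ChunkedReceiver

buf = OverflowableBuffer(65536)  # spills to a temp file past 64KB
recv = ChunkedReceiver(buf)

# Two chunks ("Wiki", "pedia") and the zero-size terminator with no trailer.
wire = b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
consumed = recv.received(wire)

assert recv.completed and recv.error is None
assert consumed == len(wire)
print(buf.get())  # b'Wikipedia'
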
@@ -0,0 +1,50 @@
"""
This contains a bunch of RFC7230 definitions and regular expressions that are
needed to properly parse HTTP messages.
"""

import re

WS = "[ \t]"
OWS = WS + "{0,}?"
RWS = WS + "{1,}?"
BWS = OWS

# RFC 7230 Section 3.2.6 "Field Value Components":
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
#    / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
#    / DIGIT / ALPHA
# obs-text = %x80-FF
TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]"
OBS_TEXT = r"\x80-\xff"

TOKEN = TCHAR + "{1,}"

# RFC 5234 Appendix B.1 "Core Rules":
# VCHAR = %x21-7E
#    ; visible (printing) characters
VCHAR = r"\x21-\x7e"

# header-field   = field-name ":" OWS field-value OWS
# field-name     = token
# field-value    = *( field-content / obs-fold )
# field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar    = VCHAR / obs-text

# Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
# changes field-content to:
#
# field-content  = field-vchar [ 1*( SP / HTAB / field-vchar )
#                  field-vchar ]

FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]"
# Field content is more greedy than the ABNF, in that it will match the whole value
FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*"
# Which allows the field value here to just see if there is even a value in the first place
FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?"

HEADER_FIELD = re.compile(
    (
        "^(?P<name>" + TOKEN + "):" + OWS + "(?P<value>" + FIELD_VALUE + ")" + OWS + "$"
    ).encode("latin-1")
)
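
These pieces combine into HEADER_FIELD, which the request parser applies to each header line; a small sketch of what a match yields, assuming the upstream module path waitress/rfc7230.py:

from waitress.rfc7230 import HEADER_FIELD

m = HEADER_FIELD.match(b"Content-Type: text/html; charset=UTF-8")
assert m is not None
print(m.group("name"))   # b'Content-Type'
print(m.group("value"))  # b'text/html; charset=UTF-8'

# A header name containing a space is not a valid token, so there is no match:
assert HEADER_FIELD.match(b"Bad Name: oops") is None
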
@@ -0,0 +1,299 @@
##############################################################################
#
# Copyright (c) 2013 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Command line runner.
"""


import getopt
import logging
import os
import os.path
import re
import sys

from waitress import serve
from waitress.adjustments import Adjustments
from waitress.utilities import logger

HELP = """\
Usage:

    {0} [OPTS] MODULE:OBJECT

Standard options:

    --help
        Show this information.

    --call
        Call the given object to get the WSGI application.

    --host=ADDR
        Hostname or IP address on which to listen, default is '0.0.0.0',
        which means "all IP addresses on this host".

        Note: May not be used together with --listen

    --port=PORT
        TCP port on which to listen, default is '8080'

        Note: May not be used together with --listen

    --listen=ip:port
        Tell waitress to listen on an ip port combination.

        Example:

            --listen=127.0.0.1:8080
            --listen=[::1]:8080
            --listen=*:8080

        This option may be used multiple times to listen on multiple sockets.
        A wildcard for the hostname is also supported and will bind to both
        IPv4/IPv6 depending on whether they are enabled or disabled.

    --[no-]ipv4
        Toggle on/off IPv4 support.

        Example:

            --no-ipv4

        This will disable IPv4 socket support. This affects wildcard matching
        when generating the list of sockets.

    --[no-]ipv6
        Toggle on/off IPv6 support.

        Example:

            --no-ipv6

        This will turn off IPv6 socket support. This affects wildcard matching
        when generating a list of sockets.

    --unix-socket=PATH
        Path of Unix socket. If a socket path is specified, a Unix domain
        socket is made instead of the usual inet domain socket.

        Not available on Windows.

    --unix-socket-perms=PERMS
        Octal permissions to use for the Unix domain socket, default is
        '600'.

    --url-scheme=STR
        Default wsgi.url_scheme value, default is 'http'.

    --url-prefix=STR
        The ``SCRIPT_NAME`` WSGI environment value.  Setting this to anything
        except the empty string will cause the WSGI ``SCRIPT_NAME`` value to be
        the value passed minus any trailing slashes you add, and it will cause
        the ``PATH_INFO`` of any request which is prefixed with this value to
        be stripped of the prefix.  Default is the empty string.

    --ident=STR
        Server identity used in the 'Server' header in responses. Default
        is 'waitress'.

Tuning options:

    --threads=INT
        Number of threads used to process application logic, default is 4.

    --backlog=INT
        Connection backlog for the server. Default is 1024.

    --recv-bytes=INT
        Number of bytes to request when calling socket.recv(). Default is
        8192.

    --send-bytes=INT
        Number of bytes to send to socket.send(). Default is 18000.
        Multiples of 9000 should avoid partly-filled TCP packets.

    --outbuf-overflow=INT
        A temporary file should be created if the pending output is larger
        than this. Default is 1048576 (1MB).

    --outbuf-high-watermark=INT
        The app_iter will pause when pending output is larger than this value
        and will resume once enough data is written to the socket to fall below
        this threshold. Default is 16777216 (16MB).

    --inbuf-overflow=INT
        A temporary file should be created if the pending input is larger
        than this. Default is 524288 (512KB).

    --connection-limit=INT
        Stop creating new channels if too many are already active.
        Default is 100.

    --cleanup-interval=INT
        Minimum seconds between cleaning up inactive channels. Default
        is 30. See '--channel-timeout'.

    --channel-timeout=INT
        Maximum number of seconds to leave inactive connections open.
        Default is 120. 'Inactive' is defined as 'has received no data
        from the client and has sent no data to the client'.

    --[no-]log-socket-errors
        Toggle whether premature client disconnect tracebacks ought to be
        logged. On by default.

    --max-request-header-size=INT
        Maximum size of all request headers combined. Default is 262144
        (256KB).

    --max-request-body-size=INT
        Maximum size of request body. Default is 1073741824 (1GB).

    --[no-]expose-tracebacks
        Toggle whether to expose tracebacks of unhandled exceptions to the
        client. Off by default.

    --asyncore-loop-timeout=INT
        The timeout value in seconds passed to asyncore.loop(). Default is 1.

    --asyncore-use-poll
        The use_poll argument passed to ``asyncore.loop()``. Helps overcome
        open file descriptors limit. Default is False.

    --channel-request-lookahead=INT
        Allows channels to stay readable and buffer more requests up to the
        given maximum even if a request is already being processed. This allows
        detecting if a client closed the connection while its request is being
        processed. Default is 0.

"""

RUNNER_PATTERN = re.compile(
    r"""
    ^
    (?P<module>
        [a-z_][a-z0-9_]*(?:\.[a-z_][a-z0-9_]*)*
    )
    :
    (?P<object>
        [a-z_][a-z0-9_]*(?:\.[a-z_][a-z0-9_]*)*
    )
    $
    """,
    re.I | re.X,
)


def match(obj_name):
    matches = RUNNER_PATTERN.match(obj_name)
    if not matches:
        raise ValueError("Malformed application '{}'".format(obj_name))
    return matches.group("module"), matches.group("object")


def resolve(module_name, object_name):
    """Resolve a named object in a module."""
    # We cast each segment due to an issue that has been found to manifest
    # in Python 2.6.6, but not 2.6.8, and may affect other revisions of Python
    # 2.6 and 2.7, whereby ``__import__`` chokes if the list passed in the
    # ``fromlist`` argument are unicode strings rather than 8-bit strings.
    # The error triggered is "TypeError: Item in ``fromlist '' not a string".
    # My guess is that this was fixed by checking against ``basestring``
    # rather than ``str`` sometime between the release of 2.6.6 and 2.6.8,
    # but I've yet to go over the commits. I know, however, that the NEWS
    # file makes no mention of such a change to the behaviour of
    # ``__import__``.
    segments = [str(segment) for segment in object_name.split(".")]
    obj = __import__(module_name, fromlist=segments[:1])
    for segment in segments:
        obj = getattr(obj, segment)
    return obj


def show_help(stream, name, error=None):  # pragma: no cover
    if error is not None:
        print("Error: {}\n".format(error), file=stream)
    print(HELP.format(name), file=stream)


def show_exception(stream):
    exc_type, exc_value = sys.exc_info()[:2]
    args = getattr(exc_value, "args", None)
    print(
        ("There was an exception ({}) importing your module.\n").format(
            exc_type.__name__,
        ),
        file=stream,
    )
    if args:
        print("It had these arguments: ", file=stream)
        for idx, arg in enumerate(args, start=1):
            print("{}. {}\n".format(idx, arg), file=stream)
    else:
        print("It had no arguments.", file=stream)


def run(argv=sys.argv, _serve=serve):
    """Command line runner."""
    name = os.path.basename(argv[0])

    try:
        kw, args = Adjustments.parse_args(argv[1:])
    except getopt.GetoptError as exc:
        show_help(sys.stderr, name, str(exc))
        return 1

    if kw["help"]:
        show_help(sys.stdout, name)
        return 0

    if len(args) != 1:
        show_help(sys.stderr, name, "Specify one application only")
        return 1

    # set a default level for the logger only if it hasn't been set explicitly
    # note that this level does not override any parent logger levels,
    # handlers, etc but without it no log messages are emitted by default
    if logger.level == logging.NOTSET:
        logger.setLevel(logging.INFO)

    try:
        module, obj_name = match(args[0])
    except ValueError as exc:
        show_help(sys.stderr, name, str(exc))
        show_exception(sys.stderr)
        return 1

    # Add the current directory onto sys.path
    sys.path.append(os.getcwd())

    # Get the WSGI function.
    try:
        app = resolve(module, obj_name)
    except ImportError:
        show_help(sys.stderr, name, "Bad module '{}'".format(module))
        show_exception(sys.stderr)
        return 1
    except AttributeError:
        show_help(sys.stderr, name, "Bad object name '{}'".format(obj_name))
        show_exception(sys.stderr)
        return 1
    if kw["call"]:
        app = app()

    # These arguments are specific to the runner, not waitress itself.
    del kw["call"], kw["help"]

    _serve(app, **kw)
    return 0
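
match() and resolve() carry the MODULE:OBJECT handling for the runner; a small sketch of the flow (the myapp.wsgi spec is hypothetical):

from waitress.runner import match, resolve

module, obj_name = match("myapp.wsgi:application")  # hypothetical app spec
print(module, obj_name)  # -> myapp.wsgi application

# resolve() would then import the module and walk the dotted object path:
# app = resolve(module, obj_name)
# _serve(app, **kw) is what run() finally calls with the parsed adjustments.
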
@@ -0,0 +1,417 @@
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################

import os
import os.path
import socket
import time

from waitress import trigger
from waitress.adjustments import Adjustments
from waitress.channel import HTTPChannel
from waitress.compat import IPPROTO_IPV6, IPV6_V6ONLY
from waitress.task import ThreadedTaskDispatcher
from waitress.utilities import cleanup_unix_socket

from . import wasyncore
from .proxy_headers import proxy_headers_middleware


def create_server(
    application,
    map=None,
    _start=True,  # test shim
    _sock=None,  # test shim
    _dispatcher=None,  # test shim
    **kw  # adjustments
):
    """
    if __name__ == '__main__':
        server = create_server(app)
        server.run()
    """
    if application is None:
        raise ValueError(
            'The "app" passed to ``create_server`` was ``None``.  You forgot '
            "to return a WSGI app within your application."
        )
    adj = Adjustments(**kw)

    if map is None:  # pragma: nocover
        map = {}

    dispatcher = _dispatcher
    if dispatcher is None:
        dispatcher = ThreadedTaskDispatcher()
        dispatcher.set_thread_count(adj.threads)

    if adj.unix_socket and hasattr(socket, "AF_UNIX"):
        sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None)
        return UnixWSGIServer(
            application,
            map,
            _start,
            _sock,
            dispatcher=dispatcher,
            adj=adj,
            sockinfo=sockinfo,
        )

    effective_listen = []
    last_serv = None
    if not adj.sockets:
        for sockinfo in adj.listen:
            # When TcpWSGIServer is called, it registers itself in the map. This
            # side-effect is all we need it for, so we don't store a reference to
            # or return it to the user.
            last_serv = TcpWSGIServer(
                application,
                map,
                _start,
                _sock,
                dispatcher=dispatcher,
                adj=adj,
                sockinfo=sockinfo,
            )
            effective_listen.append(
                (last_serv.effective_host, last_serv.effective_port)
            )

    for sock in adj.sockets:
        sockinfo = (sock.family, sock.type, sock.proto, sock.getsockname())
        if sock.family == socket.AF_INET or sock.family == socket.AF_INET6:
            last_serv = TcpWSGIServer(
                application,
                map,
                _start,
                sock,
                dispatcher=dispatcher,
                adj=adj,
                bind_socket=False,
                sockinfo=sockinfo,
            )
            effective_listen.append(
                (last_serv.effective_host, last_serv.effective_port)
            )
        elif hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX:
            last_serv = UnixWSGIServer(
                application,
                map,
                _start,
                sock,
                dispatcher=dispatcher,
                adj=adj,
                bind_socket=False,
                sockinfo=sockinfo,
            )
            effective_listen.append(
                (last_serv.effective_host, last_serv.effective_port)
            )

    # We are running a single server, so we can just return the last server,
    # saves us from having to create one more object
    if len(effective_listen) == 1:
        # In this case we have no need to use a MultiSocketServer
        return last_serv

    log_info = last_serv.log_info
    # Return a class that has a utility function to print out the sockets it's
    # listening on, and has a .run() function. All of the TcpWSGIServers
    # registered themselves in the map above.
    return MultiSocketServer(map, adj, effective_listen, dispatcher, log_info)


# This class is only ever used if we have multiple listen sockets. It allows
# the serve() API to call .run() which starts the wasyncore loop, and catches
# SystemExit/KeyboardInterrupt so that it can attempt to cleanly shut down.
class MultiSocketServer:
    asyncore = wasyncore  # test shim

    def __init__(
        self,
        map=None,
        adj=None,
        effective_listen=None,
        dispatcher=None,
        log_info=None,
    ):
        self.adj = adj
        self.map = map
        self.effective_listen = effective_listen
        self.task_dispatcher = dispatcher
        self.log_info = log_info

    def print_listen(self, format_str):  # pragma: nocover
        for l in self.effective_listen:
            l = list(l)

            if ":" in l[0]:
                l[0] = "[{}]".format(l[0])

            self.log_info(format_str.format(*l))

    def run(self):
        try:
            self.asyncore.loop(
                timeout=self.adj.asyncore_loop_timeout,
                map=self.map,
                use_poll=self.adj.asyncore_use_poll,
            )
        except (SystemExit, KeyboardInterrupt):
            self.close()

    def close(self):
        self.task_dispatcher.shutdown()
        wasyncore.close_all(self.map)


class BaseWSGIServer(wasyncore.dispatcher):

    channel_class = HTTPChannel
    next_channel_cleanup = 0
    socketmod = socket  # test shim
    asyncore = wasyncore  # test shim
    in_connection_overflow = False

    def __init__(
        self,
        application,
        map=None,
        _start=True,  # test shim
        _sock=None,  # test shim
        dispatcher=None,  # dispatcher
        adj=None,  # adjustments
        sockinfo=None,  # opaque object
        bind_socket=True,
        **kw
    ):
        if adj is None:
            adj = Adjustments(**kw)

        if adj.trusted_proxy or adj.clear_untrusted_proxy_headers:
            # wrap the application to deal with proxy headers
            # we wrap it here because webtest subclasses the TcpWSGIServer
            # directly and thus doesn't run any code that's in create_server
            application = proxy_headers_middleware(
                application,
                trusted_proxy=adj.trusted_proxy,
                trusted_proxy_count=adj.trusted_proxy_count,
                trusted_proxy_headers=adj.trusted_proxy_headers,
                clear_untrusted=adj.clear_untrusted_proxy_headers,
                log_untrusted=adj.log_untrusted_proxy_headers,
                logger=self.logger,
            )

        if map is None:
            # use a nonglobal socket map by default to hopefully prevent
            # conflicts with apps and libs that use the wasyncore global socket
            # map ala https://github.com/Pylons/waitress/issues/63
            map = {}
        if sockinfo is None:
            sockinfo = adj.listen[0]

        self.sockinfo = sockinfo
        self.family = sockinfo[0]
        self.socktype = sockinfo[1]
        self.application = application
        self.adj = adj
        self.trigger = trigger.trigger(map)
        if dispatcher is None:
            dispatcher = ThreadedTaskDispatcher()
            dispatcher.set_thread_count(self.adj.threads)

        self.task_dispatcher = dispatcher
        self.asyncore.dispatcher.__init__(self, _sock, map=map)
        if _sock is None:
            self.create_socket(self.family, self.socktype)
            if self.family == socket.AF_INET6:  # pragma: nocover
                self.socket.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)

        self.set_reuse_addr()

        if bind_socket:
            self.bind_server_socket()

        self.effective_host, self.effective_port = self.getsockname()
        self.server_name = adj.server_name
        self.active_channels = {}
        if _start:
            self.accept_connections()

    def bind_server_socket(self):
        raise NotImplementedError  # pragma: no cover

    def getsockname(self):
        raise NotImplementedError  # pragma: no cover

    def accept_connections(self):
        self.accepting = True
        self.socket.listen(self.adj.backlog)  # Get around asyncore NT limit

    def add_task(self, task):
        self.task_dispatcher.add_task(task)

    def readable(self):
        now = time.time()
        if now >= self.next_channel_cleanup:
            self.next_channel_cleanup = now + self.adj.cleanup_interval
            self.maintenance(now)

        if self.accepting:
            if (
                not self.in_connection_overflow
                and len(self._map) >= self.adj.connection_limit
            ):
                self.in_connection_overflow = True
                self.logger.warning(
                    "total open connections reached the connection limit, "
                    "no longer accepting new connections"
                )
            elif (
                self.in_connection_overflow
                and len(self._map) < self.adj.connection_limit
            ):
                self.in_connection_overflow = False
                self.logger.info(
                    "total open connections dropped below the connection limit, "
                    "listening again"
                )
            return not self.in_connection_overflow
        return False

    def writable(self):
        return False

    def handle_read(self):
        pass

    def handle_connect(self):
        pass

    def handle_accept(self):
        try:
            v = self.accept()
            if v is None:
                return
            conn, addr = v
        except OSError:
            # Linux: On rare occasions we get a bogus socket back from
            # accept.  socketmodule.c:makesockaddr complains that the
            # address family is unknown.  We don't want the whole server
            # to shut down because of this.
            if self.adj.log_socket_errors:
                self.logger.warning("server accept() threw an exception", exc_info=True)
            return
        self.set_socket_options(conn)
        addr = self.fix_addr(addr)
        self.channel_class(self, conn, addr, self.adj, map=self._map)

    def run(self):
        try:
            self.asyncore.loop(
                timeout=self.adj.asyncore_loop_timeout,
                map=self._map,
                use_poll=self.adj.asyncore_use_poll,
            )
        except (SystemExit, KeyboardInterrupt):
            self.task_dispatcher.shutdown()

    def pull_trigger(self):
        self.trigger.pull_trigger()

    def set_socket_options(self, conn):
        pass

    def fix_addr(self, addr):
        return addr

    def maintenance(self, now):
        """
        Closes channels that have not had any activity in a while.

        The timeout is configured through adj.channel_timeout (seconds).
        """
        cutoff = now - self.adj.channel_timeout
        for channel in self.active_channels.values():
            if (not channel.requests) and channel.last_activity < cutoff:
                channel.will_close = True

    def print_listen(self, format_str):  # pragma: no cover
        self.log_info(format_str.format(self.effective_host, self.effective_port))

    def close(self):
        self.trigger.close()
        return wasyncore.dispatcher.close(self)


class TcpWSGIServer(BaseWSGIServer):
    def bind_server_socket(self):
        (_, _, _, sockaddr) = self.sockinfo
        self.bind(sockaddr)

    def getsockname(self):
        # Return the IP address, port as numeric
        return self.socketmod.getnameinfo(
            self.socket.getsockname(),
            self.socketmod.NI_NUMERICHOST | self.socketmod.NI_NUMERICSERV,
        )

    def set_socket_options(self, conn):
        for (level, optname, value) in self.adj.socket_options:
            conn.setsockopt(level, optname, value)


if hasattr(socket, "AF_UNIX"):

    class UnixWSGIServer(BaseWSGIServer):
        def __init__(
            self,
            application,
            map=None,
            _start=True,  # test shim
            _sock=None,  # test shim
            dispatcher=None,  # dispatcher
            adj=None,  # adjustments
            sockinfo=None,  # opaque object
            **kw
        ):
            if sockinfo is None:
                sockinfo = (socket.AF_UNIX, socket.SOCK_STREAM, None, None)

            super().__init__(
                application,
                map=map,
                _start=_start,
                _sock=_sock,
                dispatcher=dispatcher,
                adj=adj,
                sockinfo=sockinfo,
                **kw,
            )

        def bind_server_socket(self):
            cleanup_unix_socket(self.adj.unix_socket)
            self.bind(self.adj.unix_socket)
            if os.path.exists(self.adj.unix_socket):
                os.chmod(self.adj.unix_socket, self.adj.unix_socket_perms)

        def getsockname(self):
            return ("unix", self.socket.getsockname())

        def fix_addr(self, addr):
            return ("localhost", None)


# Compatibility alias.
WSGIServer = TcpWSGIServer
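
create_server() is the entry point the rest of this file serves; a minimal sketch, assuming port 0 is accepted as "pick a free ephemeral port" so the demo does not collide with an existing listener:

from waitress.server import create_server

def app(environ, start_response):  # hypothetical demo app
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

# effective_port reports the port the OS actually assigned.
server = create_server(app, listen="127.0.0.1:0")
print(server.effective_host, server.effective_port)
# server.run()  # blocks, driving the wasyncore loop until interrupted
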
@ -0,0 +1,570 @@
|
||||||
|
##############################################################################
|
||||||
|
#
|
||||||
|
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# This software is subject to the provisions of the Zope Public License,
|
||||||
|
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
|
||||||
|
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
|
||||||
|
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
|
||||||
|
# FOR A PARTICULAR PURPOSE.
|
||||||
|
#
|
||||||
|
##############################################################################
|
||||||
|
|
||||||
|
from collections import deque
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
|
||||||
|
from .buffers import ReadOnlyFileBasedBuffer
|
||||||
|
from .utilities import build_http_date, logger, queue_logger
|
||||||
|
|
||||||
|
rename_headers = { # or keep them without the HTTP_ prefix added
|
||||||
|
"CONTENT_LENGTH": "CONTENT_LENGTH",
|
||||||
|
"CONTENT_TYPE": "CONTENT_TYPE",
|
||||||
|
}
|
||||||
|
|
||||||
|
hop_by_hop = frozenset(
|
||||||
|
(
|
||||||
|
"connection",
|
||||||
|
"keep-alive",
|
||||||
|
"proxy-authenticate",
|
||||||
|
"proxy-authorization",
|
||||||
|
"te",
|
||||||
|
"trailers",
|
||||||
|
"transfer-encoding",
|
||||||
|
"upgrade",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class ThreadedTaskDispatcher:
|
||||||
|
"""A Task Dispatcher that creates a thread for each task."""
|
||||||
|
|
||||||
|
stop_count = 0 # Number of threads that will stop soon.
|
||||||
|
active_count = 0 # Number of currently active threads
|
||||||
|
logger = logger
|
||||||
|
queue_logger = queue_logger
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.threads = set()
|
||||||
|
self.queue = deque()
|
||||||
|
self.lock = threading.Lock()
|
||||||
|
self.queue_cv = threading.Condition(self.lock)
|
||||||
|
self.thread_exit_cv = threading.Condition(self.lock)
|
||||||
|
|
||||||
|
def start_new_thread(self, target, thread_no):
|
||||||
|
t = threading.Thread(
|
||||||
|
target=target, name="waitress-{}".format(thread_no), args=(thread_no,)
|
||||||
|
)
|
||||||
|
t.daemon = True
|
||||||
|
t.start()
|
||||||
|
|
||||||
|
def handler_thread(self, thread_no):
|
||||||
|
while True:
|
||||||
|
with self.lock:
|
||||||
|
while not self.queue and self.stop_count == 0:
|
||||||
|
# Mark ourselves as idle before waiting to be
|
||||||
|
# woken up, then we will once again be active
|
||||||
|
self.active_count -= 1
|
||||||
|
self.queue_cv.wait()
|
||||||
|
self.active_count += 1
|
||||||
|
|
||||||
|
if self.stop_count > 0:
|
||||||
|
self.active_count -= 1
|
||||||
|
self.stop_count -= 1
|
||||||
|
self.threads.discard(thread_no)
|
||||||
|
self.thread_exit_cv.notify()
|
||||||
|
break
|
||||||
|
|
||||||
|
task = self.queue.popleft()
|
||||||
|
try:
|
||||||
|
task.service()
|
||||||
|
except BaseException:
|
||||||
|
self.logger.exception("Exception when servicing %r", task)
|
||||||
|
|
||||||
|
def set_thread_count(self, count):
|
||||||
|
with self.lock:
|
||||||
|
threads = self.threads
|
||||||
|
thread_no = 0
|
||||||
|
running = len(threads) - self.stop_count
|
||||||
|
while running < count:
|
||||||
|
# Start threads.
|
||||||
|
while thread_no in threads:
|
||||||
|
thread_no = thread_no + 1
|
||||||
|
threads.add(thread_no)
|
||||||
|
running += 1
|
||||||
|
self.start_new_thread(self.handler_thread, thread_no)
|
||||||
|
self.active_count += 1
|
||||||
|
thread_no = thread_no + 1
|
||||||
|
if running > count:
|
||||||
|
# Stop threads.
|
||||||
|
self.stop_count += running - count
|
||||||
|
self.queue_cv.notify_all()
|
||||||
|
|
||||||
|
def add_task(self, task):
|
||||||
|
with self.lock:
|
||||||
|
self.queue.append(task)
|
||||||
|
self.queue_cv.notify()
|
||||||
|
queue_size = len(self.queue)
|
||||||
|
idle_threads = len(self.threads) - self.stop_count - self.active_count
|
||||||
|
if queue_size > idle_threads:
|
||||||
|
self.queue_logger.warning(
|
||||||
|
"Task queue depth is %d", queue_size - idle_threads
|
||||||
|
)
|
||||||
|
|
||||||
|
def shutdown(self, cancel_pending=True, timeout=5):
|
||||||
|
self.set_thread_count(0)
|
||||||
|
# Ensure the threads shut down.
|
||||||
|
threads = self.threads
|
||||||
|
expiration = time.time() + timeout
|
||||||
|
with self.lock:
|
||||||
|
while threads:
|
||||||
|
if time.time() >= expiration:
|
||||||
|
self.logger.warning("%d thread(s) still running", len(threads))
|
||||||
|
break
|
||||||
|
self.thread_exit_cv.wait(0.1)
|
||||||
|
if cancel_pending:
|
||||||
|
# Cancel remaining tasks.
|
||||||
|
queue = self.queue
|
||||||
|
if len(queue) > 0:
|
||||||
|
self.logger.warning("Canceling %d pending task(s)", len(queue))
|
||||||
|
while queue:
|
||||||
|
task = queue.popleft()
|
||||||
|
task.cancel()
|
||||||
|
self.queue_cv.notify_all()
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
class Task:
|
||||||
|
close_on_finish = False
|
||||||
|
status = "200 OK"
|
||||||
|
wrote_header = False
|
||||||
|
start_time = 0
|
||||||
|
content_length = None
|
||||||
|
content_bytes_written = 0
|
||||||
|
logged_write_excess = False
|
||||||
|
logged_write_no_body = False
|
||||||
|
complete = False
|
||||||
|
chunked_response = False
|
||||||
|
logger = logger
|
||||||
|
|
||||||
|
def __init__(self, channel, request):
|
||||||
|
self.channel = channel
|
||||||
|
self.request = request
|
||||||
|
self.response_headers = []
|
||||||
|
version = request.version
|
||||||
|
if version not in ("1.0", "1.1"):
|
||||||
|
# fall back to a version we support.
|
||||||
|
version = "1.0"
|
||||||
|
self.version = version
|
||||||
|
|
||||||
|
def service(self):
|
||||||
|
try:
|
||||||
|
self.start()
|
||||||
|
self.execute()
|
||||||
|
self.finish()
|
||||||
|
except OSError:
|
||||||
|
self.close_on_finish = True
|
||||||
|
if self.channel.adj.log_socket_errors:
|
||||||
|
raise
|
||||||
|
|
||||||
|
@property
|
||||||
|
def has_body(self):
|
||||||
|
return not (
|
||||||
|
self.status.startswith("1")
|
||||||
|
or self.status.startswith("204")
|
||||||
|
or self.status.startswith("304")
|
||||||
|
)
|
||||||
|
|
||||||
|
def build_response_header(self):
|
||||||
|
version = self.version
|
||||||
|
# Figure out whether the connection should be closed.
|
||||||
|
connection = self.request.headers.get("CONNECTION", "").lower()
|
||||||
|
response_headers = []
|
||||||
|
content_length_header = None
|
||||||
|
date_header = None
|
||||||
|
server_header = None
|
||||||
|
connection_close_header = None
|
||||||
|
|
||||||
|
for (headername, headerval) in self.response_headers:
|
||||||
|
headername = "-".join([x.capitalize() for x in headername.split("-")])
|
||||||
|
|
||||||
|
if headername == "Content-Length":
|
||||||
|
if self.has_body:
|
||||||
|
content_length_header = headerval
|
||||||
|
else:
|
||||||
|
continue # pragma: no cover
|
||||||
|
|
||||||
|
if headername == "Date":
|
||||||
|
date_header = headerval
|
||||||
|
|
||||||
|
if headername == "Server":
|
||||||
|
server_header = headerval
|
||||||
|
|
||||||
|
if headername == "Connection":
|
||||||
|
connection_close_header = headerval.lower()
|
||||||
|
# replace with properly capitalized version
|
||||||
|
response_headers.append((headername, headerval))
|
||||||
|
|
||||||
|
if (
|
||||||
|
content_length_header is None
|
||||||
|
and self.content_length is not None
|
||||||
|
and self.has_body
|
||||||
|
):
|
||||||
|
content_length_header = str(self.content_length)
|
||||||
|
response_headers.append(("Content-Length", content_length_header))
|
||||||
|
|
||||||
|
def close_on_finish():
|
||||||
|
if connection_close_header is None:
|
||||||
|
response_headers.append(("Connection", "close"))
|
||||||
|
self.close_on_finish = True
|
||||||
|
|
||||||
|
if version == "1.0":
|
||||||
|
if connection == "keep-alive":
|
||||||
|
if not content_length_header:
|
||||||
|
close_on_finish()
|
||||||
|
else:
|
||||||
|
response_headers.append(("Connection", "Keep-Alive"))
|
||||||
|
else:
|
||||||
|
close_on_finish()
|
||||||
|
|
||||||
|
elif version == "1.1":
|
||||||
|
if connection == "close":
|
||||||
|
close_on_finish()
|
||||||
|
|
||||||
|
if not content_length_header:
|
||||||
|
# RFC 7230: MUST NOT send Transfer-Encoding or Content-Length
|
||||||
|
# for any response with a status code of 1xx, 204 or 304.
|
||||||
|
|
||||||
|
if self.has_body:
|
||||||
|
response_headers.append(("Transfer-Encoding", "chunked"))
|
||||||
|
self.chunked_response = True
|
||||||
|
|
||||||
|
if not self.close_on_finish:
|
||||||
|
close_on_finish()
|
||||||
|
|
||||||
|
# under HTTP 1.1 keep-alive is default, no need to set the header
|
||||||
|
else:
|
||||||
|
raise AssertionError("neither HTTP/1.0 or HTTP/1.1")
|
||||||
|
|
||||||
|
# Set the Server and Date field, if not yet specified. This is needed
|
||||||
|
# if the server is used as a proxy.
|
||||||
|
ident = self.channel.server.adj.ident
|
||||||
|
|
||||||
|
if not server_header:
|
||||||
|
if ident:
|
||||||
|
response_headers.append(("Server", ident))
|
||||||
|
else:
|
||||||
|
response_headers.append(("Via", ident or "waitress"))
|
||||||
|
|
||||||
|
if not date_header:
|
||||||
|
response_headers.append(("Date", build_http_date(self.start_time)))
|
||||||
|
|
||||||
|
self.response_headers = response_headers
|
||||||
|
|
||||||
|
first_line = "HTTP/%s %s" % (self.version, self.status)
|
||||||
|
# NB: sorting headers needs to preserve same-named-header order
|
||||||
|
# as per RFC 2616 section 4.2; thus the key=lambda x: x[0] here;
|
||||||
|
# rely on stable sort to keep relative position of same-named headers
|
||||||
|
next_lines = [
|
||||||
|
"%s: %s" % hv for hv in sorted(self.response_headers, key=lambda x: x[0])
|
||||||
|
]
|
||||||
|
lines = [first_line] + next_lines
|
||||||
|
res = "%s\r\n\r\n" % "\r\n".join(lines)
|
||||||
|
|
||||||
|
return res.encode("latin-1")
|
||||||
|
|
||||||
|
def remove_content_length_header(self):
|
||||||
|
response_headers = []
|
||||||
|
|
||||||
|
for header_name, header_value in self.response_headers:
|
||||||
|
if header_name.lower() == "content-length":
|
||||||
|
continue # pragma: nocover
|
||||||
|
response_headers.append((header_name, header_value))
|
||||||
|
|
||||||
|
self.response_headers = response_headers
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
self.start_time = time.time()
|
||||||
|
|
||||||
|
def finish(self):
|
||||||
|
if not self.wrote_header:
|
||||||
|
self.write(b"")
|
||||||
|
if self.chunked_response:
|
||||||
|
# not self.write, it will chunk it!
|
||||||
|
self.channel.write_soon(b"0\r\n\r\n")
|
||||||
|
|
||||||
|
def write(self, data):
|
||||||
|
if not self.complete:
|
||||||
|
raise RuntimeError("start_response was not called before body written")
|
||||||
|
channel = self.channel
|
||||||
|
if not self.wrote_header:
|
||||||
|
rh = self.build_response_header()
|
||||||
|
channel.write_soon(rh)
|
||||||
|
self.wrote_header = True
|
||||||
|
|
||||||
|
if data and self.has_body:
|
||||||
|
towrite = data
|
||||||
|
cl = self.content_length
|
||||||
|
if self.chunked_response:
|
||||||
|
# use chunked encoding response
|
||||||
|
towrite = hex(len(data))[2:].upper().encode("latin-1") + b"\r\n"
|
||||||
|
towrite += data + b"\r\n"
|
||||||
|
elif cl is not None:
|
||||||
|
towrite = data[: cl - self.content_bytes_written]
|
||||||
|
self.content_bytes_written += len(towrite)
|
||||||
|
if towrite != data and not self.logged_write_excess:
|
||||||
|
self.logger.warning(
|
||||||
|
"application-written content exceeded the number of "
|
||||||
|
"bytes specified by Content-Length header (%s)" % cl
|
||||||
|
)
|
||||||
|
self.logged_write_excess = True
|
||||||
|
if towrite:
|
||||||
|
channel.write_soon(towrite)
|
||||||
|
elif data:
|
||||||
|
# Cheat, and tell the application we have written all of the bytes,
|
||||||
|
# even though the response shouldn't have a body and we are
|
||||||
|
# ignoring it entirely.
|
||||||
|
self.content_bytes_written += len(data)
|
||||||
|
|
||||||
|
if not self.logged_write_no_body:
|
||||||
|
self.logger.warning(
|
||||||
|
"application-written content was ignored due to HTTP "
|
||||||
|
"response that may not contain a message-body: (%s)" % self.status
|
||||||
|
)
|
||||||
|
self.logged_write_no_body = True
|
||||||
|
|
||||||
|
|
||||||
|
class ErrorTask(Task):
|
||||||
|
"""An error task produces an error response"""
|
||||||
|
|
||||||
|
complete = True
|
||||||
|
|
||||||
|
def execute(self):
|
||||||
|
e = self.request.error
|
||||||
|
status, headers, body = e.to_response()
|
||||||
|
self.status = status
|
||||||
|
self.response_headers.extend(headers)
|
||||||
|
# We need to explicitly tell the remote client we are closing the
|
||||||
|
# connection, because self.close_on_finish is set, and we are going to
|
||||||
|
# slam the door in the clients face.
|
||||||
|
self.response_headers.append(("Connection", "close"))
|
||||||
|
self.close_on_finish = True
|
||||||
|
self.content_length = len(body)
|
||||||
|
self.write(body.encode("latin-1"))
|
||||||
|
|
||||||
|
|
||||||
|
class WSGITask(Task):
|
||||||
|
"""A WSGI task produces a response from a WSGI application."""
|
||||||
|
|
||||||
|
environ = None
|
||||||
|
|
||||||
|
def execute(self):
|
||||||
|
environ = self.get_environment()
|
||||||
|
|
||||||
|
def start_response(status, headers, exc_info=None):
|
||||||
|
if self.complete and not exc_info:
|
||||||
|
raise AssertionError(
|
||||||
|
"start_response called a second time without providing exc_info."
|
||||||
|
)
|
||||||
|
if exc_info:
|
||||||
|
try:
|
||||||
|
if self.wrote_header:
|
||||||
|
# higher levels will catch and handle raised exception:
|
||||||
|
# 1. "service" method in task.py
|
||||||
|
# 2. "service" method in channel.py
|
||||||
|
# 3. "handler_thread" method in task.py
|
||||||
|
raise exc_info[1]
|
||||||
|
else:
|
||||||
|
# As per WSGI spec existing headers must be cleared
|
||||||
|
self.response_headers = []
|
||||||
|
finally:
|
||||||
|
exc_info = None
|
||||||
|
|
||||||
|
self.complete = True
|
||||||
|
|
||||||
|
if not status.__class__ is str:
|
||||||
|
raise AssertionError("status %s is not a string" % status)
|
||||||
|
if "\n" in status or "\r" in status:
|
||||||
|
raise ValueError(
|
||||||
|
"carriage return/line feed character present in status"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.status = status
|
||||||
|
|
||||||
|
# Prepare the headers for output
|
||||||
|
for k, v in headers:
|
||||||
|
if not k.__class__ is str:
|
||||||
|
raise AssertionError(
|
||||||
|
"Header name %r is not a string in %r" % (k, (k, v))
|
||||||
|
)
|
||||||
|
if not v.__class__ is str:
|
||||||
|
raise AssertionError(
|
||||||
|
"Header value %r is not a string in %r" % (v, (k, v))
|
||||||
|
)
|
||||||
|
|
||||||
|
if "\n" in v or "\r" in v:
|
||||||
|
raise ValueError(
|
||||||
|
"carriage return/line feed character present in header value"
|
||||||
|
)
|
||||||
|
if "\n" in k or "\r" in k:
|
||||||
|
raise ValueError(
|
||||||
|
"carriage return/line feed character present in header name"
|
||||||
|
)
|
||||||
|
|
||||||
|
kl = k.lower()
|
||||||
|
if kl == "content-length":
|
||||||
|
self.content_length = int(v)
|
||||||
|
elif kl in hop_by_hop:
|
||||||
|
raise AssertionError(
|
||||||
|
'%s is a "hop-by-hop" header; it cannot be used by '
|
||||||
|
"a WSGI application (see PEP 3333)" % k
|
||||||
|
)
|
||||||
|
|
||||||
|
self.response_headers.extend(headers)
|
||||||
|
|
||||||
|
# Return a method used to write the response data.
|
||||||
|
return self.write
|
||||||
|
|
||||||
|
# Call the application to handle the request and write a response
|
||||||
|
app_iter = self.channel.server.application(environ, start_response)
|
||||||
|
|
||||||
|
can_close_app_iter = True
|
||||||
|
try:
|
||||||
|
if app_iter.__class__ is ReadOnlyFileBasedBuffer:
|
||||||
|
cl = self.content_length
|
||||||
|
size = app_iter.prepare(cl)
|
||||||
|
if size:
|
||||||
|
if cl != size:
|
||||||
|
if cl is not None:
|
||||||
|
self.remove_content_length_header()
|
||||||
|
self.content_length = size
|
||||||
|
self.write(b"") # generate headers
|
||||||
|
# if the write_soon below succeeds then the channel will
|
||||||
|
# take over closing the underlying file via the channel's
|
||||||
|
# _flush_some or handle_close so we intentionally avoid
|
||||||
|
# calling close in the finally block
|
||||||
|
self.channel.write_soon(app_iter)
|
||||||
|
can_close_app_iter = False
|
||||||
|
return
|
||||||
|
|
||||||
|
first_chunk_len = None
|
||||||
|
for chunk in app_iter:
|
||||||
|
if first_chunk_len is None:
|
||||||
|
first_chunk_len = len(chunk)
|
||||||
|
# Set a Content-Length header if one is not supplied.
|
||||||
|
# start_response may not have been called until first
|
||||||
|
# iteration as per PEP, so we must reinterrogate
|
||||||
|
# self.content_length here
|
||||||
|
if self.content_length is None:
|
||||||
|
app_iter_len = None
|
||||||
|
if hasattr(app_iter, "__len__"):
|
||||||
|
app_iter_len = len(app_iter)
|
||||||
|
if app_iter_len == 1:
|
||||||
|
self.content_length = first_chunk_len
|
||||||
|
# transmit headers only after first iteration of the iterable
|
||||||
|
# that returns a non-empty bytestring (PEP 3333)
|
||||||
|
if chunk:
|
||||||
|
self.write(chunk)
|
||||||
|
|
||||||
|
cl = self.content_length
|
||||||
|
if cl is not None:
|
||||||
|
if self.content_bytes_written != cl:
|
||||||
|
# close the connection so the client isn't sitting around
|
||||||
|
# waiting for more data when there are too few bytes
|
||||||
|
# to service content-length
|
||||||
|
self.close_on_finish = True
|
||||||
|
if self.request.command != "HEAD":
|
||||||
|
self.logger.warning(
|
||||||
|
"application returned too few bytes (%s) "
|
||||||
|
"for specified Content-Length (%s) via app_iter"
|
||||||
|
% (self.content_bytes_written, cl),
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
if can_close_app_iter and hasattr(app_iter, "close"):
|
||||||
|
app_iter.close()

    def get_environment(self):
        """Returns a WSGI environment."""
        environ = self.environ
        if environ is not None:
            # Return the cached copy.
            return environ

        request = self.request
        path = request.path
        channel = self.channel
        server = channel.server
        url_prefix = server.adj.url_prefix

        if path.startswith("/"):
            # strip extra slashes at the beginning of a path that starts
            # with any number of slashes
            path = "/" + path.lstrip("/")

        if url_prefix:
            # NB: url_prefix is guaranteed by the configuration machinery to
            # be either the empty string or a string that starts with a single
            # slash and ends without any slashes
            if path == url_prefix:
                # if the path is the same as the url prefix, the SCRIPT_NAME
                # should be the url_prefix and PATH_INFO should be empty
                path = ""
            else:
                # if the path starts with the url prefix plus a slash,
                # the SCRIPT_NAME should be the url_prefix and PATH_INFO should
                # be the value of path from the slash until its end
                url_prefix_with_trailing_slash = url_prefix + "/"
                if path.startswith(url_prefix_with_trailing_slash):
                    path = path[len(url_prefix) :]

        environ = {
            "REMOTE_ADDR": channel.addr[0],
            # Nah, we aren't actually going to look up the reverse DNS for
            # REMOTE_ADDR, but we will happily set this environment variable
            # for the WSGI application. Spec says we can just set this to
            # REMOTE_ADDR, so we do.
            "REMOTE_HOST": channel.addr[0],
            # try and set the REMOTE_PORT to something useful, but maybe None
            "REMOTE_PORT": str(channel.addr[1]),
            "REQUEST_METHOD": request.command.upper(),
            "SERVER_PORT": str(server.effective_port),
            "SERVER_NAME": server.server_name,
            "SERVER_SOFTWARE": server.adj.ident,
            "SERVER_PROTOCOL": "HTTP/%s" % self.version,
            "SCRIPT_NAME": url_prefix,
            "PATH_INFO": path,
            "QUERY_STRING": request.query,
            "wsgi.url_scheme": request.url_scheme,
            # the following environment variables are required by the WSGI spec
            "wsgi.version": (1, 0),
            # apps should use the logging module
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": True,
            "wsgi.multiprocess": False,
            "wsgi.run_once": False,
            "wsgi.input": request.get_body_stream(),
            "wsgi.file_wrapper": ReadOnlyFileBasedBuffer,
            "wsgi.input_terminated": True,  # wsgi.input is EOF terminated
        }

        for key, value in dict(request.headers).items():
            value = value.strip()
            mykey = rename_headers.get(key, None)
            if mykey is None:
                mykey = "HTTP_" + key
            if mykey not in environ:
                environ[mykey] = value

        # Insert a callable into the environment that allows the application to
        # check if the client disconnected. Only works with
        # channel_request_lookahead larger than 0.
        environ["waitress.client_disconnected"] = self.channel.check_client_disconnected

        # cache the environ for this request
        self.environ = environ
        return environ
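
A standalone sketch (editor's illustration, not part of this commit) of the
SCRIPT_NAME/PATH_INFO split that get_environment() performs; the function name
split_url_prefix and the example prefix /bazarr are hypothetical:

    def split_url_prefix(path, url_prefix):
        """Mirror get_environment(): return (SCRIPT_NAME, PATH_INFO)."""
        if path.startswith("/"):
            path = "/" + path.lstrip("/")
        if url_prefix:
            if path == url_prefix:
                # request for the prefix itself: PATH_INFO is empty
                path = ""
            elif path.startswith(url_prefix + "/"):
                # strip the prefix; keep everything from the slash onward
                path = path[len(url_prefix):]
        return url_prefix, path

    assert split_url_prefix("/bazarr/api/status", "/bazarr") == ("/bazarr", "/api/status")
    assert split_url_prefix("/bazarr", "/bazarr") == ("/bazarr", "")
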
@ -0,0 +1,203 @@
##############################################################################
#
# Copyright (c) 2001-2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################

import errno
import os
import socket
import threading

from . import wasyncore

# Wake up a call to select() running in the main thread.
#
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
#
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
#
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]


class _triggerbase:
    """OS-independent base class for OS-dependent trigger class."""

    kind = None  # subclass must set to "pipe" or "loopback"; used by repr

    def __init__(self):
        self._closed = False

        # `lock` protects the `thunks` list from being traversed and
        # appended to simultaneously.
        self.lock = threading.Lock()

        # List of no-argument callbacks to invoke when the trigger is
        # pulled. These run in the thread running the wasyncore mainloop,
        # regardless of which thread pulls the trigger.
        self.thunks = []

    def readable(self):
        return True

    def writable(self):
        return False

    def handle_connect(self):
        pass

    def handle_close(self):
        self.close()

    # Override the wasyncore close() method, because it doesn't know about
    # (so can't close) all the gimmicks we have open. Subclass must
    # supply a _close() method to do platform-specific closing work. _close()
    # will be called iff we're not already closed.
    def close(self):
        if not self._closed:
            self._closed = True
            self.del_channel()
            self._close()  # subclass does OS-specific stuff

    def pull_trigger(self, thunk=None):
        if thunk:
            with self.lock:
                self.thunks.append(thunk)
        self._physical_pull()

    def handle_read(self):
        try:
            self.recv(8192)
        except OSError:
            return
        with self.lock:
            for thunk in self.thunks:
                try:
                    thunk()
                except:
                    nil, t, v, tbinfo = wasyncore.compact_traceback()
                    self.log_info(
                        "exception in trigger thunk: (%s:%s %s)" % (t, v, tbinfo)
                    )
            self.thunks = []


if os.name == "posix":

    class trigger(_triggerbase, wasyncore.file_dispatcher):
        kind = "pipe"

        def __init__(self, map):
            _triggerbase.__init__(self)
            r, self.trigger = self._fds = os.pipe()
            wasyncore.file_dispatcher.__init__(self, r, map=map)

        def _close(self):
            for fd in self._fds:
                os.close(fd)
            self._fds = []
            wasyncore.file_dispatcher.close(self)

        def _physical_pull(self):
            os.write(self.trigger, b"x")


else:  # pragma: no cover
    # Windows version; uses just sockets, because a pipe isn't select'able
    # on Windows.

    class trigger(_triggerbase, wasyncore.dispatcher):
        kind = "loopback"

        def __init__(self, map):
            _triggerbase.__init__(self)

            # Get a pair of connected sockets. The trigger is the 'w'
            # end of the pair, which is connected to 'r'. 'r' is put
            # in the wasyncore socket map. "pulling the trigger" then
            # means writing something on w, which will wake up r.

            w = socket.socket()
            # Disable buffering -- pulling the trigger sends 1 byte,
            # and we want that sent immediately, to wake up wasyncore's
            # select() ASAP.
            w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

            count = 0
            while True:
                count += 1
                # Bind to a local port; for efficiency, let the OS pick
                # a free port for us.
                # Unfortunately, stress tests showed that we may not
                # be able to connect to that port ("Address already in
                # use") despite that the OS picked it. This appears
                # to be a race bug in the Windows socket implementation.
                # So we loop until a connect() succeeds (almost always
                # on the first try). See the long thread at
                # http://mail.zope.org/pipermail/zope/2005-July/160433.html
                # for hideous details.
                a = socket.socket()
                a.bind(("127.0.0.1", 0))
                connect_address = a.getsockname()  # assigned (host, port) pair
                a.listen(1)
                try:
                    w.connect(connect_address)
                    break  # success
                except OSError as detail:
                    if detail[0] != errno.WSAEADDRINUSE:
                        # "Address already in use" is the only error
                        # I've seen on two WinXP Pro SP2 boxes, under
                        # Pythons 2.3.5 and 2.4.1.
                        raise
                    # (10048, 'Address already in use')
                    # assert count <= 2 # never triggered in Tim's tests
                    if count >= 10:  # I've never seen it go above 2
                        a.close()
                        w.close()
                        raise RuntimeError("Cannot bind trigger!")
                    # Close `a` and try again. Note: I originally put a short
                    # sleep() here, but it didn't appear to help or hurt.
                    a.close()

            r, addr = a.accept()  # r becomes wasyncore's (self.)socket
            a.close()
            self.trigger = w
            wasyncore.dispatcher.__init__(self, r, map=map)

        def _close(self):
            # self.socket is r, and self.trigger is w, from __init__
            self.socket.close()
            self.trigger.close()

        def _physical_pull(self):
            self.trigger.send(b"x")
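
A usage sketch (editor's illustration, not part of this commit) showing how a
worker thread wakes the wasyncore select() loop and hands a thunk back to the
mainloop thread; it assumes waitress's trigger and wasyncore modules are
importable as shown:

    import threading
    import time

    from waitress import trigger, wasyncore

    socket_map = {}
    t = trigger.trigger(socket_map)  # registers the read end in the map

    def worker():
        time.sleep(0.1)
        # the thunk runs later, inside the thread executing wasyncore.loop(),
        # so loop-owned data structures need no extra locking
        t.pull_trigger(lambda: print("ran in the mainloop thread"))
        time.sleep(0.1)
        t.pull_trigger(t.close)  # emptying the map makes loop() return

    threading.Thread(target=worker).start()
    wasyncore.loop(map=socket_map)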