Mirror of https://github.com/evilhero/mylar, synced 2025-01-03 13:34:33 +00:00
Commit 21eee17344
torrents will now properly hide torrent information

- IMP: Specified daemon port for deluge as an on-screen tip for more detail
- IMP: Added 100, 200, ALL as viewable watchlist views
- FIX: When viewing the pull-list with annual integration enabled, a present annual would incorrectly link to an invalid annual series instead of the actual series itself
- IMP: Added more detailed error messages for metatagging errors and better handling of stranded files during cleanup
- IMP: Improved some handling for weekly pull-list one-offs and refactored the nzb/one-off post-processing into a separate function for future callables
- Moved all the main url locations for public torrent sites to the init module so they can be cascaded down as a global for use in other modules
- IMP: Added a 'deep_search_32p' variable in the config.ini for specific usage with 32P: when there is more than one result, it will dig deeper into each result to try to figure out whether there are series matches, as opposed to the default, which only uses the ref32p table if available, or just the first hit of a multiple-series search and ignores the remainder (see the sketch after this list)
- FIX: Fixed some unknown characters appearing in the pull-list due to unicode-related conversion problems
- FIX: Fixed some special cases of file-parsing errors due to the Volume label being named differently than expected
- FIX: Added a 3s pause between experimental searches to try not to hit their frequency limitation
- IMP: Weekly pull-list one-offs will now show a status of Snatched/Downloaded as required
- FIX: Fixed some deluge parameter problems when using the auto-snatch torrent script/option
- IMP: Changed the downlocation in the auto-snatch option to an env variable instead of being passed in, to avoid unicode-related problems
- FIX: Fixed some magnet-related issues for torrents when using a watchdir + TPSE
- FIX: Added a more verbose error message for rtorrent connection issues
- FIX: Could not connect to the rtorrent client if no username/password were provided
- IMP: Set the db updater to run every 5 minutes on the watchlist, automatically refreshing the oldest-updated series that is more than 5 hours old on each pass (a forced db update from the activity/job schedulers page will run the db updater against the entire watchlist in sequence)
- IMP: Attempt to handle long paths on Windows (i.e. > 256 characters) by prepending the unicode Windows API prefix to the directory path on import (Windows only)
- IMP: When manually metatagging a series, the series will now be updated after all the metatagging has completed, as opposed to after each issue
- IMP: Will now display available inkdrops on the Config/Search Providers tab when using 32P (in the future this will utilize/indicate the inkdrop threshold when downloading)
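As a rough illustration of the new 32P option, it might be enabled in config.ini roughly like this; only the deep_search_32p key itself is named in the commit, so the section name and value shown here are assumptions, not the actual file layout:

[32P]
# assumed section/value; enables the deeper multi-result series matching described above
deep_search_32p = 1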
137 lines
4.9 KiB
Python
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from traceback import format_tb
import logging
import sys

from pytz import utc
import six

from apscheduler.events import (
    JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED)


class MaxInstancesReachedError(Exception):
    def __init__(self, job):
        super(MaxInstancesReachedError, self).__init__(
            'Job "%s" has already reached its maximum number of instances (%d)' %
            (job.id, job.max_instances))


class BaseExecutor(six.with_metaclass(ABCMeta, object)):
    """Abstract base class that defines the interface that every executor must implement."""

    _scheduler = None
    _lock = None
    _logger = logging.getLogger('apscheduler.executors')

    def __init__(self):
        super(BaseExecutor, self).__init__()
        self._instances = defaultdict(lambda: 0)

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the executor is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this executor
        :param str|unicode alias: alias of this executor as it was assigned to the scheduler

        """
        self._scheduler = scheduler
        self._lock = scheduler._create_lock()
        self._logger = logging.getLogger('apscheduler.executors.%s' % alias)

    def shutdown(self, wait=True):
        """
        Shuts down this executor.

        :param bool wait: ``True`` to wait until all submitted jobs
            have been executed
        """

    def submit_job(self, job, run_times):
        """
        Submits job for execution.

        :param Job job: job to execute
        :param list[datetime] run_times: list of datetimes specifying
            when the job should have been run
        :raises MaxInstancesReachedError: if the maximum number of
            allowed instances for this job has been reached

        """
        assert self._lock is not None, 'This executor has not been started yet'
        with self._lock:
            if self._instances[job.id] >= job.max_instances:
                raise MaxInstancesReachedError(job)

            self._do_submit_job(job, run_times)
            self._instances[job.id] += 1

    @abstractmethod
    def _do_submit_job(self, job, run_times):
        """Performs the actual task of scheduling `run_job` to be called."""

    def _run_job_success(self, job_id, events):
        """
        Called by the executor with the list of generated events when :func:`run_job` has been
        successfully called.

        """
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        for event in events:
            self._scheduler._dispatch_event(event)

    def _run_job_error(self, job_id, exc, traceback=None):
        """Called by the executor with the exception if there is an error calling `run_job`."""
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        exc_info = (exc.__class__, exc, traceback)
        self._logger.error('Error running job %s', job_id, exc_info=exc_info)


def run_job(job, jobstore_alias, run_times, logger_name):
    """
    Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
    scheduler.

    """
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle
        # possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
                                                run_time))
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = ''.join(format_tb(tb))
            events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
                                            exception=exc, traceback=formatted_tb))
            logger.exception('Job "%s" raised an exception', job)
        else:
            events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
                                            retval=retval))
            logger.info('Job "%s" executed successfully', job)

    return events
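To make the contract above concrete, here is a minimal synchronous executor in the style of apscheduler's own DebugExecutor: _do_submit_job runs the job inline via run_job and reports the outcome through the _run_job_success/_run_job_error hooks. This is an illustrative sketch, not part of this module; it relies on the scheduler's lock being reentrant (scheduler._create_lock() returns an RLock), since _run_job_success re-acquires it while submit_job still holds it.

# Sketch only (class name is an assumption); assumes the imports and
# definitions from the module above are in scope.
class InlineExecutor(BaseExecutor):
    """Runs jobs synchronously in the calling thread."""

    def _do_submit_job(self, job, run_times):
        try:
            # Execute immediately; run_job returns the events to dispatch.
            events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except BaseException:
            # run_job itself failed; the job's own exceptions are already
            # converted into EVENT_JOB_ERROR events inside run_job.
            self._run_job_error(job.id, *sys.exc_info()[1:])
        else:
            self._run_job_success(job.id, events)

In apscheduler, an executor like this would be attached with scheduler.add_executor(InlineExecutor(), 'inline') and selected per job via the executor='inline' argument to add_job; submit_job then enforces max_instances, raising MaxInstancesReachedError before _do_submit_job is ever called.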