from __future__ import absolute_import

import os
from datetime import datetime

from pytz import utc
from kazoo.exceptions import NoNodeError, NodeExistsError

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from kazoo.client import KazooClient
except ImportError:  # pragma: nocover
    raise ImportError('ZooKeeperJobStore requires Kazoo installed')


class ZooKeeperJobStore(BaseJobStore):
    """
    Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
    kazoo's `KazooClient <http://kazoo.readthedocs.io/en/latest/api/client.html>`_.

    Plugin alias: ``zookeeper``

    :param str path: path to store jobs in
    :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
        providing connection arguments
    :param bool close_connection_on_exit: close the connection when the scheduler is shut down
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

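    # Layout note: each job lives in a child znode of ``path`` named after the
    # job id, whose payload is a pickled dict with two keys, 'job_state' and
    # 'next_run_time' (see add_job() and _get_jobs() below).
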
    def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(ZooKeeperJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False

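    # The root path is created lazily on first use rather than in __init__, so
    # constructing the store does not require a live ZooKeeper connection.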
    def _ensure_paths(self):
        if not self._ensured_path:
            self.client.ensure_path(self.path)
            self._ensured_path = True

    def start(self, scheduler, alias):
        super(ZooKeeperJobStore, self).start(scheduler, alias)
        if not self.client.connected:
            self.client.start()

    def lookup_job(self, job_id):
        self._ensure_paths()
        node_path = os.path.join(self.path, job_id)
        try:
            content, _ = self.client.get(node_path)
            doc = pickle.loads(content)
            return self._reconstitute_job(doc['job_state'])
        except BaseException:
            return None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        jobs = [job_def['job'] for job_def in self._get_jobs()
                if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
        return jobs

    def get_next_run_time(self):
        next_runs = [job_def['next_run_time'] for job_def in self._get_jobs()
                     if job_def['next_run_time'] is not None]
        return utc_timestamp_to_datetime(min(next_runs)) if next_runs else None

    def get_all_jobs(self):
        jobs = [job_def['job'] for job_def in self._get_jobs()]
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job.id))
        value = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(value, self.pickle_protocol)
        try:
            self.client.create(node_path, value=data)
        except NodeExistsError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job.id))
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        try:
            self.client.set(node_path, value=data)
        except NoNodeError:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job_id))
        try:
            self.client.delete(node_path)
        except NoNodeError:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        try:
            self.client.delete(self.path, recursive=True)
        except NoNodeError:
            pass
        self._ensured_path = False

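    # The connection is left open by default so that a shared KazooClient
    # passed in by the caller is not stopped underneath them; pass
    # close_connection_on_exit=True to have the store tear it down.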
    def shutdown(self):
        if self.close_connection_on_exit:
            self.client.stop()
            self.client.close()

    def _reconstitute_job(self, job_state):
        # Rebuild a Job instance from its pickled state without calling __init__
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        self._ensure_paths()
        jobs = []
        failed_job_ids = []
        all_ids = self.client.get_children(self.path)
        for node_name in all_ids:
            try:
                node_path = os.path.join(self.path, node_name)
                content, stat = self.client.get(node_path)
                doc = pickle.loads(content)
                job_def = {
                    'job_id': node_name,
                    'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
                    'job_state': doc['job_state'],
                    'job': self._reconstitute_job(doc['job_state']),
                    'creation_time': stat.ctime
                }
                jobs.append(job_def)
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it', node_name)
                failed_job_ids.append(node_name)

        # Remove all the jobs we failed to restore
        for failed_id in failed_job_ids:
            self.remove_job(failed_id)

        # Paused jobs (no next_run_time) sort last; ties break on creation time
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
                                                 job_def['creation_time']))

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
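

# A minimal usage sketch (illustrative, not part of the original module): it
# registers this store with a BackgroundScheduler under its plugin alias and
# assumes a ZooKeeper server is reachable at localhost:2181; the 'hosts'
# keyword is forwarded to KazooClient via **connect_args.
if __name__ == '__main__':  # pragma: nocover
    import time

    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler()
    scheduler.add_jobstore('zookeeper', path='/apscheduler', hosts='localhost:2181')
    # time.time is importable by reference, so the job state can be pickled and restored
    scheduler.add_job(time.time, 'interval', seconds=10, id='demo_job',
                      replace_existing=True)
    scheduler.start()
    try:
        time.sleep(30)
    finally:
        scheduler.shutdown()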