Mirror of https://github.com/evilhero/mylar (synced 2024-12-25 17:16:51 +00:00)
21eee17344
- Torrents will now properly hide torrent information.
- IMP: Specified the daemon port for Deluge as an on-screen tip for more detail.
- IMP: Added 100, 200, and ALL as viewable watchlist views.
- FIX: When viewing the pull-list with annual integration enabled, a present annual would incorrectly link to an invalid annual series instead of the actual series itself.
- IMP: Added more detailed error messages for metatagging errors and better handling of stranded files during cleanup.
- IMP: Improved some handling for weekly pull-list one-offs and refactored the nzb/one-off post-processing into a separate function for future callables.
- Moved all the main URL locations for public torrent sites to the init module so they can be cascaded down for use in other modules as a global.
- IMP: Added a 'deep_search_32p' variable to config.ini for specific usage with 32P: when a search returns more than one result, it digs deeper into each result to try to find series matches, as opposed to the default behaviour of using the ref32p table if available, or just the first hit in a multiple-series search result, ignoring the remainder.
- FIX: Fixed some unknown characters appearing in the pull-list due to unicode-related conversion problems.
- FIX: Fixed some special cases of file-parsing errors caused by the Volume label being named differently than expected.
- FIX: Added a 3-second pause between experimental searches to try to avoid hitting their frequency limitation.
- IMP: Weekly pull-list one-offs will now show a status of Snatched/Downloaded as required.
- FIX: Fixed some Deluge parameter problems when using the auto-snatch torrent script/option.
- IMP: Changed the download location in the auto-snatch option to an environment variable instead of passing it directly, to avoid unicode-related problems.
- FIX: Fixed some magnet-related issues for torrents when using a watchdir + TPSE.
- FIX: Added a more verbose error message for rtorrent connection issues.
- FIX: Could not connect to the rtorrent client if no username/password were provided.
- IMP: Set the db updater to run every 5 minutes against the watchlist, automatically refreshing the oldest-updated series that is more than 5 hours old each time (a forced db update from the activity/job schedulers page will run the db updater against the entire watchlist in sequence).
- IMP: Attempt to handle long paths on Windows (i.e. > 256 characters) by prepending the unicode Windows API prefix to the import-a-directory path (Windows only; see the sketch after this list).
- IMP: When manually metatagging a series, the series is now updated after all metatagging has completed, rather than after each issue.
- IMP: Will now display available inkdrops on the Config/Search Providers tab when using 32P (in the future this will utilize/indicate the inkdrop threshold when downloading).
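For the Windows long-path item above, the usual approach is to prepend the extended-length prefix \\?\ so the unicode Windows file APIs accept paths beyond the ~260-character MAX_PATH limit. A minimal sketch of the idea, with a hypothetical helper name rather than Mylar's actual code:

import os
import sys

def extend_windows_path(path):
    # Hypothetical helper sketching the approach: prefix an absolute path
    # with \\?\ so Windows file APIs accept paths longer than MAX_PATH.
    # Already-prefixed paths are returned unchanged, and non-Windows
    # platforms are left alone.
    if sys.platform == 'win32' and not path.startswith('\\\\?\\'):
        return '\\\\?\\' + os.path.abspath(path)
    return path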
146 lines
5.3 KiB
Python
from __future__ import absolute_import
from datetime import datetime

from pytz import utc
import six

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from redis import StrictRedis
except ImportError:  # pragma: nocover
    raise ImportError('RedisJobStore requires redis installed')


class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
    :class:`~redis.StrictRedis`.

    Plugin alias: ``redis``

    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = StrictRedis(db=int(db), **connect_args)
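
    # Data layout: jobs_key holds a Redis hash mapping job id -> pickled job
    # state, and run_times_key holds a sorted set of job ids scored by each
    # job's next run time as a UTC timestamp; paused jobs (no next run time)
    # are kept out of the sorted set.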

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
                                                          self.pickle_protocol))
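            # zadd below uses the positional (name, score, member) signature
            # from redis-py 2.x; redis-py 3.0+ changed zadd to take a mapping,
            # so this module assumes the older client.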
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
                                                          self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()
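
    # The helpers below rebuild Job objects from their pickled state:
    # Job.__new__ creates a bare instance without calling __init__, and
    # __setstate__ restores the attributes captured by __getstate__.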

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _reconstitute_jobs(self, job_states):
        jobs = []
        failed_job_ids = []
        for job_id, job_state in job_states:
            try:
                jobs.append(self._reconstitute_job(job_state))
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it', job_id)
                failed_job_ids.append(job_id)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            with self.redis.pipeline() as pipe:
                pipe.hdel(self.jobs_key, *failed_job_ids)
                pipe.zrem(self.run_times_key, *failed_job_ids)
                pipe.execute()

        return jobs

    def __repr__(self):
        return '<%s>' % self.__class__.__name__