mylar/lib/apscheduler/jobstores/memory.py
evilhero 21eee17344
IMP: New Scheduler tab (Manage / Activity) where you can see each job's current run status, next run time, and previous run times, as well as force or pause a job
FIX: Disabling torrents will now properly hide torrent information
IMP: Specified the daemon port for Deluge as an on-screen tip for more detail
IMP: Added 100, 200, ALL as viewable watchlist views
FIX: When viewing the pullist with annual integration enabled, a present annual would incorrectly link to an invalid annual series instead of the actual series itself
IMP: Added more detailed error messages for metatagging errors and better handling of stranded files during cleanup
IMP: Improved some handling for weekly pull-list one-offs and refactored the nzb/oneoff post-processing into a separate function for future callables
Moved all the main URL locations for public torrent sites to the init module so they can be cascaded down for use in other modules instead of as a global
IMP: Added a 'deep_search_32p' variable in config.ini for specific usage with 32P: if there is more than one result, it will dig deeper into each result to try to figure out whether there are series matches, as opposed to the default, which only uses the ref32p table if available or just the first hit in a multiple-series search result and ignores the remainder
FIX: Fixed some unknown characters appearing in the pullist due to unicode-related conversion problems
FIX: Fixed some special cases of file-parsing errors due to the Volume label being named differently than expected
FIX: Added a 3s pause between experimental searches to try not to hit their frequency limitation
IMP: Weekly pullist one-offs will now show a status of Snatched/Downloaded as required
FIX: Fixed some Deluge parameter problems when using the auto-snatch torrent script/option
IMP: Changed the download location in the auto-snatch option to an env variable instead of being passed, to avoid unicode-related problems
FIX: Fixed some magnet-related issues for torrents when using a watchdir + TPSE
FIX: Added a more verbose error message for rtorrent connection issues
FIX: Could not connect to the rtorrent client if no username/password were provided
IMP: Set the db updater to run every 5 minutes on the watchlist, automatically refreshing the oldest-updated series that is more than 5 hours old each time (a forced db update from the Activity/Job Schedulers page will run the db updater against the entire watchlist in sequence); see the sketch after this note for how such an interval job is scheduled
IMP: Attempt to handle long paths on Windows (i.e. > 256 characters) by prepending the unicode Windows API prefix to the import directory path (Windows only)
IMP: When manually metatagging a series, the series will now be updated after all metatagging has been completed, as opposed to after each issue
IMP: Will now display available inkdrops on the Config/Search Providers tab when using 32P (in the future this will utilize/indicate an inkdrop threshold when downloading)
2017-09-10 11:50:08 -04:00
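
As a point of reference, the 5-minute db-updater interval described above is the kind of schedule the bundled APScheduler library below is used for. A minimal sketch using the standard APScheduler 3.x API, with a hypothetical update_db callable standing in for the real watchlist updater:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.memory import MemoryJobStore


def update_db():
    # hypothetical stand-in for the watchlist db-updater job
    print('refreshing the oldest-updated series')


# MemoryJobStore is APScheduler's default job store; it is registered explicitly here
scheduler = BackgroundScheduler(jobstores={'default': MemoryJobStore()})
scheduler.add_job(update_db, 'interval', minutes=5, id='dbupdater')
scheduler.start()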


from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp


class MemoryJobStore(BaseJobStore):
    """
    Stores jobs in an array in RAM. Provides no persistence support.

    Plugin alias: ``memory``
    """

    def __init__(self):
        super(MemoryJobStore, self).__init__()
        # list of (job, timestamp), sorted by next_run_time and job id (ascending)
        self._jobs = []
        self._jobs_index = {}  # id -> (job, timestamp) lookup table

    def lookup_job(self, job_id):
        return self._jobs_index.get(job_id, (None, None))[0]

    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        pending = []
        for job, timestamp in self._jobs:
            if timestamp is None or timestamp > now_timestamp:
                break
            pending.append(job)

        return pending

    def get_next_run_time(self):
        return self._jobs[0][0].next_run_time if self._jobs else None

    def get_all_jobs(self):
        return [j[0] for j in self._jobs]

    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)

    def update_job(self, job):
        old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
        if old_job is None:
            raise JobLookupError(job.id)

        # If the next run time has not changed, simply replace the job in its present index.
        # Otherwise, reinsert the job to the list to preserve the ordering.
        old_index = self._get_job_index(old_timestamp, old_job.id)
        new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
        if old_timestamp == new_timestamp:
            self._jobs[old_index] = (job, new_timestamp)
        else:
            del self._jobs[old_index]
            new_index = self._get_job_index(new_timestamp, job.id)
            self._jobs.insert(new_index, (job, new_timestamp))

        self._jobs_index[old_job.id] = (job, new_timestamp)

    def remove_job(self, job_id):
        job, timestamp = self._jobs_index.get(job_id, (None, None))
        if job is None:
            raise JobLookupError(job_id)

        index = self._get_job_index(timestamp, job_id)
        del self._jobs[index]
        del self._jobs_index[job.id]

    def remove_all_jobs(self):
        self._jobs = []
        self._jobs_index = {}

    def shutdown(self):
        self.remove_all_jobs()

    def _get_job_index(self, timestamp, job_id):
        """
        Returns the index of the given job, or if it's not found, the index where the job should be
        inserted based on the given timestamp.

        :type timestamp: int
        :type job_id: str
        """
        lo, hi = 0, len(self._jobs)
        timestamp = float('inf') if timestamp is None else timestamp
        while lo < hi:
            mid = (lo + hi) // 2
            mid_job, mid_timestamp = self._jobs[mid]
            mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
            if mid_timestamp > timestamp:
                hi = mid
            elif mid_timestamp < timestamp:
                lo = mid + 1
            elif mid_job.id > job_id:
                hi = mid
            elif mid_job.id < job_id:
                lo = mid + 1
            else:
                return mid

        return lo
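
The scheduler-facing operations that sit on top of this job store (listing jobs with their next run times, pausing/resuming, and forcing an immediate run, as exposed by the Scheduler tab described in the commit note above) are part of the standard APScheduler 3.x API. A minimal, self-contained sketch under that assumption; the 'example' id and noop callable are illustrative only, not Mylar code:

from datetime import datetime

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.memory import MemoryJobStore


def noop():
    # illustrative job body
    pass


scheduler = BackgroundScheduler(jobstores={'default': MemoryJobStore()})
scheduler.add_job(noop, 'interval', minutes=5, id='example')
scheduler.start()

# show each job's id and next scheduled run time
for job in scheduler.get_jobs():
    print(job.id, job.next_run_time)

# pause and later resume a job by its id
scheduler.pause_job('example')
scheduler.resume_job('example')

# one way to force an immediate run: pull the job's next run time forward to now
scheduler.modify_job('example', next_run_time=datetime.now())

scheduler.shutdown()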