FIX: (#2179) Post-processing an item would fail if a match occurred on the story-arc check
FIX: Fixed some sub-directory problems when doing various types of scans
IMP: Added booktype to filechecker parsing results
FIX: When downloading via DDL, the booktype was not adhered to as a constraint
IMP: Pack support added for DDL (available as a per-series option)
IMP: Added BookType & Aliases to the arc's section, which will impact how issues/series are searched/post-processed/checked when they're an issue from an arc
IMP: Initial codebase for a queue manager section
IMP: Write DDL-Queue data to the sql table so that stalled/broken downloads can eventually be resumed/deleted etc. (see the sketch below)
FIX: If a filename didn't have a valid issue number and it is a Print Edition, a warning indicating other options to try is now thrown instead of causing a traceback
IMP: Updated snatch notifications so the notification header will just say 'Issue Snatched' with a brief description
FIX: Removed multiple 'import db' lines from the helpers module
IMP: cleanup_cache variable (true/false) added to the config section; on startup it will clean up items in the cache directory that are no longer needed
IMP: Changed some logging string concatenation lines to avoid traceback errors during logging

evilhero 2019-02-01 16:25:24 -05:00
parent 2220d04755
commit 41e5f42471
11 changed files with 750 additions and 429 deletions
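Several of the items above (the queue manager, DDL pack support, and writing DDL-queue data to sql) revolve around the new ddl_info table created in dbcheck() below. As a rough sketch of what the eventual queue manager could do with it (hypothetical helper, not code from this commit; the statuses match the values the commit writes):

import sqlite3

def list_resumable_ddl(db_path):
    # Hypothetical: rows stuck in 'Queued'/'Downloading' after a restart are
    # the stalled/broken downloads a queue manager could resume or delete.
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    c.execute("SELECT ID, series, year, filename, status FROM ddl_info "
              "WHERE status IN ('Queued', 'Downloading')")
    rows = c.fetchall()
    conn.close()
    return rows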

File diff suppressed because it is too large.


@@ -504,7 +504,7 @@ def dbcheck():
         c.execute('SELECT ReleaseDate from storyarcs')
     except sqlite3.OperationalError:
         try:
-            c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)')
+            c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
             c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
             c.execute('DROP TABLE readinglist')
         except sqlite3.OperationalError:
@@ -527,7 +527,7 @@ def dbcheck():
     c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)')
-    c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)')
+    c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT)')
     conn.commit
     c.close
@@ -1043,6 +1043,11 @@ def dbcheck():
     except sqlite3.OperationalError:
         c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT')

+    try:
+        c.execute('SELECT Aliases from storyarcs')
+    except sqlite3.OperationalError:
+        c.execute('ALTER TABLE storyarcs ADD COLUMN Aliases TEXT')
+
     ## -- searchresults Table --
     try:
         c.execute('SELECT SRID from searchresults')
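All three dbcheck() hunks above rely on the same sqlite migration idiom: probe for a column with a SELECT and, when sqlite3 raises OperationalError, bolt it on with ALTER TABLE. A minimal self-contained sketch of the idiom (a generic helper written for illustration, not Mylar's actual code):

import sqlite3

def ensure_column(cur, table, column, coltype='TEXT'):
    # probe: sqlite raises OperationalError if the column doesn't exist yet
    try:
        cur.execute('SELECT %s from %s' % (column, table))
    except sqlite3.OperationalError:
        cur.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE storyarcs (StoryArcID TEXT)')
ensure_column(cur, 'storyarcs', 'Aliases')   # same effect as the Aliases hunk above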


@@ -3,6 +3,7 @@ from collections import OrderedDict
 from operator import itemgetter
 import os
+import glob
 import codecs
 import shutil
 import re
@@ -74,6 +75,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
     'ALTERNATE_LATEST_SERIES_COVERS': (bool, 'General', False),
     'SHOW_ICONS': (bool, 'General', False),
     'FORMAT_BOOKTYPE': (bool, 'General', False),
+    'CLEANUP_CACHE': (bool, 'General', False),

     'RSS_CHECKINTERVAL': (int, 'Scheduler', 20),
     'SEARCH_INTERVAL': (int, 'Scheduler', 360),
@@ -771,6 +773,26 @@ class Config(object):
             except OSError:
                 logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR)

+        if self.CLEANUP_CACHE is True:
+            logger.fdebug('[Cache Cleanup] Cache Cleanup initiated. Will delete items from cache that are no longer needed.')
+            cache_types = ['*.nzb', '*.torrent', '*.zip', '*.html', 'mylar_*']
+            cntr = 0
+            for x in cache_types:
+                for f in glob.glob(os.path.join(self.CACHE_DIR, x)):
+                    try:
+                        if os.path.isdir(f):
+                            shutil.rmtree(f)
+                        else:
+                            os.remove(f)
+                    except Exception as e:
+                        logger.warn('[ERROR] Unable to remove %s from cache. Possibly a permissions issue?' % f)
+                    cntr += 1
+            if cntr > 1:
+                logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items' % cntr)
+            else:
+                logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Nothing to clean!')
+
         if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]):
             self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag')
             logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR)
@@ -843,8 +865,10 @@ class Config(object):
             else:
                 logger.fdebug('Successfully created ComicTagger Settings location.')

-        if self.DDL_LOCATION is None:
+        if not self.DDL_LOCATION:
             self.DDL_LOCATION = self.CACHE_DIR
+            if self.ENABLE_DDL is True:
+                logger.info('Setting DDL Location to : %s' % self.DDL_LOCATION)

         if self.MODE_32P is False and self.RSSFEED_32P is not None:
             mylar.KEYS_32P = self.parse_32pfeed(self.RSSFEED_32P)


@@ -72,7 +72,7 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
     elif type == 'storyarc':
         PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date'
     elif type == 'comicyears':
-        PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck&offset=' + str(offset)
+        PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck,aliases&offset=' + str(offset)
     elif type == 'import':
         PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + (comicidlist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(offset)
     elif type == 'update_dates':
@@ -340,10 +340,10 @@ def GetComicInfo(comicid, dom, safechk=None):
             comic['Type'] = 'TPB'
         elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
             comic['Type'] = 'HC'
-        elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and 'can be found' not in comic_desc.lower():
+        elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
             i = 0
             comic['Type'] = 'One-Shot'
-            avoidwords = ['preceding', 'after the special']
+            avoidwords = ['preceding', 'after the special', 'following the']
             while i < 2:
                 if i == 0:
                     cbd = 'one-shot'
@@ -718,11 +718,12 @@ def GetSeriesYears(dom):
             tempseries['SeriesYear'] = tempseries['SeriesYear'][:-1]

         desdeck = 0
-        tempseries['Volume'] = 'None'

         #the description field actually holds the Volume# - so let's grab it
+        desc_soup = None
         try:
             descchunk = dm.getElementsByTagName('description')[0].firstChild.wholeText
+            desc_soup = Soup(descchunk, "html.parser")
+            desclinks = desc_soup.findAll('a')
             comic_desc = drophtml(descchunk)
             desdeck +=1
         except:
@@ -736,6 +737,139 @@ def GetSeriesYears(dom):
         except:
             comic_deck = 'None'

+        #comic['ComicDescription'] = comic_desc
+
+        try:
+            tempseries['Aliases'] = dm.getElementsByTagName('aliases')[0].firstChild.wholeText
+            tempseries['Aliases'] = re.sub('\n', '##', tempseries['Aliases']).strip()
+            if tempseries['Aliases'][-2:] == '##':
+                tempseries['Aliases'] = tempseries['Aliases'][:-2]
+            #logger.fdebug('Aliases: ' + str(aliases))
+        except:
+            tempseries['Aliases'] = 'None'
+
+        tempseries['Volume'] = 'None' #noversion
+
+        #figure out if it's a print / digital edition.
+        tempseries['Type'] = 'None'
+        if comic_deck != 'None':
+            if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'one shot' in re.sub('-', '', comic_deck.lower()).strip(), 'hardcover' in comic_deck.lower()]):
+                if 'print' in comic_deck.lower():
+                    tempseries['Type'] = 'Print'
+                elif 'digital' in comic_deck.lower():
+                    tempseries['Type'] = 'Digital'
+                elif 'paperback' in comic_deck.lower():
+                    tempseries['Type'] = 'TPB'
+                elif 'hardcover' in comic_deck.lower():
+                    tempseries['Type'] = 'HC'
+                elif 'oneshot' in re.sub('-', '', comic_deck.lower()).strip():
+                    tempseries['Type'] = 'One-Shot'
+
+        if comic_desc != 'None' and tempseries['Type'] == 'None':
+            if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower():
+                tempseries['Type'] = 'Print'
+            elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower():
+                tempseries['Type'] = 'Digital'
+            elif all(['paperback' in comic_desc[:60].lower(), 'paperback can be found' not in comic_desc.lower()]) or 'collects' in comic_desc[:60].lower():
+                tempseries['Type'] = 'TPB'
+            elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
+                tempseries['Type'] = 'HC'
+            elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
+                i = 0
+                tempseries['Type'] = 'One-Shot'
+                avoidwords = ['preceding', 'after the special', 'following the']
+                while i < 2:
+                    if i == 0:
+                        cbd = 'one-shot'
+                    elif i == 1:
+                        cbd = 'one shot'
+                    tmp1 = comic_desc[:60].lower().find(cbd)
+                    if tmp1 != -1:
+                        for x in avoidwords:
+                            tmp2 = comic_desc[:tmp1].lower().find(x)
+                            if tmp2 != -1:
+                                logger.fdebug('FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print')
+                                tempseries['Type'] = 'Print'
+                                i = 3
+                                break
+                    i += 1
+            else:
+                tempseries['Type'] = 'Print'
+
+        if all([comic_desc != 'None', 'trade paperback' in comic_desc[:30].lower(), 'collecting' in comic_desc[:40].lower()]):
+            #ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145.
+            first_collect = comic_desc.lower().find('collecting')
+            #logger.info('first_collect: %s' % first_collect)
+            #logger.info('comic_desc: %s' % comic_desc)
+            #logger.info('desclinks: %s' % desclinks)
+            issue_list = []
+            micdrop = []
+            if desc_soup is not None:
+                #if it's point form bullets, ignore it cause it's not the current volume stuff.
+                test_it = desc_soup.find('ul')
+                if test_it:
+                    for x in test_it.findAll('li'):
+                        if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]):
+                            mic_check = x.find('a')
+                            micdrop.append(mic_check['data-ref-id'])
+
+            for fc in desclinks:
+                #logger.info('fc: %s' % fc)
+                fc_id = fc['data-ref-id']
+                #logger.info('fc_id: %s' % fc_id)
+                if fc_id in micdrop:
+                    continue
+                fc_name = fc.findNext(text=True)
+                if fc_id.startswith('4000'):
+                    fc_cid = None
+                    fc_isid = fc_id
+                    iss_start = fc_name.find('#')
+                    issuerun = fc_name[iss_start:].strip()
+                    fc_name = fc_name[:iss_start].strip()
+                elif fc_id.startswith('4050'):
+                    fc_cid = fc_id
+                    fc_isid = None
+                    issuerun = fc.next_sibling
+                    if issuerun is not None:
+                        lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
+                        if len(lines) > 0:
+                            for x in sorted(lines, reverse=True):
+                                srchline = issuerun.rfind(x)
+                                if srchline != -1:
+                                    try:
+                                        if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
+                                            issuerun = issuerun[:srchline+len(x)]
+                                            break
+                                    except Exception as e:
+                                        logger.warn('[ERROR] %s' % e)
+                                        continue
+                    else:
+                        iss_start = fc_name.find('#')
+                        issuerun = fc_name[iss_start:].strip()
+                        fc_name = fc_name[:iss_start].strip()
+                    if issuerun.endswith('.') or issuerun.endswith(','):
+                        #logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1]))
+                        issuerun = issuerun[:-1]
+                    if issuerun.endswith(' and '):
+                        issuerun = issuerun[:-4].strip()
+                    elif issuerun.endswith(' and'):
+                        issuerun = issuerun[:-3].strip()
+                else:
+                    continue
+                # except:
+                #     pass
+                issue_list.append({'series': fc_name,
+                                   'comicid': fc_cid,
+                                   'issueid': fc_isid,
+                                   'issues': issuerun})
+            #first_collect = cis
+
+            logger.info('Collected issues in volume: %s' % issue_list)
+            tempseries['Issue_List'] = issue_list
+        else:
+            tempseries['Issue_List'] = 'None'
+
         while (desdeck > 0):
             if desdeck == 1:
                 if comic_desc == 'None':
@@ -760,11 +894,11 @@ def GetSeriesYears(dom):
                 if i == 0:
                     vfind = comicDes[v_find:v_find +15]   #if it's volume 5 format
                     basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
-                    logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
+                    logger.fdebug('volume X format - %s: %s' % (i, vfind))
                 else:
                     vfind = comicDes[:v_find]   # if it's fifth volume format
                     basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
-                    logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
+                    logger.fdebug('X volume format - %s: %s' % (i, vfind))
                 volconv = ''
                 for nums in basenums:
                     if nums in vfind.lower():
@@ -773,6 +907,7 @@ def GetSeriesYears(dom):
                         break
                 #logger.info('volconv: ' + str(volconv))

+                #now we attempt to find the character position after the word 'volume'
                 if i == 0:
                     volthis = vfind.lower().find('volume')
                     volthis = volthis + 6   # add on the actual word to the position so that we can grab the subsequent digit
@@ -790,7 +925,7 @@ def GetSeriesYears(dom):
                         ledigit = re.sub("[^0-9]", "", vf[0])
                         if ledigit != '':
                             tempseries['Volume'] = ledigit
-                            logger.fdebug("Volume information found! Adding to series record : volume " + tempseries['Volume'])
+                            logger.fdebug("Volume information found! Adding to series record : volume %s" % tempseries['Volume'])
                             break
                     except:
                         pass
@@ -800,7 +935,7 @@ def GetSeriesYears(dom):
                 i += 1

             if tempseries['Volume'] == 'None':
-                logger.fdebug('tempseries[Volume]:' + str(tempseries['Volume']))
+                logger.fdebug('tempseries[Volume]: %s' % tempseries['Volume'])
                 desdeck -= 1
             else:
                 break
@@ -810,7 +945,9 @@ def GetSeriesYears(dom):
             "ComicName": tempseries['Series'],
             "SeriesYear": tempseries['SeriesYear'],
             "Publisher": tempseries['Publisher'],
-            "Volume": tempseries['Volume']})
+            "Volume": tempseries['Volume'],
+            "Aliases": tempseries['Aliases'],
+            "Type": tempseries['Type']})

     return serieslist
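For reference, the dict appended to serieslist now carries the two new keys alongside the existing ones. An illustrative entry (values invented; only the keys visible in the hunk above are shown, and any keys set earlier in the append() call sit outside this hunk):

example_entry = {
    'ComicName': 'Invincible',             # from tempseries['Series']
    'SeriesYear': '2003',
    'Publisher': 'Image Comics',
    'Volume': '1',
    'Aliases': 'Alias One##Alias Two',     # '##'-joined, per the aliases parsing above
    'Type': 'Print',
}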


@@ -26,8 +26,7 @@ import time
 import Queue

 import mylar
-import logger
+from mylar import logger

 db_lock = threading.Lock()
 mylarQueue = Queue.Queue()


@@ -234,7 +234,7 @@ class FileChecker(object):
                 ab = len(path)
                 tmppath = subpath[ab:]
             else:
-                tmppath = re.sub(path, '', subpath).strip()
+                tmppath = subpath.replace(path, '').strip()

             path_list = os.path.normpath(tmppath)
             if '/' == path_list[0] or '\\' == path_list[0]:


@@ -27,8 +27,10 @@ import json
 from bs4 import BeautifulSoup
 import requests
 import cfscrape
+import zipfile
 import logger
 import mylar
+from mylar import db

 class GC(object):
@@ -63,7 +65,8 @@ class GC(object):
         return self.search_results()

-    def loadsite(self, title, link):
+    def loadsite(self, id, link):
+        title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
         with cfscrape.create_scraper() as s:
             self.cf_cookievalue, cf_user_agent = s.get_tokens(link, headers=self.headers)
@@ -89,6 +92,31 @@ class GC(object):
                 link = lk['href']
                 titlefind = f.find("h1", {"class": "post-title"})
                 title = titlefind.get_text(strip=True)
+                title = re.sub(u'\u2013', '-', title).strip()
+                filename = title
+                issues = None
+                pack = False
+                #see if it's a pack type
+                issfind_st = title.find('#')
+                issfind_en = title.find('-', issfind_st)
+                if issfind_en != -1:
+                    if all([title[issfind_en+1] == ' ', title[issfind_en+2].isdigit()]):
+                        iss_en = title.find(' ', issfind_en+2)
+                        if iss_en != -1:
+                            issues = title[issfind_st+1:iss_en]
+                            pack = True
+                    if title[issfind_en+1].isdigit():
+                        iss_en = title.find(' ', issfind_en+1)
+                        if iss_en != -1:
+                            issues = title[issfind_st+1:iss_en]
+                            pack = True
+
+                # if it's a pack - remove the issue-range and the possible issue years (cause it most likely will span) and pass thru as separate items
+                if pack is True:
+                    title = re.sub(issues, '', title).strip()
+                    if title.endswith('#'):
+                        title = title[:-1].strip()
+
                 option_find = f.find("p", {"style": "text-align: center;"})
                 i = 0
                 while i <= 2:
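The pack detection above keys off a '#' followed by a dashed issue range in the post title. Tracing the same string operations on a hypothetical title shows what ends up in issues/pack (the spanning year range is stripped later, in the 'Year' branch of the next hunk):

title = 'Invincible #111 - 120 (2014-2015)'   # hypothetical post title
issues = None
pack = False
issfind_st = title.find('#')                  # 11
issfind_en = title.find('-', issfind_st)      # 16 - the dash of the range
if issfind_en != -1:
    if all([title[issfind_en+1] == ' ', title[issfind_en+2].isdigit()]):
        iss_en = title.find(' ', issfind_en+2)
        if iss_en != -1:
            issues = title[issfind_st+1:iss_en]   # '111 - 120'
            pack = True
print('%s / %s' % (issues, pack))             # -> 111 - 120 / True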
@@ -96,6 +124,8 @@ class GC(object):
                     if 'Year' in option_find:
                         year = option_find.findNext(text=True)
                         year = re.sub('\|', '', year).strip()
+                        if pack is True and '-' in year:
+                            title = re.sub('\('+year+'\)', '', title).strip()
                     else:
                         size = option_find.findNext(text=True)
                         if all([re.sub(':', '', size).strip() != 'Size', len(re.sub('[^0-9]', '', size).strip()) > 0]):
@@ -114,7 +144,10 @@ class GC(object):
                 datestamp = time.mktime(time.strptime(datefull, "%Y-%m-%d"))
                 resultlist.append({"title": title,
                                    "pubdate": datetime.datetime.fromtimestamp(float(datestamp)).strftime('%a, %d %b %Y %H:%M:%S'),
+                                   "filename": filename,
                                    "size": re.sub(' ', '', size).strip(),
+                                   "pack": pack,
+                                   "issues": issues,
                                    "link": link,
                                    "year": year,
                                    "id": re.sub('post-', '', id).strip(),
@@ -126,8 +159,9 @@ class GC(object):
         return results

-    def parse_downloadresults(self, title, mainlink):
+    def parse_downloadresults(self, id, mainlink):
+        myDB = db.DBConnection()
+        title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
         soup = BeautifulSoup(open(title+'.html'), 'html.parser')
         orig_find = soup.find("p", {"style": "text-align: center;"})
         i = 0
@@ -201,23 +235,35 @@ class GC(object):
             for x in links:
                 logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link']))

+        ctrlval = {'id': id}
+        vals = {'series': series,
+                'year': year,
+                'size': size,
+                'issueid': self.issueid,
+                'comicid': self.comicid,
+                'link': link,
+                'status': 'Queued'}
+        myDB.upsert('ddl_info', vals, ctrlval)
+
         mylar.DDL_QUEUE.put({'link': link,
                              'mainlink': mainlink,
                              'series': series,
                              'year': year,
                              'size': size,
                              'comicid': self.comicid,
-                             'issueid': self.issueid})
+                             'issueid': self.issueid,
+                             'id': id})

         return {'success': True}

-    def downloadit(self, link, mainlink):
+    def downloadit(self, id, link, mainlink):
         if mylar.DDL_LOCK is True:
             logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.')
             return
         else:
             mylar.DDL_LOCK = True

+        myDB = db.DBConnection()
         filename = None
         try:
             with cfscrape.create_scraper() as s:
@@ -228,6 +274,9 @@ class GC(object):
                     path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)

+                    #write the filename to the db for tracking purposes...
+                    myDB.upsert('ddl_info', {'filename': filename}, {'id': id})
+
                     if t.headers.get('content-encoding') == 'gzip':   #.get('Content-Encoding') == 'gzip':
                         buf = StringIO(t.content)
                         f = gzip.GzipFile(fileobj=buf)
@@ -248,9 +297,29 @@ class GC(object):
         else:
             mylar.DDL_LOCK = False
             if os.path.isfile(path):
+                if path.endswith('.zip'):
+                    new_path = os.path.join(mylar.CONFIG.DDL_LOCATION, re.sub('.zip', '', filename).strip())
+                    logger.info('Zip file detected. Unzipping into new modified path location: %s' % new_path)
+                    try:
+                        zip_f = zipfile.ZipFile(path, 'r')
+                        zip_f.extractall(new_path)
+                        zip_f.close()
+                    except Exception as e:
+                        logger.warn('[ERROR: %s] Unable to extract zip file: %s' % (e, new_path))
+                        return ({"success": False,
+                                 "filename": filename,
+                                 "path": None})
+                    else:
+                        try:
+                            os.remove(path)
+                        except Exception as e:
+                            logger.warn('[ERROR: %s] Unable to remove zip file from %s after extraction.' % (e, path))
+                        filename = None
+                else:
+                    new_path = path
                 return ({"success": True,
                          "filename": filename,
-                         "path": mylar.CONFIG.DDL_LOCATION})
+                         "path": new_path})

     def issue_list(self, pack):
         #packlist = [x.strip() for x in pack.split(',)]


@@ -21,6 +21,7 @@ from datetime import timedelta, date
 import subprocess
 import requests
 import shlex
+import Queue
 import json
 import re
 import sys
@@ -37,7 +38,7 @@ from apscheduler.triggers.interval import IntervalTrigger

 import mylar
 import logger
-from mylar import sabnzbd, nzbget, process, getcomics
+from mylar import db, sabnzbd, nzbget, process, getcomics

 def multikeysort(items, columns):
@@ -266,7 +267,7 @@ def decimal_issue(iss):
     return deciss, dec_except

 def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None, annualize=None, arc=False):
-    import db
+    #import db
     myDB = db.DBConnection()
     comicid = str(comicid)   # it's coming in unicoded...
@@ -718,7 +719,7 @@ def ComicSort(comicorder=None, sequence=None, imported=None):
     if sequence:
         # if it's on startup, load the sql into a tuple for use to avoid record-locking
         i = 0
-        import db
+        #import db
         myDB = db.DBConnection()
         comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
         comicorderlist = []
@@ -803,7 +804,7 @@ def updateComicLocation():
     # - set NEWCOMDIR = new ComicLocation
     #after running, set ComicLocation to new location in Configuration GUI

-    import db
+    #import db
     myDB = db.DBConnection()
     if mylar.CONFIG.NEWCOM_DIR is not None:
         logger.info('Performing a one-time mass update to Comic Location')
@@ -935,7 +936,7 @@ def cleanhtml(raw_html):

 def issuedigits(issnum):
-    import db
+    #import db

     int_issnum = None
@@ -1129,7 +1130,7 @@ def issuedigits(issnum):

 def checkthepub(ComicID):
-    import db
+    #import db
     myDB = db.DBConnection()
     publishers = ['marvel', 'dc', 'darkhorse']
     pubchk = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
@@ -1146,7 +1147,7 @@ def checkthepub(ComicID):
     return mylar.CONFIG.INDIE_PUB

 def annual_update():
-    import db
+    #import db
     myDB = db.DBConnection()
     annuallist = myDB.select('SELECT * FROM annuals')
     if annuallist is None:
@@ -1202,7 +1203,7 @@ def renamefile_readingorder(readorder):
     return readord

 def latestdate_fix():
-    import db
+    #import db
     datefix = []
     cnupdate = []
     myDB = db.DBConnection()
@@ -1254,7 +1255,7 @@ def latestdate_fix():
     return

 def upgrade_dynamic():
-    import db
+    #import db
     dynamic_comiclist = []
     myDB = db.DBConnection()
     #update the comicdb to include the Dynamic Names (and any futher changes as required)
@@ -1293,7 +1294,6 @@ def upgrade_dynamic():

 def checkFolder(folderpath=None):
     from mylar import PostProcessor
-    import Queue

     queue = Queue.Queue()
     #monitor a selected folder for 'snatched' files that haven't been processed
@@ -1339,7 +1339,7 @@ def LoadAlternateSearchNames(seriesname_alt, comicid):
     return Alternate_Names

 def havetotals(refreshit=None):
-    import db
+    #import db

     comics = []
     myDB = db.DBConnection()
@@ -1827,7 +1827,7 @@ def IssueDetails(filelocation, IssueID=None, justinfo=False):
     return issuedetails

 def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None, IssueArcID=None):
-    import db
+    #import db
     myDB = db.DBConnection()
     if IssueID:
         issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
@@ -1859,7 +1859,7 @@ def int_num(s):
         return float(s)

 def listPull(weeknumber, year):
-    import db
+    #import db
     library = {}
     myDB = db.DBConnection()
     # Get individual comics
@@ -1869,7 +1869,7 @@ def listPull(weeknumber, year):
     return library

 def listLibrary(comicid=None):
-    import db
+    #import db
     library = {}
     myDB = db.DBConnection()
     if comicid is None:
@@ -1896,7 +1896,7 @@ def listLibrary(comicid=None):
     return library

 def listStoryArcs():
-    import db
+    #import db
     library = {}
     myDB = db.DBConnection()
     # Get Distinct Arc IDs
@@ -1910,7 +1910,7 @@ def listStoryArcs():
     return library

 def listoneoffs(weeknumber, year):
-    import db
+    #import db
     library = []
     myDB = db.DBConnection()
     # Get Distinct one-off issues from the pullist that have already been downloaded / snatched
@@ -1926,7 +1926,7 @@ def listoneoffs(weeknumber, year):
     return library

 def manualArc(issueid, reading_order, storyarcid):
-    import db
+    #import db
     if issueid.startswith('4000-'):
         issueid = issueid[5:]
@@ -2062,7 +2062,7 @@ def manualArc(issueid, reading_order, storyarcid):
     return

 def listIssues(weeknumber, year):
-    import db
+    #import db
     library = []
     myDB = db.DBConnection()
     # Get individual issues
@@ -2107,7 +2107,7 @@ def listIssues(weeknumber, year):
     return library

 def incr_snatched(ComicID):
-    import db
+    #import db
     myDB = db.DBConnection()
     incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone()
     logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1))
@@ -2123,7 +2123,7 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None, r
     #storyarcid = the storyarcid of the issue that's being checked for duplication.
     #rtnval = the return value of a previous duplicate_filecheck that's re-running against new values
     #
-    import db
+    #import db
     myDB = db.DBConnection()

     logger.info('[DUPECHECK] Duplicate check for ' + filename)
@@ -2401,7 +2401,7 @@ def humanize_time(amount, units = 'seconds'):
     return buf

 def issue_status(IssueID):
-    import db
+    #import db
     myDB = db.DBConnection()

     IssueID = str(IssueID)
@@ -2435,7 +2435,7 @@ def crc(filename):
     return hashlib.md5(filename).hexdigest()

 def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
-    import db
+    #import db
     myDB = db.DBConnection()
@@ -2562,7 +2562,7 @@ def cleanHost(host, protocol = True, ssl = False, username = None, password = No
     return host

 def checkthe_id(comicid=None, up_vals=None):
-    import db
+    #import db
     myDB = db.DBConnection()
     if not up_vals:
         chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone()
@@ -2593,7 +2593,7 @@ def checkthe_id(comicid=None, up_vals=None):
         myDB.upsert("ref32p", newVal, ctrlVal)

 def updatearc_locs(storyarcid, issues):
-    import db
+    #import db
     myDB = db.DBConnection()
     issuelist = []
     for x in issues:
@@ -2683,7 +2683,7 @@ def updatearc_locs(storyarcid, issues):

 def spantheyears(storyarcid):
-    import db
+    #import db
     myDB = db.DBConnection()

     totalcnt = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [storyarcid])
@@ -2747,7 +2747,7 @@ def arcformat(arc, spanyears, publisher):
     return dstloc

 def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
-    import db
+    #import db
     from base64 import b16encode, b32decode

     #check the status of the issueid to make sure it's in Snatched status and was grabbed via torrent.
@@ -3009,7 +3009,7 @@ def weekly_info(week=None, year=None, current=None):
     return weekinfo

 def latestdate_update():
-    import db
+    #import db
     myDB = db.DBConnection()
     ccheck = myDB.select('SELECT a.ComicID, b.IssueID, a.LatestDate, b.ReleaseDate, b.Issue_Number from comics as a left join issues as b on a.comicid=b.comicid where a.LatestDate < b.ReleaseDate or a.LatestDate like "%Unknown%" group by a.ComicID')
     if ccheck is None or len(ccheck) == 0:
@@ -3031,6 +3031,7 @@ def latestdate_update():
         myDB.upsert("comics", newVal, ctrlVal)

 def ddl_downloader(queue):
+    myDB = db.DBConnection()
     while True:
         if mylar.DDL_LOCK is True:
             time.sleep(5)
@@ -3042,19 +3043,37 @@ def ddl_downloader(queue):
             logger.info('Cleaning up workers for shutdown')
             break

+        #write this to the table so we have a record of what's going on.
+        ctrlval = {'id': item['id']}
+        val = {'status': 'Downloading'}
+        myDB.upsert('ddl_info', val, ctrlval)
+
         ddz = getcomics.GC()
-        ddzstat = ddz.downloadit(item['link'], item['mainlink'])
+        ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink'])
+
+        nval = {'status': 'Completed'}
+        myDB.upsert('ddl_info', nval, ctrlval)

         if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]):
-            logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename']))
             try:
-                mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
-                                    'nzb_folder': ddzstat['path'],
-                                    'failed': False,
-                                    'issueid': item['issueid'],
-                                    'comicid': item['comicid'],
-                                    'apicall': True,
-                                    'ddl': True})
+                if ddzstat['filename'] is None:
+                    logger.info('%s successfully downloaded - now initiating post-processing.' % (os.path.basename(ddzstat['path'])))
+                    mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
+                                        'nzb_folder': ddzstat['path'],
+                                        'failed': False,
+                                        'issueid': None,
+                                        'comicid': item['comicid'],
+                                        'apicall': True,
+                                        'ddl': True})
+                else:
+                    logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename']))
+                    mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
+                                        'nzb_folder': ddzstat['path'],
+                                        'failed': False,
+                                        'issueid': item['issueid'],
+                                        'comicid': item['comicid'],
+                                        'apicall': True,
+                                        'ddl': True})
             except Exception as e:
                 logger.info('process error: %s [%s]' % (e, ddzstat))
         elif mylar.CONFIG.POST_PROCESSING is True:
@@ -3323,7 +3342,7 @@ def date_conversion(originaldate):

 def job_management(write=False, job=None, last_run_completed=None, current_run=None, status=None):
     jobresults = []
-    import db
+    #import db
     myDB = db.DBConnection()

     if job is None:
@@ -3540,7 +3559,7 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N

 def stupidchk():
-    import db
+    #import db
     myDB = db.DBConnection()
     CCOMICS = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Active'")
     ens = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Loading' OR Status='Paused'")
@@ -3854,7 +3873,7 @@ def publisherImages(publisher):
     return comicpublisher

 def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
-    import db
+    #import db
     myDB = db.DBConnection()
     watchlist = listLibrary()
     matchlist = []
@@ -3894,7 +3913,7 @@ def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):

 def DateAddedFix():
-    import db
+    #import db
     myDB = db.DBConnection()
     DA_A = datetime.datetime.today()
     DateAdded = DA_A.strftime('%Y-%m-%d')
@@ -3905,8 +3924,6 @@ def DateAddedFix():
     for an in annuals:
         myDB.upsert("annuals", {'DateAdded': DateAdded}, {'IssueID': an[0]})
-
-
 def file_ops(path,dst,arc=False,one_off=False):
     # # path = source path + filename
     # # dst = destination path + filename
@@ -4051,7 +4068,6 @@ def file_ops(path,dst,arc=False,one_off=False):
     else:
         return False
-
 from threading import Thread

 class ThreadWithReturnValue(Thread):


@@ -841,7 +841,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                 pack_warning = True
                 continue

-            logger.fdebug("checking search result: " + entry['title'])
+            logger.fdebug("checking search result: %s" % entry['title'])
             #some nzbsites feel that comics don't deserve a nice regex to strip the crap from the header, the end result is that we're
             #dealing with the actual raw header which causes incorrect matches below.
             #this is a temporary cut from the experimental search option (findcomicfeed) as it does this part well usually.
@@ -938,20 +938,20 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
             else:
                 if entry['title'][:17] != '0-Day Comics Pack':
                     comsize_m = helpers.human_size(comsize_b)
-                    logger.fdebug("size given as: " + str(comsize_m))
+                    logger.fdebug('size given as: %s' % comsize_m)

                     #----size constraints.
                     #if it's not within size constaints - dump it now and save some time.
                     if mylar.CONFIG.USE_MINSIZE:
                         conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M")
-                        logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b))
+                        logger.fdebug('comparing Min threshold %s .. to .. nzb %s' % (conv_minsize, comsize_b))
                         if int(conv_minsize) > int(comsize_b):
-                            logger.fdebug("Failure to meet the Minimum size threshold - skipping")
+                            logger.fdebug('Failure to meet the Minimum size threshold - skipping')
                             continue
                     if mylar.CONFIG.USE_MAXSIZE:
                         conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M")
-                        logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b))
+                        logger.fdebug('comparing Max threshold %s .. to .. nzb %s' % (conv_maxsize, comsize_b))
                         if int(comsize_b) > int(conv_maxsize):
-                            logger.fdebug("Failure to meet the Maximium size threshold - skipping")
+                            logger.fdebug('Failure to meet the Maximium size threshold - skipping')
                             continue

             #---- date constaints.
@@ -1014,7 +1014,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                     else:
                         postdate_int = time.mktime(dateconv[:len(dateconv) -1])
                 except:
-                    logger.warn('Unable to parse posting date from provider result set for :' + entry['title'])
+                    logger.warn('Unable to parse posting date from provider result set for : %s' % entry['title'])
                     continue

                 if all([digitaldate != '0000-00-00', digitaldate is not None]):
@@ -1068,23 +1068,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                     #logger.info('dateconv2: %s' % dateconv2.date())
                     #logger.info('digconv2: %s' % digconv2.date())
                     if digitaldate != '0000-00-00' and dateconv2.date() >= digconv2.date():
-                        logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate))
+                        logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate))
                     elif dateconv2.date() < issconv2.date():
                         logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), issconv2.date()))
-                        logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
+                        logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate))
                         continue
                     else:
-                        logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate))
+                        logger.fdebug('%s is after store date of %s' % (pubdate, stdate))
                 except:
                     #if the above fails, drop down to the integer compare method as a failsafe.
                     if digitaldate != '0000-00-00' and postdate_int >= digitaldate_int:
-                        logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate))
+                        logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate))
                     elif postdate_int < issuedate_int:
                         logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int))
-                        logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
+                        logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate))
                         continue
                     else:
-                        logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate))
+                        logger.fdebug('%s is after store date of %s' % (pubdate, stdate))
             # -- end size constaints.

             if '(digital first)' in ComicTitle.lower():   #entry['title'].lower():
@@ -1095,7 +1095,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
             else:
                 thisentry = ComicTitle   #entry['title']

-            logger.fdebug("Entry: " + thisentry)
+            logger.fdebug('Entry: %s' % thisentry)
             cleantitle = thisentry

             if 'mixed format' in cleantitle.lower():
@@ -1286,7 +1286,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
             if all([nzbprov == '32P', allow_packs == True, RSS == 'no']):
                 logger.fdebug('pack:' + entry['pack'])
-            if all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2']):
+            if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', entry['pack'] is True])):   #allow_packs is True
                 if nzbprov == '32P':
                     if entry['pack'] == '2':
                         logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
@@ -1294,21 +1294,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                         logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
                     else:
                         logger.fdebug('[PACK-QUEUE] Invalid Pack.')
+                else:
+                    logger.fdebug('[PACK-QUEUE] DDL Pack detected for %s.' % entry['filename'])

                 #find the pack range.
                 pack_issuelist = None
                 issueid_info = None
                 if not entry['title'].startswith('0-Day Comics Pack'):
                     pack_issuelist = entry['issues']
                     issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
                     if issueid_info['valid'] == True:
-                        logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
+                        logger.info('Issue Number %s exists within pack. Continuing.' % IssueNumber)
                     else:
-                        logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
+                        logger.fdebug('Issue Number %s does NOT exist within this pack. Skipping' % IssueNumber)
                         continue

             #pack support.
             nowrite = False
-            nzbid = generate_id(nzbprov, entry['link'])
+            if all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
+                nzbid = entry['id']
+            else:
+                nzbid = generate_id(nzbprov, entry['link'])
             if manual is not True:
                 downloadit = True
             else:
@@ -1382,6 +1387,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                 nzbid = generate_id(nzbprov, entry['id'])
             elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
                 nzbid = entry['id']
+                entry['title'] = entry['filename']
             else:
                 nzbid = generate_id(nzbprov, entry['link'])
             if manual is not True:
@@ -1516,9 +1522,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                 for isid in issinfo['issues']:
                     updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
                     updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov)
-                notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov)
+                notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov, True)
+                #notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov)
             else:
-                notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov)
+                notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], None, nzbprov, True)
+                #notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov)
         else:
             if alt_nzbname is None or alt_nzbname == '':
@@ -1534,7 +1542,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                 cyear = ComicYear
             else:
                 cyear = comyear
-            notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov)
+            #notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov)
+            notify_snatch(ComicName, sent_to, cyear, IssueNumber, nzbprov, False)

         prov_count == 0
         mylar.TMP_PROV = nzbprov
@@ -2304,8 +2313,8 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
     t_hash = None
     if mylar.CONFIG.ENABLE_DDL is True and nzbprov == 'ddl':
         ggc = getcomics.GC(issueid=IssueID, comicid=ComicID)
-        sendsite = ggc.loadsite(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link)
-        ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link)
+        sendsite = ggc.loadsite(nzbid, link)
+        ddl_it = ggc.parse_downloadresults(nzbid, link)
         logger.info("ddl status response: %s" % ddl_it)
         if ddl_it['success'] is True:
             logger.info('Successfully snatched %s from DDL site. It is currently being queued to download in position %s' % (nzbname, mylar.DDL_QUEUE.qsize()))
@ -2687,37 +2696,43 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip() if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff) updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff)
#send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts) #send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts)
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov) #notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
notify_snatch(sent_to, ComicName, comyear, IssueNumber, nzbprov, False)
mylar.TMP_PROV = nzbprov mylar.TMP_PROV = nzbprov
return return_val return return_val
def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov): #def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov):
def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack):
if IssueNumber is not None: if pack is False:
snline = '%s (%s) #%s snatched!' % (modcomicname, comyear, IssueNumber) snline = 'Issue snatched!'
else: else:
snline = '%s (%s) snatched!' % (modcomicname, comyear) snline = 'Pack snatched!'
if IssueNumber is not None:
snatched_name = '%s (%s) #%s' % (comicname, comyear, IssueNumber)
else:
snatched_name= '%s (%s)' % (comicname, comyear)
if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH: if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification") logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL() prowl = notifiers.PROWL()
prowl.notify(nzbname, "Download started using " + sent_to) prowl.notify(snatched_name, "Download started using " + sent_to)
if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH: if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH:
logger.info(u"Sending NMA notification") logger.info(u"Sending NMA notification")
nma = notifiers.NMA() nma = notifiers.NMA()
nma.notify(snline=snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) nma.notify(snline=snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH: if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH:
logger.info(u"Sending Pushover notification") logger.info(u"Sending Pushover notification")
pushover = notifiers.PUSHOVER() pushover = notifiers.PUSHOVER()
pushover.notify(snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) pushover.notify(snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH: if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH:
logger.info(u"Sending Boxcar notification") logger.info(u"Sending Boxcar notification")
boxcar = notifiers.BOXCAR() boxcar = notifiers.BOXCAR()
boxcar.notify(snatched_nzb=nzbname, sent_to=sent_to, snline=snline) boxcar.notify(snatched_nzb=snatched_name, sent_to=sent_to, snline=snline)
if mylar.CONFIG.PUSHBULLET_ENABLED and mylar.CONFIG.PUSHBULLET_ONSNATCH: if mylar.CONFIG.PUSHBULLET_ENABLED and mylar.CONFIG.PUSHBULLET_ONSNATCH:
logger.info(u"Sending Pushbullet notification") logger.info(u"Sending Pushbullet notification")
pushbullet = notifiers.PUSHBULLET() pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(snline=snline, snatched=nzbname, sent_to=sent_to, prov=nzbprov, method='POST') pushbullet.notify(snline=snline, snatched=snatched_name, sent_to=sent_to, prov=nzbprov, method='POST')
if mylar.CONFIG.TELEGRAM_ENABLED and mylar.CONFIG.TELEGRAM_ONSNATCH: if mylar.CONFIG.TELEGRAM_ENABLED and mylar.CONFIG.TELEGRAM_ONSNATCH:
logger.info(u"Sending Telegram notification") logger.info(u"Sending Telegram notification")
telegram = notifiers.TELEGRAM() telegram = notifiers.TELEGRAM()
@@ -2725,7 +2740,7 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov)
     if mylar.CONFIG.SLACK_ENABLED and mylar.CONFIG.SLACK_ONSNATCH:
         logger.info(u"Sending Slack notification")
         slack = notifiers.SLACK()
-        slack.notify("Snatched", snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov)
+        slack.notify("Snatched", snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
     return
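Taken together, these hunks split the old single nzbname argument into a generic header (snline, the 'Issue Snatched' wording from the commit message) and a human-readable detail string (snatched_name). A condensed, runnable sketch of the new calling convention — print stands in for the notifier fan-out, and the argument values are illustrative only:

    def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack):
        snline = 'Pack snatched!' if pack else 'Issue snatched!'
        if IssueNumber is not None:
            snatched_name = '%s (%s) #%s' % (comicname, comyear, IssueNumber)
        else:
            snatched_name = '%s (%s)' % (comicname, comyear)
        # each enabled notifier receives snline (header) + snatched_name (detail)
        print('%s %s [%s -> %s]' % (snline, snatched_name, nzbprov, sent_to))

    notify_snatch('SABnzbd', 'Invincible', '2003', '12', 'nzb.su', False)
    # Issue snatched! Invincible (2003) #12 [nzb.su -> SABnzbd]
    notify_snatch('DDL', 'Invincible', '2003', None, 'ddl', True)
    # Pack snatched! Invincible (2003) [ddl -> DDL]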
@@ -1104,6 +1104,8 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
                     temploc = '1'
                 else:
                     temploc = None
+                    logger.warn('The filename [%s] does not have a valid issue number, and the Edition of the series is %s. You might need to Forcibly Mark the Series as TPB/GN and try this again.' % (tmpfc['ComicFilename'], rescan['Type']))
+                    return
             if all(['annual' not in temploc.lower(), 'special' not in temploc.lower()]):
                 #remove the extension here
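The early return matters because the very next context line calls temploc.lower(); for a Print Edition filename with no parseable issue number, temploc stayed None and produced the traceback the commit message mentions. A tiny repro of the failure the new guard prevents:

    temploc = None
    try:
        'annual' not in temploc.lower()
    except AttributeError as e:
        print('guarded against: %s' % e)  # 'NoneType' object has no attribute 'lower'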
@@ -645,6 +645,8 @@ class WebInterface(object):
                 seriesYear = cid['SeriesYear']
                 issuePublisher = cid['Publisher']
                 seriesVolume = cid['Volume']
+                bookType = cid['Type']
+                seriesAliases = cid['Aliases']
                 if storyarcpublisher is None:
                     #assume that the arc is the same
                     storyarcpublisher = issuePublisher
@@ -670,6 +672,8 @@ class WebInterface(object):
                           "IssuePublisher": issuePublisher,
                           "CV_ArcID": arcid,
                           "Int_IssueNumber": AD['Int_IssueNumber'],
+                          "Type": bookType,
+                          "Aliases": seriesAliases,
                           "Manual": AD['Manual']}
                 myDB.upsert("storyarcs", newVals, newCtrl)
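newCtrl/newVals follow the module's usual upsert pattern: the control dict identifies the row, the values dict carries the fields (now including Type and Aliases). A rough sketch of the semantics this assumes of myDB.upsert, written against plain sqlite3 — the authoritative helper lives in mylar's db module and may differ in detail:

    import sqlite3

    def upsert(conn, table, vals, ctrl):
        # update the row matching the control keys; insert when nothing matched
        where = ' AND '.join('%s = ?' % k for k in ctrl)
        sets = ', '.join('%s = ?' % k for k in vals)
        cur = conn.execute('UPDATE %s SET %s WHERE %s' % (table, sets, where),
                           list(vals.values()) + list(ctrl.values()))
        if cur.rowcount == 0:
            cols = ', '.join(list(vals) + list(ctrl))
            marks = ', '.join(['?'] * (len(vals) + len(ctrl)))
            conn.execute('INSERT INTO %s (%s) VALUES (%s)' % (table, cols, marks),
                         list(vals.values()) + list(ctrl.values()))

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE storyarcs(IssueArcID TEXT, Type TEXT, Aliases TEXT)')
    upsert(conn, 'storyarcs', {'Type': 'TPB', 'Aliases': 'Alias1'}, {'IssueArcID': '77'})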
@@ -2194,6 +2198,41 @@ class WebInterface(object):
     annualDelete.exposed = True

+    def queueManage(self):  # **args):
+        myDB = db.DBConnection()
+        activelist = 'There are currently no items downloading via Direct Download (DDL).'
+        active = myDB.selectone("SELECT * FROM DDL_INFO WHERE STATUS = 'Downloading'").fetchone()
+        if active is not None:
+            activelist = {'series': active['series'],
+                          'year': active['year'],
+                          'size': active['size'],
+                          'filename': active['filename'],
+                          'status': active['status'],
+                          'id': active['id']}
+
+        resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.'
+        s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID WHERE c.status != 'Downloading'")
+        if s_info:
+            resultlist = []
+            for si in s_info:
+                issue = si['Issue_Number']
+                if issue is not None:
+                    issue = '#%s' % issue
+                resultlist.append({'series': si['ComicName'],
+                                   'issue': issue,
+                                   'id': si['id'],
+                                   'volume': si['ComicVersion'],
+                                   'year': si['ComicYear'],
+                                   'size': si['size'].strip(),
+                                   'comicid': si['ComicID'],
+                                   'issueid': si['IssueID'],
+                                   'status': si['status']})
+        logger.info('resultlist: %s' % resultlist)
+        return serve_template(templatename="queue_management.html", title="Queue Management", activelist=activelist, resultlist=resultlist)
+    queueManage.exposed = True
+
     def previewRename(self, **args):  #comicid=None, comicidlist=None):
         file_format = mylar.CONFIG.FILE_FORMAT
         myDB = db.DBConnection()
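Both queueManage queries presuppose a ddl_info table keyed by IssueID, carrying the id/size/status columns that back the resume/delete handling the commit message describes. A hedged sketch of that shape, with the column set inferred only from the SELECTs above (the authoritative CREATE TABLE lives in dbcheck()):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    # inferred columns; order and any extra fields are assumptions
    conn.execute('CREATE TABLE ddl_info(id TEXT, series TEXT, year TEXT, '
                 'filename TEXT, size TEXT, status TEXT, issueid TEXT, comicid TEXT)')
    conn.execute("INSERT INTO ddl_info VALUES ('1', 'Invincible', '2003', "
                 "'inv.cbz', '250 MB', 'Downloading', '101', '9')")
    active = conn.execute("SELECT * FROM ddl_info WHERE status = 'Downloading'").fetchone()
    print(active)  # the row queueManage renders as the active download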
@@ -4104,7 +4143,7 @@ class WebInterface(object):
             import random
             SRID = str(random.randint(100000, 999999))
-            logger.info('[IMPORT] Issues found with valid ComicID information for : ' + comicinfo['ComicName'] + ' [' + str(comicinfo['ComicID']) + ']')
+            logger.info('[IMPORT] Issues found with valid ComicID information for : %s [%s]' % (comicinfo['ComicName'], comicinfo['ComicID']))
             imported = {'ComicName': comicinfo['ComicName'],
                         'DynamicName': comicinfo['DynamicName'],
                         'Volume': comicinfo['Volume'],
@@ -4127,7 +4166,7 @@ class WebInterface(object):
             #           "ComicName": comicinfo['ComicName'],
             #           "DynamicName": comicinfo['DynamicName']}
             # myDB.upsert("importresults", newVal, ctrlVal)
-            logger.info('[IMPORT] Successfully verified import sequence data for : ' + comicinfo['ComicName'] + '. Currently adding to your watchlist.')
+            logger.info('[IMPORT] Successfully verified import sequence data for : %s. Currently adding to your watchlist.' % comicinfo['ComicName'])
             RemoveIDS.append(comicinfo['ComicID'])

             #we need to remove these items from the comiclist now, so they don't get processed again
@@ -4200,9 +4239,10 @@ class WebInterface(object):
                 else:
                     raise cherrypy.HTTPRedirect("importResults")
             else:
-                comicstoIMP.append(result['ComicLocation'])#.decode(mylar.SYS_ENCODING, 'replace'))
+                #logger.fdebug('result: %s' % result)
+                comicstoIMP.append(result['ComicLocation'])  #.decode(mylar.SYS_ENCODING, 'replace'))
                 getiss = result['IssueNumber']
-                #logger.info('getiss:' + getiss)
+                #logger.fdebug('getiss: %s' % getiss)
                 if 'annual' in getiss.lower():
                     tmpiss = re.sub('[^0-9]','', getiss).strip()
                     if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4:
@@ -4217,10 +4257,10 @@ class WebInterface(object):
                 miniss_num = helpers.issuedigits(minISSUE)
                 startiss_num = helpers.issuedigits(startISSUE)
                 if int(getiss_num) > int(miniss_num):
-                    #logger.fdebug('Minimum issue now set to : ' + getiss + ' - it was : ' + minISSUE)
+                    logger.fdebug('Minimum issue now set to : %s - it was %s' % (getiss, minISSUE))
                     minISSUE = getiss
                 if int(getiss_num) < int(startiss_num):
-                    #logger.fdebug('Start issue now set to : ' + getiss + ' - it was : ' + startISSUE)
+                    logger.fdebug('Start issue now set to : %s - it was %s' % (getiss, startISSUE))
                     startISSUE = str(getiss)
                 if helpers.issuedigits(startISSUE) == 1000 and result['ComicYear'] is not None:  # if it's an issue #1, get the year and assume that's the start.
                     startyear = result['ComicYear']
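The == 1000 test in the closing context line only makes sense if helpers.issuedigits scales issue numbers by 1000 so fractional issues sort as plain integers. A simplified stand-in illustrating that comparison — the real helper also copes with non-numeric forms such as 'AU' suffixes:

    def issuedigits(issnum):
        # simplified: numeric issues only
        return int(float(issnum) * 1000)

    assert issuedigits('1') == 1000       # the "issue #1" sentinel tested above
    assert issuedigits('1.5') == 1500     # decimals stay ordered between 1 and 2
    assert issuedigits('22') > issuedigits('3')  # numeric, not lexicographic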