IMP: When the comic location for a given series is changed, the existing directory is now renamed where possible instead of a new directory being created
IMP: (#1920) Ability to directly input a 32P series ID, for more accurate search results, via the series details page / edit tab
IMP: Better error messages when no ComicVine API key is set while searching, and when no Comic Location is set while adding a series
FIX: When using the Test SABnzbd button, the test would return a fail status if SABnzbd had not previously been enabled (i.e. the config was saved after the option was selected)
FIX: When post-processing annuals, the wrong table would be updated, resulting in a null table entry that caused further problems
IMP: Added a one-day grace period to the date check when comparing search results, to allow some leniency for timezone differences
FIX: Fixed a slight display issue when the 32P provider was enabled but torrents were turned off (the pack option was shown on the series page when it shouldn't have been)

evilhero 2018-04-26 11:27:51 -04:00
parent 37cd7346bc
commit 0c25634684
7 changed files with 159 additions and 105 deletions

View File

@@ -220,10 +220,14 @@
<input type="radio" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="fuzzy_year" value="0" ${comicConfig['fuzzy_year0']} /> Default&nbsp;<input type="radio" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="fuzzy_year" value="1" ${comicConfig['fuzzy_year1']} /> Year Removal&nbsp;<input type="radio" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="fuzzy_year" value="2" ${comicConfig['fuzzy_year2']} /> Fuzzy the Year
</div>
%if mylar.CONFIG.ENABLE_32P and mylar.CONFIG.MODE_32P == 1:
%if all([mylar.CONFIG.ENABLE_32P is True, mylar.CONFIG.ENABLE_TORRENT_SEARCH is True, mylar.CONFIG.MODE_32P == 1]):
<div class="row checkbox right clearfix">
<input type="checkbox" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="allow_packs" value="1" ${comicConfig['allow_packs']} /><label>Enable Pack Downloads<a href="#" title="Will allow downloading of multiple issues in one file (packs), but will search individual issues first"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a></label>
</div>
<div class="row">
<label>Manual specify series ID for 32p</label>
<input type="text" name="torrentid_32p" placeholder="torrent id #" value="${comicConfig['torrentid_32p']}" size="40">
</div>
%endif
</fieldset>
<input type="submit" style="float:right;" value="Update"/>

View File

@@ -412,6 +412,7 @@ def start():
SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=weekly_diff, trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
#initiate startup rss feeds for torrents/nzbs here...
rs = rsscheckit.tehMain()
if CONFIG.ENABLE_RSS:
logger.info('[RSS-FEEDS] Initiating startup-RSS feed checks.')
if SCHED_RSS_LAST is not None:
@@ -419,7 +420,6 @@ def start():
logger.info('[RSS-FEEDS] RSS last run @ %s' % datetime.datetime.utcfromtimestamp(rss_timestamp))
else:
rss_timestamp = helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) *60)
rs = rsscheckit.tehMain()
duration_diff = (helpers.utctimestamp() - rss_timestamp)/60
if duration_diff >= int(CONFIG.RSS_CHECKINTERVAL):
SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
@@ -427,6 +427,9 @@ def start():
rss_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) * 60) - (duration_diff * 60))
logger.fdebug('[RSS-FEEDS] Scheduling next run for @ %s every %s minutes' % (rss_diff, CONFIG.RSS_CHECKINTERVAL))
SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=rss_diff, trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
#else:
# SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
# SCHED.pause_job('rss')
if CONFIG.CHECK_GITHUB:
vs = versioncheckit.CheckVersion()
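
The startup block above decides whether the RSS job is overdue and either fires it immediately or resumes the previous cadence. A condensed sketch of that decision, assuming APScheduler and a stored last-run timestamp (the helper name is illustrative, not Mylar's own):

import datetime
from apscheduler.triggers.interval import IntervalTrigger

def schedule_rss(sched, job, interval_min, last_run_ts, now_ts):
    # sched is an already-running APScheduler scheduler instance
    duration_diff = (now_ts - last_run_ts) / 60              # minutes since last run
    if duration_diff >= interval_min:
        next_run = datetime.datetime.utcnow()                # overdue: run right away
    else:
        # not due yet: resume the old cadence where it left off
        next_run = datetime.datetime.utcfromtimestamp(now_ts + (interval_min * 60) - (duration_diff * 60))
    sched.add_job(func=job, id='rss', name='RSS Feeds', next_run_time=next_run,
                  trigger=IntervalTrigger(minutes=interval_min, timezone='UTC'))
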
@@ -468,7 +471,7 @@ def dbcheck():
except sqlite3.OperationalError:
logger.warn('Unable to update readinglist table to new storyarc table format.')
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT, TorrentID_32P TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT, ImageURL TEXT, ImageURL_ALT TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT, Hash TEXT, crc TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)')
@@ -589,6 +592,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN Corrected_SeriesYear TEXT')
try:
c.execute('SELECT TorrentID_32P from comics')
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN TorrentID_32P TEXT')
try:
c.execute('SELECT DynamicComicName from comics')
if CONFIG.DYNAMIC_UPDATE < 3:
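
The new TorrentID_32P column uses the same probe-then-ALTER migration idiom as the rest of dbcheck(). Standalone, the pattern looks like this (database path illustrative):

import sqlite3

conn = sqlite3.connect('mylar.db')   # illustrative path
c = conn.cursor()
try:
    c.execute('SELECT TorrentID_32P from comics')
except sqlite3.OperationalError:
    # older databases lack the column, so add it in place
    c.execute('ALTER TABLE comics ADD COLUMN TorrentID_32P TEXT')
conn.commit()
conn.close()
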

View File

@@ -170,23 +170,28 @@ class info32p(object):
def searchit(self):
chk_id = None
#logger.info('searchterm: %s' % self.searchterm)
series_search = self.searchterm['series']
#self.searchterm is a tuple containing series name, issue number, volume and publisher.
series_search = self.searchterm['series']
issue_search = self.searchterm['issue']
volume_search = self.searchterm['volume']
if series_search.startswith('0-Day Comics Pack'):
#issue = '21' = WED, #volume='2' = 2nd month
torrentid = 22247 #2018
issue_search = self.searchterm['issue'] #'21' #Wed
volume_search = self.searchterm['volume'] #'2' #2nd month
publisher_search = None #'2' #2nd month
comic_id = None
elif self.searchterm['torrentid_32p'] is not None:
torrentid = self.searchterm['torrentid_32p']
comic_id = self.searchterm['id']
publisher_search = self.searchterm['publisher']
else:
torrentid = None
comic_id = self.searchterm['id']
annualize = False
if 'annual' in series_search.lower():
series_search = re.sub(' annual', '', series_search.lower()).strip()
annualize = True
issue_search = self.searchterm['issue']
volume_search = self.searchterm['volume']
publisher_search = self.searchterm['publisher']
spl = [x for x in self.publisher_list if x in publisher_search]
for x in spl:
@@ -250,7 +255,7 @@ class info32p(object):
pdata = []
pubmatch = False
if series_search.startswith('0-Day Comics Pack'):
if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]):
data.append({"id": torrentid,
"series": series_search})
else:
@@ -308,11 +313,14 @@ class info32p(object):
dataset += pdata
logger.fdebug(str(len(dataset)) + ' series match the tile being searched for on 32P...')
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack')]) and any([len(data) == 1, len(pdata) == 1]):
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None]) and any([len(data) == 1, len(pdata) == 1]):
#update the 32p_reference so we avoid doing a url lookup next time
helpers.checkthe_id(comic_id, dataset)
else:
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None]):
pass
else:
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
results32p = []
resultlist = {}
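
In effect, searchit() now resolves the torrent id from three sources in priority order: the fixed 0-Day pack id, a manually supplied 32P series id (which skips the name lookup entirely), or nothing, falling back to the name-based series search. A simplified sketch of that branch (helper and sample values are illustrative):

def resolve_torrentid(searchterm):
    if searchterm['series'].startswith('0-Day Comics Pack'):
        return 22247, None                                   # fixed 2018 pack id
    if searchterm.get('torrentid_32p') is not None:
        return searchterm['torrentid_32p'], searchterm['id']
    return None, searchterm['id']

print(resolve_torrentid({'series': 'Invincible', 'id': '1234', 'torrentid_32p': '5678'}))
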

View File

@@ -224,12 +224,20 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
'$VolumeN': comicVol.upper(),
'$Annual': 'Annual'
}
if mylar.CONFIG.FOLDER_FORMAT == '':
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + SeriesYear + ")")
else:
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))
try:
if mylar.CONFIG.FOLDER_FORMAT == '':
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + SeriesYear + ")")
else:
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))
except Exception as e:
if 'TypeError' in e:
if mylar.CONFIG.DESTINATION_DIR is None:
logger.error('[ERROR] %s' % e)
logger.error('No Comic Location specified. This NEEDS to be set before anything can be added successfully.')
return
logger.error('[ERROR] %s' % e)
logger.error('Cannot determine Comic Location path properly. Check your Comic Location and Folder Format for any errors.')
return
#comlocation = mylar.CONFIG.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
if mylar.CONFIG.DESTINATION_DIR == "":
@@ -269,7 +277,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
ComicImage = helpers.replacetheslash(PRComicImage)
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.CONFIG.COMIC_COVER_LOCAL and os.path.isdir(comlocation):
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]):
try:
comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
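
The try/except around the comlocation build exists because os.path.join() blows up when the Comic Location was never set. A minimal reproduction; on Python 3 the failure surfaces as a TypeError, on Python 2 as an AttributeError, which is why the commit catches broadly and inspects the exception text:

import os

destination_dir = None                       # Comic Location never configured
try:
    comlocation = os.path.join(destination_dir, 'Invincible (2003)')
except (TypeError, AttributeError) as e:
    print('No Comic Location specified. This NEEDS to be set first: %s' % e)
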

View File

@@ -38,7 +38,7 @@ from base64 import b16encode, b32decode
from operator import itemgetter
from wsgiref.handlers import format_date_time
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None, allow_packs=None, oneoff=False, manual=False):
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None, allow_packs=None, oneoff=False, manual=False, torrentid_32p=None):
mylar.COMICINFO = []
unaltered_ComicName = None
@@ -331,7 +331,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
break
else:
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host)
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p)
if all([searchprov == '32P', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]):
checked_once = True
if findit['status'] is False:
@@ -343,7 +343,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host)
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p)
if findit['status'] is True:
break
if findit['status'] is True:
@@ -403,7 +403,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
return findit, 'None'
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None, oneoff=False, cmloopit=None, manual=False, torznab_host=None):
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None, oneoff=False, cmloopit=None, manual=False, torznab_host=None, torrentid_32p=None):
if any([allow_packs is None, allow_packs == 'None', allow_packs == 0, allow_packs == '0']) and all([mylar.CONFIG.ENABLE_TORRENT_SEARCH, mylar.CONFIG.ENABLE_32P]):
allow_packs = False
@@ -597,11 +597,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == '':
bb = "no results"
if nzbprov == '32P':
if all([mylar.CONFIG.MODE_32P == 1,mylar.CONFIG.ENABLE_32P]):
if all([mylar.CONFIG.MODE_32P == 1, mylar.CONFIG.ENABLE_32P is True]):
if ComicName[:17] == '0-Day Comics Pack':
searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7]}
else:
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p}
#first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm)
@@ -954,7 +954,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
continue
#convert it to a Thu, 06 Feb 2014 00:00:00 format
issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
issue_converted = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
issue_convert = issue_converted + datetime.timedelta(days=-1)
# to get past different locale's os-dependent dates, let's convert it to a generic datetime format
try:
stamp = time.mktime(issue_convert.timetuple())
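
The one-day grace in isolation: the store date is shifted back a day before the comparison, so a result stamped "yesterday" in another timezone still matches (dates illustrative):

import datetime

stdate = '2014-02-06'
issue_converted = datetime.datetime.strptime(stdate, '%Y-%m-%d')
issue_convert = issue_converted + datetime.timedelta(days=-1)
assert issue_convert.date() == datetime.date(2014, 2, 5)   # one day of leniency
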
@@ -980,6 +981,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
try:
#try new method to get around issues populating in a diff timezone thereby putting them in a different day.
if dateconv2.date() < econv2.date():
logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), econv2.date()))
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
continue
else:
@@ -987,6 +989,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
except:
#if the above fails, drop down to the integer compare method as a failsafe.
if postdate_int < issuedate_int:
logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int))
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
continue
else:
@@ -1986,6 +1989,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
AlternateSearch = None
UseFuzzy = None
ComicVersion = comic['Volume']
TorrentID_32p = None
else:
Comicname_filesafe = comic['ComicName_Filesafe']
SeriesYear = comic['ComicYear']
@@ -1993,6 +1997,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
AlternateSearch = comic['AlternateSearch']
UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion']
TorrentID_32p = comic['TorrentID_32P']
if any([comic['AllowPacks'] == 1, comic['AllowPacks'] == '1']):
AllowPacks = True
@@ -2005,7 +2010,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
ComicYear = str(result['IssueDate'])[:4]
mode = result['mode']
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff)
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff, torrentid_32p=TorrentID_32p)
if foundNZB['status'] is True:
#logger.info(foundNZB)
updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov, SARC=result['SARC'], IssueArcID=result['IssueArcID'], hash=foundNZB['info']['t_hash'])
@@ -2045,7 +2050,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
SARC = result['StoryArc']
IssueArcID = issueid
actissueid = None
TorrentID_32p = None
else:
comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
Comicname_filesafe = comic['ComicName_Filesafe']
@@ -2059,6 +2064,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
SARC = None
IssueArcID = None
actissueid = issueid
TorrentID_32p = comic['TorrentID_32P']
if any([comic['AllowPacks'] == 1, comic['AllowPacks'] == '1']):
allow_packs = True
@@ -2070,7 +2076,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
else:
IssueYear = str(result['IssueDate'])[:4]
foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual)
foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual, torrentid_32p=TorrentID_32p)
if manual is True:
return foundNZB
if foundNZB['status'] is True:
@@ -2111,6 +2117,7 @@ def searchIssueIDList(issuelist):
Publisher = comic['ComicPublisher']
UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion']
TorrentID_32p = comic['TorrentID_32P']
if issue['IssueDate'] == None:
IssueYear = comic['ComicYear']
else:
@@ -2120,7 +2127,7 @@ def searchIssueIDList(issuelist):
else:
AllowPacks = False
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks)
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
if foundNZB['status'] is True:
updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash'])
logger.info('Completed search request.')
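
All three call paths in this file now thread the stored id the same way: comics.TorrentID_32P -> search_init() -> NZB_SEARCH() -> the 32P searchterm dict, where a non-None value short-circuits the name-based series lookup. A stripped-down sketch of the flow (the functions and row below are illustrative stand-ins, not Mylar's real signatures):

def nzb_search(comic_name, comic_id, torrentid_32p=None):
    # the id lands in the provider search term built for auth32p
    return {'series': comic_name, 'id': comic_id, 'torrentid_32p': torrentid_32p}

def search_init(comic_row):
    return nzb_search(comic_row['ComicName'], comic_row['ComicID'],
                      torrentid_32p=comic_row['TorrentID_32P'])

print(search_init({'ComicName': 'Invincible', 'ComicID': '1234', 'TorrentID_32P': '5678'}))
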

View File

@@ -1556,6 +1556,7 @@ def totals(ComicID, havefiles=None, totalfiles=None, module=None, issueid=None,
if module is None:
module = '[FILE-RESCAN]'
myDB = db.DBConnection()
filetable = 'issues'
if any([havefiles is None, havefiles == '+1']):
if havefiles is None:
hf = myDB.selectone("SELECT Have, Total FROM comics WHERE ComicID=?", [ComicID]).fetchone()
@@ -1565,6 +1566,7 @@ def totals(ComicID, havefiles=None, totalfiles=None, module=None, issueid=None,
hf = myDB.selectone("SELECT a.Have, a.Total, b.Status as IssStatus FROM comics AS a INNER JOIN issues as b ON a.ComicID=b.ComicID WHERE b.IssueID=?", [issueid]).fetchone()
if hf is None:
hf = myDB.selectone("SELECT a.Have, a.Total, b.Status as IssStatus FROM comics AS a INNER JOIN annuals as b ON a.ComicID=b.ComicID WHERE b.IssueID=?", [issueid]).fetchone()
filetable = 'annuals'
totalfiles = int(hf['Total'])
logger.fdebug('totalfiles: %s' % totalfiles)
logger.fdebug('status: %s' % hf['IssStatus'])
@@ -1585,4 +1587,4 @@ def totals(ComicID, havefiles=None, totalfiles=None, module=None, issueid=None,
controlValueStat = {"IssueID": issueid,
"ComicID": ComicID}
newValueStat = {"ComicSize": os.path.getsize(file)}
myDB.upsert("issues", newValueStat, controlValueStat)
myDB.upsert(filetable, newValueStat, controlValueStat)
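
The annuals fix keeps track of which table actually matched the IssueID, so the later size upsert writes back to the same one instead of always hitting issues. Reduced to a sketch (queries trimmed for brevity):

def table_for_issue(myDB, issueid):
    filetable = 'issues'
    hf = myDB.selectone('SELECT Status FROM issues WHERE IssueID=?', [issueid]).fetchone()
    if hf is None:
        hf = myDB.selectone('SELECT Status FROM annuals WHERE IssueID=?', [issueid]).fetchone()
        filetable = 'annuals'
    return filetable

# later: myDB.upsert(table_for_issue(myDB, issueid), newValueStat, controlValueStat)
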

View File

@@ -187,6 +187,7 @@ class WebInterface(object):
"delete_dir": helpers.checked(mylar.CONFIG.DELETE_REMOVE_DIR),
"allow_packs": helpers.checked(int(allowpacks)),
"corrected_seriesyear": comic['ComicYear'],
"torrentid_32p": comic['TorrentID_32P'],
"totalissues": totalissues,
"haveissues": haveissues,
"percent": percent,
@@ -285,7 +286,13 @@ class WebInterface(object):
logger.error('Unable to perform required story-arc search for : [arc: ' + name + '][mode: ' + mode + ']')
return
searchresults = sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
try:
searchresults = sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
except Exception as e:
logger.error('Unable to retrieve results from ComicVine: %s' % e)
if mylar.COMICVINE_API is None:
logger.error('You NEED to set a ComicVine API key prior to adding anything. It\'s Free - Go get one!')
return
return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type, imported=None, ogcname=None, name=name, serinfo=serinfo)
searchit.exposed = True
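
The sort fails in the first place because an un-keyed ComicVine query has no result set to order. A minimal reproduction of the failure mode the new except branch reports, assuming the search hands back None in that case:

from operator import itemgetter

searchresults = None      # assumed return value when no ComicVine API key is set
try:
    sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
except TypeError as e:
    print('Unable to retrieve results from ComicVine: %s' % e)
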
@@ -1371,6 +1378,7 @@ class WebInterface(object):
AllowPacks= cdname['AllowPacks']
ComicVersion = cdname['ComicVersion']
ComicName = cdname['ComicName']
TorrentID_32p = cdname['TorrentID_32P']
controlValueDict = {"IssueID": IssueID}
newStatus = {"Status": "Wanted"}
if mode == 'want':
@@ -1416,7 +1424,7 @@ class WebInterface(object):
#Publisher = miy['ComicPublisher']
#UseAFuzzy = miy['UseFuzzy']
#ComicVersion = miy['ComicVersion']
foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID, manualsearch=manualsearch, filesafe=ComicName_Filesafe, allow_packs=AllowPacks)
foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID, manualsearch=manualsearch, filesafe=ComicName_Filesafe, allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
if foundcom['status'] is True:
# file check to see if issue exists and update 'have' count
if IssueID is not None:
@@ -4670,8 +4678,13 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
manual_annual_add.exposed = True
def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None):
def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None, torrentid_32p=None):
myDB = db.DBConnection()
chk1 = myDB.selectone('SELECT ComicLocation FROM comics WHERE ComicID=?', [ComicID]).fetchone()
if chk1 is None:
orig_location = com_location
else:
orig_location = chk1['ComicLocation']
#--- this is for multiple search terms............
#--- works, just need to redo search.py to accomodate multiple search terms
ffs_alt = []
@@ -4732,22 +4745,31 @@ class WebInterface(object):
else:
newValues['AllowPacks'] = 1
newValues['TorrentID_32P'] = torrentid_32p
if alt_filename is None or alt_filename == 'None':
newValues['AlternateFileName'] = "None"
else:
newValues['AlternateFileName'] = str(alt_filename)
#force the check/creation of directory com_location here
if mylar.CONFIG.CREATE_FOLDERS is True:
if any([mylar.CONFIG.CREATE_FOLDERS is True, os.path.isdir(orig_location)]):
if os.path.isdir(str(com_location)):
logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
if orig_location != com_location:
logger.fdebug('Renaming existing location [%s] to new location: %s' % (orig_location, com_location))
try:
os.rename(orig_location, com_location)
except Exception as e:
logger.warn('Unable to rename existing directory: %s' % e)
return
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
myDB.upsert("comics", newValues, controlValueDict)
logger.fdebug('Updated Series options!')
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
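
Boiled down, comic_config() now reuses the existing series directory via a rename when one exists, and only creates from scratch otherwise. A sketch under that reading (function name illustrative; note that os.rename() cannot cross filesystems, which is why the commit wraps it and warns on failure):

import os

def relocate_series(orig_location, com_location):
    if os.path.isdir(com_location):
        return                                   # target already exists
    if os.path.isdir(orig_location) and orig_location != com_location:
        os.rename(orig_location, com_location)   # keeps the existing files intact
    else:
        os.makedirs(com_location)                # brand-new series directory
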
@@ -4900,82 +4922,77 @@ class WebInterface(object):
if sabapikey is None:
sabapikey = mylar.CONFIG.SAB_APIKEY
logger.fdebug('Now attempting to test SABnzbd connection')
if mylar.USE_SABNZBD:
#if user/pass given, we can auto-fill the API ;)
if sabusername is None or sabpassword is None:
logger.error('No Username / Password provided for SABnzbd credentials. Unable to test API key')
return "Invalid Username/Password provided"
logger.fdebug('testing connection to SABnzbd @ ' + sabhost)
if sabhost.endswith('/'):
sabhost = sabhost
else:
sabhost = sabhost + '/'
#if user/pass given, we can auto-fill the API ;)
if sabusername is None or sabpassword is None:
logger.error('No Username / Password provided for SABnzbd credentials. Unable to test API key')
return "Invalid Username/Password provided"
logger.fdebug('testing connection to SABnzbd @ ' + sabhost)
if sabhost.endswith('/'):
sabhost = sabhost
else:
sabhost = sabhost + '/'
querysab = sabhost + 'api'
payload = {'mode': 'get_config',
'section': 'misc',
'output': 'json',
'keyword': 'api_key',
'apikey': sabapikey}
querysab = sabhost + 'api'
payload = {'mode': 'get_config',
'section': 'misc',
'output': 'json',
'keyword': 'api_key',
'apikey': sabapikey}
if sabhost.startswith('https'):
verify = True
else:
verify = False
try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e))
if requests.exceptions.SSLError:
logger.warn('Cannot verify ssl certificate. Attempting to authenticate with no ssl-certificate verification.')
try:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
if sabhost.startswith('https'):
verify = True
else:
verify = False
try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e))
if requests.exceptions.SSLError:
logger.warn('Cannot verify ssl certificate. Attempting to authenticate with no ssl-certificate verification.')
try:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
verify = False
try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
return 'Unable to retrieve data from SABnzbd'
else:
try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
return 'Unable to retrieve data from SABnzbd'
logger.info('status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
data = r.json()
return 'Unable to retrieve data from SABnzbd'
try:
q_apikey = data['config']['misc']['api_key']
except:
logger.error('Error detected attempting to retrieve SAB data using FULL APIKey')
if all([sabusername is not None, sabpassword is not None]):
try:
sp = sabparse.sabnzbd(sabhost, sabusername, sabpassword)
q_apikey = sp.sab_get()
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
if q_apikey is None:
return "Invalid APIKey provided"
mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('status code: ' + str(r.status_code))
logger.info('Connection to SABnzbd tested sucessfully')
return "Successfully verified APIkey"
if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
logger.error('You do not have anything stated for SAB Host. Please correct and try again.')
return "Invalid SABnzbd host specified"
data = r.json()
try:
q_apikey = data['config']['misc']['api_key']
except:
logger.error('Error detected attempting to retrieve SAB data using FULL APIKey')
if all([sabusername is not None, sabpassword is not None]):
try:
sp = sabparse.sabnzbd(sabhost, sabusername, sabpassword)
q_apikey = sp.sab_get()
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
if q_apikey is None:
return "Invalid APIKey provided"
mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('Connection to SABnzbd tested sucessfully')
return "Successfully verified APIkey"
SABtest.exposed = True
def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None):
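
The SABnzbd test above, de-indented out of the old "if mylar.USE_SABNZBD:" guard, no longer requires SABnzbd to have been enabled and saved beforehand. Its core is asking SAB for its configured api_key using the supplied key; one way to express that round trip, as a sketch (helper name illustrative):

import requests

def verify_sab(sabhost, sabapikey):
    if not sabhost.endswith('/'):
        sabhost += '/'
    payload = {'mode': 'get_config', 'section': 'misc', 'output': 'json',
               'keyword': 'api_key', 'apikey': sabapikey}
    r = requests.get(sabhost + 'api', params=payload, verify=sabhost.startswith('https'))
    if r.status_code != 200:
        return False
    # a wrong key never gets the config back, so a match verifies the key
    return r.json()['config']['misc']['api_key'] == sabapikey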