Merge branch 'development'

evilhero 2019-03-10 17:57:12 -04:00
commit 2dd10e20ee
24 changed files with 1246 additions and 723 deletions

.gitignore

@ -11,3 +11,4 @@ Thumbs.db
ehtumbs.db
Thumbs.db
lib/comictaggerlib/ct_settings/
settings.json


@ -58,11 +58,8 @@
but you can contribute and support the development</br>
by buying me a coffee (or several)</strong></label></br></br>
</div>
<div style="width: 60%; margin: 0px auto;">
<div style="width: 60%; margin: 0px auto;" align="center">
<a id="navDonate" href="https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&amp;hosted_button_id=EWQADB5AMVRFU" rel="noreferrer" onclick="window.open('http://dereferer.org/?' + this.href); return false;"><img src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif" alt="[donate]" /></a>
<a href="https://flattr.com/submit/auto?user_id=evilhero&url=https://github.com/evilhero/mylar&title=Mylar%20Donation%description=Supporting&20the%development%20of%20Mylar&language=en_CA&hidden=1&category=software" target="_blank">
<img src="//api.flattr.com/button/flattr-badge-large.png" alt="Flattr this" title="Flattr this" border="0" align="center">
</a>
<a href="#" onclick="javascript:window.prompt('Please copy/paste my Bitcoin address into your Bitcoin client.', '18eCE9wZxnNiZgE4Cc5pwJMnMjEfhdmH4U');"><img src="interfaces/default/images/bitcoin.png" alt="Bitcoin" height="20" align="center"></a></div>
</div>
</fieldset>
@ -584,14 +581,12 @@
<div class="row">
<label>rTorrent Host:port(optional)</label>
<input type="text" id="rtorrent_host" name="rtorrent_host" value="${config['rtorrent_host']}" size="30">
<small>ie. my.rtorrent:80, 192.168.1.1, scgi://localhost:5000</small>
</div>
<div class="row checkbox left clearfix">
<input id="rtorrent_ssl" type="checkbox" onclick="initConfigCheckbox($this));" name="rtorrent_ssl" value="1" ${config['rtorrent_ssl']} /><label>SSL</label>
<small>ie. https://my.rtorrent, http://192.168.1.1, http://localhost:80, scgi://localhost:5000</small>
</div>
<div class="config">
<div class="row checkbox left clearfix">
<input id="rtorrent_verify" type="checkbox" name="rtorrent_verify" value="1" ${config['rtorrent_verify']} /><label>Verify SSL</label>
<small>(only optionally used for https connections)</small>
</div>
</div>
<div class="row">
@ -839,16 +834,16 @@
</div>
<div class="row">
<label>&nbspUserName</label>
<input type="text" name="username_32p" value="${config['username_32p']}" size="36">
<input type="text" name="username_32p" id="username_32p" value="${config['username_32p']}" size="36">
</div>
<div class="row">
<label>&nbspPassword</label>
<input type="password" name="password_32p" value="${config['password_32p']| h}" size="36">
<input type="password" name="password_32p" id="password_32p" value="${config['password_32p']| h}" size="36">
<small>( monitor the NEW releases feed & your personal notifications )</small>
</div>
<div align="center" class="row">
<img name="test32p_statusicon" id="test32p_statusicon" src="interfaces/default/images/success.png" style="float:right;visibility:hidden;" height="20" width="20" />
<input type="button" value="Test Connection" id="test_32p" style="float:center" /></br>
<input type="button" value="Test Connection" id="test32p" style="float:center" /></br>
<input type="text" name="status32p" style="text-align:center; font-size:11px;" id="status32p" size="50" DISABLED />
</div>
<div name="inkdrops32p" id="inkdrops32p" style="font-size:11px;" align="center">
@ -1982,9 +1977,12 @@
function numberWithDecimals(x) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ".");
};
$("#test_32p").click(function(){
$("#test32p").click(function(){
var imagechk = document.getElementById("test32p_statusicon");
$.get('test_32p',
var user32p = document.getElementById("username_32p").value;
var pass32p = document.getElementById("password_32p").value;
$.get("test_32p",
{ username: user32p, password: pass32p },
function(data){
if (data.error != undefined) {
alert(data.error);
@ -2200,10 +2198,9 @@
var password = document.getElementById("rtorrent_password").value;
var auth = document.getElementById("rtorrent_authentication").value;
var verify = document.getElementById("rtorrent_verify").value;
var ssl = document.getElementById("rtorrent_ssl").value;
var rpc_url = document.getElementById("rtorrent_rpc_url").value;
$.get("testrtorrent",
{ host: host, username: username, password: password, auth: auth, verify: verify, ssl: ssl, rpc_url: rpc_url },
{ host: host, username: username, password: password, auth: auth, verify: verify, rpc_url: rpc_url },
function(data){
if (data.error != undefined) {
alert(data.error);
@ -2480,7 +2477,6 @@
initConfigCheckbox("#enforce_perms");
initConfigCheckbox("#enable_api");
initConfigCheckbox("#sab_to_mylar");
initConfigCheckbox("#rtorrent_ssl");
initConfigCheckbox("#usenewznab");
initConfigCheckbox("#enable_torznab");
initConfigCheckbox("#usenzbsu");


@ -208,6 +208,7 @@
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Now searching for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Grab</a>
%elif item['Status'] == 'Snatched':
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Trying to search again for issue"><span class="ui-icon ui-icon-plus"></span>Retry</a>
<a href="#" onclick="doAjaxCall('clear_arcstatus?issuearcid=${item['IssueArcID']}',$(this),'table')"><data success="Clearing status of ${item['Status']} for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Clear Status</a>
%elif item['Status'] == 'Downloaded' and item['Location'] is not None:
<a href="downloadthis?pathfile=${item['Location'] |u}"><span class="ui-icon ui-icon-plus"></span>Download</a>
%endif
@ -320,6 +321,7 @@
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"stateDuration": 0,
"stateSave": true,
"aaSorting": []
})
resetFilters("item");


@ -202,6 +202,7 @@
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Now searching for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Grab</a>
%elif item['Status'] == 'Snatched':
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Trying to search again for issue"><span class="ui-icon ui-icon-plus"></span>Retry</a>
<a href="#" onclick="doAjaxCall('clear_arcstatus?issuearcid=${item['IssueArcID']}',$(this),'table')"><data success="Clearing status of ${item['Status']} for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Clear Status</a>
%elif item['Status'] == 'Downloaded' and item['Location'] is not None:
<a href="downloadthis?pathfile=${item['Location'] |u}"><span class="ui-icon ui-icon-plus"></span>Download</a>
%endif


@ -143,7 +143,7 @@
<td class="options">
%if weekly['HAVEIT'] == 'OneOff':
%if weekly['STATUS'] == 'Snatched' or weekly['STATUS'] == 'Downloaded':
<a href="#" onclick="doAjaxCall('queueit?ComicName=${weekly['COMIC'] | u}&ComicID=${weekly['COMICID']}&IssueID=${weekly['ISSUEID']}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}&pullinfo=${weekinfo['midweek']}&pullweek=${weekinfo['weeknumber']}&pullyear=${weekinfo['year']}',$(this),'table')" data-success="Successfully submitted search request for ${weekly['COMIC']} #${weekly['ISSUE']}" title="Snatch issue again as a One-Off">
<a href="#" onclick="doAjaxCall('queueit?ComicName=${weekly['COMIC'] | u}&ComicID=${weekly['COMICID']}&IssueID=${weekly['ISSUEID']}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}&pullinfo=${weekinfo['midweek']}&pullweek=${weekinfo['weeknumber']}&pullyear=${weekinfo['year']}&BookType=${weekly['FORMAT']}',$(this),'table')" data-success="Successfully submitted search request for ${weekly['COMIC']} #${weekly['ISSUE']}" title="Snatch issue again as a One-Off">
%if mylar.CONFIG.SHOW_ICONS:
<img style="margin: 0px 5px" src="interfaces/default/images/retry.png" height="25" width="25" class="highqual" />
%else:
@ -193,7 +193,7 @@
<% dl = False %>
%endif
%if weekly['HAVEIT'] == 'No' and weekly['STATUS'] == 'Skipped':
<a href="#" onclick="doAjaxCall('queueit?ComicName=${weekly['COMIC'] | u}&ComicID=${weekly['COMICID']}&IssueID=${weekly['ISSUEID']}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}&pullinfo=${weekinfo['midweek']}&pullweek=${weekinfo['weeknumber']}&pullyear=${weekinfo['year']}',$(this),'table')" data-success="Successfully submitted search request for ${weekly['COMIC']} #${weekly['ISSUE']}" title="One off download">
<a href="#" onclick="doAjaxCall('queueit?ComicName=${weekly['COMIC'] | u}&ComicID=${weekly['COMICID']}&IssueID=${weekly['ISSUEID']}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}&pullinfo=${weekinfo['midweek']}&pullweek=${weekinfo['weeknumber']}&pullyear=${weekinfo['year']}&BookType=${weekly['FORMAT']}',$(this),'table')" data-success="Successfully submitted search request for ${weekly['COMIC']} #${weekly['ISSUE']}" title="One off download">
%if mylar.CONFIG.SHOW_ICONS:
<img style="margin: 0px 5px" src="interfaces/default/images/search.png" height="25" width="25" class="highqual" />
%else:
@ -233,7 +233,7 @@
%endif
</a>
%elif weekly['STATUS'] == 'Snatched':
<a href="#" onclick="doAjaxCall('queueit?ComicName=${weekly['COMIC'] | u}&ComicID=${weekly['COMICID']}&IssueID=${weekly['ISSUEID']}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}&pullinfo=${weekinfo['midweek']}&pullweek=${weekinfo['weeknumber']}&pullyear=${weekinfo['year']}',$(this),'table')" data-success="Successfully submitted search request for ${weekly['COMIC']} #${weekly['ISSUE']}" title="Snatch issue again">
<a href="#" onclick="doAjaxCall('queueit?ComicName=${weekly['COMIC'] | u}&ComicID=${weekly['COMICID']}&IssueID=${weekly['ISSUEID']}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}&pullinfo=${weekinfo['midweek']}&pullweek=${weekinfo['weeknumber']}&pullyear=${weekinfo['year']}&BookType=${weekly['FORMAT']}',$(this),'table')" data-success="Successfully submitted search request for ${weekly['COMIC']} #${weekly['ISSUE']}" title="Snatch issue again">
%if mylar.CONFIG.SHOW_ICONS:
<img style="margin: 0px 5px" src="interfaces/default/images/retry.png" height="25" width="25" class="highqual" />
%else:


@ -209,13 +209,21 @@ class PostProcessor(object):
if mylar.CONFIG.FILE_OPTS == 'move':
#check to make sure duplicate_dump directory exists:
checkdirectory = filechecker.validateAndCreateDirectory(mylar.CONFIG.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]')
if mylar.CONFIG.DUPLICATE_DATED_FOLDERS is True:
todaydate = datetime.datetime.now().strftime("%Y-%m-%d")
dump_folder = os.path.join(mylar.CONFIG.DUPLICATE_DUMP, todaydate)
checkdirectory = filechecker.validateAndCreateDirectory(dump_folder, True, module='[DUPLICATE-DATED CLEANUP]')
else:
dump_folder = mylar.CONFIG.DUPLICATE_DUMP
try:
shutil.move(path_to_move, os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move))
shutil.move(path_to_move, os.path.join(dump_folder, file_to_move))
except (OSError, IOError):
logger.warn('[DUPLICATE-CLEANUP] Failed to move %s ... to ... %s' % (path_to_move, os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move)))
logger.warn('[DUPLICATE-CLEANUP] Failed to move %s ... to ... %s' % (path_to_move, os.path.join(dump_folder, file_to_move)))
return False
logger.warn('[DUPLICATE-CLEANUP] Successfully moved %s ... to ... %s' % (path_to_move, os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move)))
logger.warn('[DUPLICATE-CLEANUP] Successfully moved %s ... to ... %s' % (path_to_move, os.path.join(dump_folder, file_to_move)))
return True
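The dated-folder handling above reduces to picking a per-day dump directory before the move. A minimal standalone sketch of that selection follows; the helper name resolve_dump_folder and its arguments are illustrative only, not part of Mylar:

import datetime
import os

def resolve_dump_folder(duplicate_dump, dated_folders=False):
    # with dated folders enabled, duplicates are moved into a per-day
    # subdirectory such as <duplicate_dump>/2019-03-10; otherwise the
    # base duplicate_dump directory is used as-is
    if dated_folders:
        today = datetime.datetime.now().strftime("%Y-%m-%d")
        return os.path.join(duplicate_dump, today)
    return duplicate_dump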
def tidyup(self, odir=None, del_nzbdir=False, sub_path=None, cacheonly=False, filename=None):
@ -525,9 +533,9 @@ class PostProcessor(object):
loopchk.append(re.sub('[\|\s]', '', orig_seriesname.lower()))
tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(seq=','.join('?' * len(loopchk)))
comicseries = myDB.select(tmpsql, tuple(loopchk))
if not comicseries:
logger.error('%s No Series in Watchlist - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?' % module)
break
#if not comicseries:
# logger.error('[%s][%s] No Series named %s - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?' % (module, fl['comicfilename'], fl['series_name']))
# continue
watchvals = []
for wv in comicseries:
logger.info('Now checking: %s [%s]' % (wv['ComicName'], wv['ComicID']))
@ -773,13 +781,12 @@ class PostProcessor(object):
else:
logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of %s DID NOT match to volume/year label of %s' % (module, watch_values['SeriesYear'], tmp_watchmatch_vol))
datematch = "False"
if len(watchvals) > 1 and int(tmp_watchmatch_vol) > 1:
elif len(watchvals) > 1 and int(tmp_watchmatch_vol) >= 1:
if int(tmp_watchmatch_vol) == int(tmp_watchlist_vol):
logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s' % (module, watch_values['ComicVersion'], watchmatch['series_volume']))
else:
logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s' % (module, watch_values['ComicVersion'], watchmatch['series_volume']))
continue
#datematch = "False"
datematch = "False"
else:
if any([tmp_watchlist_vol is None, tmp_watchlist_vol == 'None', tmp_watchlist_vol == '']):
logger.fdebug('%s[ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.' % module)
@ -873,32 +880,6 @@ class PostProcessor(object):
elif self.matched is True:
logger.warn('%s[MATCH: %s - %s] We matched by name for this series, but cannot find a corresponding issue number in the series list.' % (module, cs['ComicName'], cs['ComicID']))
#mlp = []
#xmld = filechecker.FileChecker()
#if len(manual_list) > 1:
# #in case the manual pp matches on more than one series in the watchlist, drop back down to exact name matching to see if we can narrow
# #the matches down further to the point where there's only one exact match. Not being able to match specifically when there is more than
# #one item in the manual list that's matched to the same file will result in a dupe_src error and/or mistakingly PP'ing against the
# #wrong series.
# for x in manual_list:
# xmld1 = xmld.dynamic_replace(helpers.conversion(x['ComicName']))
# xseries = xmld1['mod_seriesname'].lower()
# xmld2 = xmld.dynamic_replace(helpers.conversion(x['Series']))
# xfile = xmld2['mod_seriesname'].lower()
# #logger.info('[xseries:%s][xfile:%s]' % (xseries,xfile))
# if re.sub('\|', '', xseries).strip() == re.sub('\|', '', xfile).strip():
# logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, x['ComicName'], x['ComicID']))
# mlp.append(x)
# else:
# pass
# if len(mlp) == 1:
# manual_list = mlp
# logger.fdebug('%s[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series' % module)
# else:
# logger.warn('%s[CONFIRMATION-PROBLEM] Unable to determine proper match for series as more than one successful match came up.' % module)
#we should setup for manual post-processing of story-arc issues here
#we can also search by ComicID to just grab those particular arcs as an alternative as well (not done)
@ -1141,12 +1122,12 @@ class PostProcessor(object):
else:
logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of %s DID NOT match to volume/year label of %s' % (module, arc_values['SeriesYear'], tmp_arcmatch_vol))
datematch = "False"
if len(arcvals) > 1 and int(tmp_arcmatch_vol) > 1:
if len(arcvals) > 1 and int(tmp_arcmatch_vol) >= 1:
if int(tmp_arcmatch_vol) == int(tmp_arclist_vol):
logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s' % (module, arc_values['ComicVersion'], arcmatch['series_volume']))
else:
logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s' % (module, arc_values['ComicVersion'], arcmatch['series_volume']))
continue
datematch = "False"
else:
if any([tmp_arclist_vol is None, tmp_arclist_vol == 'None', tmp_arclist_vol == '']):
logger.fdebug('%s[ARC ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.' % module)
@ -1315,7 +1296,7 @@ class PostProcessor(object):
logger.fdebug('%s There are %s files found that match on your watchlist, %s files are considered one-off\'s, and %s files do not match anything' % (module, len(manual_list), len(oneoff_issuelist), int(filelist['comiccount']) - len(manual_list)))
delete_arc = []
if len(manual_arclist) > 0:
if len(manual_arclist) > 0: # and mylar.CONFIG.copy2arcdir is True:
logger.info('[STORY-ARC MANUAL POST-PROCESSING] I have found %s issues that belong to Story Arcs. Flinging them into the correct directories.' % len(manual_arclist))
for ml in manual_arclist:
issueid = ml['IssueID']
@ -1323,6 +1304,7 @@ class PostProcessor(object):
logger.info('[STORY-ARC POST-PROCESSING] Enabled for %s' % ml['StoryArc'])
grdst = helpers.arcformat(ml['StoryArc'], helpers.spantheyears(ml['StoryArcID']), ml['Publisher'])
logger.info('grdst: %s' % grdst)
#tag the meta.
metaresponse = None
@ -1436,55 +1418,60 @@ class PostProcessor(object):
if (all([self.nzb_name != 'Manual Run', self.apicall is False]) or (self.oneoffinlist is True or all([self.issuearcid is not None, self.issueid is None]))) and not self.nzb_name.startswith('0-Day'): # and all([self.issueid is None, self.comicid is None, self.apicall is False]):
ppinfo = []
if self.oneoffinlist is False:
nzbname = self.nzb_name
#remove extensions from nzb_name if they somehow got through (Experimental most likely)
if nzbname.lower().endswith(self.extensions):
fd, ext = os.path.splitext(nzbname)
self._log("Removed extension from nzb: " + ext)
nzbname = re.sub(str(ext), '', str(nzbname))
#replace spaces
# let's change all space to decimals for simplicity
logger.fdebug('[NZBNAME]: ' + nzbname)
#gotta replace & or escape it
nzbname = re.sub("\&", 'and', nzbname)
nzbname = re.sub('[\,\:\?\'\+]', '', nzbname)
nzbname = re.sub('[\(\)]', ' ', nzbname)
logger.fdebug('[NZBNAME] nzbname (remove chars): ' + nzbname)
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[NZBNAME] nzbname (remove extensions, double spaces, convert underscores to spaces): ' + nzbname)
nzbname = re.sub('\s', '.', nzbname)
logger.fdebug('%s After conversions, nzbname is : %s' % (module, nzbname))
# if mylar.USE_NZBGET==1:
# nzbname=self.nzb_name
self._log("nzbname: %s" % nzbname)
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
self.oneoff = False
if nzbiss is None:
self._log("Failure - could not initially locate nzbfile in my database to rename.")
logger.fdebug('%s Failure - could not locate nzbfile initially' % module)
# if failed on spaces, change it all to decimals and try again.
nzbname = re.sub('[\(\)]', '', str(nzbname))
self._log("trying again with this nzbname: %s" % nzbname)
logger.fdebug('%s Trying to locate nzbfile again with nzbname of : %s' % (module, nzbname))
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
logger.error('%s Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.' % module)
#set it up to run manual post-processing on self.nzb_folder
self._log('Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.')
self.valreturn.append({"self.log": self.log,
"mode": 'outside'})
return self.queue.put(self.valreturn)
if any([self.issueid is not None, self.issuearcid is not None]):
if self.issueid is not None:
s_id = self.issueid
else:
self._log("I corrected and found the nzb as : %s" % nzbname)
logger.fdebug('%s Auto-corrected and found the nzb as : %s' % (module, nzbname))
#issueid = nzbiss['IssueID']
s_id = self.issuearcid
nzbiss = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=?', [s_id]).fetchone()
else:
nzbname = self.nzb_name
#remove extensions from nzb_name if they somehow got through (Experimental most likely)
if nzbname.lower().endswith(self.extensions):
fd, ext = os.path.splitext(nzbname)
self._log("Removed extension from nzb: " + ext)
nzbname = re.sub(str(ext), '', str(nzbname))
#replace spaces
# let's change all space to decimals for simplicity
logger.fdebug('[NZBNAME]: ' + nzbname)
#gotta replace & or escape it
nzbname = re.sub("\&", 'and', nzbname)
nzbname = re.sub('[\,\:\?\'\+]', '', nzbname)
nzbname = re.sub('[\(\)]', ' ', nzbname)
logger.fdebug('[NZBNAME] nzbname (remove chars): ' + nzbname)
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[NZBNAME] nzbname (remove extensions, double spaces, convert underscores to spaces): ' + nzbname)
nzbname = re.sub('\s', '.', nzbname)
logger.fdebug('%s After conversions, nzbname is : %s' % (module, nzbname))
self._log("nzbname: %s" % nzbname)
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
self._log("Failure - could not initially locate nzbfile in my database to rename.")
logger.fdebug('%s Failure - could not locate nzbfile initially' % module)
# if failed on spaces, change it all to decimals and try again.
nzbname = re.sub('[\(\)]', '', str(nzbname))
self._log("trying again with this nzbname: %s" % nzbname)
logger.fdebug('%s Trying to locate nzbfile again with nzbname of : %s' % (module, nzbname))
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
logger.error('%s Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.' % module)
#set it up to run manual post-processing on self.nzb_folder
self._log('Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.')
self.valreturn.append({"self.log": self.log,
"mode": 'outside'})
return self.queue.put(self.valreturn)
else:
self._log("I corrected and found the nzb as : %s" % nzbname)
logger.fdebug('%s Auto-corrected and found the nzb as : %s' % (module, nzbname))
#issueid = nzbiss['IssueID']
issueid = nzbiss['IssueID']
logger.fdebug('%s Issueid: %s' % (module, issueid))
@ -1940,7 +1927,8 @@ class PostProcessor(object):
'ComicName': tinfo['comicname'],
'IssueNumber': tinfo['issuenumber'],
'Publisher': tinfo['publisher'],
'OneOff': tinfo['oneoff']}
'OneOff': tinfo['oneoff'],
'ForcedMatch': False}
else:
@ -2079,6 +2067,9 @@ class PostProcessor(object):
elif 'mu' in issuenum.lower() and issuenum[:1].isdigit():
issuenum = re.sub("[^0-9]", "", issuenum)
issue_except = '.MU'
elif 'hu' in issuenum.lower() and issuenum[:1].isdigit():
issuenum = re.sub("[^0-9]", "", issuenum)
issue_except = '.HU'
elif u'\xbd' in issuenum:
issuenum = '0.5'
elif u'\xbc' in issuenum:
@ -2624,48 +2615,50 @@ class PostProcessor(object):
if arcinfo is None:
logger.warn('Unable to locate IssueID within given Story Arc. Ensure everything is up-to-date (refreshed) for the Arc.')
else:
if mylar.CONFIG.COPY2ARCDIR is True:
if arcinfo['Publisher'] is None:
arcpub = arcinfo['IssuePublisher']
else:
arcpub = arcinfo['Publisher']
if arcinfo['Publisher'] is None:
arcpub = arcinfo['IssuePublisher']
grdst = helpers.arcformat(arcinfo['StoryArc'], helpers.spantheyears(arcinfo['StoryArcID']), arcpub)
logger.info('grdst:' + grdst)
checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module)
if not checkdirectory:
logger.warn('%s Error trying to validate/create directory. Aborting this process at this time.' % module)
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
if mylar.CONFIG.READ2FILENAME:
logger.fdebug('%s readingorder#: %s' % (module, arcinfo['ReadingOrder']))
if int(arcinfo['ReadingOrder']) < 10: readord = "00" + str(arcinfo['ReadingOrder'])
elif int(arcinfo['ReadingOrder']) >= 10 and int(arcinfo['ReadingOrder']) <= 99: readord = "0" + str(arcinfo['ReadingOrder'])
else: readord = str(arcinfo['ReadingOrder'])
dfilename = str(readord) + "-" + os.path.split(dst)[1]
else:
dfilename = os.path.split(dst)[1]
grab_dst = os.path.join(grdst, dfilename)
logger.fdebug('%s Destination Path : %s' % (module, grab_dst))
grab_src = dst
logger.fdebug('%s Source Path : %s' % (module, grab_src))
logger.info('%s[%s] %s into directory: %s' % (module, mylar.CONFIG.ARC_FILEOPS.upper(), dst, grab_dst))
try:
#need to ensure that src is pointing to the series in order to do a soft/hard-link properly
checkspace = helpers.get_free_space(grdst)
if checkspace is False:
raise OSError
fileoperation = helpers.file_ops(grab_src, grab_dst, arc=True)
if not fileoperation:
raise OSError
except Exception as e:
logger.error('%s Failed to %s %s: %s' % (module, mylar.CONFIG.ARC_FILEOPS, grab_src, e))
return
else:
arcpub = arcinfo['Publisher']
grdst = helpers.arcformat(arcinfo['StoryArc'], helpers.spantheyears(arcinfo['StoryArcID']), arcpub)
logger.info('grdst:' + grdst)
checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module)
if not checkdirectory:
logger.warn('%s Error trying to validate/create directory. Aborting this process at this time.' % module)
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
if mylar.CONFIG.READ2FILENAME:
logger.fdebug('%s readingorder#: %s' % (module, arcinfo['ReadingOrder']))
if int(arcinfo['ReadingOrder']) < 10: readord = "00" + str(arcinfo['ReadingOrder'])
elif int(arcinfo['ReadingOrder']) >= 10 and int(arcinfo['ReadingOrder']) <= 99: readord = "0" + str(arcinfo['ReadingOrder'])
else: readord = str(arcinfo['ReadingOrder'])
dfilename = str(readord) + "-" + os.path.split(dst)[1]
else:
dfilename = os.path.split(dst)[1]
grab_dst = os.path.join(grdst, dfilename)
logger.fdebug('%s Destination Path : %s' % (module, grab_dst))
grab_src = dst
logger.fdebug('%s Source Path : %s' % (module, grab_src))
logger.info('%s[%s] %s into directory: %s' % (module, mylar.CONFIG.ARC_FILEOPS.upper(), dst, grab_dst))
try:
#need to ensure that src is pointing to the series in order to do a soft/hard-link properly
checkspace = helpers.get_free_space(grdst)
if checkspace is False:
raise OSError
fileoperation = helpers.file_ops(grab_src, grab_dst, arc=True)
if not fileoperation:
raise OSError
except Exception as e:
logger.error('%s Failed to %s %s: %s' % (module, mylar.CONFIG.ARC_FILEOPS, grab_src, e))
return
grab_dst = dst
#delete entry from nzblog table in case it was forced via the Story Arc Page
IssArcID = 'S' + str(ml['IssueArcID'])


@ -32,7 +32,6 @@ import Queue
import platform
import locale
import re
from threading import Lock, Thread
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
@ -168,6 +167,7 @@ def initialize(config_file):
global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \
DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, DDL_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \
DDLPOOL, NZBPOOL, SNPOOL, PPPOOL, SEARCHPOOL, \
USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \
PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \
MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, DDL_LOCK, LOG_LEVEL, \
@ -375,40 +375,21 @@ def start():
ss = searchit.CurrentSearcher()
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=None, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
#thread queue control..
queue_schedule('search_queue', 'start')
if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]):
logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')
SNPOOL = threading.Thread(target=helpers.worker_main, args=(SNATCHED_QUEUE,), name="AUTO-SNATCHER")
SNPOOL.start()
logger.info('[AUTO-SNATCHER] Succesfully started Auto-Snatch add-on - will now monitor for completed torrents on client....')
queue_schedule('snatched_queue', 'start')
if CONFIG.POST_PROCESSING is True and ( all([CONFIG.NZB_DOWNLOADER == 0, CONFIG.SAB_CLIENT_POST_PROCESSING is True]) or all([CONFIG.NZB_DOWNLOADER == 1, CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]) ):
if CONFIG.NZB_DOWNLOADER == 0:
logger.info('[SAB-MONITOR] Completed post-processing handling enabled for SABnzbd. Attempting to background load....')
elif CONFIG.NZB_DOWNLOADER == 1:
logger.info('[NZBGET-MONITOR] Completed post-processing handling enabled for NZBGet. Attempting to background load....')
NZBPOOL = threading.Thread(target=helpers.nzb_monitor, args=(NZB_QUEUE,), name="AUTO-COMPLETE-NZB")
NZBPOOL.start()
if CONFIG.NZB_DOWNLOADER == 0:
logger.info('[AUTO-COMPLETE-NZB] Succesfully started Completed post-processing handling for SABnzbd - will now monitor for completed nzbs within sabnzbd and post-process automatically....')
elif CONFIG.NZB_DOWNLOADER == 1:
logger.info('[AUTO-COMPLETE-NZB] Succesfully started Completed post-processing handling for NZBGet - will now monitor for completed nzbs within nzbget and post-process automatically....')
queue_schedule('nzb_queue', 'start')
logger.info('[SEARCH-QUEUE] Attempting to background load the search queue....')
SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE")
SEARCHPOOL.start()
if CONFIG.POST_PROCESSING is True:
logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....')
PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE")
PPPOOL.start()
logger.info('[POST-PROCESS-QUEUE] Succesfully started Post-Processing Queuer....')
queue_schedule('pp_queue', 'start')
if CONFIG.ENABLE_DDL is True:
logger.info('[DDL-QUEUE] DDL Download queue enabled & monitoring for requests....')
DDLPOOL = threading.Thread(target=helpers.ddl_downloader, args=(DDL_QUEUE,), name="DDL-QUEUE")
DDLPOOL.start()
logger.info('[DDL-QUEUE] Succesfully started DDL Download Queuer....')
queue_schedule('ddl_queue', 'start')
helpers.latestdate_fix()
if CONFIG.ALT_PULL == 2:
@ -495,6 +476,183 @@ def start():
started = True
def queue_schedule(queuetype, mode):
#global _INITIALIZED
if mode == 'start':
if queuetype == 'snatched_queue':
try:
if mylar.SNPOOL.isAlive() is True:
return
except Exception as e:
pass
logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')
mylar.SNPOOL = threading.Thread(target=helpers.worker_main, args=(SNATCHED_QUEUE,), name="AUTO-SNATCHER")
mylar.SNPOOL.start()
logger.info('[AUTO-SNATCHER] Successfully started Auto-Snatch add-on - will now monitor for completed torrents on client....')
elif queuetype == 'nzb_queue':
try:
if mylar.NZBPOOL.isAlive() is True:
return
except Exception as e:
pass
if CONFIG.NZB_DOWNLOADER == 0:
logger.info('[SAB-MONITOR] Completed post-processing handling enabled for SABnzbd. Attempting to background load....')
elif CONFIG.NZB_DOWNLOADER == 1:
logger.info('[NZBGET-MONITOR] Completed post-processing handling enabled for NZBGet. Attempting to background load....')
mylar.NZBPOOL = threading.Thread(target=helpers.nzb_monitor, args=(NZB_QUEUE,), name="AUTO-COMPLETE-NZB")
mylar.NZBPOOL.start()
if CONFIG.NZB_DOWNLOADER == 0:
logger.info('[AUTO-COMPLETE-NZB] Successfully started Completed post-processing handling for SABnzbd - will now monitor for completed nzbs within sabnzbd and post-process automatically...')
elif CONFIG.NZB_DOWNLOADER == 1:
logger.info('[AUTO-COMPLETE-NZB] Successfully started Completed post-processing handling for NZBGet - will now monitor for completed nzbs within nzbget and post-process automatically...')
elif queuetype == 'search_queue':
try:
if mylar.SEARCHPOOL.isAlive() is True:
return
except Exception as e:
pass
logger.info('[SEARCH-QUEUE] Attempting to background load the search queue....')
mylar.SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE")
mylar.SEARCHPOOL.start()
logger.info('[SEARCH-QUEUE] Successfully started the Search Queuer...')
elif queuetype == 'pp_queue':
try:
if mylar.PPPOOL.isAlive() is True:
return
except Exception as e:
pass
logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....')
mylar.PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE")
mylar.PPPOOL.start()
logger.info('[POST-PROCESS-QUEUE] Successfully started Post-Processing Queuer....')
elif queuetype == 'ddl_queue':
try:
if mylar.DDLPOOL.isAlive() is True:
return
except Exception as e:
pass
logger.info('[DDL-QUEUE] DDL Download queue enabled & monitoring for requests....')
mylar.DDLPOOL = threading.Thread(target=helpers.ddl_downloader, args=(DDL_QUEUE,), name="DDL-QUEUE")
mylar.DDLPOOL.start()
logger.info('[DDL-QUEUE] Successfully started DDL Download Queuer....')
else:
if (queuetype == 'nzb_queue') or mode == 'shutdown':
try:
if mylar.NZBPOOL.isAlive() is False:
return
elif all([mode!= 'shutdown', mylar.CONFIG.POST_PROCESSING is True]) and ( all([mylar.CONFIG.NZB_DOWNLOADER == 0, mylar.CONFIG.SAB_CLIENT_POST_PROCESSING is True]) or all([mylar.CONFIG.NZB_DOWNLOADER == 1, mylar.CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]) ):
return
except Exception as e:
return
logger.fdebug('Terminating the NZB auto-complete queue thread')
try:
mylar.NZB_QUEUE.put('exit')
mylar.NZBPOOL.join(5)
logger.fdebug('Joined pool for termination - successful')
except KeyboardInterrupt:
mylar.NZB_QUEUE.put('exit')
mylar.NZBPOOL.join(5)
except AssertionError:
if mode == 'shutdown':
os._exit(0)
if (queuetype == 'snatched_queue') or mode == 'shutdown':
try:
if mylar.SNPOOL.isAlive() is False:
return
elif all([mode != 'shutdown', mylar.CONFIG.ENABLE_TORRENTS is True, mylar.CONFIG.AUTO_SNATCH is True, OS_DETECT != 'Windows']) and any([mylar.CONFIG.TORRENT_DOWNLOADER == 2, mylar.CONFIG.TORRENT_DOWNLOADER == 4]):
return
except Exception as e:
return
logger.fdebug('Terminating the auto-snatch thread.')
try:
mylar.SNATCHED_QUEUE.put('exit')
mylar.SNPOOL.join(5)
logger.fdebug('Joined pool for termination - successful')
except KeyboardInterrupt:
mylar.SNATCHED_QUEUE.put('exit')
mylar.SNPOOL.join(5)
except AssertionError:
if mode == 'shutdown':
os._exit(0)
if (queuetype == 'search_queue') or mode == 'shutdown':
try:
if mylar.SEARCHPOOL.isAlive() is False:
return
except Exception as e:
return
logger.fdebug('Terminating the search queue thread.')
try:
mylar.SEARCH_QUEUE.put('exit')
mylar.SEARCHPOOL.join(5)
logger.fdebug('Joined pool for termination - successful')
except KeyboardInterrupt:
mylar.SEARCH_QUEUE.put('exit')
mylar.SEARCHPOOL.join(5)
except AssertionError:
if mode == 'shutdown':
os._exit(0)
if (queuetype == 'pp_queue') or mode == 'shutdown':
try:
if mylar.PPPOOL.isAlive() is False:
return
elif all([mylar.CONFIG.POST_PROCESSING is True, mode != 'shutdown']):
return
except Exception as e:
return
logger.fdebug('Terminating the post-processing queue thread.')
try:
mylar.PP_QUEUE.put('exit')
mylar.PPPOOL.join(5)
logger.fdebug('Joined pool for termination - successful')
except KeyboardInterrupt:
mylar.PP_QUEUE.put('exit')
mylar.PPPOOL.join(5)
except AssertionError:
if mode == 'shutdown':
os._exit(0)
if (queuetype == 'ddl_queue') or mode == 'shutdown':
try:
if mylar.DDLPOOL.isAlive() is False:
return
elif all([mylar.CONFIG.ENABLE_DDL is True, mode != 'shutdown']):
return
except Exception as e:
return
logger.fdebug('Terminating the DDL download queue thread')
try:
mylar.DDL_QUEUE.put('exit')
mylar.DDLPOOL.join(5)
logger.fdebug('Joined pool for termination - successful')
except KeyboardInterrupt:
mylar.DDL_QUEUE.put('exit')
DDLPOOL.join(5)
except AssertionError:
if mode == 'shutdown':
os._exit(0)
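Taken together, the new queue_schedule() helper gives start() and halt() a single entry point per worker pool. A condensed sketch of how the callers use it, mirroring calls already present in this changeset rather than adding new behaviour:

# start-up: only pools that the configuration enables are launched
queue_schedule('search_queue', 'start')
if CONFIG.POST_PROCESSING is True:
    queue_schedule('pp_queue', 'start')
if CONFIG.ENABLE_DDL is True:
    queue_schedule('ddl_queue', 'start')

# shutdown: with mode == 'shutdown' every branch runs, so the queuetype
# value is effectively ignored and all running pools are told to exit
queue_schedule('all', 'shutdown')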
def dbcheck():
conn = sqlite3.connect(DB_FILE)
c_error = 'sqlite3.OperationalError'
@ -528,7 +686,7 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT, remote_filesize TEXT, updated_date TEXT, mainlink TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT, remote_filesize TEXT, updated_date TEXT, mainlink TEXT, issues TEXT)')
conn.commit()
c.close()
@ -1115,6 +1273,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN mainlink TEXT')
try:
c.execute('SELECT issues from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN issues TEXT')
#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere
#prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the
#value in the sql so we can display it in the details screen for everyone to wonder at.
@ -1230,61 +1393,20 @@ def halt():
logger.info('Shutting down the background schedulers...')
SCHED.shutdown(wait=False)
if NZBPOOL is not None:
logger.info('Terminating the nzb auto-complete thread.')
try:
NZBPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
NZB_QUEUE.put('exit')
NZBPOOL.join(5)
except AssertionError:
os._exit(0)
queue_schedule('all', 'shutdown')
#if NZBPOOL is not None:
# queue_schedule('nzb_queue', 'shutdown')
#if SNPOOL is not None:
# queue_schedule('snatched_queue', 'shutdown')
if SNPOOL is not None:
logger.info('Terminating the auto-snatch thread.')
try:
SNPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
SNATCHED_QUEUE.put('exit')
SNPOOL.join(5)
except AssertionError:
os._exit(0)
#if SEARCHPOOL is not None:
# queue_schedule('search_queue', 'shutdown')
#if PPPOOL is not None:
# queue_schedule('pp_queue', 'shutdown')
if SEARCHPOOL is not None:
logger.info('Terminating the search queue thread.')
try:
SEARCHPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
SEARCH_QUEUE.put('exit')
SEARCHPOOL.join(5)
except AssertionError:
os._exit(0)
if PPPOOL is not None:
logger.info('Terminating the post-processing queue thread.')
try:
PPPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
PP_QUEUE.put('exit')
PPPOOL.join(5)
except AssertionError:
os._exit(0)
if DDLPOOL is not None:
logger.info('Terminating the DDL download queue thread.')
try:
DDLPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
DDL_QUEUE.put('exit')
DDLPOOL.join(5)
except AssertionError:
os._exit(0)
#if DDLPOOL is not None:
# queue_schedule('ddl_queue', 'shutdown')
_INITIALIZED = False


@ -41,35 +41,41 @@ class info32p(object):
'Accept-Charset': 'utf-8',
'User-Agent': 'Mozilla/5.0'}
if test is True:
if test:
self.username_32p = test['username']
self.password_32p = test['password']
self.test = True
else:
self.username_32p = mylar.CONFIG.USERNAME_32P
self.password_32p = mylar.CONFIG.PASSWORD_32P
self.test = False
self.error = None
self.method = None
lses = self.LoginSession(mylar.CONFIG.USERNAME_32P, mylar.CONFIG.PASSWORD_32P)
if not lses.login():
if not self.test:
logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' % self.module)
return "disable"
else:
if self.error:
return self.error #rtnmsg
if any([mylar.CONFIG.MODE_32P is True, self.test is True]):
lses = self.LoginSession(mylar.CONFIG.USERNAME_32P, mylar.CONFIG.PASSWORD_32P)
if not lses.login():
if not self.test:
logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' % self.module)
return "disable"
else:
return self.method
if self.error:
return self.error #rtnmsg
else:
return self.method
else:
logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module)
self.authkey = lses.authkey
self.passkey = lses.passkey
self.session = lses.ses
self.uid = lses.uid
try:
mylar.INKDROPS_32P = int(math.floor(float(lses.inkdrops['results'][0]['inkdrops'])))
except:
mylar.INKDROPS_32P = lses.inkdrops['results'][0]['inkdrops']
else:
logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module)
self.authkey = lses.authkey
self.passkey = lses.passkey
self.session = lses.ses
self.uid = lses.uid
try:
mylar.INKDROPS_32P = int(math.floor(float(lses.inkdrops['results'][0]['inkdrops'])))
except:
mylar.INKDROPS_32P = lses.inkdrops['results'][0]['inkdrops']
self.session = requests.Session()
self.reauthenticate = reauthenticate
self.searchterm = searchterm
self.publisher_list = {'Entertainment', 'Press', 'Comics', 'Publishing', 'Comix', 'Studios!'}


@ -6,6 +6,7 @@ import os
import glob
import codecs
import shutil
import threading
import re
import ConfigParser
import mylar
@ -146,6 +147,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'DUPECONSTRAINT': (str, 'Duplicates', None),
'DDUMP': (bool, 'Duplicates', False),
'DUPLICATE_DUMP': (str, 'Duplicates', None),
'DUPLICATE_DATED_FOLDERS': (bool, 'Duplicates', False),
'PROWL_ENABLED': (bool, 'Prowl', False),
'PROWL_PRIORITY': (int, 'Prowl', 0),
@ -287,6 +289,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'ENABLE_DDL': (bool, 'DDL', False),
'ALLOW_PACKS': (bool, 'DDL', False),
'DDL_LOCATION': (str, 'DDL', None),
'DDL_AUTORESUME': (bool, 'DDL', True),
'AUTO_SNATCH': (bool, 'AutoSnatch', False),
'AUTO_SNATCH_SCRIPT': (str, 'AutoSnatch', None),
@ -382,7 +385,7 @@ class Config(object):
count = sum(1 for line in open(self._config_file))
else:
count = 0
self.newconfig = 8
self.newconfig = 9
if count == 0:
CONFIG_VERSION = 0
MINIMALINI = False
@ -502,13 +505,12 @@ class Config(object):
shutil.move(self._config_file, os.path.join(mylar.DATA_DIR, 'config.ini.backup'))
except:
print('Unable to make proper backup of config file in %s' % os.path.join(mylar.DATA_DIR, 'config.ini.backup'))
if self.newconfig == 8:
if self.CONFIG_VERSION < 9:
print('Attempting to update configuration..')
#torznab multiple entries merged into extra_torznabs value
self.config_update()
setattr(self, 'CONFIG_VERSION', str(self.newconfig))
config.set('General', 'CONFIG_VERSION', str(self.newconfig))
print('Updating config to newest version : %s' % self.newconfig)
self.writeconfig()
else:
self.provider_sequence()
@ -533,12 +535,12 @@ class Config(object):
print('Logging level over-ridden by startup value. Changing from %s to %s' % (self.LOG_LEVEL, mylar.LOG_LEVEL))
logger.mylar_log.initLogger(loglevel=mylar.LOG_LEVEL, log_dir=self.LOG_DIR, max_logsize=self.MAX_LOGSIZE, max_logfiles=self.MAX_LOGFILES)
self.configure()
self.configure(startup=startup)
return self
def config_update(self):
if self.newconfig == 8:
print('Updating Configuration from %s to %s' % (self.CONFIG_VERSION, self.newconfig))
print('Updating Configuration from %s to %s' % (self.CONFIG_VERSION, self.newconfig))
if self.CONFIG_VERSION < 8:
print('Checking for existing torznab configuration...')
if not any([self.TORZNAB_NAME is None, self.TORZNAB_HOST is None, self.TORZNAB_APIKEY is None, self.TORZNAB_CATEGORY is None]):
torznabs =[(self.TORZNAB_NAME, self.TORZNAB_HOST, self.TORZNAB_APIKEY, self.TORZNAB_CATEGORY, str(int(self.ENABLE_TORZNAB)))]
@ -552,7 +554,17 @@ class Config(object):
config.remove_option('Torznab', 'torznab_apikey')
config.remove_option('Torznab', 'torznab_category')
config.remove_option('Torznab', 'torznab_verify')
print('Successfully removed old entries.')
print('Successfully removed outdated config entries.')
if self.newconfig == 9:
#rejig rtorrent settings due to change.
try:
if all([self.RTORRENT_SSL is True, not self.RTORRENT_HOST.startswith('http')]):
self.RTORRENT_HOST = 'https://' + self.RTORRENT_HOST
config.set('Rtorrent', 'rtorrent_host', self.RTORRENT_HOST)
except:
pass
config.remove_option('Rtorrent', 'rtorrent_ssl')
print('Successfully removed outdated config entries.')
print('Configuration upgraded to version %s' % self.newconfig)
def check_section(self, section, key):
@ -729,7 +741,12 @@ class Config(object):
except IOError as e:
logger.warn("Error writing configuration file: %s", e)
def configure(self, update=False):
def configure(self, update=False, startup=False):
#force alt_pull = 2 on restarts regardless of settings
if self.ALT_PULL != 2:
self.ALT_PULL = 2
config.set('Weekly', 'alt_pull', str(self.ALT_PULL))
try:
if not any([self.SAB_HOST is None, self.SAB_HOST == '', 'http://' in self.SAB_HOST[:7], 'https://' in self.SAB_HOST[:8]]):
@ -885,6 +902,18 @@ class Config(object):
else:
logger.fdebug('Successfully created ComicTagger Settings location.')
#make sure queues are running here...
if startup is False:
if self.POST_PROCESSING is True and ( all([self.NZB_DOWNLOADER == 0, self.SAB_CLIENT_POST_PROCESSING is True]) or all([self.NZB_DOWNLOADER == 1, self.NZBGET_CLIENT_POST_PROCESSING is True]) ):
mylar.queue_schedule('nzb_queue', 'start')
elif self.POST_PROCESSING is True and ( all([self.NZB_DOWNLOADER == 0, self.SAB_CLIENT_POST_PROCESSING is False]) or all([self.NZB_DOWNLOADER == 1, self.NZBGET_CLIENT_POST_PROCESSING is False]) ):
mylar.queue_schedule('nzb_queue', 'stop')
if self.ENABLE_DDL is True:
mylar.queue_schedule('ddl_queue', 'start')
elif self.ENABLE_DDL is False:
mylar.queue_schedule('ddl_queue', 'stop')
if not self.DDL_LOCATION:
self.DDL_LOCATION = self.CACHE_DIR
if self.ENABLE_DDL is True:


@ -320,7 +320,7 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Type'] = 'None'
if comic_deck != 'None':
if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'one shot' in re.sub('-', '', comic_deck.lower()).strip(), 'hardcover' in comic_deck.lower()]):
if 'print' in comic_deck.lower():
if all(['print' in comic_deck.lower(), 'reprint' not in comic_deck.lower()]):
comic['Type'] = 'Print'
elif 'digital' in comic_deck.lower():
comic['Type'] = 'Digital'
@ -330,9 +330,11 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Type'] = 'HC'
elif 'oneshot' in re.sub('-', '', comic_deck.lower()).strip():
comic['Type'] = 'One-Shot'
else:
comic['Type'] = 'Print'
if comic_desc != 'None' and comic['Type'] == 'None':
if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower():
if 'print' in comic_desc[:60].lower() and all(['print edition can be found' not in comic_desc.lower(), 'reprints' not in comic_desc.lower()]):
comic['Type'] = 'Print'
elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower():
comic['Type'] = 'Digital'
@ -464,7 +466,10 @@ def GetComicInfo(comicid, dom, safechk=None):
#arbitrarily grab the next 10 chars (6 for volume + 1 for space + 3 for the actual vol #)
#increased to 10 to allow for text numbering (+5 max)
#sometimes it's volume 5 and occasionally it's fifth volume.
if i == 0:
if comicDes[v_find+7:comicDes.find(' ', v_find+7)].isdigit():
comic['ComicVersion'] = re.sub("[^0-9]", "", comicDes[v_find+7:comicDes.find(' ', v_find+7)]).strip()
break
elif i == 0:
vfind = comicDes[v_find:v_find +15] #if it's volume 5 format
basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)


@ -159,10 +159,10 @@ class FileChecker(object):
'sub': runresults['sub'],
'comicfilename': runresults['comicfilename'],
'comiclocation': runresults['comiclocation'],
'series_name': runresults['series_name'],
'series_name': helpers.conversion(runresults['series_name']),
'series_name_decoded': runresults['series_name_decoded'],
'issueid': runresults['issueid'],
'alt_series': runresults['alt_series'],
'alt_series': helpers.conversion(runresults['alt_series']),
'alt_issue': runresults['alt_issue'],
'dynamic_name': runresults['dynamic_name'],
'series_volume': runresults['series_volume'],
@ -178,7 +178,7 @@ class FileChecker(object):
'ComicFilename': runresults['comicfilename'],
'ComicLocation': runresults['comiclocation'],
'ComicSize': files['comicsize'],
'ComicName': runresults['series_name'],
'ComicName': helpers.conversion(runresults['series_name']),
'SeriesVolume': runresults['series_volume'],
'IssueYear': runresults['issue_year'],
'JusttheDigits': runresults['justthedigits'],
@ -194,9 +194,9 @@ class FileChecker(object):
'sub': runresults['sub'],
'comicfilename': runresults['comicfilename'],
'comiclocation': runresults['comiclocation'],
'series_name': runresults['series_name'],
'series_name': helpers.conversion(runresults['series_name']),
'series_volume': runresults['series_volume'],
'alt_series': runresults['alt_series'],
'alt_series': helpers.conversion(runresults['alt_series']),
'alt_issue': runresults['alt_issue'],
'issue_year': runresults['issue_year'],
'issue_number': runresults['issue_number'],
@ -227,8 +227,9 @@ class FileChecker(object):
#basepath the sub if it exists to get the parent folder.
logger.fdebug('[SUB-PATH] Checking Folder Name for more information.')
#sub = re.sub(origpath, '', path).strip()})
logger.fdebug('[SUB-PATH] Original Path : ' + str(path))
logger.fdebug('[SUB-PATH] Sub-directory : ' + str(subpath))
logger.fdebug('[SUB-PATH] Original Path : %s' % path)
logger.fdebug('[SUB-PATH] Sub-directory : %s' % subpath)
subpath = helpers.conversion(subpath)
if 'windows' in mylar.OS_DETECT.lower():
if path in subpath:
ab = len(path)
@ -407,7 +408,7 @@ class FileChecker(object):
lastmod_position = 0
booktype = 'issue'
#exceptions that are considered alpha-numeric issue numbers
exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU', 'SUMMER', 'SPRING', 'FALL', 'WINTER')
exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU', 'HU', 'SUMMER', 'SPRING', 'FALL', 'WINTER')
#unicode characters, followed by int value
# num_exceptions = [{iss:u'\xbd',val:.5},{iss:u'\xbc',val:.25}, {iss:u'\xe',val:.75}, {iss:u'\221e',val:'infinity'}]
@ -733,6 +734,7 @@ class FileChecker(object):
highest_series_pos = len(split_file)
issue2year = False
issue_year = None
possible_years = []
yearmodposition = None
@ -765,21 +767,28 @@ class FileChecker(object):
issue_year = ab
logger.fdebug('date verified as: ' + str(issue_year))
if highest_series_pos > dc['position']: highest_series_pos = dc['position']
if len(possible_years) == 1:
issueyear = possible_years[0]['year']
yearposition = possible_years[0]['yearposition']
yearmodposition = possible_years[0]['yearmodposition']
else:
yearposition = dc['position']
yearmodposition = dc['mod_position']
for x in possible_years:
logger.info('yearposition[%s] -- dc[position][%s]' % (yearposition, x['yearposition']))
if yearposition < x['yearposition']:
if all([len(possible_issuenumbers) == 1, possible_issuenumbers[0]['number'] == x['year'], x['yearposition'] != possible_issuenumbers[0]['position']]):
issue2year = True
highest_series_pos = x['yearposition']
yearposition = x['yearposition']
yearmodposition = x['yearmodposition']
if highest_series_pos > yearposition: highest_series_pos = yearposition #dc['position']: highest_series_pos = dc['position']
else:
issue_year = None
yearposition = None
yearmodposition = None
logger.fdebug('No year present within title - ignoring as a variable.')
logger.fdebug('highest_series_position: ' + str(highest_series_pos))
issue_number = None
@ -787,7 +796,7 @@ class FileChecker(object):
issue_number_position = len(split_file)
if len(possible_issuenumbers) > 0:
logger.fdebug('possible_issuenumbers: ' + str(possible_issuenumbers))
if len(possible_issuenumbers) > 1:
if len(possible_issuenumbers) >= 1:
p = 1
if '-' not in split_file[0]:
finddash = modfilename.find('-')
@ -830,7 +839,7 @@ class FileChecker(object):
issue_number = pis['number']
issue_number_position = pis['position']
logger.fdebug('issue number :' + issue_number) #(pis)
if highest_series_pos > pis['position']: highest_series_pos = pis['position']
if highest_series_pos > pis['position'] and issue2year is False: highest_series_pos = pis['position']
#else:
#logger.fdebug('numeric probably belongs to series title: ' + str(pis))
p+=1
@ -915,7 +924,7 @@ class FileChecker(object):
#make sure if we have multiple years detected, that the right one gets picked for the actual year vs. series title
if len(possible_years) > 1:
for x in possible_years:
for x in sorted(possible_years, key=operator.itemgetter('yearposition'), reverse=False):
if x['yearposition'] <= highest_series_pos:
logger.fdebug('year ' + str(x['year']) + ' is within series title. Ignoring as YEAR value')
else:
@ -946,12 +955,12 @@ class FileChecker(object):
else:
if tmpval > 2:
logger.fdebug('There are %s extra words between the issue # and the year position. Deciphering if issue title or part of series title.' % tmpval)
tmpval1 = ' '.join(split_file[issue_number_position+1:yearposition])
tmpval1 = ' '.join(split_file[issue_number_position:yearposition])
if split_file[issue_number_position+1] == '-':
usevalue = ' '.join(split_file[issue_number_position+2:yearposition])
splitv = split_file[issue_number_position+2:yearposition]
else:
splitv = split_file[issue_number_position+1:yearposition]
splitv = split_file[issue_number_position:yearposition]
splitvalue = ' '.join(splitv)
else:
#store alternate naming of title just in case


@ -116,6 +116,9 @@ class GC(object):
title = re.sub(issues, '', title).strip()
if title.endswith('#'):
title = title[:-1].strip()
else:
if any(['Marvel Week+' in title, 'INDIE Week+' in title, 'Image Week' in title, 'DC Week+' in title]):
continue
option_find = f.find("p", {"style": "text-align: center;"})
i = 0
@ -137,7 +140,7 @@ class GC(object):
nwsize = size.find('//')
size = re.sub('\[', '', size[:nwsize]).strip()
else:
size = '0 M'
size = '0M'
i+=1
dateline = f.find('time')
datefull = dateline['datetime']
@ -156,20 +159,23 @@ class GC(object):
logger.fdebug('%s [%s]' % (title, size))
results['entries'] = resultlist
return results
def parse_downloadresults(self, id, mainlink):
myDB = db.DBConnection()
series = None
year = None
size = None
title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
soup = BeautifulSoup(open(title+'.html'), 'html.parser')
orig_find = soup.find("p", {"style": "text-align: center;"})
i = 0
option_find = orig_find
possible_more = None
while True: #i <= 10:
prev_option = option_find
option_find = option_find.findNext(text=True)
if i == 0:
if i == 0 and series is None:
series = option_find
elif 'Year' in option_find:
year = option_find.findNext(text=True)
@ -187,28 +193,52 @@ class GC(object):
for f in soup.findAll("div", {"class": "aio-pulse"}):
lk = f.find('a')
if lk['title'] == 'Download Now':
link = lk['href']
site = lk['title']
break #get the first link just to test
link = {"series": series,
"site": lk['title'],
"year": year,
"issues": None,
"size": size,
"link": lk['href']}
if link is None:
logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
return
break #get the first link just to test
links = []
if possible_more.name == 'ul':
bb = possible_more.findAll('li')
for x in bb:
volume = x.findNext(text=True)
if u'\u2013' in volume:
volume = re.sub(u'\u2013', '-', volume)
linkline = x.find('a')
link = linkline['href']
site = linkline.findNext(text=True)
links.append({"volume": volume,
"site": site,
"link": link})
if link is None and possible_more.name == 'ul':
try:
bb = possible_more.findAll('li')
except:
pass
else:
for x in bb:
linkline = x.find('a')
if linkline:
if 'go.php' in linkline['href']:
volume = x.findNext(text=True)
if u'\u2013' in volume:
volume = re.sub(u'\u2013', '-', volume)
#volume label contains series, issue(s), year(s), and size
series_st = volume.find('(')
issues_st = volume.find('#')
series = volume[:series_st]
if any([issues_st == -1, series_st == -1]):
issues = None
else:
series = volume[:issues_st].strip()
issues = volume[issues_st+1:series_st].strip()
year_end = volume.find(')', series_st+1)
year = re.sub('[\(\)]', '', volume[series_st+1: year_end]).strip()
size_end = volume.find(')', year_end+1)
size = re.sub('[\(\)]', '', volume[year_end+1: size_end]).strip()
linked = linkline['href']
site = linkline.findNext(text=True)
if site == 'Main Server':
links.append({"series": series,
"site": site,
"year": year,
"issues": issues,
"size": size,
"link": linked})
else:
check_extras = soup.findAll("h3")
for sb in check_extras:
@ -222,41 +252,56 @@ class GC(object):
if u'\u2013' in volume:
volume = re.sub(u'\u2013', '-', volume)
linkline = x.find('a')
link = linkline['href']
linked = linkline['href']
site = linkline.findNext(text=True)
links.append({"volume": volume,
"site": site,
"link": link})
"link": linked})
if link is None:
if all([link is None, len(links) == 0]):
logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
return {'success': False}
if all([link is not None, len(links) == 0]):
logger.info('only one item discovered, changing queue length to accommodate: %s [%s]' % (link, type(link)))
links = [link]
elif len(links) > 0:
if len(links) > 1:
logger.info('[DDL-QUEUER] This pack has been broken up into %s separate packs - queueing each in sequence for your enjoyment.' % len(links))
cnt = 1
for x in links:
logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link']))
if len(links) == 1:
mod_id = id
else:
mod_id = id+'-'+str(cnt)
#logger.fdebug('[%s] %s (%s) %s [%s][%s]' % (x['site'], x['series'], x['year'], x['issues'], x['size'], x['link']))
ctrlval = {'id': id}
vals = {'series': series,
'year': year,
'size': size,
'issueid': self.issueid,
'comicid': self.comicid,
'link': link,
'status': 'Queued'}
myDB.upsert('ddl_info', vals, ctrlval)
ctrlval = {'id': mod_id}
vals = {'series': x['series'],
'year': x['year'],
'size': x['size'],
'issues': x['issues'],
'issueid': self.issueid,
'comicid': self.comicid,
'link': x['link'],
'mainlink': mainlink,
'updated_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),
'status': 'Queued'}
myDB.upsert('ddl_info', vals, ctrlval)
mylar.DDL_QUEUE.put({'link': link,
'mainlink': mainlink,
'series': series,
'year': year,
'size': size,
'comicid': self.comicid,
'issueid': self.issueid,
'id': id})
mylar.DDL_QUEUE.put({'link': x['link'],
'mainlink': mainlink,
'series': x['series'],
'year': x['year'],
'size': x['size'],
'comicid': self.comicid,
'issueid': self.issueid,
'id': mod_id,
'resume': None})
cnt+=1
return {'success': True}
def downloadit(self, id, link, mainlink):
def downloadit(self, id, link, mainlink, resume=None):
if mylar.DDL_LOCK is True:
logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.')
return
@ -267,25 +312,50 @@ class GC(object):
filename = None
try:
with cfscrape.create_scraper() as s:
if resume is not None:
logger.info('[DDL-RESUME] Attempting to resume from: %s bytes' % resume)
self.headers['Range'] = 'bytes=%d-' % resume
cf_cookievalue, cf_user_agent = s.get_tokens(mainlink, headers=self.headers)
t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True)
filename = os.path.basename(urllib.unquote(t.url).decode('utf-8'))
if 'GetComics.INFO' in filename:
filename = re.sub('GetComics.INFO', '', filename, re.I).strip()
path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)
try:
remote_filesize = int(t.headers['Content-length'])
logger.fdebug('remote filesize: %s' % remote_filesize)
except Exception as e:
logger.warn('[WARNING] Unable to retrieve remote file size - this is usually due to the page being behind a different click-bait/ad page. Error returned as : %s' % e)
logger.warn('[WARNING] Considering this particular download as invalid and will ignore this result.')
remote_filesize = 0
mylar.DDL_LOCK = False
return ({"success": False,
"filename": filename,
"path": None})
#write the filename to the db for tracking purposes...
myDB.upsert('ddl_info', {'filename': filename}, {'id': id})
myDB.upsert('ddl_info', {'filename': filename, 'remote_filesize': remote_filesize}, {'id': id})
path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)
if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
buf = StringIO(t.content)
f = gzip.GzipFile(fileobj=buf)
with open(path, 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
if resume is not None:
with open(path, 'ab') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
else:
with open(path, 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
except Exception as e:
logger.error('[ERROR] %s' % e)

View File

@ -990,6 +990,12 @@ def issuedigits(issnum):
int_issnum = (int(issnum[:-2]) * 1000) + ord('m') + ord('u')
else:
int_issnum = (int(issnum[:-3]) * 1000) + ord('m') + ord('u')
elif 'hu' in issnum.lower():
remdec = issnum.find('.') #find the decimal position.
if remdec == -1:
int_issnum = (int(issnum[:-2]) * 1000) + ord('h') + ord('u')
else:
int_issnum = (int(issnum[:-3]) * 1000) + ord('h') + ord('u')
except ValueError as e:
logger.error('[' + issnum + '] Unable to properly determine the issue number. Error: %s', e)
@ -3038,27 +3044,31 @@ def ddl_downloader(queue):
elif mylar.DDL_LOCK is False and queue.qsize() >= 1:
item = queue.get(True)
logger.info('Now loading request from DDL queue: %s' % item['series'])
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break
logger.info('Now loading request from DDL queue: %s' % item['series'])
#write this to the table so we have a record of what's going on.
ctrlval = {'id': item['id']}
val = {'status': 'Downloading'}
val = {'status': 'Downloading',
'updated_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}
myDB.upsert('ddl_info', val, ctrlval)
ddz = getcomics.GC()
ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink'])
ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink'], item['resume'])
nval = {'status': 'Completed'}
myDB.upsert('ddl_info', nval, ctrlval)
if ddzstat['success'] is True:
tdnow = datetime.datetime.now()
nval = {'status': 'Completed',
'updated_date': tdnow.strftime('%Y-%m-%d %H:%M')}
myDB.upsert('ddl_info', nval, ctrlval)
if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]):
try:
if ddzstat['filename'] is None:
logger.info('%s successfully downloaded - now initiating post-processing.' % (os.path.basename(ddzstat['path'])))
mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
mylar.PP_QUEUE.put({'nzb_name': os.path.basename(ddzstat['path']),
'nzb_folder': ddzstat['path'],
'failed': False,
'issueid': None,
@ -3076,10 +3086,15 @@ def ddl_downloader(queue):
'ddl': True})
except Exception as e:
logger.info('process error: %s [%s]' %(e, ddzstat))
elif mylar.CONFIG.POST_PROCESSING is True:
elif all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is False]):
logger.info('File successfully downloaded. Post Processing is not enabled - item retained here: %s' % os.path.join(ddzstat['path'],ddzstat['filename']))
else:
logger.info('[Status: %s] Failed to download: %s ' % (ddzstat['success'], ddzstat))
nval = {'status': 'Failed',
'updated_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}
myDB.upsert('ddl_info', nval, ctrlval)
else:
time.sleep(5)
def postprocess_main(queue):
while True:
@ -3115,11 +3130,11 @@ def search_queue(queue):
elif mylar.SEARCHLOCK is False and queue.qsize() >= 1: #len(queue) > 1:
item = queue.get(True)
logger.info('[SEARCH-QUEUE] Now loading item from search queue: %s' % item)
if item == 'exit':
logger.info('[SEARCH-QUEUE] Cleaning up workers for shutdown')
break
logger.info('[SEARCH-QUEUE] Now loading item from search queue: %s' % item)
if mylar.SEARCHLOCK is False:
ss_queue = mylar.search.searchforissue(item['issueid'])
time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing
@ -3133,59 +3148,66 @@ def search_queue(queue):
def worker_main(queue):
while True:
item = queue.get(True)
logger.info('Now loading from queue: ' + item)
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break
snstat = torrentinfo(torrent_hash=item, download=True)
if snstat['snatch_status'] == 'IN PROGRESS':
logger.info('Still downloading in client....let us try again momentarily.')
time.sleep(30)
mylar.SNATCHED_QUEUE.put(item)
elif any([snstat['snatch_status'] == 'MONITOR FAIL', snstat['snatch_status'] == 'MONITOR COMPLETE']):
logger.info('File copied for post-processing - submitting as a direct pp.')
threading.Thread(target=self.checkFolder, args=[os.path.abspath(os.path.join(snstat['copied_filepath'], os.pardir))]).start()
if queue.qsize() >= 1:
item = queue.get(True)
logger.info('Now loading from queue: ' + item)
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break
snstat = torrentinfo(torrent_hash=item, download=True)
if snstat['snatch_status'] == 'IN PROGRESS':
logger.info('Still downloading in client....let us try again momentarily.')
time.sleep(30)
mylar.SNATCHED_QUEUE.put(item)
elif any([snstat['snatch_status'] == 'MONITOR FAIL', snstat['snatch_status'] == 'MONITOR COMPLETE']):
logger.info('File copied for post-processing - submitting as a direct pp.')
threading.Thread(target=self.checkFolder, args=[os.path.abspath(os.path.join(snstat['copied_filepath'], os.pardir))]).start()
else:
time.sleep(15)
def nzb_monitor(queue):
while True:
item = queue.get(True)
logger.info('Now loading from queue: %s' % item)
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break
if all([mylar.USE_SABNZBD is True, mylar.CONFIG.SAB_CLIENT_POST_PROCESSING is True]):
nz = sabnzbd.SABnzbd(item)
nzstat = nz.processor()
elif all([mylar.USE_NZBGET is True, mylar.CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]):
nz = nzbget.NZBGet()
nzstat = nz.processor(item)
else:
logger.warn('There are no NZB Completed Download handlers enabled. Not sending item to completed download handling...')
break
if nzstat['status'] is False:
logger.info('Could not find NZBID %s in the downloader\'s queue. I will requeue this item for post-processing...' % item['NZBID'])
time.sleep(5)
mylar.NZB_QUEUE.put(item)
elif nzstat['status'] is True:
if nzstat['failed'] is False:
logger.info('File successfully downloaded - now initiating completed downloading handling.')
if queue.qsize() >= 1:
item = queue.get(True)
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break
logger.info('Now loading from queue: %s' % item)
if all([mylar.USE_SABNZBD is True, mylar.CONFIG.SAB_CLIENT_POST_PROCESSING is True]):
nz = sabnzbd.SABnzbd(item)
nzstat = nz.processor()
elif all([mylar.USE_NZBGET is True, mylar.CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]):
nz = nzbget.NZBGet()
nzstat = nz.processor(item)
else:
logger.info('File failed either due to being corrupt or incomplete - now initiating completed failed downloading handling.')
try:
mylar.PP_QUEUE.put({'nzb_name': nzstat['name'],
'nzb_folder': nzstat['location'],
'failed': nzstat['failed'],
'issueid': nzstat['issueid'],
'comicid': nzstat['comicid'],
'apicall': nzstat['apicall'],
'ddl': False})
#cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed'])
#nzpp = cc.post_process()
except Exception as e:
logger.info('process error: %s' % e)
logger.warn('There are no NZB Completed Download handlers enabled. Not sending item to completed download handling...')
break
if any([nzstat['status'] == 'file not found', nzstat['status'] == 'double-pp']):
logger.warn('Unable to complete post-processing call due to not finding file in the location provided. [%s]' % item)
elif nzstat['status'] is False:
logger.info('Could not find NZBID %s in the downloader\'s queue. I will requeue this item for post-processing...' % item['NZBID'])
time.sleep(5)
mylar.NZB_QUEUE.put(item)
elif nzstat['status'] is True:
if nzstat['failed'] is False:
logger.info('File successfully downloaded - now initiating completed downloading handling.')
else:
logger.info('File failed either due to being corrupt or incomplete - now initiating completed failed downloading handling.')
try:
mylar.PP_QUEUE.put({'nzb_name': nzstat['name'],
'nzb_folder': nzstat['location'],
'failed': nzstat['failed'],
'issueid': nzstat['issueid'],
'comicid': nzstat['comicid'],
'apicall': nzstat['apicall'],
'ddl': False})
#cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed'])
#nzpp = cc.post_process()
except Exception as e:
logger.info('process error: %s' % e)
else:
time.sleep(5)
def script_env(mode, vars):
#mode = on-snatch, pre-postprocess, post-postprocess

View File

@ -291,6 +291,15 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
else:
aliases = aliases
logger.fdebug('comicIssues: %s' % comicIssues)
logger.fdebug('seriesyear: %s / currentyear: %s' % (SeriesYear, helpers.today()[:4]))
logger.fdebug('comicType: %s' % comic['Type'])
if all([int(comicIssues) == 1, SeriesYear < helpers.today()[:4], comic['Type'] != 'One-Shot', comic['Type'] != 'TPB']):
logger.info('Determined to be a one-shot issue. Forcing Edition to One-Shot')
booktype = 'One-Shot'
else:
booktype = comic['Type']
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
@ -309,7 +318,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
"AlternateSearch": aliases,
# "ComicPublished": gcdinfo['resultPublished'],
"ComicPublished": "Unknown",
"Type": comic['Type'],
"Type": booktype,
"Corrected_Type": comic['Corrected_Type'],
"Collects": issue_list,
"DateAdded": helpers.today(),
@ -1118,6 +1127,8 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
int_issnum = (int(issnum[:-4]) * 1000) + ord('n') + ord('o') + ord('w')
elif 'mu' in issnum.lower():
int_issnum = (int(issnum[:-3]) * 1000) + ord('m') + ord('u')
elif 'hu' in issnum.lower():
int_issnum = (int(issnum[:-3]) * 1000) + ord('h') + ord('u')
elif u'\xbd' in issnum:
int_issnum = .5 * 1000
logger.fdebug('1/2 issue detected :' + issnum + ' === ' + str(int_issnum))

View File

@ -359,8 +359,8 @@ def findComic(name, mode, issue, limityear=None, type=None):
xmltype = None
if xmldeck != 'None':
if any(['print' in xmldeck.lower(), 'digital' in xmldeck.lower(), 'paperback' in xmldeck.lower(), 'hardcover' in xmldeck.lower()]):
if 'print' in xmldeck.lower():
if any(['print' in xmldeck.lower(), 'digital' in xmldeck.lower(), 'paperback' in xmldeck.lower(), 'one shot' in re.sub('-', '', xmldeck.lower()).strip(), 'hardcover' in xmldeck.lower()]):
if all(['print' in xmldeck.lower(), 'reprint' not in xmldeck.lower()]):
xmltype = 'Print'
elif 'digital' in xmldeck.lower():
xmltype = 'Digital'
@ -368,15 +368,38 @@ def findComic(name, mode, issue, limityear=None, type=None):
xmltype = 'TPB'
elif 'hardcover' in xmldeck.lower():
xmltype = 'HC'
elif 'oneshot' in re.sub('-', '', xmldeck.lower()).strip():
xmltype = 'One-Shot'
else:
xmltype = 'Print'
if xmldesc != 'None' and xmltype is None:
if 'print' in xmldesc[:60].lower() and 'print edition can be found' not in xmldesc.lower():
if 'print' in xmldesc[:60].lower() and all(['print edition can be found' not in xmldesc.lower(), 'reprints' not in xmldesc.lower()]):
xmltype = 'Print'
elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower():
xmltype = 'Digital'
elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc.lower():
elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc[:60].lower():
xmltype = 'TPB'
elif 'hardcover' in xmldesc[:60].lower() and 'hardcover can be found' not in xmldesc.lower():
xmltype = 'HC'
elif any(['one-shot' in xmldesc[:60].lower(), 'one shot' in xmldesc[:60].lower()]) and any(['can be found' not in xmldesc.lower(), 'following the' not in xmldesc.lower()]):
i = 0
xmltype = 'One-Shot'
avoidwords = ['preceding', 'after the special', 'following the']
while i < 2:
if i == 0:
cbd = 'one-shot'
elif i == 1:
cbd = 'one shot'
tmp1 = xmldesc[:60].lower().find(cbd)
if tmp1 != -1:
for x in avoidwords:
tmp2 = xmldesc[:tmp1].lower().find(x)
if tmp2 != -1:
xmltype = 'Print'
i = 3
break
i+=1
else:
xmltype = 'Print'

View File

@ -212,7 +212,7 @@ class NZBGet(object):
logger.fdebug('NZBGET Destination dir set to: %s' % destdir)
else:
logger.warn('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq[0]['DestDir'])
return {'status': False}
return {'status': 'file not found', 'failed': False}
if mylar.CONFIG.NZBGET_DIRECTORY is not None:
destdir2 = mylar.CONFIG.NZBGET_DIRECTORY

View File

@ -381,7 +381,7 @@ class OPDS(object):
image = None
thumbnail = None
if not 'ReleaseComicID' in issue:
title = escape('%s - %s' % (issue['Issue_Number'], issue['IssueName']))
title = escape('%s (%s) #%s - %s' % (issue['ComicName'], comic['ComicYear'], issue['Issue_Number'], issue['IssueName']))
image = issue['ImageURL_ALT']
thumbnail = issue['ImageURL']
else:
@ -398,7 +398,7 @@ class OPDS(object):
metainfo = [{'writer': None,'summary': ''}]
entries.append(
{
'title': title,
'title': escape(title),
'id': escape('comic:%s (%s) [%s] - %s' % (issue['ComicName'], comic['ComicYear'], comic['ComicID'], issue['Issue_Number'])),
'updated': updated,
'content': escape('%s' % (metainfo[0]['summary'])),
@ -512,6 +512,7 @@ class OPDS(object):
elif 'filename' not in kwargs:
self.data = self._error_with_message('No filename provided')
else:
#logger.fdebug("file name: %s" % str(kwargs['file']))
self.filename = os.path.split(str(kwargs['file']))[1]
self.file = str(kwargs['file'])
return

View File

@ -24,6 +24,7 @@ from datetime import datetime, timedelta
import gzip
import time
import random
from bs4 import BeautifulSoup
from StringIO import StringIO
import mylar
@ -115,7 +116,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.')
lp=+1
continue
if mylar.CONFIG.MODE_32P == 0:
if mylar.CONFIG.MODE_32P is False:
logger.warn('[32P] Searching is not available in 32p Legacy mode. Switch to Auth mode to use the search functionality.')
lp=+1
continue
@ -384,6 +385,78 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
return torinfo
return
def ddl(forcerss=False):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
ddl_feed = 'https://getcomics.info/feed/'
try:
r = requests.get(ddl_feed, verify=True, headers=headers)
except Exception, e:
logger.warn('Error fetching RSS Feed Data from DDL: %s' % (e))
return False
else:
if r.status_code != 200:
#typically 403 will not return results, but just catch anything other than a 200
if r.status_code == 403:
logger.warn('ERROR - status code:%s' % r.status_code)
return False
else:
logger.warn('Status code returned: %s' % r.status_code)
return False
feedme = feedparser.parse(r.content)
results = []
for entry in feedme.entries:
soup = BeautifulSoup(entry.summary, 'html.parser')
orig_find = soup.find("p", {"style": "text-align: center;"})
i = 0
option_find = orig_find
while True: #i <= 10:
prev_option = option_find
option_find = option_find.findNext(text=True)
if 'Year' in option_find:
year = option_find.findNext(text=True)
year = re.sub('\|', '', year).strip()
else:
if 'Size' in prev_option:
size = option_find #.findNext(text=True)
if '- MB' in size: size = '0 MB'
possible_more = orig_find.next_sibling
break
i+=1
link = entry.link
title = entry.title
updated = entry.updated
if updated.endswith('+0000'):
updated = updated[:-5].strip()
tmpid = entry.id
id = tmpid[tmpid.find('=')+1:]
if 'KB' in size:
szform = 'KB'
sz = 'K'
elif 'GB' in size:
szform = 'GB'
sz = 'G'
elif 'MB' in size:
szform = 'MB'
sz = 'M'
elif 'TB' in size:
szform = 'TB'
sz = 'T'
tsize = helpers.human2bytes(re.sub('[^0-9]', '', size).strip() + sz)
#link can be referenced with the ?p=id url
results.append({'Title': title,
'Size': tsize,
'Link': id,
'Site': 'DDL',
'Pubdate': updated})
if len(results) > 0:
logger.info('[RSS][DDL] %s entries have been indexed and are now going to be stored for caching.' % len(results))
rssdbupdate(results, len(results), 'ddl')
return
def nzbs(provider=None, forcerss=False):
@ -569,6 +642,43 @@ def rssdbupdate(feeddata, i, type):
logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.CONFIG.RSS_CHECKINTERVAL) + ' minutes')
return
def ddl_dbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False):
myDB = db.DBConnection()
seriesname_alt = None
if any([comicid is None, comicid == 'None', oneoff is True]):
pass
else:
snm = myDB.selectone("SELECT * FROM comics WHERE comicid=?", [comicid]).fetchone()
if snm is None:
logger.fdebug('Invalid ComicID of %s. Aborting search' % comicid)
return "no results"
else:
seriesname = snm['ComicName']
seriesname_alt = snm['AlternateSearch']
dsearch_rem1 = re.sub("\\band\\b", "%", seriesname.lower())
dsearch_rem2 = re.sub("\\bthe\\b", "%", dsearch_rem1.lower())
dsearch_removed = re.sub('\s+', ' ', dsearch_rem2)
dsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s\,]', '%', dsearch_removed)
dsearch = '%' + dsearch_seriesname + '%'
dresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site='DDL'", [dsearch])
ddltheinfo = []
ddlinfo = {}
if not dresults:
return "no results"
else:
for dl in dresults:
ddltheinfo.append({
'title': dl['Title'],
'link': dl['Link'],
'pubdate': dl['Pubdate'],
'site': dl['Site'],
'length': dl['Size']
})
ddlinfo['entries'] = ddltheinfo
return ddlinfo
def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False):
myDB = db.DBConnection()
@ -886,7 +996,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
verify = False
logger.fdebug('[32P] Verify SSL set to : ' + str(verify))
if mylar.CONFIG.MODE_32P == 0:
if mylar.CONFIG.MODE_32P is False:
if mylar.KEYS_32P is None or mylar.CONFIG.PASSKEY_32P is None:
logger.warn('[32P] Unable to retrieve keys from provided RSS Feed. Make sure you have provided a CURRENT RSS Feed from 32P')
mylar.KEYS_32P = helpers.parse_32pfeed(mylar.FEED_32P)

View File

@ -57,7 +57,7 @@ class tehMain():
rsscheck.torrents(pickfeed='Public') #TPSE = DEM RSS Check + WWT RSS Check
if mylar.CONFIG.ENABLE_32P is True:
logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on 32P.')
if mylar.CONFIG.MODE_32P == 0:
if mylar.CONFIG.MODE_32P is False:
logger.fdebug('[RSS-FEEDS] 32P mode set to Legacy mode. Monitoring New Releases feed only.')
if any([mylar.CONFIG.PASSKEY_32P is None, mylar.CONFIG.PASSKEY_32P == '', mylar.CONFIG.RSSFEED_32P is None, mylar.CONFIG.RSSFEED_32P == '']):
logger.error('[RSS-FEEDS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.')
@ -91,6 +91,9 @@ class tehMain():
logger.info('[RSS-FEEDS] Initiating RSS Feed Check for NZB Providers.')
rsscheck.nzbs(forcerss=forcerss)
if mylar.CONFIG.ENABLE_DDL is True:
logger.info('[RSS-FEEDS] Initiating RSS Feed Check for DDL Provider.')
rsscheck.ddl(forcerss=forcerss)
logger.info('[RSS-FEEDS] RSS Feed Check/Update Complete')
logger.info('[RSS-FEEDS] Watchlist Check for new Releases')
mylar.search.searchforissue(rsscheck='yes')

View File

@ -136,7 +136,8 @@ class SABnzbd(object):
break
else:
logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage'])
break
return {'status': 'file not found', 'failed': False}
elif hq['nzo_id'] == sendresponse and hq['status'] == 'Failed':
#get the stage / error message and see what we can do
stage = hq['stage_log']
@ -160,6 +161,6 @@ class SABnzbd(object):
except Exception as e:
logger.warn('error %s' % e)
break
return {'status': False, 'failed': False}
return found

View File

@ -76,10 +76,10 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.info("Annual/Special issue search detected. Appending to issue #")
#anything for mode other than None indicates an annual.
if all(['annual' not in ComicName.lower(), 'special' not in ComicName.lower()]):
ComicName = ComicName + " Annual"
ComicName = '%s Annual' % ComicName
if all([AlternateSearch is not None, AlternateSearch != "None", 'special' not in ComicName.lower()]):
AlternateSearch = AlternateSearch + " Annual"
AlternateSearch = '%s Annual' % AlternateSearch
if mode == 'pullwant' or IssueID is None:
#one-off the download.
@ -112,7 +112,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
for torznab_host in mylar.CONFIG.EXTRA_TORZNABS:
if torznab_host[4] == '1' or torznab_host[4] == 1:
torznab_hosts.append(torznab_host)
torprovider.append('torznab:' + str(torznab_host[0]))
torprovider.append('torznab: %s' % torznab_host[0])
torznabs+=1
##nzb provider selection##
@ -140,7 +140,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
for newznab_host in mylar.CONFIG.EXTRA_NEWZNABS:
if newznab_host[5] == '1' or newznab_host[5] == 1:
newznab_hosts.append(newznab_host)
nzbprovider.append('newznab:' + str(newznab_host[0]))
nzbprovider.append('newznab: %s' % newznab_host[0])
newznabs+=1
ddls = 0
@ -150,7 +150,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
ddlprovider.append('DDL')
ddls+=1
logger.fdebug('nzbprovider(s): ' + str(nzbprovider))
logger.fdebug('nzbprovider(s): %s' % nzbprovider)
# --------
torproviders = torp + torznabs
logger.fdebug('There are %s torrent providers you have selected.' % torproviders)
@ -158,26 +158,26 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if torpr < 0:
torpr = -1
providercount = int(nzbp + newznabs)
logger.fdebug("there are : " + str(providercount) + " nzb providers you have selected.")
logger.fdebug('There are : %s nzb providers you have selected' % providercount)
if providercount > 0:
logger.fdebug("Usenet Retention : " + str(mylar.CONFIG.USENET_RETENTION) + " days")
logger.fdebug('Usenet Retention : %s days' % mylar.CONFIG.USENET_RETENTION)
if ddls > 0:
logger.fdebug("there are %s Direct Download providers that are currently enabled." % ddls)
logger.fdebug('there are %s Direct Download providers that are currently enabled.' % ddls)
findit = {}
findit['status'] = False
totalproviders = providercount + torproviders + ddls
if totalproviders == 0:
logger.error('[WARNING] You have ' + str(totalproviders) + ' search providers enabled. I need at least ONE provider to work. Aborting search.')
logger.error('[WARNING] You have %s search providers enabled. I need at least ONE provider to work. Aborting search.' % totalproviders)
findit['status'] = False
nzbprov = None
return findit, nzbprov
prov_order, torznab_info, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts, torznab_hosts, ddlprovider)
# end provider order sequencing
logger.fdebug('search provider order is ' + str(prov_order))
logger.fdebug('search provider order is %s' % prov_order)
#fix for issue dates between Nov-Dec/(Jan-Feb-Mar)
IssDt = str(IssueDate)[5:7]
@ -244,7 +244,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
c_number = str(c_number) + '.' + str(c_num_a4)
break
fcs+=1
logger.fdebug("calpha/cnumber: " + str(dsp_c_alpha) + " / " + str(c_number))
logger.fdebug('calpha/cnumber: %s / %s' % (dsp_c_alpha, c_number))
if c_number is None:
c_number = findcomiciss # if it's None, means no special alphas or decimals
@ -254,7 +254,6 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
c_number = c_number[:decst].rstrip()
while (srchloop <= searchcnt):
logger.fdebug('srchloop: %s' % srchloop)
#searchmodes:
# rss - will run through the built-cached db of entries
# api - will run through the providers via api (or non-api in the case of Experimental)
@ -281,7 +280,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.fdebug('Found result on first run, exiting search module now.')
break
logger.fdebug("Initiating Search via : " + str(searchmode))
logger.fdebug('Initiating Search via : %s' % searchmode)
while (cmloopit >= 1):
prov_count = 0
@ -334,9 +333,9 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
prov_count+=1
continue
if searchmode == 'rss':
if searchprov.lower() == 'ddl':
prov_count+=1
continue
#if searchprov.lower() == 'ddl':
# prov_count+=1
# continue
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate, booktype=booktype)
if findit['status'] is False:
if AlternateSearch is not None and AlternateSearch != "None":
@ -346,7 +345,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate))
logger.info('Alternate Search pattern detected...re-adjusting to : %s' % AS_Alternate)
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate, booktype=booktype)
if findit['status'] is True:
break
@ -368,7 +367,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate))
logger.info('Alternate Search pattern detected...re-adjusting to : %s' % AS_Alternate)
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate, booktype=booktype)
if findit['status'] is True:
break
@ -428,9 +427,9 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#if searchprov == '32P':
# pass
if manualsearch is None:
logger.info('Finished searching via :' + str(searchmode) + '. Issue not found - status kept as Wanted.')
logger.info('Finished searching via : %s. Issue not found - status kept as Wanted.' % searchmode)
else:
logger.fdebug('Could not find issue doing a manual search via : ' + str(searchmode))
logger.fdebug('Could not find issue doing a manual search via : %s' % searchmode)
if searchprov == '32P':
if mylar.CONFIG.MODE_32P == 0:
return findit, 'None'
@ -465,7 +464,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
category_torznab = torznab_host[3]
if any([category_torznab is None, category_torznab == 'None']):
category_torznab = '8020'
logger.fdebug("using Torznab host of : " + str(name_torznab))
logger.fdebug('Using Torznab host of : %s' % name_torznab)
elif nzbprov == 'newznab':
#updated to include Newznab Name now
name_newznab = newznab_host[0].rstrip()
@ -481,23 +480,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if '#' in newznab_host[4].rstrip():
catstart = newznab_host[4].find('#')
category_newznab = newznab_host[4][catstart +1:]
logger.fdebug('non-default Newznab category set to :' + str(category_newznab))
logger.fdebug('Non-default Newznab category set to : %s' % category_newznab)
else:
category_newznab = '7030'
logger.fdebug("using Newznab host of : " + str(name_newznab))
logger.fdebug('Using Newznab host of : %s' % name_newznab)
if RSS == "yes":
if 'newznab' in nzbprov:
tmpprov = name_newznab + '(' + nzbprov + ')' + ' [RSS]'
tmpprov = '%s (%s) [RSS]' % (name_newznab, nzbprov)
elif 'torznab' in nzbprov:
tmpprov = name_torznab + '(' + nzbprov + ')' + ' [RSS]'
tmpprov = '%s (%s) [RSS]' % (name_torznab, nzbprov)
else:
tmpprov = str(nzbprov) + " [RSS]"
tmpprov = '%s [RSS]' % nzbprov
else:
if 'newznab' in nzbprov:
tmpprov = name_newznab + ' (' + nzbprov + ')'
tmpprov = '%s (%s)' % (name_newznab, nzbprov)
elif 'torznab' in nzbprov:
tmpprov = name_torznab + ' (' + nzbprov + ')'
tmpprov = '%s (%s)' % (name_torznab, nzbprov)
else:
tmpprov = nzbprov
if cmloopit == 4:
@ -575,13 +574,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# results. '011' will return different than '11', as will '009' and '09'.
while (findloop < findcount):
logger.fdebug('findloop: ' + str(findloop) + ' / findcount: ' + str(findcount))
logger.fdebug('findloop: %s / findcount: %s' % (findloop, findcount))
comsrc = comsearch
if nzbprov == 'dognzb' and not mylar.CONFIG.DOGNZB:
foundc['status'] = False
done = True
break
if any([nzbprov == '32P', nzbprov == 'Public Torrents']):
if any([nzbprov == '32P', nzbprov == 'Public Torrents', nzbprov == 'ddl']):
#because 32p directly stores the exact issue, no need to worry about iterating over variations of the issue number.
findloop == 99
@ -619,19 +618,22 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#logger.fdebug('RSS Check: %s' % RSS)
#logger.fdebug('nzbprov: %s' % nzbprov)
#logger.fdebug('comicid: %s' % ComicID)
if nzbprov == 'ddl':
if nzbprov == 'ddl' and RSS == "no":
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug('Sending request to DDL site for : %s %s' % (findcomic, isssearch))
b = getcomics.GC(query='%s %s' % (findcomic, isssearch))
bb = b.search()
#logger.info('bb returned from DDL: %s' % bb)
elif RSS == "yes":
if nzbprov == '32P' or nzbprov == 'Public Torrents':
if nzbprov == 'ddl':
logger.fdebug('Sending request to [%s] RSS for %s : %s' % (nzbprov, ComicName, mod_isssearch))
bb = rsscheck.ddl_dbsearch(ComicName, mod_isssearch, ComicID, nzbprov, oneoff)
elif nzbprov == '32P' or nzbprov == 'Public Torrents':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [" + str(nzbprov) + "] RSS for " + ComicName + " : " + str(mod_isssearch))
logger.fdebug('Sending request to [%s] RSS for %s : %s' % (nzbprov, ComicName, mod_isssearch))
bb = rsscheck.torrentdbsearch(ComicName, mod_isssearch, ComicID, nzbprov, oneoff)
else:
logger.fdebug("Sending request to RSS for " + str(findcomic) + " : " + str(mod_isssearch) + " (" + str(ComicYear) + ")")
logger.fdebug('Sending request to RSS for %s : %s (%s)' % (findcomic, mod_isssearch, ComicYear))
if nzbprov == 'newznab':
nzbprov_fix = name_newznab
elif nzbprov == 'torznab':
@ -662,7 +664,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
bb = "no results"
elif nzbprov == 'Public Torrents':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [WWT-SEARCH] for " + str(cmname) + " : " + str(mod_isssearch))
logger.fdebug('Sending request to [WWT-SEARCH] for %s : %s' % (cmname, mod_isssearch))
ww = wwt.wwt(cmname, mod_isssearch)
bb = ww.wwt_connect()
#bb = rsscheck.torrents(pickfeed='TPSE-SEARCH', seriesname=cmname, issue=mod_isssearch)#cmname,issue=mod_isssearch)
@ -724,11 +726,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
hnc = host_newznab_fix
if any([hnc[:3] == '10.', hnc[:4] == '172.', hnc[:4] == '192.', hnc.startswith('localhost'), newznab_local is True]) and newznab_local != False:
logger.info('local domain bypass for ' + name_newznab + ' is active.')
logger.info('local domain bypass for %s is active.' % name_newznab)
localbypass = True
if localbypass == False:
logger.info("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering")
logger.info('Pausing for %s seconds before continuing to avoid hammering' % pause_the_search)
#time.sleep(pause_the_search)
# Add a user-agent
@ -746,7 +748,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
verify = False
#logger.fdebug('[SSL: ' + str(verify) + '] Search URL: ' + findurl)
logger.fdebug('[SSL: ' + str(verify) + '] Search URL: ' + str(logsearch))
logger.fdebug('[SSL: %s] Search URL: %s' % (verify, logsearch))
try:
r = requests.get(findurl, params=payload, verify=verify, headers=headers)
@ -768,7 +770,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
try:
if str(r.status_code) != '200':
logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
logger.warn('Unable to retrieve search results from %s [Status Code returned: %s]' % (tmpprov, r.status_code))
if str(r.status_code) == '503':
logger.warn('Unavailable indexer detected. Disabling for a short duration and will try again.')
helpers.disable_provider(tmpprov)
@ -785,10 +787,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
try:
if bb == 'no results':
logger.fdebug('No results for search query from %s' % tmprov)
logger.fdebug('No results for search query from %s' % tmpprov)
break
elif bb['feed']['error']:
logger.error('[ERROR CODE: ' + str(bb['feed']['error']['code']) + '] ' + str(bb['feed']['error']['description']))
logger.error('[ERROR CODE: %s] %s' % (bb['feed']['error']['code'], bb['feed']['error']['description']))
if bb['feed']['error']['code'] == '910':
logger.warn('DAILY API limit reached. Disabling provider usage until 12:01am')
mylar.CONFIG.DOGNZB = 0
@ -851,7 +853,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
ComicTitle = entry['title']
for subs in splitTitle:
logger.fdebug('sub:' + subs)
logger.fdebug('sub: %s' % subs)
regExCount = 0
try:
if len(subs) >= len(ComicName.split()) and not any(d in subs.lower() for d in except_list) and bool(_digits.search(subs)) is True:
@ -1088,7 +1090,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if '(digital first)' in ComicTitle.lower(): #entry['title'].lower():
dig_moving = re.sub('\(digital first\)', '', ComicTitle.lower()).strip() #entry['title'].lower()).strip()
dig_moving = re.sub('[\s+]', ' ', dig_moving)
dig_mov_end = dig_moving + ' (Digital First)'
dig_mov_end = '%s (Digital First)' % dig_moving
thisentry = dig_mov_end
else:
thisentry = ComicTitle #entry['title']
@ -1098,13 +1100,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if 'mixed format' in cleantitle.lower():
cleantitle = re.sub('mixed format', '', cleantitle).strip()
logger.fdebug('removed extra information after issue # that is not necessary: ' + str(cleantitle))
logger.fdebug('removed extra information after issue # that is not necessary: %s' % cleantitle)
# if it's coming from 32P, remove the ' -' at the end as it screws it up.
if nzbprov == '32P':
if cleantitle.endswith(' - '):
cleantitle = cleantitle[:-3]
logger.fdebug("cleaned up title to : " + str(cleantitle))
logger.fdebug('Cleaned up title to : %s' % cleantitle)
#send it to the parser here.
p_comic = filechecker.FileChecker(file=ComicTitle)
@ -1146,7 +1148,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
fndcomicversion = None
if parsed_comic['series_volume'] is not None:
version_found = "yes"
versionfound = "yes"
if len(parsed_comic['series_volume'][1:]) == 4 and parsed_comic['series_volume'][1:].isdigit(): #v2013
logger.fdebug("[Vxxxx] Version detected as %s" % (parsed_comic['series_volume']))
vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
@ -1187,37 +1189,37 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
result_comyear = parsed_comic['issue_year']
logger.fdebug('year looking for: %s' % comyear)
if str(comyear) in result_comyear:
logger.fdebug(str(comyear) + " - right years match baby!")
logger.fdebug('%s - right years match baby!' % comyear)
yearmatch = "true"
else:
logger.fdebug(str(comyear) + " - not right - years do not match")
logger.fdebug('%s - not right - years do not match' % comyear)
yearmatch = "false"
if UseFuzzy == "2":
#Fuzzy the year +1 and -1
ComUp = int(ComicYear) + 1
ComDwn = int(ComicYear) - 1
if str(ComUp) in result_comyear or str(ComDwn) in result_comyear:
logger.fdebug("Fuzzy Logic'd the Year and got a match with a year of " + str(result_comyear))
logger.fdebug('Fuzzy Logic\'d the Year and got a match with a year of %s' % result_comyear)
yearmatch = "true"
else:
logger.fdebug(str(comyear) + "Fuzzy logic'd the Year and year still didn't match.")
logger.fdebug('%s Fuzzy logic\'d the Year and year still did not match.' % comyear)
#let's do this here and save a few extra loops ;)
#fix for issue dates between Nov-Dec/Jan
if IssDateFix != "no" and UseFuzzy is not "2":
if IssDateFix == "01" or IssDateFix == "02" or IssDateFix == "03":
ComicYearFix = int(ComicYear) - 1
if str(ComicYearFix) in result_comyear:
logger.fdebug("further analysis reveals this was published inbetween Nov-Jan, decreasing year to " + str(ComicYearFix) + " has resulted in a match!")
logger.fdebug('Further analysis reveals this was published inbetween Nov-Jan, decreasing year to %s has resulted in a match!' % ComicYearFix)
yearmatch = "true"
else:
logger.fdebug(str(comyear) + " - not the right year.")
logger.fdebug('%s - not the right year.' % comyear)
else:
ComicYearFix = int(ComicYear) + 1
if str(ComicYearFix) in result_comyear:
logger.fdebug("further analysis reveals this was published inbetween Nov-Jan, incrementing year to " + str(ComicYearFix) + " has resulted in a match!")
logger.fdebug('Further analysis reveals this was published inbetween Nov-Jan, incrementing year to %s has resulted in a match!' % ComicYearFix)
yearmatch = "true"
else:
logger.fdebug(str(comyear) + " - not the right year.")
logger.fdebug('%s - not the right year.' % comyear)
elif UseFuzzy == "1": yearmatch = "true"
if yearmatch == "false": continue
@ -1288,7 +1290,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
nzbprov = 'DEM'
if all([nzbprov == '32P', allow_packs == True, RSS == 'no']):
logger.fdebug('pack:' + entry['pack'])
logger.fdebug('pack: %s' % entry['pack'])
if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', pack_test is True])): #allow_packs is True
if nzbprov == '32P':
if entry['pack'] == '2':
@ -1389,7 +1391,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
nowrite = False
if all([nzbprov == 'torznab', 'worldwidetorrents' in entry['link']]):
nzbid = generate_id(nzbprov, entry['id'])
elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]) or all([nzbprov == 'ddl', RSS == 'yes']):
if RSS == "yes":
entry['id'] = entry['link']
entry['link'] = 'https://getcomics.info/?p='+str(entry['id'])
entry['filename'] = entry['title']
if '/cat/' in entry['link']:
entry['link'] = 'https://getcomics.info/?p='+str(entry['id'])
nzbid = entry['id']
entry['title'] = entry['filename']
else:
@ -1404,7 +1412,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#modify the name for annualization to be displayed properly
if annualize == True:
modcomicname = ComicName + ' Annual'
modcomicname = '%s Annual' % ComicName
else:
modcomicname = ComicName
@ -1467,7 +1475,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#generate nzbname
nzbname = nzbname_create(nzbprov, info=mylar.COMICINFO, title=ComicTitle) #entry['title'])
if nzbname is None:
logger.error('[NZBPROVIDER = NONE] Encountered an error using given provider with requested information: ' + mylar.COMICINFO + '. You have a blank entry most likely in your newznabs, fix it & restart Mylar')
logger.error('[NZBPROVIDER = NONE] Encountered an error using given provider with requested information: %s. You have a blank entry most likely in your newznabs, fix it & restart Mylar' % mylar.COMICINFO)
continue
#generate the send-to and actually send the nzb / torrent.
#logger.info('entry: %s' % entry)
@ -1521,7 +1529,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
issinfo = mylar.COMICINFO['pack_issuelist']
if issinfo is not None:
#we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack.
logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname))
logger.fdebug('Found matching comic within pack...preparing to send to Updater with IssueIDs: %s and nzbname of %s' % (issueid_info, nzbname))
#because packs need to have every issue that's not already Downloaded in a Snatched status, throw it to the updater here as well.
for isid in issinfo['issues']:
updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
@ -1532,11 +1540,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
logger.fdebug('Found matching comic...preparing to send to Updater with IssueID: %s and nzbname: %s' % (IssueID, nzbname))
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
else:
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + '[' + alt_nzbname + ']')
logger.fdebug('Found matching comic...preparing to send to Updater with IssueID: %s and nzbname: %s [%s]' % (IssueID, nzbname, alt_nzbname))
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff)
#send out the notifications for the snatch.
@ -1582,10 +1590,10 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
if not issueid or rsscheck:
if rsscheck:
logger.info(u"Initiating RSS Search Scan at the scheduled interval of " + str(mylar.CONFIG.RSS_CHECKINTERVAL) + " minutes.")
logger.info('Initiating RSS Search Scan at the scheduled interval of %s minutes' % mylar.CONFIG.RSS_CHECKINTERVAL)
mylar.SEARCHLOCK = True
else:
logger.info(u"Initiating check to add Wanted items to Search Queue....")
logger.info('Initiating check to add Wanted items to Search Queue....')
myDB = db.DBConnection()
@ -1676,18 +1684,18 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
if all([comic is None, result['mode'] == 'story_arc']):
comic = myDB.selectone("SELECT * from storyarcs WHERE StoryArcID=? AND IssueArcID=?", [result['StoryArcID'],result['IssueArcID']]).fetchone()
if comic is None:
logger.fdebug(str(result['ComicID']) + ' has no associated comic information in the Arc. Skipping searching for this series.')
logger.fdebug('%s has no associated comic information in the Arc. Skipping searching for this series.' % result['ComicID'])
continue
else:
OneOff = True
elif comic is None:
logger.fdebug(str(result['ComicID']) + ' has no associated comic information in the Arc. Skipping searching for this series.')
logger.fdebug('%s has no associated comic information in the Arc. Skipping searching for this series.' % result['ComicID'])
continue
else:
storyarc_watchlist = True
if result['StoreDate'] == '0000-00-00' or result['StoreDate'] is None:
if any([result['IssueDate'] is None, result['IssueDate'] == '0000-00-00']) and result['DigitalDate'] == '0000-00-00':
logger.fdebug('ComicID: ' + str(result['ComicID']) + ' has invalid Date data. Skipping searching for this series.')
logger.fdebug('ComicID: %s has invalid Date data. Skipping searching for this series.' % result['ComicID'])
continue
foundNZB = "none"
@ -1757,6 +1765,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
mylar.SEARCHLOCK = False
else:
mylar.SEARCHLOCK = True
result = myDB.selectone('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
mode = 'want'
oneoff = False
@ -1874,7 +1883,7 @@ def searchIssueIDList(issuelist):
issue = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issueid]).fetchone()
mode = 'want_ann'
if issue is None:
logger.warn('unable to determine IssueID - perhaps you need to delete/refresh series? Skipping this entry: ' + issueid)
logger.warn('Unable to determine IssueID - perhaps you need to delete/refresh series? Skipping this entry: %s' % issueid)
continue
if any([issue['Status'] == 'Downloaded', issue['Status'] == 'Snatched']):
@ -1992,9 +2001,9 @@ def nzbname_create(provider, title=None, info=None):
str_IssueNumber = 'infinity'
else:
str_IssueNumber = IssueNumber
nzbname = str(re.sub(" ", ".", str(Bl_ComicName))) + "." + str(str_IssueNumber) + ".(" + str(comyear) + ")"
nzbname = '%s.%s.(%s)' % (re.sub(" ", ".", str(Bl_ComicName)), str_IssueNumber, comyear)
logger.fdebug("nzb name to be used for post-processing is : " + str(nzbname))
logger.fdebug('nzb name to be used for post-processing is : %s' % nzbname)
elif any([provider == '32P', provider == 'WWT', provider == 'DEM', provider == 'ddl']):
#filesafe the name cause people are idiots when they post sometimes.
@ -2009,29 +2018,29 @@ def nzbname_create(provider, title=None, info=None):
else:
# let's change all space to decimals for simplicity
logger.fdebug('[SEARCHER] entry[title]: ' + title)
logger.fdebug('[SEARCHER] entry[title]: %s' % title)
#gotta replace & or escape it
nzbname = re.sub('\&amp;(amp;)?|\&', 'and', title)
nzbname = re.sub('[\,\:\?\'\+]', '', nzbname)
nzbname = re.sub('[\(\)]', ' ', nzbname)
logger.fdebug('[SEARCHER] nzbname (remove chars): ' + nzbname)
logger.fdebug('[SEARCHER] nzbname (remove chars): %s' % nzbname)
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[SEARCHER] nzbname (\s): ' + nzbname)
logger.fdebug('[SEARCHER] nzbname (\s): %s' % nzbname)
nzbname = re.sub(' ', '.', nzbname)
#remove the [1/9] parts or whatever kinda crap (usually in experimental results)
pattern = re.compile(r'\W\d{1,3}\/\d{1,3}\W')
match = pattern.search(nzbname)
if match:
nzbname = re.sub(match.group(), '', nzbname).strip()
logger.fdebug('[SEARCHER] end nzbname: ' + nzbname)
logger.fdebug('[SEARCHER] end nzbname: %s' % nzbname)
if nzbname is None:
return None
else:
logger.fdebug("nzbname used for post-processing:" + nzbname)
logger.fdebug('nzbname used for post-processing: %s' % nzbname)
return nzbname
def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, directsend=None, newznab=None, torznab=None, rss=None):
@ -2098,16 +2107,16 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
else:
if any([oneoff is True, IssueID is None]):
#one-off information
logger.fdebug("ComicName: " + ComicName)
logger.fdebug("Issue: " + str(IssueNumber))
logger.fdebug("Year: " + str(comyear))
logger.fdebug("IssueDate: " + comicinfo[0]['IssueDate'])
logger.fdebug('ComicName: %s' % ComicName)
logger.fdebug('Issue: %s' % IssueNumber)
logger.fdebug('Year: %s' % comyear)
logger.fdebug('IssueDate: %s' % comicinfo[0]['IssueDate'])
if IssueNumber is None:
logger.info('Found %s (%s) using %s' % (ComicName, comyear, tmpprov))
else:
logger.info('Found %s (%s) #%s using %s' % (ComicName, comyear, IssueNumber, tmpprov))
logger.fdebug("link given by: " + str(nzbprov))
logger.fdebug('link given by: %s' % nzbprov)
if mylar.CONFIG.FAILED_DOWNLOAD_HANDLING:
logger.info('nzbid: %s' % nzbid)
@ -2127,7 +2136,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
call_the_fail = Failed.FailedProcessor(nzb_name=nzbname, id=nzbid, issueid=IssueID, comicid=ComicID, prov=tmpprov)
check_the_fail = call_the_fail.failed_check()
if check_the_fail == 'Failed':
logger.fdebug('[FAILED_DOWNLOAD_CHECKER] [' + str(tmpprov) + '] Marked as a bad download : ' + str(nzbid))
logger.fdebug('[FAILED_DOWNLOAD_CHECKER] [%s] Marked as a bad download : %s' % (tmpprov, nzbid))
return "downloadchk-fail"
elif check_the_fail == 'Good':
logger.fdebug('[FAILED_DOWNLOAD_CHECKER] This is not in the failed downloads list. Will continue with the download.')
@ -2210,11 +2219,11 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
tmp_url_en = len(tmp_url)
tmp_line += tmp_url[tmp_url_en:]
#tmp_url = helpers.apiremove(down_url.copy(), '&')
logger.fdebug('[PAYLOAD-NONE]Download URL: ' + str(tmp_line) + ' [VerifySSL:' + str(verify) + ']')
logger.fdebug('[PAYLOAD-NONE] Download URL: %s [VerifySSL: %s]' % (tmp_line, verify))
else:
tmppay = payload.copy()
tmppay['apikey'] = 'YOUDONTNEEDTOKNOWTHIS'
logger.fdebug('[PAYLOAD] Download URL: ' + down_url + '?' + urllib.urlencode(tmppay) + ' [VerifySSL:' + str(verify) + ']')
logger.fdebug('[PAYLOAD] Download URL: %s?%s [VerifySSL: %s]' % (down_url, urllib.urlencode(tmppay), verify))
if down_url.startswith('https') and verify == False:
try:
@ -2258,10 +2267,10 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if filen is None:
if payload is None:
logger.error('[PAYLOAD:NONE] Unable to download nzb from link: ' + str(down_url) + ' [' + link + ']')
logger.error('[PAYLOAD:NONE] Unable to download nzb from link: %s [%s]' % (down_url, link))
else:
errorlink = down_url + '?' + urllib.urlencode(payload)
logger.error('[PAYLOAD:PRESENT] Unable to download nzb from link: ' + str(errorlink) + ' [' + link + ']')
logger.error('[PAYLOAD:PRESENT] Unable to download nzb from link: %s [%s]' % (errorlink, link))
return "sab-fail"
else:
#convert to a generic type of format to help with post-processing.
@ -2269,33 +2278,33 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
filen = re.sub('[\,\:\?\']', '', filen)
filen = re.sub('[\(\)]', ' ', filen)
filen = re.sub('[\s\s+]', '', filen) #make sure we remove the extra spaces.
logger.fdebug('[FILENAME] filename (remove chars): ' + filen)
logger.fdebug('[FILENAME] filename (remove chars): %s' % filen)
filen = re.sub('.cbr', '', filen).strip()
filen = re.sub('.cbz', '', filen).strip()
logger.fdebug('[FILENAME] nzbname (\s): ' + filen)
logger.fdebug('[FILENAME] nzbname (\s): %s' % filen)
#filen = re.sub('\s', '.', filen)
logger.fdebug('[FILENAME] end nzbname: ' + filen)
logger.fdebug('[FILENAME] end nzbname: %s' % filen)
if re.sub('.nzb', '', filen.lower()).strip() != re.sub('.nzb', '', nzbname.lower()).strip():
alt_nzbname = re.sub('.nzb', '', filen).strip()
alt_nzbname = re.sub('[\s+]', ' ', alt_nzbname)
alt_nzbname = re.sub('[\s\_]', '.', alt_nzbname)
logger.info('filen: ' + filen + ' -- nzbname: ' + nzbname + ' are not identical. Storing extra value as : ' + alt_nzbname)
logger.info('filen: %s -- nzbname: %s are not identical. Storing extra value as : %s' % (filen, nzbname, alt_nzbname))
#make sure the cache directory exists - if not, create it (used for storing nzbs).
if os.path.exists(mylar.CONFIG.CACHE_DIR):
if mylar.CONFIG.ENFORCE_PERMS:
logger.fdebug("Cache Directory successfully found at : " + mylar.CONFIG.CACHE_DIR + ". Ensuring proper permissions.")
logger.fdebug('Cache Directory successfully found at : %s. Ensuring proper permissions.' % mylar.CONFIG.CACHE_DIR)
#enforce the permissions here to ensure the lower portion writes successfully
filechecker.setperms(mylar.CONFIG.CACHE_DIR, True)
else:
logger.fdebug("Cache Directory successfully found at : " + mylar.CONFIG.CACHE_DIR)
logger.fdebug('Cache Directory successfully found at : %s' % mylar.CONFIG.CACHE_DIR)
else:
#let's make the dir.
logger.fdebug("Could not locate Cache Directory, attempting to create at : " + mylar.CONFIG.CACHE_DIR)
logger.fdebug('Could not locate Cache Directory, attempting to create at : %s' % mylar.CONFIG.CACHE_DIR)
try:
filechecker.validateAndCreateDirectory(mylar.CONFIG.CACHE_DIR, True)
logger.info("Temporary NZB Download Directory successfully created at: " + mylar.CONFIG.CACHE_DIR)
logger.info('Temporary NZB Download Directory successfully created at: %s' % mylar.CONFIG.CACHE_DIR)
except OSError:
raise
@ -2317,17 +2326,16 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
ggc = getcomics.GC(issueid=IssueID, comicid=ComicID)
sendsite = ggc.loadsite(nzbid, link)
ddl_it = ggc.parse_downloadresults(nzbid, link)
logger.info("ddl status response: %s" % ddl_it)
if ddl_it['success'] is True:
logger.info('Successfully snatched %s from DDL site. It is currently being queued to download in position %s' % (nzbname, mylar.DDL_QUEUE.qsize()))
else:
logger.info('Failed to retrieve %s from the DDL site.' %s (nzbname))
logger.info('Failed to retrieve %s from the DDL site.' % nzbname)
return "ddl-fail"
sent_to = "is downloading it directly via DDL"
elif mylar.USE_BLACKHOLE and all([nzbprov != '32P', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != 'torznab']):
logger.fdebug("using blackhole directory at : " + str(mylar.CONFIG.BLACKHOLE_DIR))
logger.fdebug('Using blackhole directory at : %s' % mylar.CONFIG.BLACKHOLE_DIR)
if os.path.exists(mylar.CONFIG.BLACKHOLE_DIR):
#copy the nzb from nzbpath to blackhole dir.
try:
@ -2335,8 +2343,8 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
except (OSError, IOError):
logger.warn('Failed to move nzb into blackhole directory - check blackhole directory and/or permissions.')
return "blackhole-fail"
logger.fdebug("filename saved to your blackhole as : " + nzbname)
logger.info(u"Successfully sent .nzb to your Blackhole directory : " + os.path.join(mylar.CONFIG.BLACKHOLE_DIR, nzbname))
logger.fdebug('Filename saved to your blackhole as : %s' % nzbname)
logger.info('Successfully sent .nzb to your Blackhole directory : %s' % os.path.join(mylar.CONFIG.BLACKHOLE_DIR, nzbname))
sent_to = "has sent it to your Blackhole Directory"
if mylar.CONFIG.ENABLE_SNATCH_SCRIPT:
@ -2375,16 +2383,16 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
#torrents (32P & DEM)
elif any([nzbprov == '32P', nzbprov == 'WWT', nzbprov == 'DEM', nzbprov == 'torznab']):
logger.fdebug("ComicName:" + ComicName)
logger.fdebug("link:" + link)
logger.fdebug("Torrent Provider:" + nzbprov)
logger.fdebug('ComicName: %s' % ComicName)
logger.fdebug('link: %s' % link)
logger.fdebug('Torrent Provider: %s' % nzbprov)
rcheck = rsscheck.torsend2client(ComicName, IssueNumber, comyear, link, nzbprov, nzbid) #nzbid = hash for usage with public torrents
if rcheck == "fail":
if mylar.CONFIG.FAILED_DOWNLOAD_HANDLING:
logger.error('Unable to send torrent to client. Assuming incomplete link - sending to Failed Handler and continuing search.')
if any([oneoff is True, IssueID is None]):
logger.fdebug('One-off mode was initiated - Failed Download handling for : ' + ComicName + ' #' + str(IssueNumber))
logger.fdebug('One-off mode was initiated - Failed Download handling for : %s #%s' % (ComicName, IssueNumber))
comicinfo = {"ComicName": ComicName,
"IssueNumber": IssueNumber}
else:
@ -2532,7 +2540,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
mylar.LOCAL_IP = s.getsockname()[0]
s.close()
except:
logger.warn('Unable to determine local IP. Defaulting to host address for Mylar provided as : ' + str(mylar.CONFIG.HTTP_HOST))
logger.warn('Unable to determine local IP. Defaulting to host address for Mylar provided as : %s' % mylar.CONFIG.HTTP_HOST)
if mylar.CONFIG.HOST_RETURN:
#mylar has the return value already provided (easier and will work if it's right)
@ -2716,7 +2724,7 @@ def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack):
if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL()
prowl.notify(snatched_name, "Download started using " + sent_to)
prowl.notify(snatched_name, 'Download started using %s' % sent_to)
if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH:
logger.info(u"Sending NMA notification")
nma = notifiers.NMA()
@ -2765,16 +2773,16 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
issuetitle = re.sub('[\-\:\,\?\.]', ' ', str(issuetitle))
issuetitle_words = issuetitle.split(None)
#issue title comparison here:
logger.fdebug('there are ' + str(len(issuetitle_words)) + ' words in the issue title of : ' + str(issuetitle))
logger.fdebug('there are %s words in the issue title of : %s' % (len(issuetitle_words), issuetitle))
# we minus 1 the splitst since the issue # is included in there.
if (splitst - 1) > len(watchcomic_split):
logger.fdebug('splitit:' + str(splitit))
logger.fdebug('splitst:' + str(splitst))
logger.fdebug('len-watchcomic:' + str(len(watchcomic_split)))
possibleissue_num = splitit[len(watchcomic_split)] #[splitst]
logger.fdebug('possible issue number of : ' + str(possibleissue_num))
logger.fdebug('possible issue number of : %s' % possibleissue_num)
extra_words = splitst - len(watchcomic_split)
logger.fdebug('there are ' + str(extra_words) + ' left over after we remove the series title.')
logger.fdebug('there are %s words left over after we remove the series title.' % extra_words)
wordcount = 1
#remove the series title here so we just have the 'hopefully' issue title
for word in splitit:
@ -2801,20 +2809,20 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
if len(chkspot) == (len(decit[0]) + len(decit[1]) + 1):
logger.fdebug('lengths match for possible decimal issue.')
if '.' in chkspot:
logger.fdebug('decimal located within : ' + str(chkspot))
logger.fdebug('decimal located within : %s' % chkspot)
possibleissue_num = chkspot
splitst = splitst -1 #remove the second numeric as it's a decimal and would add an extra char to
logger.fdebug('search_issue_title is : ' + str(search_issue_title))
logger.fdebug('possible issue number of : ' + str(possibleissue_num))
logger.fdebug('search_issue_title is : %s' % search_issue_title)
logger.fdebug('possible issue number of : %s' % possibleissue_num)
if hyphensplit is not None and 'of' not in search_issue_title:
logger.fdebug('hypen split detected.')
try:
issue_start = search_issue_title.find(issue_firstword)
logger.fdebug('located first word of : ' + str(issue_firstword) + ' at position : ' + str(issue_start))
logger.fdebug('located first word of : %s at position : %s' % (issue_firstword, issue_start))
search_issue_title = search_issue_title[issue_start:]
logger.fdebug('corrected search_issue_title is now : ' + str(search_issue_title))
logger.fdebug('corrected search_issue_title is now : %s' % search_issue_title)
except TypeError:
logger.fdebug('invalid parsing detection. Ignoring this result.')
return vals.append({"splitit": splitit,
@ -2832,46 +2840,46 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
if wsplit.lower() == 'part' or wsplit.lower() == 'of':
if wsplit.lower() == 'of':
of_chk = True
logger.fdebug('not worrying about this word : ' + str(wsplit))
logger.fdebug('not worrying about this word : %s' % wsplit)
misword +=1
continue
if wsplit.isdigit() and of_chk == True:
logger.fdebug('of ' + str(wsplit) + ' detected. Ignoring for matching.')
logger.fdebug('of %s detected. Ignoring for matching.' % wsplit)
of_chk = False
continue
for sit in sit_split:
logger.fdebug('looking at : ' + str(sit.lower()) + ' -TO- ' + str(wsplit.lower()))
logger.fdebug('looking at : %s -TO- %s' % (sit.lower(), wsplit.lower()))
if sit.lower() == 'part':
logger.fdebug('not worrying about this word : ' + str(sit))
logger.fdebug('not worrying about this word : %s' % sit)
misword +=1
isstitle_removal.append(sit)
break
elif sit.lower() == wsplit.lower():
logger.fdebug('word match: ' + str(sit))
logger.fdebug('word match: %s' % sit)
isstitle_match +=1
isstitle_removal.append(sit)
break
else:
try:
if int(sit) == int(wsplit):
logger.fdebug('found matching numeric: ' + str(wsplit))
logger.fdebug('found matching numeric: %s' % wsplit)
isstitle_match +=1
isstitle_removal.append(sit)
break
except:
pass
logger.fdebug('isstitle_match count : ' + str(isstitle_match))
logger.fdebug('isstitle_match count : %s' % isstitle_match)
if isstitle_match > 0:
iss_calc = ((isstitle_match + misword) / watch_split_count) * 100
logger.fdebug('iss_calc: ' + str(iss_calc) + ' % with ' + str(misword) + ' unaccounted for words')
logger.fdebug('iss_calc: %s %% with %s unaccounted for words' % (iss_calc, misword))
else:
iss_calc = 0
logger.fdebug('0 words matched on issue title.')
if iss_calc >= 80: #mylar.ISSUE_TITLEMATCH - user-defined percentage to match against for issue name comparisons.
logger.fdebug('>80% match on issue name. If this were implemented, this would be considered a match.')
logger.fdebug('we should remove ' + str(len(isstitle_removal)) + ' words : ' + str(isstitle_removal))
logger.fdebug('we should remove %s words : %s' % (len(isstitle_removal), isstitle_removal))
logger.fdebug('Removing issue title from nzb filename to improve matching algorithims.')
splitst = splitst - len(isstitle_removal)
isstitle_chk = True
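
In other words, the title score above is the share of issue-title words that were matched (or deliberately ignored, e.g. 'part'/'of') out of the candidate's word count, scaled to a percentage and compared against the hard-coded 80 threshold. A hedged restatement, assuming watch_split_count is that word count:

def issue_title_score(isstitle_match, misword, watch_split_count):
    # percentage of issue-title words accounted for; the explicit float() matters
    # under Python 2, where an all-integer division would otherwise truncate
    return (float(isstitle_match + misword) / watch_split_count) * 100

# issue_title_score(4, 1, 5)  ->  100.0, which clears the 80% cut-off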

View File

@ -41,7 +41,6 @@ class RTorrent(object):
mylar.CONFIG.RTORRENT_PASSWORD,
mylar.CONFIG.RTORRENT_AUTHENTICATION,
mylar.CONFIG.RTORRENT_VERIFY,
mylar.CONFIG.RTORRENT_SSL,
mylar.CONFIG.RTORRENT_RPC_URL,
mylar.CONFIG.RTORRENT_CA_BUNDLE):
logger.error('[ERROR] Could not connect to %s - exiting' % mylar.CONFIG.RTORRENT_HOST)

View File

@ -23,23 +23,37 @@ class TorrentClient(object):
# Use default ssl verification
return True
def connect(self, host, username, password, auth, verify, ssl, rpc_url, ca_bundle):
def connect(self, host, username, password, auth, verify, rpc_url, ca_bundle, test=False):
if self.conn is not None:
return self.conn
if not host:
return False
return {'status': False, 'error': 'No host specified'}
url = helpers.cleanHost(host, protocol = True, ssl = ssl)
url = host
if host.startswith('https:'):
ssl = True
else:
if not host.startswith('http://'):
url = 'http://' + url
ssl = False
#add on the slash ..
if not url.endswith('/'):
url += '/'
#url = helpers.cleanHost(host, protocol = True, ssl = ssl)
# Automatically add '+https' to 'httprpc' protocol if SSL is enabled
if ssl is True and url.startswith('httprpc://'):
url = url.replace('httprpc://', 'httprpc+https://')
#if ssl is True and url.startswith('httprpc://'):
# url = url.replace('httprpc://', 'httprpc+https://')
#if ssl is False and not url.startswith('http://'):
# url = 'http://' + url
parsed = urlparse(url)
#parsed = urlparse(url)
# rpc_url is only used on http/https scgi pass-through
if parsed.scheme in ['http', 'https']:
if rpc_url is not None:
url += rpc_url
#logger.fdebug(url)
@ -52,8 +66,8 @@ class TorrentClient(object):
verify_ssl=self.getVerifySsl(verify, ca_bundle)
)
except Exception as err:
logger.error('Failed to connect to rTorrent: %s', err)
return False
logger.error('Make sure you have the right protocol specified for the rtorrent host. Failed to connect to rTorrent - error: %s.' % err)
return {'status': False, 'error': err}
else:
logger.fdebug('NO username %s / NO password %s' % (username, password))
try:
@ -63,10 +77,13 @@ class TorrentClient(object):
verify_ssl=self.getVerifySsl(verify, ca_bundle)
)
except Exception as err:
logger.error('Failed to connect to rTorrent: %s', err)
return False
logger.error('Failed to connect to rTorrent: %s' % err)
return {'status': False, 'error': err}
return self.conn
if test is True:
return {'status': True, 'version': self.conn.get_client_version()}
else:
return self.conn
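
The scheme handling introduced above boils down to: honour an explicit https:// prefix, default anything without a scheme to http://, and guarantee a trailing slash before any rpc_url is appended. A standalone sketch of that normalization for http/https hosts (helper name is illustrative, not part of the client):

def normalize_rtorrent_host(host, rpc_url=None):
    # https:// implies SSL; a bare host falls back to plain http://
    ssl = host.startswith('https:')
    url = host
    if not ssl and not host.startswith('http://'):
        url = 'http://' + url
    # ensure a trailing slash so an optional rpc_url can be tacked on cleanly
    if not url.endswith('/'):
        url += '/'
    if rpc_url is not None:
        url += rpc_url
    return url, ssl

# normalize_rtorrent_host('my.rtorrent')                      -> ('http://my.rtorrent/', False)
# normalize_rtorrent_host('https://192.168.1.1', 'rutorrent') -> ('https://192.168.1.1/rutorrent', True)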
def find_torrent(self, hash):
return self.conn.find_torrent(hash)

View File

@ -516,7 +516,7 @@ class WebInterface(object):
except Exception, e:
logger.warn('Unable to download image from CV URL link - possibly no arc picture is present: %s' % imageurl)
else:
logger.fdebug('comic image retrieval status code: ' + str(r.status_code))
logger.fdebug('comic image retrieval status code: %s' % r.status_code)
if str(r.status_code) != '200':
logger.warn('Unable to download image from CV URL link: %s [Status Code returned: %s]' % (imageurl, r.status_code))
@ -532,8 +532,8 @@ class WebInterface(object):
f.flush()
arc_results = mylar.cv.getComic(comicid=None, type='issue', arcid=arcid, arclist=arclist)
logger.fdebug(module + ' Arcresults: ' + str(arc_results))
logger.fdebug('arclist: ' + str(arclist))
logger.fdebug('%s Arcresults: %s' % (module, arc_results))
logger.fdebug('%s Arclist: %s' % (module, arclist))
if len(arc_results) > 0:
import random
@ -610,7 +610,7 @@ class WebInterface(object):
readingorder = int(re.sub('[\,\|]','', rosre).strip())
else:
readingorder = 0
logger.fdebug('[' + str(readingorder) + '] issueid:' + str(issid) + ' - findorder#:' + str(findorder))
logger.fdebug('[%s] issueid: %s - findorder#: %s' % (readingorder, issid, findorder))
issuedata.append({"ComicID": comicid,
"IssueID": issid,
@ -628,7 +628,7 @@ class WebInterface(object):
"Manual": manual_mod})
n+=1
comicid_results = mylar.cv.getComic(comicid=None, type='comicyears', comicidlist=cidlist)
logger.fdebug(module + ' Initiating issue updating - just the info')
logger.fdebug('%s Initiating issue updating - just the info' % module)
for AD in issuedata:
seriesYear = 'None'
@ -1335,9 +1335,9 @@ class WebInterface(object):
threading.Thread(target=self.queueissue, kwargs=kwargs).start()
queueit.exposed = True
def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None, manualsearch=None, Publisher=None, pullinfo=None, pullweek=None, pullyear=None, manual=False, ComicVersion=None):
logger.fdebug('ComicID:' + str(ComicID))
logger.fdebug('mode:' + str(mode))
def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None, manualsearch=None, Publisher=None, pullinfo=None, pullweek=None, pullyear=None, manual=False, ComicVersion=None, BookType=None):
logger.fdebug('ComicID: %s' % ComicID)
logger.fdebug('mode: %s' % mode)
now = datetime.datetime.now()
myDB = db.DBConnection()
#mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
@ -1359,8 +1359,8 @@ class WebInterface(object):
SARC = True
IssueArcID = None
else:
logger.info(u"Story Arc : " + str(SARC) + " queueing selected issue...")
logger.info(u"IssueArcID : " + str(IssueArcID))
logger.info('Story Arc : %s queueing selected issue...' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
#try to load the issue dates - can now sideload issue details.
dateload = myDB.selectone('SELECT * FROM storyarcs WHERE IssueArcID=?', [IssueArcID]).fetchone()
if dateload is None:
@ -1373,25 +1373,24 @@ class WebInterface(object):
ReleaseDate = dateload['ReleaseDate']
Publisher = dateload['IssuePublisher']
SeriesYear = dateload['SeriesYear']
BookType = dateload['Type']
if ComicYear is None: ComicYear = SeriesYear
if dateload['Volume'] is None:
logger.info('Marking ' + ComicName + ' #' + ComicIssue + ' as wanted...')
logger.info('Marking %s #%s as wanted...' % (ComicName, ComicIssue))
else:
logger.info('Marking ' + ComicName + ' (' + dateload['Volume'] + ') #' + ComicIssue + ' as wanted...')
logger.fdebug('publisher: ' + Publisher)
logger.info('Marking %s (%s) #%s as wanted...' % (ComicName, dateload['Volume'], ComicIssue))
logger.fdebug('publisher: %s' % Publisher)
controlValueDict = {"IssueArcID": IssueArcID}
newStatus = {"Status": "Wanted"}
myDB.upsert("storyarcs", newStatus, controlValueDict)
logger.info('[STORY-ARCS] Now Queuing %s (%s) #%s for search' % (ComicName, ComicYear, ComicIssue))
s = mylar.SEARCH_QUEUE.put({'issueid': IssueArcID, 'comicname': ComicName, 'seriesyear': ComicYear, 'comicid': ComicID, 'issuenumber': ComicIssue})
#foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=Publisher, IssueDate=IssueDate, StoreDate=ReleaseDate, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=dateload['Volume'], SARC=SARC, IssueArcID=IssueArcID)
#if foundcom['status'] is True:
# logger.info(u"Downloaded " + ComicName + " #" + ComicIssue + " (" + str(ComicYear) + ")")
# controlValueDict = {"IssueArcID": IssueArcID}
# newStatus = {"Status": "Snatched"}
#myDB.upsert("storyarcs", newStatus, controlValueDict)
return # foundcom
moduletype = '[STORY-ARCS]'
passinfo = {'issueid': IssueArcID,
'comicname': ComicName,
'seriesyear': SeriesYear,
'comicid': ComicID,
'issuenumber': ComicIssue,
'booktype': BookType}
elif mode == 'pullwant': #and ComicID is None
#this is for marking individual comics from the pullist to be downloaded.
@ -1400,19 +1399,17 @@ class WebInterface(object):
#better to set both to some generic #, and then filter out later...
IssueDate = pullinfo
try:
ComicYear = IssueDate[:4]
SeriesYear = IssueDate[:4]
except:
ComicYear == now.year
SeriesYear == now.year
if Publisher == 'COMICS': Publisher = None
logger.info('Now Queuing %s %s for search' % (ComicName, ComicIssue))
s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': ComicYear, 'comicid': ComicID, 'issuenumber': ComicIssue})
#foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=Publisher, IssueDate=IssueDate, StoreDate=IssueDate, IssueID=IssueID, ComicID=ComicID, AlternateSearch=None, mode=mode, UseFuzzy=None, ComicVersion=ComicVersion, allow_packs=False, manual=manual)
if manual is True:
return foundcom
#if foundcom['status'] is True:
#logger.info('[ONE-OFF MODE] Successfully Downloaded ' + ComicName + ' ' + ComicIssue)
#return updater.foundsearch(ComicID, IssueID, mode=mode, provider=prov, hash=foundcom['info']['t_hash'], pullinfo={'weeknumber': pullweek, 'year': pullyear})
return
moduletype = '[PULL-LIST]'
passinfo = {'issueid': IssueID,
'comicname': ComicName,
'seriesyear': SeriesYear,
'comicid': ComicID,
'issuenumber': ComicIssue,
'booktype': BookType}
elif mode == 'want' or mode == 'want_ann' or manualsearch:
cdname = myDB.selectone("SELECT * from comics where ComicID=?", [ComicID]).fetchone()
@ -1430,9 +1427,9 @@ class WebInterface(object):
newStatus = {"Status": "Wanted"}
if mode == 'want':
if manualsearch:
logger.info('Initiating manual search for ' + ComicName + ' issue: ' + ComicIssue)
logger.info('Initiating manual search for %s issue: %s' % (ComicName, ComicIssue))
else:
logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...")
logger.info('Marking %s issue: %s as wanted...' % (ComicName, ComicIssue))
myDB.upsert("issues", newStatus, controlValueDict)
else:
annual_name = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? and IssueID=?", [ComicID, IssueID]).fetchone()
@ -1442,55 +1439,49 @@ class WebInterface(object):
ComicName = annual_name['ReleaseComicName']
if manualsearch:
logger.info('Initiating manual search for ' + ComicName + ' : ' + ComicIssue)
logger.info('Initiating manual search for %s : %s' % (ComicName, ComicIssue))
else:
logger.info(u"Marking " + ComicName + " : " + ComicIssue + " as wanted...")
logger.info('Marking %s : %s as wanted...' % (ComicName, ComicIssue))
myDB.upsert("annuals", newStatus, controlValueDict)
#---
#this should be on it's own somewhere
#if IssueID is not None:
# controlValueDict = {"IssueID": IssueID}
# newStatus = {"Status": "Wanted"}
# myDB.upsert("issues", newStatus, controlValueDict)
#for future reference, the year should default to current year (.datetime)
if mode == 'want':
issues = myDB.selectone("SELECT IssueDate, ReleaseDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
elif mode == 'want_ann':
issues = myDB.selectone("SELECT IssueDate, ReleaseDate FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
if ComicYear == None:
ComicYear = str(issues['IssueDate'])[:4]
if issues['ReleaseDate'] is None or issues['ReleaseDate'] == '0000-00-00':
logger.info('No Store Date found for given issue. This is probably due to not Refreshing the Series beforehand.')
logger.info('I Will assume IssueDate as Store Date, but you should probably Refresh the Series and try again if required.')
storedate = issues['IssueDate']
else:
storedate = issues['ReleaseDate']
#miy = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
#SeriesYear = miy['ComicYear']
#AlternateSearch = miy['AlternateSearch']
#Publisher = miy['ComicPublisher']
#UseAFuzzy = miy['UseFuzzy']
#ComicVersion = miy['ComicVersion']
moduletype = '[WANTED-SEARCH]'
passinfo = {'issueid': IssueID,
'comicname': ComicName,
'seriesyear': SeriesYear,
'comicid': ComicID,
'issuenumber': ComicIssue,
'booktype': BookType}
if mode == 'want':
issues = myDB.selectone("SELECT IssueDate, ReleaseDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
elif mode == 'want_ann':
issues = myDB.selectone("SELECT IssueDate, ReleaseDate FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
if ComicYear == None:
ComicYear = str(issues['IssueDate'])[:4]
if issues['ReleaseDate'] is None or issues['ReleaseDate'] == '0000-00-00':
logger.info('No Store Date found for given issue. This is probably due to not Refreshing the Series beforehand.')
logger.info('I Will assume IssueDate as Store Date, but you should probably Refresh the Series and try again if required.')
storedate = issues['IssueDate']
else:
storedate = issues['ReleaseDate']
if BookType == 'TPB':
logger.info('[%s] Now Queueing %s (%s) for search' % (BookType, ComicName, SeriesYear))
logger.info('%s[%s] Now Queueing %s (%s) for search' % (moduletype, BookType, ComicName, SeriesYear))
elif ComicIssue is None:
logger.info('Now Queueing %s (%s) for search' % (ComicName, SeriesYear))
logger.info('%s Now Queueing %s (%s) for search' % (moduletype, ComicName, SeriesYear))
else:
logger.info('Now Queueing %s (%s) #%s for search' % (ComicName, SeriesYear, ComicIssue))
s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': SeriesYear, 'comicid': ComicID, 'issuenumber': ComicIssue, 'booktype': BookType})
# foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID, manualsearch=manualsearch, filesafe=ComicName_Filesafe, allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
# if foundcom['status'] is True:
# # file check to see if issue exists and update 'have' count
# if IssueID is not None:
# logger.info("passing to updater.")
# return updater.foundsearch(ComicID, IssueID, mode=mode, provider=prov, hash=foundcom['info']['t_hash'])
logger.info('%s Now Queueing %s (%s) #%s for search' % (moduletype, ComicName, SeriesYear, ComicIssue))
#s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': SeriesYear, 'comicid': ComicID, 'issuenumber': ComicIssue, 'booktype': BookType})
s = mylar.SEARCH_QUEUE.put(passinfo)
if manualsearch:
# if it's a manual search, return to null here so the thread will die and not cause http redirect errors.
return
if ComicID:
return cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
else:
raise cherrypy.HTTPRedirect(redirect)
return
#raise cherrypy.HTTPRedirect(redirect)
queueissue.exposed = True
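
Throughout this changeset the direct search.search_init() calls give way to a single queue submission; every branch builds the same dict before handing it to mylar.SEARCH_QUEUE.put(). A sketch of that payload contract, with keys taken from the diff and assuming it runs inside Mylar where mylar.SEARCH_QUEUE exists:

import mylar

def queue_search(issueid, comicname, seriesyear, comicid, issuenumber, booktype):
    # same key set each branch in this diff builds before calling SEARCH_QUEUE.put()
    passinfo = {'issueid': issueid,        # IssueArcID when queueing a story-arc one-off
                'comicname': comicname,
                'seriesyear': seriesyear,
                'comicid': comicid,
                'issuenumber': issuenumber,
                'booktype': booktype}
    mylar.SEARCH_QUEUE.put(passinfo)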
def unqueueissue(self, IssueID, ComicID, ComicName=None, Issue=None, FutureID=None, mode=None, ReleaseComicID=None):
@ -1704,7 +1695,7 @@ class WebInterface(object):
try:
x = float(weekly['ISSUE'])
except ValueError, e:
if 'au' in weekly['ISSUE'].lower() or 'ai' in weekly['ISSUE'].lower() or '.inh' in weekly['ISSUE'].lower() or '.now' in weekly['ISSUE'].lower() or '.mu' in weekly['ISSUE'].lower():
if 'au' in weekly['ISSUE'].lower() or 'ai' in weekly['ISSUE'].lower() or '.inh' in weekly['ISSUE'].lower() or '.now' in weekly['ISSUE'].lower() or '.mu' in weekly['ISSUE'].lower() or '.hu' in weekly['ISSUE'].lower():
x = weekly['ISSUE']
if x is not None:
@ -1847,7 +1838,7 @@ class WebInterface(object):
try:
x = float(future['ISSUE'])
except ValueError, e:
if 'au' in future['ISSUE'].lower() or 'ai' in future['ISSUE'].lower() or '.inh' in future['ISSUE'].lower() or '.now' in future['ISSUE'].lower() or '.mu' in future['ISSUE'].lower():
if 'au' in future['ISSUE'].lower() or 'ai' in future['ISSUE'].lower() or '.inh' in future['ISSUE'].lower() or '.now' in future['ISSUE'].lower() or '.mu' in future['ISSUE'].lower() or '.hu' in future['ISSUE'].lower():
x = future['ISSUE']
if future['EXTRA'] == 'N/A' or future['EXTRA'] == '':
@ -2198,6 +2189,25 @@ class WebInterface(object):
annualDelete.exposed = True
def ddl_requeue(self, id, mode):
myDB = db.DBConnection()
item = myDB.selectone("SELECT * FROM DDL_INFO WHERE ID=?", [id]).fetchone()
if item is not None:
if mode == 'resume':
if item['status'] != 'Completed':
filesize = os.stat(os.path.join(mylar.CONFIG.DDL_LOCATION, item['filename'])).st_size
mylar.DDL_QUEUE.put({'link': item['link'],
'mainlink': item['mainlink'],
'series': item['series'],
'year': item['year'],
'size': item['size'],
'comicid': item['comicid'],
'issueid': item['issueid'],
'id': item['id'],
'resume': filesize})
ddl_requeue.exposed = True
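
The resume path above uses the size of the partially-downloaded file on disk as the byte offset to restart from. A hedged sketch of deriving that offset, with a guard for the case where the partial file has since disappeared (the guard is an addition for illustration, not in the diff):

import os

def ddl_resume_offset(ddl_location, filename):
    # bytes already on disk == offset to resume the download from
    path = os.path.join(ddl_location, filename)
    if not os.path.isfile(path):
        return 0  # nothing on disk yet; start over from the beginning
    return os.stat(path).st_size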
def queueManage(self): # **args):
myDB = db.DBConnection()
activelist = 'There are currently no items currently downloading via Direct Download (DDL).'
@ -2211,22 +2221,28 @@ class WebInterface(object):
'id': active['id']}
resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.'
s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID WHERE c.status != 'Downloading'")
s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id, c.updated_date FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID WHERE c.status != 'Downloading'")
if s_info:
resultlist = []
for si in s_info:
issue = si['Issue_Number']
if issue is not None:
issue = '#%s' % issue
resultlist.append({'series': si['ComicName'],
'issue': issue,
'id': si['id'],
'volume': si['ComicVersion'],
'year': si['ComicYear'],
'size': si['size'].strip(),
'comicid': si['ComicID'],
'issueid': si['IssueID'],
'status': si['status']})
issue = '#%s' % issue
if si['status'] == 'Completed':
si_status = '100%'
else:
si_status = ''
resultlist.append({'series': si['ComicName'],
'issue': issue,
'id': si['id'],
'volume': si['ComicVersion'],
'year': si['ComicYear'],
'size': si['size'].strip(),
'comicid': si['ComicID'],
'issueid': si['IssueID'],
'status': si['status'],
'updated_date': si['updated_date'],
'progress': si_status})
logger.info('resultlist: %s' % resultlist)
return serve_template(templatename="queue_management.html", title="Queue Management", activelist=activelist, resultlist=resultlist)
@ -2474,10 +2490,12 @@ class WebInterface(object):
annuals = []
for iss in issues:
results.append(iss)
resultlist.append(str(iss['IssueID']))
if status == 'Snatched':
resultlist.append(str(iss['IssueID']))
for ann in annuals:
results.append(ann)
resultlist.append(str(iss['IssueID']))
if status == 'Snatched':
resultlist.append(str(ann['IssueID']))
endresults = []
if status == 'Snatched':
for genlist in helpers.chunker(resultlist, 200):
@ -2764,6 +2782,12 @@ class WebInterface(object):
return serve_template(templatename="readinglist.html", title="Reading Lists", issuelist=readlist, counts=counts)
readlist.exposed = True
def clear_arcstatus(self, issuearcid=None):
myDB = db.DBConnection()
myDB.upsert('storyarcs', {'Status': 'Skipped'}, {'IssueArcID': issuearcid})
logger.info('Status set to Skipped.')
clear_arcstatus.exposed = True
def storyarc_main(self, arcid=None):
myDB = db.DBConnection()
arclist = []
@ -2843,7 +2867,7 @@ class WebInterface(object):
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
spanyears = str(lowyear) + ' - ' + str(maxyear)
spanyears = '%s - %s' % (lowyear, maxyear)
sdir = helpers.arcformat(arcinfo[0]['StoryArc'], spanyears, arcpub)
@ -3311,18 +3335,18 @@ class WebInterface(object):
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
spanyears = str(lowyear) + ' - ' + str(maxyear)
spanyears = '%s - %s' % (lowyear, maxyear)
logger.info('arcpub: ' + arcpub)
logger.info('arcpub: %s' % arcpub)
dstloc = helpers.arcformat(arcdir, spanyears, arcpub)
filelist = None
if dstloc is not None:
if not os.path.isdir(dstloc):
if mylar.CONFIG.STORYARCDIR:
logger.info('Story Arc Directory [' + dstloc + '] does not exist! - attempting to create now.')
logger.info('Story Arc Directory [%s] does not exist! - attempting to create now.' % dstloc)
else:
logger.info('Story Arc Grab-Bag Directory [' + dstloc + '] does not exist! - attempting to create now.')
logger.info('Story Arc Grab-Bag Directory [%s] does not exist! - attempting to create now.' % dstloc)
checkdirectory = filechecker.validateAndCreateDirectory(dstloc, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
@ -3346,7 +3370,7 @@ class WebInterface(object):
fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True)
filechk = fchk.listFiles()
fccnt = filechk['comiccount']
logger.fdebug('[STORY ARC DIRECTORY] ' + str(fccnt) + ' files exist within this directory.')
logger.fdebug('[STORY ARC DIRECTORY] %s files exist within this directory.' % fccnt)
if fccnt > 0:
filelist = filechk['comiclist']
logger.info(filechk)
@ -3357,11 +3381,14 @@ class WebInterface(object):
sarc_title = None
showonreadlist = 1 # 0 won't show storyarcissues on storyarcs main page, 1 will show
for arc in ArcWatch:
newStatus = 'Skipped'
if arc['Manual'] == 'deleted':
continue
sarc_title = arc['StoryArc']
logger.fdebug('[' + arc['StoryArc'] + '] ' + arc['ComicName'] + ' : ' + arc['IssueNumber'])
logger.fdebug('[%s] %s : %s' % (arc['StoryArc'], arc['ComicName'], arc['IssueNumber']))
matcheroso = "no"
#fc = filechecker.FileChecker(watchcomic=arc['ComicName'])
@ -3379,29 +3406,45 @@ class WebInterface(object):
# if it's a multi-volume series, it's decimalized - let's get rid of the decimal.
GCDissue, whocares = helpers.decimal_issue(arc['IssueNumber'])
GCDissue = int(GCDissue) / 1000
if '.' not in str(GCDissue): GCDissue = str(GCDissue) + ".00"
logger.fdebug("issue converted to " + str(GCDissue))
if '.' not in str(GCDissue):
GCDissue = '%s.00' % GCDissue
logger.fdebug("issue converted to %s" % GCDissue)
isschk = myDB.selectone("SELECT * FROM issues WHERE Issue_Number=? AND ComicID=?", [str(GCDissue), comic['ComicID']]).fetchone()
else:
issue_int = helpers.issuedigits(arc['IssueNumber'])
logger.fdebug('int_issue = ' + str(issue_int))
logger.fdebug('int_issue = %s' % issue_int)
isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=?", [issue_int, comic['ComicID']]).fetchone() #AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone()
if isschk is None:
logger.fdebug("we matched on name, but issue " + arc['IssueNumber'] + " doesn't exist for " + comic['ComicName'])
logger.fdebug('We matched on name, but issue %s doesn\'t exist for %s' % (arc['IssueNumber'], comic['ComicName']))
else:
#this gets ugly - if the name matches and the issue, it could still be wrong series
#use series year to break it down further.
logger.fdebug('COMIC-comicyear: ' + str(int(comic['ComicYear'])))
logger.fdebug('ARC-seriesyear: ' + str(int(arc['SeriesYear'])))
if int(comic['ComicYear']) != int(arc['SeriesYear']):
logger.fdebug("Series years are different - discarding match. " + str(comic['ComicYear']) + " != " + str(arc['SeriesYear']))
logger.fdebug('COMIC-comicyear: %s' % comic['ComicYear'])
logger.fdebug('B4-ARC-seriesyear: %s' % arc['SeriesYear'])
if any([arc['SeriesYear'] is None, arc['SeriesYear'] == 'None']):
vy = '2099-00-00'
for x in isschk:
if any([x['IssueDate'] is None, x['IssueDate'] == '0000-00-00']):
sy = x['StoreDate']
if any([sy is None, sy == '0000-00-00']):
continue
else:
sy = x['IssueDate']
if sy < vy:
v_seriesyear = sy
seriesyear = v_seriesyear
logger.info('No Series year set. Discovered & set to %s' % seriesyear)
else:
logger.fdebug("issue #: %s is present!" % arc['IssueNumber'])
logger.fdebug('isschk: %s' % isschk)
logger.fdebug("Comicname: " + arc['ComicName'])
logger.fdebug("ComicID: " + str(isschk['ComicID']))
logger.fdebug("Issue: %s" % arc['IssueNumber'])
logger.fdebug("IssueArcID: " + str(arc['IssueArcID']))
seriesyear = arc['SeriesYear']
logger.fdebug('ARC-seriesyear: %s' % seriesyear)
if int(comic['ComicYear']) != int(seriesyear):
logger.fdebug('Series years are different - discarding match. %s != %s' % (comic['ComicYear'], seriesyear))
else:
logger.fdebug('issue #: %s is present!' % arc['IssueNumber'])
logger.fdebug('Comicname: %s' % arc['ComicName'])
logger.fdebug('ComicID: %s' % isschk['ComicID'])
logger.fdebug('Issue: %s' % arc['IssueNumber'])
logger.fdebug('IssueArcID: %s' % arc['IssueArcID'])
#gather the matches now.
arc_match.append({
"match_storyarc": arc['StoryArc'],
@ -3416,17 +3459,17 @@ class WebInterface(object):
matcheroso = "yes"
break
if matcheroso == "no":
logger.fdebug("[NO WATCHLIST MATCH] Unable to find a match for " + arc['ComicName'] + " :#" + arc['IssueNumber'])
logger.fdebug('[NO WATCHLIST MATCH] Unable to find a match for %s :#%s' % (arc['ComicName'], arc['IssueNumber']))
wantedlist.append({
"ComicName": arc['ComicName'],
"IssueNumber": arc['IssueNumber'],
"IssueYear": arc['IssueYear']})
if filelist is not None and mylar.CONFIG.STORYARCDIR:
logger.fdebug("[NO WATCHLIST MATCH] Checking against lcoal Arc directory for given issue.")
logger.fdebug('[NO WATCHLIST MATCH] Checking against local Arc directory for given issue.')
fn = 0
valids = [x for x in filelist if re.sub('[\|\s]','', x['dynamic_name'].lower()).strip() == re.sub('[\|\s]','', arc['DynamicComicName'].lower()).strip()]
logger.info('valids: ' + str(valids))
logger.fdebug('valids: %s' % valids)
if len(valids) > 0:
for tmpfc in valids: #filelist:
haveissue = "no"
@ -3435,19 +3478,19 @@ class WebInterface(object):
fcdigit = helpers.issuedigits(arc['IssueNumber'])
int_iss = helpers.issuedigits(temploc)
if int_iss == fcdigit:
logger.fdebug(arc['ComicName'] + ' Issue #' + arc['IssueNumber'] + ' already present in StoryArc directory.')
logger.fdebug('%s Issue #%s already present in StoryArc directory' % (arc['ComicName'], arc['IssueNumber']))
#update storyarcs db to reflect status.
rr_rename = False
if mylar.CONFIG.READ2FILENAME:
readorder = helpers.renamefile_readingorder(arc['ReadingOrder'])
if all([tmpfc['reading_order'] is not None, int(readorder) != int(tmpfc['reading_order']['reading_sequence'])]):
logger.warn('reading order sequence has changed for this issue from ' + str(tmpfc['reading_order']['reading_sequence']) + ' to ' + str(readorder))
logger.warn('reading order sequence has changed for this issue from %s to %s' % (tmpfc['reading_order']['reading_sequence'], readorder))
rr_rename = True
dfilename = str(readorder) + '-' + tmpfc['reading_order']['filename']
dfilename = '%s-%s' % (readorder, tmpfc['reading_order']['filename'])
elif tmpfc['reading_order'] is None:
dfilename = str(readorder) + '-' + tmpfc['comicfilename']
dfilename = '%s-%s' % (readorder, tmpfc['comicfilename'])
else:
dfilename = str(readorder) + '-' + tmpfc['reading_order']['filename']
dfilename = '%s-%s' % (readorder, tmpfc['reading_order']['filename'])
else:
dfilename = tmpfc['comicfilename']
@ -3457,21 +3500,30 @@ class WebInterface(object):
loc_path = os.path.join(tmpfc['comiclocation'], dfilename)
if rr_rename:
logger.fdebug('Now re-sequencing file to : ' + dfilename)
logger.fdebug('Now re-sequencing file to : %s' % dfilename)
os.rename(os.path.join(tmpfc['comiclocation'],tmpfc['comicfilename']), loc_path)
newVal = {"Status": "Downloaded",
newStatus = 'Downloaded'
newVal = {"Status": newStatus,
"Location": loc_path} #dfilename}
ctrlVal = {"IssueArcID": arc['IssueArcID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
break
else:
newStatus = 'Skipped'
fn+=1
if newStatus == 'Skipped':
#this will set all None Status' to Skipped (at least initially)
newVal = {"Status": "Skipped"}
ctrlVal = {"IssueArcID": arc['IssueArcID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
continue
newVal = {"Status": "Skipped"}
ctrlVal = {"IssueArcID": arc['IssueArcID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
logger.fdebug(str(len(arc_match)) + " issues currently exist on your watchlist that are within this arc. Analyzing...")
logger.fdebug('%s issues currently exist on your watchlist that are within this arc. Analyzing...' % len(arc_match))
for m_arc in arc_match:
#now we cycle through the issues looking for a match.
#issue = myDB.selectone("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
@ -3479,11 +3531,9 @@ class WebInterface(object):
if issue is None: pass
else:
logger.fdebug("issue: " + issue['Issue_Number'] + "..." + m_arc['match_issue'])
# if helpers.decimal_issue(issuechk['Issue_Number']) == helpers.decimal_issue(m_arc['match_issue']):
logger.fdebug('issue: %s ... %s' % (issue['Issue_Number'], m_arc['match_issue']))
if issue['Issue_Number'] == m_arc['match_issue']:
logger.fdebug("we matched on " + issue['Issue_Number'] + " for " + m_arc['match_name'])
logger.fdebug('We matched on %s for %s' % (issue['Issue_Number'], m_arc['match_name']))
if issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived' or issue['Status'] == 'Snatched':
if showonreadlist:
showctrlVal = {"IssueID": issue['IssueID']}
@ -3494,7 +3544,7 @@ class WebInterface(object):
"ComicID": m_arc['match_id']}
myDB.upsert("readlist", shownewVal, showctrlVal)
logger.fdebug("Already have " + issue['ComicName'] + " :# " + issue['Issue_Number'])
logger.fdebug('Already have %s : #%s' % (issue['ComicName'], issue['Issue_Number']))
if issue['Location'] is not None:
issloc = os.path.join(m_arc['match_filedirectory'], issue['Location'])
else:
@ -3512,10 +3562,10 @@ class WebInterface(object):
continue
except:
pass
logger.fdebug('source location set to : ' + issloc)
logger.fdebug('source location set to : %s' % issloc)
if all([mylar.CONFIG.STORYARCDIR, mylar.CONFIG.COPY2ARCDIR]):
logger.fdebug('Destination location set to : ' + m_arc['destination_location'])
logger.fdebug('Destination location set to : %s' % m_arc['destination_location'])
logger.fdebug('Attempting to copy into StoryArc directory')
#copy into StoryArc directory...
@ -3525,13 +3575,13 @@ class WebInterface(object):
if mylar.CONFIG.READ2FILENAME:
readorder = helpers.renamefile_readingorder(m_arc['match_readingorder'])
if all([m_arc['match_readingorder'] is not None, int(readorder) != int(m_arc['match_readingorder'])]):
logger.warn('reading order sequence has changed for this issue from ' + str(m_arc['match_reading_order']) + ' to ' + str(readorder))
logger.warn('Reading order sequence has changed for this issue from %s to %s' % (m_arc['match_reading_order'], readorder))
rr_rename = True
dfilename = str(readorder) + '-' + issue['Location']
dfilename = '%s-%s' % (readorder, issue['Location'])
elif m_arc['match_readingorder'] is None:
dfilename = str(readorder) + '-' + issue['Location']
dfilename = '%s-%s' % (readorder, issue['Location'])
else:
dfilename = str(readorder) + '-' + issue['Location']
dfilename = '%s-%s' % (readorder, issue['Location'])
else:
dfilename = issue['Location']
@ -3542,21 +3592,21 @@ class WebInterface(object):
dstloc = os.path.join(m_arc['destination_location'], dfilename)
if rr_rename:
logger.fdebug('Now re-sequencing COPIED file to : ' + dfilename)
logger.fdebug('Now re-sequencing COPIED file to : %s' % dfilename)
os.rename(issloc, dstloc)
if not os.path.isfile(dstloc):
logger.fdebug('Copying ' + issloc + ' to ' + dstloc)
logger.fdebug('Copying %s to %s' % (issloc, dstloc))
try:
fileoperation = helpers.file_ops(issloc, dstloc, arc=True)
if not fileoperation:
raise OSError
except (OSError, IOError):
logger.error('Failed to ' + mylar.CONFIG.FILE_OPTS + ' ' + issloc + ' - check directories and manually re-run.')
logger.error('Failed to %s %s - check directories and manually re-run.' % (mylar.CONFIG.FILE_OPTS, issloc))
continue
else:
logger.fdebug('Destination file exists: ' + dstloc)
logger.fdebug('Destination file exists: %s' % dstloc)
location_path = dstloc
else:
location_path = issloc
@ -3569,12 +3619,12 @@ class WebInterface(object):
myDB.upsert("storyarcs",newVal,ctrlVal)
else:
logger.fdebug("We don't have " + issue['ComicName'] + " :# " + issue['Issue_Number'])
logger.fdebug('We don\'t have %s : #%s' % (issue['ComicName'], issue['Issue_Number']))
ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
newVal = {"Status": issue['Status'], #"Wanted",
"IssueID": issue['IssueID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as " + issue['Status'])
logger.info('Marked %s :#%s as %s' % (issue['ComicName'], issue['Issue_Number'], issue['Status']))
arcstats = self.storyarc_main(StoryArcID)
logger.info('[STORY-ARCS] Completed Missing/Recheck Files for %s [%s / %s]' % (arcname, arcstats['Have'], arcstats['TotalIssues']))
@ -3588,7 +3638,6 @@ class WebInterface(object):
def ReadGetWanted(self, StoryArcID):
# this will queue up (ie. make 'Wanted') issues in a given Story Arc that are 'Not Watched'
print StoryArcID
stupdate = []
mode = 'story_arc'
myDB = db.DBConnection()
@ -3596,25 +3645,25 @@ class WebInterface(object):
if wantedlist is not None:
for want in wantedlist:
print want
issuechk = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [want['IssueArcID']]).fetchone()
issuechk = myDB.selectone("SELECT a.Type, a.ComicYear, b.ComicName, b.Issue_Number, b.ComicID, b.IssueID FROM comics as a INNER JOIN issues as b on a.ComicID = b.ComicID WHERE b.IssueID=?", [want['IssueArcID']]).fetchone()
SARC = want['StoryArc']
IssueArcID = want['IssueArcID']
Publisher = want['Publisher']
if issuechk is None:
# none means it's not a 'watched' series
s_comicid = want['ComicID'] #None
s_issueid = want['IssueID'] #None
s_issueid = want['IssueArcID'] #None
BookType = want['Type']
stdate = want['ReleaseDate']
issdate = want['IssueDate']
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug(want['ComicName'] + " -- #" + str(want['IssueNumber']))
logger.fdebug(u"Story Arc : " + str(SARC) + " queueing the selected issue...")
logger.fdebug(u"IssueArcID : " + str(IssueArcID))
logger.fdebug(u"ComicID: " + str(s_comicid) + " --- IssueID: " + str(s_issueid)) # no comicid in issues table.
logger.fdebug(u"ReleaseDate: " + str(stdate) + " --- IssueDate: " + str(issdate))
#logger.info(u'Publisher: ' + want['Publisher']) <-- no publisher in issues table.
logger.fdebug('%s -- #%s' % (want['ComicName'], want['IssueNumber']))
logger.fdebug('Story Arc %s : queueing the selected issue...' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
logger.fdebug('ComicID: %s --- IssueID: %s' % (s_comicid, s_issueid)) # no comicid in issues table.
logger.fdebug('ReleaseDate: %s --- IssueDate: %s' % (stdate, issdate))
issueyear = want['IssueYEAR']
logger.fdebug('IssueYear: ' + str(issueyear))
logger.fdebug('IssueYear: %s' % issueyear)
if issueyear is None or issueyear == 'None':
try:
logger.fdebug('issdate:' + str(issdate))
@ -3624,31 +3673,44 @@ class WebInterface(object):
except:
issueyear = stdate[:4]
logger.fdebug('ComicYear: ' + str(want['SeriesYear']))
foundcom, prov = search.search_init(ComicName=want['ComicName'], IssueNumber=want['IssueNumber'], ComicYear=issueyear, SeriesYear=want['SeriesYear'], Publisher=Publisher, IssueDate=issdate, StoreDate=stdate, IssueID=s_issueid, SARC=SARC, IssueArcID=IssueArcID, oneoff=True)
logger.fdebug('ComicYear: %s' % want['SeriesYear'])
passinfo = {'issueid': s_issueid,
'comicname': want['ComicName'],
'seriesyear': want['SeriesYear'],
'comicid': s_comicid,
'issuenumber': want['IssueNumber'],
'booktype': BookType}
#oneoff = True ?
else:
# it's a watched series
s_comicid = issuechk['ComicID']
s_issueid = issuechk['IssueID']
logger.fdebug("-- watched series queue.")
logger.fdebug(issuechk['ComicName'] + " -- #" + str(issuechk['Issue_Number']))
foundcom, prov = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], Publisher=Publisher, IssueDate=None, StoreDate=issuechk['ReleaseDate'], IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
logger.fdebug('%s --- #%s' % (issuechk['ComicName'], issuechk['Issue_Number']))
passinfo = {'issueid': s_issueid,
'comicname': issuechk['ComicName'],
'seriesyear': issuechk['SeriesYear'],
'comicid': s_comicid,
'issuenumber': issuechk['Issue_Number'],
'booktype': issuechk['Type']}
if foundcom['status'] is True:
logger.fdebug('sucessfully found.')
#update the status - this is necessary for torrents as they are in 'snatched' status.
updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
else:
logger.fdebug('not sucessfully found.')
stupdate.append({"Status": "Wanted",
"IssueArcID": IssueArcID,
"IssueID": s_issueid})
mylar.SEARCH_QUEUE.put(passinfo)
#if foundcom['status'] is True:
# logger.fdebug('sucessfully found.')
# #update the status - this is necessary for torrents as they are in 'snatched' status.
# updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
#else:
# logger.fdebug('not sucessfully found.')
# stupdate.append({"Status": "Wanted",
# "IssueArcID": IssueArcID,
# "IssueID": s_issueid})
watchlistchk = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND Status='Wanted'", [StoryArcID])
if watchlistchk is not None:
for watchchk in watchlistchk:
logger.fdebug('Watchlist hit - ' + str(watchchk['ComicName']))
issuechk = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [watchchk['IssueArcID']]).fetchone()
logger.fdebug('Watchlist hit - %s' % watchchk['ComicName'])
issuechk = myDB.selectone("SELECT a.Type, a.ComicYear, b.ComicName, b.Issue_Number, b.ComicID, b.IssueID FROM comics as a INNER JOIN issues as b on a.ComicID = b.ComicID WHERE b.IssueID=?", [watchchk['IssueArcID']]).fetchone()
SARC = watchchk['StoryArc']
IssueArcID = watchchk['IssueArcID']
if issuechk is None:
@ -3659,17 +3721,17 @@ class WebInterface(object):
s_comicid = None
try:
s_issueid = watchchk['IssueID']
s_issueid = watchchk['IssueArcID']
except:
s_issueid = None
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug(watchchk['ComicName'] + " -- #" + str(watchchk['IssueNumber']))
logger.fdebug(u"Story Arc : " + str(SARC) + " queueing up the selected issue...")
logger.fdebug(u"IssueArcID : " + str(IssueArcID))
logger.fdebug('%s -- #%s' % (watchchk['ComicName'], watchchk['IssueNumber']))
logger.fdebug('Story Arc : %s queueing up the selected issue...' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
try:
issueyear = watchchk['IssueYEAR']
logger.fdebug('issueYEAR : ' + issueyear)
logger.fdebug('issueYEAR : %s' % issueyear)
except:
try:
issueyear = watchchk['IssueDate'][:4]
@ -3678,39 +3740,55 @@ class WebInterface(object):
stdate = watchchk['ReleaseDate']
issdate = watchchk['IssueDate']
logger.fdebug('issueyear : ' + str(issueyear))
logger.fdebug('comicname : ' + watchchk['ComicName'])
logger.fdebug('issuenumber : ' + watchchk['IssueNumber'])
logger.fdebug('comicyear : ' + watchchk['SeriesYear'])
logger.fdebug('issueyear : %s' % issueyear)
logger.fdebug('comicname : %s' % watchchk['ComicName'])
logger.fdebug('issuenumber : %s' % watchchk['IssueNumber'])
logger.fdebug('comicyear : %s' % watchchk['SeriesYear'])
#logger.info('publisher : ' + watchchk['IssuePublisher']) <-- no publisher in table
logger.fdebug('SARC : ' + SARC)
logger.fdebug('IssueArcID : ' + IssueArcID)
foundcom, prov = search.search_init(ComicName=watchchk['ComicName'], IssueNumber=watchchk['IssueNumber'], ComicYear=issueyear, SeriesYear=watchchk['SeriesYear'], Publisher=None, IssueDate=issdate, StoreDate=stdate, IssueID=s_issueid, SARC=SARC, IssueArcID=IssueArcID, oneoff=True)
logger.fdebug('SARC : %s' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
passinfo = {'issueid': s_issueid,
'comicname': watchchk['ComicName'],
'seriesyear': watchchk['SeriesYear'],
'comicid': s_comicid,
'issuenumber': watchchk['IssueNumber'],
'booktype': watchchk['Type']}
#foundcom, prov = search.search_init(ComicName=watchchk['ComicName'], IssueNumber=watchchk['IssueNumber'], ComicYear=issueyear, SeriesYear=watchchk['SeriesYear'], Publisher=None, IssueDate=issdate, StoreDate=stdate, IssueID=s_issueid, SARC=SARC, IssueArcID=IssueArcID, oneoff=True)
else:
# it's a watched series
s_comicid = issuechk['ComicID']
s_issueid = issuechk['IssueID']
logger.fdebug("-- watched series queue.")
logger.fdebug(issuechk['ComicName'] + " -- #" + str(issuechk['Issue_Number']))
foundcom, prov = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], Publisher=None, IssueDate=None, StoreDate=issuechk['ReleaseDate'], IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
if foundcom['status'] is True:
updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
else:
logger.fdebug('Watchlist issue not sucessfully found')
logger.fdebug('issuearcid: ' + str(IssueArcID))
logger.fdebug('issueid: ' + str(s_issueid))
stupdate.append({"Status": "Wanted",
"IssueArcID": IssueArcID,
"IssueID": s_issueid})
logger.fdebug('-- watched series queue.')
logger.fdebug('%s -- #%s' % (issuechk['ComicName'], issuechk['Issue_Number']))
passinfo = {'issueid': s_issueid,
'comicname': issuechk['ComicName'],
'seriesyear': issuechk['SeriesYear'],
'comicid': s_comicid,
'issuenumber': issuechk['Issue_Number'],
'booktype': issuechk['Type']}
#foundcom, prov = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], Publisher=None, IssueDate=None, StoreDate=issuechk['ReleaseDate'], IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
mylar.SEARCH_QUEUE.put(passinfo)
#if foundcom['status'] is True:
# updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
#else:
# logger.fdebug('Watchlist issue not sucessfully found')
# logger.fdebug('issuearcid: %s' % IssueArcID)
# logger.fdebug('issueid: %s' % s_issueid)
# stupdate.append({"Status": "Wanted",
# "IssueArcID": IssueArcID,
# "IssueID": s_issueid})
if len(stupdate) > 0:
logger.fdebug(str(len(stupdate)) + ' issues need to get updated to Wanted Status')
logger.fdebug('%s issues need to get updated to Wanted Status' % len(stupdate))
for st in stupdate:
ctrlVal = {'IssueArcID': st['IssueArcID']}
newVal = {'Status': st['Status']}
if st['IssueID']:
if st['IssueID']:
logger.fdebug('issueid:' + str(st['IssueID']))
logger.fdebug('issueid: %s' % st['IssueID'])
newVal['IssueID'] = st['IssueID']
myDB.upsert("storyarcs", newVal, ctrlVal)
ReadGetWanted.exposed = True
@ -3990,7 +4068,7 @@ class WebInterface(object):
mylar.CONFIG.IMP_METADATA = bool(imp_metadata)
mylar.CONFIG.IMP_PATHS = bool(imp_paths)
mylar.CONFIG.configure(update=True)
mylar.CONFIG.configure(update=True, startup=False)
# Write the config
logger.info('Now updating config...')
mylar.CONFIG.writeconfig()
@ -5672,18 +5750,23 @@ class WebInterface(object):
testslack.exposed = True
def testrtorrent(self, host, username, password, auth, verify, ssl, rpc_url):
def testrtorrent(self, host, username, password, auth, verify, rpc_url):
import torrent.clients.rtorrent as TorClient
client = TorClient.TorrentClient()
ca_bundle = None
if mylar.CONFIG.RTORRENT_CA_BUNDLE is not None:
ca_bundle = mylar.CONFIG.RTORRENT_CA_BUNDLE
if not client.connect(host, username, password, auth, verify, ssl, rpc_url, ca_bundle):
rclient = client.connect(host, username, password, auth, verify, rpc_url, ca_bundle, test=True)
if not rclient:
logger.warn('Could not establish connection to %s' % host)
return 'Error establishing connection to Rtorrent'
return '[rTorrent] Error establishing connection to Rtorrent'
else:
logger.info('Successfully validated connection to %s' % host)
return "Successfully validated connection to %s" % host
if rclient['status'] is False:
logger.warn('[rTorrent] Could not establish connection to %s. Error returned: %s' % (host, rclient['error']))
return 'Error establishing connection to rTorrent'
else:
logger.info('[rTorrent] Successfully validated connection to %s [v%s]' % (host, rclient['version']))
return 'Successfully validated rTorrent connection'
testrtorrent.exposed = True
def testqbit(self, host, username, password):
@ -5796,9 +5879,9 @@ class WebInterface(object):
download_0day.exposed = True
def test_32p(self):
def test_32p(self, username, password):
import auth32p
tmp = auth32p.info32p(test=True)
tmp = auth32p.info32p(test={'username': username, 'password': password})
rtnvalues = tmp.authenticate()
if rtnvalues['status'] is True:
return json.dumps({"status": "Successfully Authenticated.", "inkdrops": mylar.INKDROPS_32P})
@ -5807,6 +5890,17 @@ class WebInterface(object):
test_32p.exposed = True
def check_ActiveDDL(self):
myDB = db.DBConnection()
active = myDB.selectone("SELECT * FROM DDL_INFO WHERE STATUS = 'Downloading'").fetchone()
if active is None:
return "There are no active downloads currently being attended to"
else:
filesize = os.stat(os.path.join(mylar.CONFIG.DDL_LOCATION, active['filename'])).st_size
cmath = int(float(filesize*100)/int(int(active['remote_filesize'])*100) * 100)
return "%s%s" % (cmath, '%')
check_ActiveDDL.exposed = True
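
The arithmetic in check_ActiveDDL reduces to bytes-downloaded over remote filesize, times 100, truncated to a whole number; the extra *100 factors cancel out. A worked restatement:

def ddl_progress_percent(local_bytes, remote_bytes):
    # int(float(local*100) / int(remote*100) * 100) simplifies to int(local / remote * 100)
    return int(float(local_bytes) / int(remote_bytes) * 100)

# ddl_progress_percent(15728640, 62914560)  ->  25   (rendered as "25%")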
def create_readlist(self, list=None, weeknumber=None, year=None):
# ({
# "PUBLISHER": weekly['PUBLISHER'],
@ -5859,7 +5953,7 @@ class WebInterface(object):
try:
x = float(weekly['ISSUE'])
except ValueError, e:
if 'au' in weekly['ISSUE'].lower() or 'ai' in weekly['ISSUE'].lower() or '.inh' in weekly['ISSUE'].lower() or '.now' in weekly['ISSUE'].lower() or '.mu' in weekly['ISSUE'].lower():
if 'au' in weekly['ISSUE'].lower() or 'ai' in weekly['ISSUE'].lower() or '.inh' in weekly['ISSUE'].lower() or '.now' in weekly['ISSUE'].lower() or '.mu' in weekly['ISSUE'].lower() or '.hu' in weekly['ISSUE'].lower():
x = weekly['ISSUE']
if x is not None: