FIX: necessary git catchup (bad merge)
0
Mylar.py
Executable file → Normal file
|
@ -99,9 +99,11 @@
|
|||
</footer>
|
||||
<a href="#main" id="toTop"><span>Back to top</span></a>
|
||||
</div>
|
||||
<script src="http://code.jquery.com/jquery-1.9.1.js"></script>
|
||||
<!--<script src="http://code.jquery.com/ui/1.10.3/jquery-ui.js"></script> -->
|
||||
|
||||
<script src="js/libs/jquery-1.7.2.min.js"></script>
|
||||
<script src="js/libs/jquery-ui.min.js"></script>
|
||||
<script src="js/libs/jquery-ui.min.js"></script>
|
||||
|
||||
${next.javascriptIncludes()}
|
||||
|
||||
|
|
|
@ -485,6 +485,27 @@
|
|||
</div>
|
||||
</fieldset>
|
||||
|
||||
<fieldset>
|
||||
<div id="newznab providers">
|
||||
<%
|
||||
order_number = 1
|
||||
%>
|
||||
|
||||
%for p_order in config['provider_order']:
|
||||
<div class="config" id="p_order${order_number}">
|
||||
<div class="row">
|
||||
<label>${p_order[0]}</label>
|
||||
<label>${p_order[1]}</label>
|
||||
</div>
|
||||
<div>
|
||||
<%
|
||||
order_number += 1
|
||||
%>
|
||||
|
||||
%endfor
|
||||
</div>
|
||||
</fieldset>
|
||||
|
||||
</td>
|
||||
|
||||
</tr>
|
||||
|
|
|
@ -65,6 +65,10 @@
|
|||
<td id="status">${item['Status']}
|
||||
%if item['Provider'] == 'CBT' or item['Provider'] == 'KAT':
|
||||
<img src="interfaces/default/images/torrent-icon.png" height="20" width="20" title="${item['Provider']}" />
|
||||
%else:
|
||||
%if item['Status'] != 'Downloaded':
|
||||
(${item['Provider']})
|
||||
%endif
|
||||
%endif
|
||||
</td>
|
||||
<td id="action">[<a href="#" onclick="doAjaxCall('queueissue?IssueID=${item['IssueID']}&ComicName=${item['ComicName']}&ComicID=${item['ComicID']}&ComicIssue=${item['Issue_Number']}&mode=want&redirect=history', $(this),'table')" data-success="Retrying download of '${item['ComicName']}' '${item['Issue_Number']}'">retry</a>]</td>
|
||||
|
|
|
@ -1,149 +0,0 @@
|
|||
<%inherit file="base.html" />
|
||||
<%!
|
||||
import mylar
|
||||
from mylar.helpers import checked
|
||||
%>
|
||||
<%def name="headerIncludes()">
|
||||
<div id="subhead_container">
|
||||
<div id="subhead_menu">
|
||||
<a id="menu_link_edit" href="manageComics">Manage Comics</a>
|
||||
</div>
|
||||
</div>
|
||||
</%def>
|
||||
|
||||
<%def name="body()">
|
||||
<div id="paddingheader">
|
||||
<h1 class="clearfix">Manage</h1>
|
||||
</div>
|
||||
<div id="tabs">
|
||||
<ul>
|
||||
<li><a href="#tabs-1">Scan Comic Library</a></li>
|
||||
<li><a href="#tabs-2">Manual Post-Processing</a></li>
|
||||
<li><a href="#tabs-3">Advanced Options</a></li>
|
||||
</ul>
|
||||
<div id="tabs-1" class="configtable">
|
||||
<form action="comicScan" method="GET" id="comicScan">
|
||||
<fieldset>
|
||||
<legend>Scan Comic Library</legend>
|
||||
<p><strong>Where do you keep your comics?</strong></p>
|
||||
<p>You can put in any directory, and it will scan for comic files in that folder
|
||||
(including all subdirectories). <br/><small>For example: '/Users/name/Comics'</small></p>
|
||||
<p>
|
||||
It may take a while depending on how many files you have. You can navigate away from the page<br />
|
||||
as soon as you click 'Save changes'
|
||||
</p>
|
||||
<br/>
|
||||
<div class="row">
|
||||
<label for="">Path to directory</label>
|
||||
%if mylar.COMIC_DIR:
|
||||
<input type="text" value="${mylar.COMIC_DIR}" name="path" size="70" />
|
||||
%else:
|
||||
<input type="text" value="Enter a Comic Directory to scan" onfocus="if
|
||||
(this.value==this.defaultValue) this.value='';" name="path" size="70" />
|
||||
%endif
|
||||
</div>
|
||||
<!-- <div class="row checkbox">
|
||||
<input type="checkbox" name="libraryscan" id="libraryscan" value="1" ${checked(mylar.LIBRARYSCAN)}><label>Automatically Scan Library</label>
|
||||
</div>
|
||||
<div class="row checkbox">
|
||||
<input type="checkbox" name="autoadd" id="autoadd" value="1" ${checked(mylar.ADD_COMICS)}><label>Auto-add new series</label>
|
||||
</div>
|
||||
<div class="row checkbox">
|
||||
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_metadata" id="imp_metadata" value="1" ${checked(mylar.IMP_METADATA)}><label>Use existing Metadata</label>
|
||||
<small>Use existing Metadata to better locate series for import</small>
|
||||
</div>
|
||||
-->
|
||||
<div class="row checkbox">
|
||||
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_move" onclick="initConfigCheckbox($this));" id="imp_move" value="1" ${checked(mylar.IMP_MOVE)}><label>Move files into corresponding Series directory</label>
|
||||
<small>Leaving this unchecked will not move anything, but will mark the issues as Archived</small>
|
||||
</div>
|
||||
<div class="config">
|
||||
<input type="checkbox" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.IMP_RENAME)}><label>Rename Files </label>
|
||||
<small>Rename files to configuration settings</small>
|
||||
</div>
|
||||
<br/>
|
||||
<input type="button" value="Save Changes and Scan" onclick="addScanAction();doAjaxCall('comicScan',$(this),'tabs',true);return false;" data-success="Changes saved. Library will be scanned">
|
||||
<input type="button" value="Save Changes without Scanning Library" onclick="doAjaxCall('comicScan',$(this),'tabs',true);return false;" data-success="Changes Saved Successfully">
|
||||
</fieldset>
|
||||
</form>
|
||||
</div>
|
||||
<div id="tabs-2" class="configtable">
|
||||
<tr>
|
||||
<td>
|
||||
<form action="post_process" method="GET" id="post_process">
|
||||
<fieldset>
|
||||
<legend>Manual Run</legend>
|
||||
<p><strong>Manual Post-Processing</strong></p>
|
||||
<p>You can put in any directory, and it will scan for comic files in that folder
|
||||
(including all subdirectories) that exist on your watchlist.
|
||||
<br/><small>For example: '/Users/name/Comics'</small></p>
|
||||
<p>
|
||||
It may take a while depending on how many files you have. You can navigate away from this
|
||||
as soon as you click 'Go'.
|
||||
</p>
|
||||
<br/>
|
||||
<p><strong>Only series' that exist on your watchlist will be post-processed with your current
|
||||
post-processing options.<br/> The remainder will not be moved from the given directory</strong></p>
|
||||
<div class="row">
|
||||
<label for="">Path to Manually Run</label>
|
||||
<input type="text" value="Enter the full path to post-process" name="nzb_folder" size="70" />
|
||||
<input type="hidden" name="nzb_name" value='Manual Run' />
|
||||
</div>
|
||||
<input type="button" value="Go Manual Run!" onclick="doAjaxCall('post_process',$(this),'tabs',true);return false;">
|
||||
</fieldset>
|
||||
</form>
|
||||
</td>
|
||||
</tr>
|
||||
</div>
|
||||
|
||||
<div id="tabs-3">
|
||||
<table class="configtable" summary="Advanced Options">
|
||||
|
||||
<tr>
|
||||
<td>
|
||||
<fieldset>
|
||||
<div class="links">
|
||||
<legend>Force Options</legend>
|
||||
<a href="#" onclick="doAjaxCall('forceSearch',$(this))" data-success="Checking for wanted issues successful" data-error="Error checking wanted issues"><span class="ui-icon ui-icon-search"></span>Force Check for Wanted Issues</a>
|
||||
<a href="#" onclick="doAjaxCall('forceUpdate',$(this))" data-success="Update active comics successful" data-error="Error forcing update Comics"><span class="ui-icon ui-icon-heart"></span>Force Update Active Comics</a>
|
||||
<a href="#" onclick="doAjaxCall('checkGithub',$(this))" data-success="Checking for update successful" data-error="Error checking for update"><span class="ui-icon ui-icon-refresh"></span>Check for mylar Updates</a>
|
||||
</div>
|
||||
</fieldset>
|
||||
</td>
|
||||
<td>
|
||||
<fieldset>
|
||||
<legend>Export</legend>
|
||||
<div class="links">
|
||||
<a href="#" onclick="doAjaxCall('wanted_Export',$(this))" data-sucess="Exported to Wanted list." data-error="Failed to export. Check logs"><span class="ui-icon ui-icon-refresh"></span>Export Wanted to CSV</a>
|
||||
</div>
|
||||
<br/><br/>
|
||||
<legend>Hidden Options</legend>
|
||||
<div classs="links">
|
||||
<a href="readlist">Reading List Management</a><br/>
|
||||
<a href="importResults">Import Results Management</a>
|
||||
</div>
|
||||
</fieldset>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</%def>
|
||||
<%def name="javascriptIncludes()">
|
||||
<script>
|
||||
function addScanAction() {
|
||||
$('#autoadd').append('<input type="hidden" name="scan" value=1 />');
|
||||
};
|
||||
|
||||
function initThisPage() {
|
||||
jQuery( "#tabs" ).tabs();
|
||||
initActions();
|
||||
initConfigCheckbox("#imp_move");
|
||||
|
||||
};
|
||||
$(document).ready(function() {
|
||||
initThisPage();
|
||||
});
|
||||
</script>
|
||||
</%def>
|
0
data/interfaces/default/images/archive_icon.png
Normal file → Executable file
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 22 KiB |
0
data/interfaces/default/images/copy_icon.png
Normal file → Executable file
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 13 KiB |
0
data/interfaces/default/images/download_icon.png
Normal file → Executable file
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 28 KiB |
0
data/interfaces/default/images/favicon.ico
Normal file → Executable file
Before Width: | Height: | Size: 9.9 KiB After Width: | Height: | Size: 9.9 KiB |
0
data/interfaces/default/images/green-circle.png
Normal file → Executable file
Before Width: | Height: | Size: 7.4 KiB After Width: | Height: | Size: 7.4 KiB |
0
data/interfaces/default/images/mylar2.png
Normal file → Executable file
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
0
data/interfaces/default/images/next.gif
Normal file → Executable file
Before Width: | Height: | Size: 898 B After Width: | Height: | Size: 898 B |
0
data/interfaces/default/images/prev.gif
Normal file → Executable file
Before Width: | Height: | Size: 899 B After Width: | Height: | Size: 899 B |
0
data/interfaces/default/images/retry_icon.png
Normal file → Executable file
Before Width: | Height: | Size: 130 KiB After Width: | Height: | Size: 130 KiB |
0
data/interfaces/default/images/skipped_icon.png
Normal file → Executable file
Before Width: | Height: | Size: 8.7 KiB After Width: | Height: | Size: 8.7 KiB |
0
data/interfaces/default/images/ultron.png
Normal file → Executable file
Before Width: | Height: | Size: 146 KiB After Width: | Height: | Size: 146 KiB |
0
data/interfaces/default/images/wanted_icon.png
Normal file → Executable file
Before Width: | Height: | Size: 20 KiB After Width: | Height: | Size: 20 KiB |
|
@ -32,9 +32,11 @@
|
|||
<small>Arcs in StoryArc Directory: <% sdir = os.path.join(mylar.DESTINATION_DIR, "StoryArcs") %>${sdir}</small><br/>
|
||||
<input type="checkbox" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
|
||||
<input type="checkbox" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading# to filename</label><br/>
|
||||
<label>Enforce Renaming/MetaTagging options (if enabled)</label>
|
||||
<label>Copy watchlisted issues to StoryArc Directory</label>
|
||||
</div>
|
||||
</fieldset>
|
||||
</div>
|
||||
<div>
|
||||
<input type="submit" value="Update"/>
|
||||
</div>
|
||||
</form>
|
||||
|
|
|
@ -12,8 +12,6 @@
|
|||
4,2045,none,1482/10251/6029/11218/62349
|
||||
#Amazing Spider-man
|
||||
2,2127,none,1570/7794/11288
|
||||
#The Boys
|
||||
1,18033,none,19531/25058
|
||||
#2000 A.D.
|
||||
4,19752,none,11289/11295/11294/11292/11293
|
||||
#--------
|
||||
|
|
|
|
@ -170,6 +170,8 @@ NZBGET_PASSWORD = None
|
|||
NZBGET_PRIORITY = None
|
||||
NZBGET_CATEGORY = None
|
||||
|
||||
PROVIDER_ORDER = None
|
||||
|
||||
NZBSU = False
|
||||
NZBSU_UID = None
|
||||
NZBSU_APIKEY = None
|
||||
|
@ -315,7 +317,7 @@ def initialize():
|
|||
USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBSU, NZBSU_UID, NZBSU_APIKEY, DOGNZB, DOGNZB_UID, DOGNZB_APIKEY, NZBX,\
|
||||
NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
|
||||
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, ALTEXPERIMENTAL, \
|
||||
ENABLE_META, CMTAGGER_PATH, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, \
|
||||
ENABLE_META, CMTAGGER_PATH, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, PROVIDER_ORDER, \
|
||||
ENABLE_TORRENTS, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \
|
||||
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, ENABLE_TORRENT_SEARCH, ENABLE_KAT, ENABLE_CBT, CBT_PASSKEY, \
|
||||
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_USERNAME, BOXCAR_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
|
||||
|
@ -509,15 +511,27 @@ def initialize():
|
|||
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBGet', 'nzbget_category', '')
|
||||
NZBGET_PRIORITY = check_setting_str(CFG, 'NZBGet', 'nzbget_priority', '')
|
||||
|
||||
PR_NUM = 0 # provider counter here (used for provider orders)
|
||||
PR = []
|
||||
|
||||
NZBSU = bool(check_setting_int(CFG, 'NZBsu', 'nzbsu', 0))
|
||||
NZBSU_UID = check_setting_str(CFG, 'NZBsu', 'nzbsu_uid', '')
|
||||
NZBSU_APIKEY = check_setting_str(CFG, 'NZBsu', 'nzbsu_apikey', '')
|
||||
if NZBSU:
|
||||
PR.append('nzbsu')
|
||||
PR_NUM +=1
|
||||
|
||||
DOGNZB = bool(check_setting_int(CFG, 'DOGnzb', 'dognzb', 0))
|
||||
DOGNZB_UID = check_setting_str(CFG, 'DOGnzb', 'dognzb_uid', '')
|
||||
DOGNZB_APIKEY = check_setting_str(CFG, 'DOGnzb', 'dognzb_apikey', '')
|
||||
if DOGNZB:
|
||||
PR.append('dognzb')
|
||||
PR_NUM +=1
|
||||
|
||||
NZBX = bool(check_setting_int(CFG, 'nzbx', 'nzbx', 0))
|
||||
if NZBX:
|
||||
PR.append('nzbx')
|
||||
PR_NUM +=1
|
||||
|
||||
RAW = bool(check_setting_int(CFG, 'Raw', 'raw', 0))
|
||||
RAW_PROVIDER = check_setting_str(CFG, 'Raw', 'raw_provider', '')
|
||||
|
@ -527,6 +541,12 @@ def initialize():
|
|||
|
||||
EXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'experimental', 0))
|
||||
ALTEXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'altexperimental', 1))
|
||||
if EXPERIMENTAL:
|
||||
PR.append('Experimental')
|
||||
PR_NUM +=1
|
||||
|
||||
print 'PR_NUM::' + str(PR_NUM)
|
||||
|
||||
NEWZNAB = bool(check_setting_int(CFG, 'Newznab', 'newznab', 0))
|
||||
|
||||
if CONFIG_VERSION:
|
||||
|
@ -575,10 +595,38 @@ def initialize():
|
|||
#to counteract the loss of the 1st newznab entry because of a switch, let's rewrite to the tuple
|
||||
if NEWZNAB_HOST and CONFIG_VERSION:
|
||||
EXTRA_NEWZNABS.append((NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, int(NEWZNAB_ENABLED)))
|
||||
PR_NUM +=1
|
||||
# Need to rewrite config here and bump up config version
|
||||
CONFIG_VERSION = '5'
|
||||
config_write()
|
||||
|
||||
|
||||
print 'PR_NUM:' + str(PR_NUM)
|
||||
for ens in EXTRA_NEWZNABS:
|
||||
print ens[0]
|
||||
print 'enabled:' + str(ens[4])
|
||||
if ens[4] == '1': # if newznabs are enabled
|
||||
PR.append(ens[0])
|
||||
PR_NUM +=1
|
||||
|
||||
|
||||
print('Provider Number count: ' + str(PR_NUM))
|
||||
|
||||
flattened_provider_order = check_setting_str(CFG, 'General', 'provider_order', [], log=False)
|
||||
PROVIDER_ORDER = list(itertools.izip(*[itertools.islice(flattened_provider_order, i, None, 2) for i in range(2)]))
|
||||
|
||||
if len(flattened_provider_order) == 0:
|
||||
#priority provider sequence in order#, ProviderName
|
||||
print('creating provider sequence order now...')
|
||||
TMPPR_NUM = 0
|
||||
PROV_ORDER = []
|
||||
while TMPPR_NUM < PR_NUM :
|
||||
PROV_ORDER.append((TMPPR_NUM, PR[TMPPR_NUM]))
|
||||
TMPPR_NUM +=1
|
||||
PROVIDER_ORDER = PROV_ORDER
|
||||
|
||||
print 'Provider Order is:' + str(PROVIDER_ORDER)
|
||||
config_write()
|
||||
|
||||
# update folder formats in the config & bump up config version
|
||||
if CONFIG_VERSION == '0':
|
||||
from mylar.helpers import replace_all
|
||||
|
@ -869,6 +917,14 @@ def config_write():
|
|||
new_config['General']['rss_checkinterval'] = RSS_CHECKINTERVAL
|
||||
new_config['General']['rss_lastrun'] = RSS_LASTRUN
|
||||
|
||||
# Need to unpack the extra newznabs for saving in config.ini
|
||||
flattened_providers = []
|
||||
for prov_order in PROVIDER_ORDER:
|
||||
for item in prov_order:
|
||||
flattened_providers.append(item)
|
||||
|
||||
new_config['General']['provider_order'] = flattened_providers
|
||||
|
||||
new_config['Torrents'] = {}
|
||||
new_config['Torrents']['enable_torrents'] = int(ENABLE_TORRENTS)
|
||||
new_config['Torrents']['torrent_local'] = int(TORRENT_LOCAL)
|
||||
|
|
|
@ -178,22 +178,27 @@ def IssueDetails(cbdb_id):
|
|||
publen = len(pubd) # find the # of <td>'s
|
||||
pubs = pubd[publen-1] #take the last <td> which will always contain the publication date
|
||||
pdaters = pubs.findNext(text=True) #get the actual date :)
|
||||
basmonths = {'january':'01','february':'02','march':'03','april':'04','may':'05','june':'06','july':'07','august':'09','september':'10','october':'11','december':'12'}
|
||||
basmonths = {'january':'01','february':'02','march':'03','april':'04','may':'05','june':'06','july':'07','august':'09','september':'10','october':'11','december':'12','annual':''}
|
||||
for numbs in basmonths:
|
||||
if numbs in pdaters.lower():
|
||||
pconv = basmonths[numbs]
|
||||
ParseYear = re.sub('/s','',pdaters[-5:])
|
||||
pubdate= str(ParseYear) + "-" + str(pconv)
|
||||
if basmonths[numbs] == '':
|
||||
pubdate = str(ParseYear)
|
||||
else:
|
||||
pubdate= str(ParseYear) + "-" + str(pconv)
|
||||
#logger.fdebug("!success - Publication date: " + str(ParseDate))
|
||||
|
||||
#pubdate = re.sub("[^0-9]", "", pdaters)
|
||||
print ("Issue : " + str(issue) + " (" + str(pubdate) + ")")
|
||||
issuetmp = re.sub("[^0-9]", '', issue)
|
||||
print ("Issue : " + str(issuetmp) + " (" + str(pubdate) + ")")
|
||||
print ("Issuetitle " + str(issuetitle))
|
||||
|
||||
annualslist.append({
|
||||
'AnnualIssue': str(issue),
|
||||
'AnnualIssue': issuetmp.strip(),
|
||||
'AnnualTitle': issuetitle,
|
||||
'AnnualDate': str(pubdate)
|
||||
'AnnualDate': pubdate.strip(),
|
||||
'AnnualYear': ParseYear.strip()
|
||||
})
|
||||
gcount+=1
|
||||
print("annualslist appended...")
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
import os
|
||||
import threading
|
||||
import logging
|
||||
import unicodedata # for non-english locales
|
||||
from logging import handlers
|
||||
|
||||
import mylar
|
||||
|
@ -72,7 +73,11 @@ class RotatingLogger(object):
|
|||
threadname = threading.currentThread().getName()
|
||||
|
||||
if level != 'DEBUG':
|
||||
mylar.LOG_LIST.insert(0, (helpers.now(), message, level, threadname))
|
||||
if mylar.OS_DETECT == "Windows" and mylar.OS_ENCODING is not "utf-8":
|
||||
tmpthedate = unicodedata.normalize('NFKD', helpers.now().decode(mylar.OS_ENCODING, "replace"))
|
||||
else:
|
||||
tmpthedate = helpers.now()
|
||||
mylar.LOG_LIST.insert(0, (tmpthedate, message, level, threadname))
|
||||
|
||||
message = threadname + ' : ' + message
|
||||
|
||||
|
|
|
@ -71,15 +71,15 @@ def torrents(pickfeed=None,seriesname=None,issue=None):
|
|||
if issue:
|
||||
srchterm += ' ' + str(issue)
|
||||
|
||||
if pickfeed == "1": # comicbt rss feed based on followlist
|
||||
if pickfeed == "1": # cbt rss feed based on followlist
|
||||
feed = "http://comicbt.com/rss.php?action=browse&passkey=" + str(passkey) + "&type=dl"
|
||||
elif pickfeed == "2" and srchterm is not None: # kat.ph search
|
||||
feed = "http://kat.ph/usearch/" + str(srchterm) + "%20category%3Acomics%20seeds%3A1/?rss=1"
|
||||
elif pickfeed == "3": # kat.ph rss feed
|
||||
feed = "http://kat.ph/usearch/category%3Acomics%20seeds%3A1/?rss=1"
|
||||
elif pickfeed == "4": #comicbt follow link
|
||||
elif pickfeed == "4": #cbt follow link
|
||||
feed = "http://comicbt.com/rss.php?action=follow&passkey=" + str(passkey) + "&type=dl"
|
||||
elif pickfeed == "5": # comicbt series link
|
||||
elif pickfeed == "5": # cbt series link
|
||||
# seriespage = "http://comicbt.com/series.php?passkey=" + str(passkey)
|
||||
feed = "http://comicbt.com/rss.php?action=series&series=" + str(seriesno) + "&passkey=" + str(passkey)
|
||||
else:
|
||||
|
@ -93,11 +93,12 @@ def torrents(pickfeed=None,seriesname=None,issue=None):
|
|||
|
||||
if pickfeed == "5": # we need to get the series # first
|
||||
seriesSearch(seriespage, seriesname)
|
||||
|
||||
feedme = feedparser.parse(feed)
|
||||
|
||||
i = 0
|
||||
|
||||
feeddata = []
|
||||
|
||||
myDB = db.DBConnection()
|
||||
torthekat = []
|
||||
katinfo = {}
|
||||
|
@ -131,18 +132,19 @@ def torrents(pickfeed=None,seriesname=None,issue=None):
|
|||
elif pickfeed == "1" or pickfeed == "4":
|
||||
# tmpsz = feedme.entries[i].enclosures[0]
|
||||
feeddata.append({
|
||||
'Site': 'comicBT',
|
||||
'Site': 'CBT',
|
||||
'Title': feedme.entries[i].title,
|
||||
'Link': feedme.entries[i].link,
|
||||
'Pubdate': feedme.entries[i].updated
|
||||
# 'Size': tmpsz['length']
|
||||
})
|
||||
#print ("Site: ComicBT")
|
||||
#print ("Site: CBT")
|
||||
#print ("Title: " + str(feeddata[i]['Title']))
|
||||
#print ("Link: " + str(feeddata[i]['Link']))
|
||||
#print ("pubdate: " + str(feeddata[i]['Pubdate']))
|
||||
i+=1
|
||||
logger.fdebug('there were ' + str(i) + ' results..')
|
||||
|
||||
if not seriesname:
|
||||
rssdbupdate(feeddata,i,'torrent')
|
||||
else:
|
||||
|
@ -356,7 +358,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
|
|||
tresults = []
|
||||
|
||||
if mylar.ENABLE_CBT:
|
||||
tresults = myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='comicBT'", [tsearch]).fetchall()
|
||||
tresults = myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='CBT'", [tsearch]).fetchall()
|
||||
if mylar.ENABLE_KAT:
|
||||
tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [tsearch]).fetchall()
|
||||
|
||||
|
@ -388,7 +390,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
|
|||
|
||||
if mylar.ENABLE_CBT:
|
||||
#print "AS_Alternate:" + str(AS_Alternate)
|
||||
tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='comicBT'", [AS_Alternate]).fetchall()
|
||||
tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='CBT'", [AS_Alternate]).fetchall()
|
||||
if mylar.ENABLE_KAT:
|
||||
tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [AS_Alternate]).fetchall()
|
||||
|
||||
|
@ -509,7 +511,7 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None):
|
|||
nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\s]', '%',seriesname)
|
||||
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
|
||||
nsearch = nsearch_seriesname + "%"
|
||||
nresults = myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site != 'comicBT' AND Site != 'KAT'", [nsearch])
|
||||
nresults = myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site != 'CBT' AND Site != 'KAT'", [nsearch])
|
||||
if nresults is None:
|
||||
logger.fdebug('nzb search returned no results for ' + seriesname)
|
||||
if seriesname_alt is None:
|
||||
|
@ -521,7 +523,7 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None):
|
|||
AS_Alternate = AlternateSearch
|
||||
for calt in chkthealt:
|
||||
AS_Alternate = re.sub('##','',calt)
|
||||
nresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site != 'comicBT' AND Site != 'KAT'", [AS_Alternate])
|
||||
nresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site != 'CBT' AND Site != 'KAT'", [AS_Alternate])
|
||||
if nresults is None:
|
||||
logger.fdebug('nzb alternate name search returned no results.')
|
||||
return "no results"
|
||||
|
@ -546,7 +548,7 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None):
|
|||
def torsend2client(seriesname, linkit, site):
|
||||
logger.info('matched on ' + str(seriesname))
|
||||
filename = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
|
||||
if site == 'ComicBT':
|
||||
if site == 'CBT':
|
||||
logger.info(linkit)
|
||||
linkit = str(linkit) + '&passkey=' + str(mylar.CBT_PASSKEY)
|
||||
|
||||
|
|
|
@ -130,30 +130,81 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
|
|||
else:
|
||||
IssDateFix = "no"
|
||||
|
||||
while (torpr >=0 ):
|
||||
if torprovider[torpr] == 'cbt':
|
||||
torprov = 'CBT'
|
||||
elif torprovider[torpr] == 'kat':
|
||||
torprov = 'KAT'
|
||||
searchcnt = 0
|
||||
i = 1
|
||||
|
||||
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, torprov, torpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID)
|
||||
if findit == 'yes':
|
||||
logger.fdebug("findit = found!")
|
||||
break
|
||||
if rsscheck:
|
||||
if mylar.ENABLE_RSS:
|
||||
searchcnt = 1 # rss-only
|
||||
else:
|
||||
if AlternateSearch is not None and AlternateSearch != "None":
|
||||
chkthealt = AlternateSearch.split('##')
|
||||
if chkthealt == 0:
|
||||
AS_Alternate = AlternateSearch
|
||||
loopit = len(chkthealt)
|
||||
for calt in chkthealt:
|
||||
AS_Alternate = re.sub('##','',calt)
|
||||
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
|
||||
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, torprov, torp, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID)
|
||||
if findit == 'yes':
|
||||
break
|
||||
searchcnt = 0 # if it's not enabled, don't even bother.
|
||||
else:
|
||||
if mylar.ENABLE_RSS:
|
||||
searchcnt = 2 # rss first, then api on non-matches
|
||||
else:
|
||||
searchcnt = 2 #set the searchcnt to 2 (api)
|
||||
i = 2 #start the counter at api, so it will exit without running RSS
|
||||
|
||||
torpr-=1
|
||||
while ( i <= searchcnt ):
|
||||
#searchmodes:
|
||||
# rss - will run through the built-cached db of entries
|
||||
# api - will run through the providers via api (or non-api in the case of Experimental)
|
||||
# the trick is if the search is done during an rss compare, it needs to exit when done.
|
||||
# otherwise, the order of operations is rss feed check first, followed by api on non-results.
|
||||
|
||||
if i == 1: searchmode = 'rss' #order of ops - this will be used first.
|
||||
elif i == 2: searchmode = 'api'
|
||||
|
||||
logger.fdebug("Initiating Search via : " + str(searchmode))
|
||||
|
||||
torprtmp = torpr
|
||||
|
||||
while (torprtmp >=0 ):
|
||||
if torprovider[torprtmp] == 'cbt':
|
||||
# CBT
|
||||
torprov = 'CBT'
|
||||
elif torprovider[torprtmp] == 'kat':
|
||||
torprov = 'KAT'
|
||||
|
||||
if mylar.ENABLE_RSS:
|
||||
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, torprov, torpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID)
|
||||
if findit == 'yes':
|
||||
logger.fdebug("findit = found!")
|
||||
break
|
||||
else:
|
||||
if AlternateSearch is not None and AlternateSearch != "None":
|
||||
chkthealt = AlternateSearch.split('##')
|
||||
if chkthealt == 0:
|
||||
AS_Alternate = AlternateSearch
|
||||
loopit = len(chkthealt)
|
||||
for calt in chkthealt:
|
||||
AS_Alternate = re.sub('##','',calt)
|
||||
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
|
||||
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, torprov, torp, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID)
|
||||
if findit == 'yes':
|
||||
break
|
||||
|
||||
else:
|
||||
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, torprov, torpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID)
|
||||
if findit == 'yes':
|
||||
logger.fdebug("findit = found!")
|
||||
break
|
||||
else:
|
||||
if AlternateSearch is not None and AlternateSearch != "None":
|
||||
chkthealt = AlternateSearch.split('##')
|
||||
if chkthealt == 0:
|
||||
AS_Alternate = AlternateSearch
|
||||
loopit = len(chkthealt)
|
||||
for calt in chkthealt:
|
||||
AS_Alternate = re.sub('##','',calt)
|
||||
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
|
||||
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, torprov, torp, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID)
|
||||
if findit == 'yes':
|
||||
break
|
||||
|
||||
torprtmp-=1
|
||||
|
||||
i+=1
|
||||
|
||||
if findit == 'yes': return findit, torprov
|
||||
|
||||
|
|
|
@ -1836,6 +1836,7 @@ class WebInterface(object):
|
|||
"extra_newznabs" : mylar.EXTRA_NEWZNABS,
|
||||
"enable_rss" : helpers.checked(mylar.ENABLE_RSS),
|
||||
"rss_checkinterval" : mylar.RSS_CHECKINTERVAL,
|
||||
"provider_order" : mylar.PROVIDER_ORDER,
|
||||
"enable_torrents" : helpers.checked(mylar.ENABLE_TORRENTS),
|
||||
"torrent_local" : helpers.checked(mylar.TORRENT_LOCAL),
|
||||
"local_watchdir" : mylar.LOCAL_WATCHDIR,
|
||||
|
|
587
test/search.py
|
@ -1,587 +0,0 @@
|
|||
# This file is part of Mylar.
|
||||
#
|
||||
# Mylar is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Mylar is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import mylar
|
||||
from mylar import logger, db, updater, helpers, parseit
|
||||
|
||||
nzbsu_APIkey = mylar.NZBSU_APIKEY
|
||||
dognzb_APIkey = mylar.DOGNZB_APIKEY
|
||||
|
||||
LOG = mylar.LOG_DIR
|
||||
|
||||
import lib.feedparser as feedparser
|
||||
import urllib
|
||||
import os, errno
|
||||
import string
|
||||
import sqlite3 as lite
|
||||
import sys
|
||||
import getopt
|
||||
import re
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear):
    """Try each enabled nzb provider in turn for one wanted issue.

    Providers are attempted in reverse order of registration
    (experimental first, then dognzb, then nzb.su), matching the
    original traversal.  Returns "yes" on the first provider that
    finds the issue, "no" when every enabled provider comes up empty
    (or when no provider is enabled at all).
    """
    # nzb year defaults when the caller has no date information
    if ComicYear == None:
        ComicYear = '2012'
    else:
        ComicYear = str(ComicYear)[:4]

    ##nzb provider selection##
    ##'dognzb' or 'nzb.su' or 'experimental'
    nzbprovider = []
    if mylar.NZBSU == 1:
        nzbprovider.append('nzb.su')
    if mylar.DOGNZB == 1:
        nzbprovider.append('dognzb')
    if mylar.EXPERIMENTAL == 1:
        nzbprovider.append('experimental')

    # FIX: the old if/elif chains decremented the index mid-pass and then
    # re-tested it, which could query the same provider twice (or index the
    # list with a negative index), and 'findit' was never initialized, so an
    # empty provider list raised NameError on return.  A single reverse walk
    # over the list preserves the original order and removes both defects.
    findit = "no"
    nzbpr = len(nzbprovider) - 1
    while nzbpr >= 0:
        nzbprov = nzbprovider[nzbpr]
        findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr)
        if findit == 'yes':
            break
        nzbpr -= 1
    return findit
def RSS_Search(ComicName, IssueNumber):
    """Match watchlist comics against a previously-parsed RSS feed and
    push hits straight to SABnzbd.

    NOTE(review): this function references several names that are never
    defined in this module (w, d, tot, watchfnd, watchfndextra, ki, kc,
    nzbprov, SABAPI), so calling it as-is raises NameError.  It appears
    to be dead legacy code awaiting the rewrite the author notes below.
    """
    #this all needs to be REDONE...#
    loopd = int(w-1)
    ssab = []          # names of watchlist comics that were snatched
    ssabcount = 0      # running count of snatched items
    print ("--------RSS MATCHING-----------------")
    for entry in d['entries']:
        # test for comic name here
        print loopd, entry['title']
        #print kc[loopd]
        #while (loopd > -1):
        #    if str(kc[loopd]).lower() in str(entry['title'].lower()):
        #print entry['title']
        # more precision - let's see if it's a hit on issue as well
        # Experimental process
        # since we're comparing the watchlist titles to the rss feed (for more robust matching)

        # the results will be 2nd/3rd variants, MR's, and comics on the watchlist but not necessarily 'NEW' rele$
        # let's first compare watchlist to release list
        incloop = int (tot -1)
        while (incloop > -1):
            #print ("Comparing " + str(entry['title']) + " - for - " + str(watchfnd[incloop]))
            cleantitle = helpers.cleanName(entry['title'])
            if str(watchfnd[incloop]).lower() in str(cleantitle).lower():
                #print ("MATCHED - " + str(watchfnd[incloop]).lower())
                # NOTE(review): str(...).lower() is always a string, so this
                # 'is not None' test is always true -- likely meant to test
                # the underlying value before str() conversion.
                if str(watchfndextra[incloop]).lower() is not None:
                    if str(watchfndextra[incloop]).lower() not in str(cleantitle).lower():
                        #print ("no extra matching - not a match")
                        #print (watchfndextra[incloop].lower())
                        break
                # now we have a match on watchlist and on release list, let's check if the issue is the same
                # on the feed and the releaselist
                # we have to remove the # sign from the ki[array] field first
                ki[incloop] = re.sub("\D", "", str(ki[incloop]))
                if str(ki[incloop]) in str(cleantitle):
                    print ("MATCH FOR DOWNLOAD!!\n WATCHLIST: " + str(watchfnd[incloop]) + "\n RLSLIST: " + str(kc[incloop]) + " ISSUE# " + str(ki[incloop]) + "\n RSS: " + str(cleantitle))
                    #let's do the DOWNLOAD and send to SABnzbd
                    #this is for nzb.su - API LIMIT :(
                    linkstart = os.path.splitext(entry['link'])[0]
                    #following is JUST for nzb.su
                    if nzbprov == 'nzb.su':
                        linkit = os.path.splitext(entry['link'])[1]
                        linkit = linkit.replace("&", "%26")
                        thislink = str(linkstart) + str(linkit)
                    else:
                        # this should work for every other provider
                        linkstart = linkstart.replace("&", "%26")
                        thislink = str(linkstart)
                    # NOTE(review): SABnzbd host is hard-coded here instead of
                    # using mylar.SAB_HOST like the rest of the module.
                    tmp = "http://192.168.2.2:8085/api?mode=addurl&name=" + str(thislink) + "&pp=3&cat=comics&apikey=" + str(SABAPI)
                    print tmp
                    ssab.append(str(watchfnd[incloop]))
                    ssabcount+=1
                    urllib.urlopen(tmp);
                    # time.sleep(5)
            incloop-=1
    # - End of Experimental Process
    #break
    #loopd-=1
    print ("snatched " + str(ssabcount) + " out of " + str(tot) + " comics via rss...")
    return ssabcount
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
    """Query one provider (nzb.su / dognzb / experimental) for one issue.

    On a hit, the .nzb is either dropped into the blackhole directory or
    downloaded to the cache and handed to SABnzbd (optionally renamed in
    SAB's queue).  Returns "yes" when an nzb was found and dispatched,
    otherwise "no".  nzbpr is the caller's count of remaining providers,
    used here only for log messaging.
    """
    logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " using " + str(nzbprov))
    # pick the api key that matches the provider we were asked to use
    if nzbprov == 'nzb.su':
        apikey = mylar.NZBSU_APIKEY
    elif nzbprov == 'dognzb':
        apikey = mylar.DOGNZB_APIKEY
    elif nzbprov == 'experimental':
        apikey = 'none'
    #print ("-------------------------")

    # preferred file format gets appended to the search string
    if mylar.PREFERRED_QUALITY == 0: filetype = ""
    elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
    elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz"

    # figure out what was missed via rss feeds and do a manual search via api
    #tsc = int(tot-1)
    findcomic = []
    findcomiciss = []
    findcount = 0
    ci = ""
    comsearch = []
    isssearch = []
    comyear = str(ComicYear)

    #print ("-------SEARCH FOR MISSING------------------")
    findcomic.append(str(ComicName))
    # strip a trailing ".00", then every remaining non-digit, from the issue
    IssueNumber = str(re.sub("\.00", "", str(IssueNumber)))
    #print ("issueNumber" + str(IssueNumber))
    findcomiciss.append(str(re.sub("\D", "", str(IssueNumber))))

    #print ("we need : " + str(findcomic[findcount]) + " issue: #" + str(findcomiciss[findcount]))
    # replace whitespace in comic name with %20 for api search
    cm = re.sub(" ", "%20", str(findcomic[findcount]))
    #print (cmi)
    # cmloopit = how many zero-padding variations to try (001 / 01 / 1)
    if len(str(findcomiciss[findcount])) == 1:
        cmloopit = 3
    elif len(str(findcomiciss[findcount])) == 2:
        cmloopit = 2
    else:
        cmloopit = 1
    isssearch.append(str(findcomiciss[findcount]))
    comsearch.append(cm)
    findcount+=1

    # ----

    #print ("------RESULTS OF SEARCH-------------------")
    findloop = 0
    foundcomic = []

    #---issue problem
    # if issue is '011' instead of '11' in nzb search results, will not have same
    # results. '011' will return different than '11', as will '009' and '09'.

    while (findloop < (findcount) ):
        comsrc = comsearch[findloop]
        #print (str(comsearch[findloop]))
        while (cmloopit >= 1 ):
            # here we account for issue pattern variations
            if cmloopit == 3:
                comsearch[findloop] = comsrc + "%2000" + isssearch[findloop] + "%20" + str(filetype)
                #print (comsearch[findloop])
            elif cmloopit == 2:
                comsearch[findloop] = comsrc + "%200" + isssearch[findloop] + "%20" + str(filetype)
                #print (comsearch[findloop])
            elif cmloopit == 1:
                comsearch[findloop] = comsrc + "%20" + isssearch[findloop] + "%20" + str(filetype)
                #print (comsearch[findloop])
            #print ("NZB Provider set to: " + nzbprov)
            if nzbprov != 'experimental':
                if nzbprov == 'dognzb':
                    #print ("dog-search.")
                    findurl = "http://dognzb.cr/api?t=search&apikey=" + str(apikey) + "&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030"
                elif nzbprov == 'nzb.su':
                    #print ("nzb.su search")
                    findurl = "http://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
                bb = feedparser.parse(findurl)
                #print (findurl)
            elif nzbprov == 'experimental':
                #print ("experimental raw search")
                # experimental mode scrapes a binaries search site instead of
                # a newznab api; may return the string "no results"
                bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
            done = False
            foundc = "no"
            if bb == "no results":
                #print ("no results found...attempting alternate search")
                pass
            elif (len(bb['entries']) == 0):
                #print ("Nothing found for : " + str(findcomic[findloop]) + " Issue: #" + str(findcomiciss[findloop]))
                foundc = "no"
            else:
                #print ("Found for: " + str(findcomic[findloop]))
                for entry in bb['entries']:
                    #print str(entry['title'])
                    cleantitle = helpers.cleanName(str(entry['title']))
                    if done:
                        break
                    #print ("title: " + str(cleantitle))
                    #print ("link: " + entry['link'])
                    #let's narrow search down - take out year (2010), (2011), etc
                    #let's check for first occurance of '(' as generally indicates
                    #that the 'title' has ended

                    ripperlist=['digital-',
                                'empire',
                                'dcp']
                    #this takes care of the brackets :)
                    # m = re.findall(r"\((\w+)\)", cleantitle)
                    m = re.findall('[^()]+', cleantitle)
                    lenm = len(m)
                    #print ("there are " + str(lenm) + " words.")
                    cnt = 0
                    # walk the bracket-delimited chunks of the release title
                    while (cnt < lenm):
                        if m[cnt] is None: break
                        #if m[cnt] == ' ': print ("space detected")
                        #print (str(cnt) + ". Bracket Word: " + m[cnt] )
                        if cnt == 0:
                            # first chunk is assumed to be "<series name> <issue#>"
                            comic_andiss = m[cnt]
                            print ("Comic:" + str(comic_andiss))
                        # NOTE(review): [:-2] keeps everything EXCEPT the last
                        # two chars, so "2012" compares "20" -- works for bare
                        # 4-digit years but looks like [:2] was intended.
                        if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                            print ("year detected!")
                            result_comyear = m[cnt]
                            if str(comyear) in result_comyear:
                                print (str(comyear) + " - right - years match baby!")
                                yearmatch = "true"
                            else:
                                print (str(comyear) + " - not right - years don't match ")
                                yearmatch = "false"
                        if 'digital' in m[cnt] and len(m[cnt]) == 7:
                            pass
                            #print ("digital edition")
                        if ' of ' in m[cnt]:
                            #print ("mini-series detected : " + str(m[cnt]))
                            result_of = m[cnt]
                        if 'cover' in m[cnt]:
                            #print ("covers detected")
                            result_comcovers = m[cnt]
                        for ripper in ripperlist:
                            if ripper in m[cnt]:
                                #print ("Scanner detected:" + str(m[cnt]))
                                result_comscanner = m[cnt]
                        cnt+=1

                    # NOTE(review): yearmatch is only bound when a year chunk was
                    # seen above -- NameError possible for titles with no year.
                    if yearmatch == "false": break

                    splitit = []
                    watchcomic_split = []
                    # drop punctuation, then split both names into word lists
                    comic_iss = re.sub('[\-\:\,]', '', str(comic_andiss))
                    splitit = comic_iss.split(None)
                    watchcomic_split = findcomic[findloop].split(None)

                    bmm = re.findall('v\d', comic_iss)
                    #print ("vers - " + str(bmm))
                    # a "v2"-style volume token means one extra word to ignore
                    if len(bmm) > 0: splitst = len(splitit) - 2
                    else: splitst = len(splitit) - 1
                    if (splitst) != len(watchcomic_split):
                        print ("incorrect comic lengths...not a match")
                        if str(splitit[0]).lower() == "the":
                            print ("THE word detected...attempting to adjust pattern matching")
                            # NOTE(review): assigns a list slice to element 0;
                            # likely intended to drop the leading "The" instead.
                            splitit[0] = splitit[4:]
                    else:
                        print ("length match..proceeding")
                        n = 0
                        scount = 0
                        #print ("length:" + str(len(splitit)))
                        # word-by-word comparison; the last token is treated as
                        # the issue number rather than part of the name
                        while ( n <= len(splitit)-1 ):
                            if n < len(splitit)-1:
                                #print ( str(n) + ". Comparing: " + watchcomic_split[n] + " .to. " + splitit[n] )
                                if str(watchcomic_split[n].lower()) in str(splitit[n].lower()):
                                    #print ("word matched on : " + splitit[n])
                                    scount+=1
                                #elif ':' in splitit[n] or '-' in splitit[n]:
                                #    splitrep = splitit[n].replace('-', '')
                                #    print ("non-character keyword...skipped on " + splitit[n])
                                elif len(splitit[n]) < 3 or (splitit[n][1:]) == "v":
                                    #print ("possible verisoning..checking")
                                    #we hit a versioning # - account for it
                                    if splitit[n][2:].isdigit():
                                        comicversion = str(splitit[n])
                                        #print ("version found:" + str(comicversion))
                            else:
                                if splitit[n].isdigit():
                                    print ("issue detected")
                                    comiss = splitit[n]
                                    # rebuild the series name from the leading tokens
                                    comicNAMER = n - 1
                                    comNAME = splitit[0]
                                    cmnam = 1
                                    while (cmnam < comicNAMER):
                                        comNAME = str(comNAME) + " " + str(splitit[cmnam])
                                        cmnam+=1
                                    #print ("comic: " + str(comNAME))
                                else:
                                    #print ("non-match for: " + splitit[n])
                                    pass
                            n+=1
                        # NOTE(review): integer division on py2 -- spercent is 0
                        # unless every word matched; it is computed but unused.
                        spercent = ( scount/int(len(splitit)) ) * 100
                        #print (str(spercent) + "% match")
                        #if spercent >= 75: print ("it's a go captain...")
                        #if spercent < 75: print ("failure - we only got " + str(spercent) + "% right!")
                        print ("this should be a match!")
                        #issue comparison now as well
                        # NOTE(review): comiss is only bound when the last token
                        # was a digit -- NameError possible here otherwise.
                        if int(findcomiciss[findloop]) == int(comiss):
                            print ("issues match!")

                            ## -- inherit issue. Comic year is non-standard. nzb year is the year
                            ## -- comic was printed, not the start year of the comic series and
                            ## -- thus the deciding component if matches are correct or not
                            linkstart = os.path.splitext(entry['link'])[0]
                            #following is JUST for nzb.su
                            if nzbprov == 'nzb.su':
                                linkit = os.path.splitext(entry['link'])[1]
                                #print ("linkit: " + str(linkit))
                                linkit = linkit.replace("&", "%26")
                                linkapi = str(linkstart) + str(linkit)
                            else:
                                # this should work for every other provider
                                linkstart = linkstart.replace("&", "%26")
                                linkapi = str(linkstart)
                            #here we distinguish between rename and not.
                            #blackhole functinality---
                            #let's download the file to a temporary cache.

                            if mylar.BLACKHOLE:
                                # blackhole mode: just drop the .nzb in the watched dir
                                if os.path.exists(mylar.BLACKHOLE_DIR):
                                    filenamenzb = str(ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ").nzb"
                                    urllib.urlretrieve(linkapi, str(mylar.BLACKHOLE_DIR) + str(filenamenzb))
                                    logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) )
                                #end blackhole

                            else:
                                # sabnzbd mode: cache the .nzb locally, then hand it off
                                tmppath = mylar.CACHE_DIR
                                print ("cache directory set to: " + str(tmppath))
                                if os.path.exists(tmppath):
                                    filenamenzb = os.path.split(linkapi)[1]
                                    #filenzb = os.path.join(tmppath,filenamenzb)
                                    if nzbprov == 'nzb.su':
                                        filenzb = linkstart[21:]
                                    if nzbprov == 'experimental':
                                        filenzb = filenamenzb[6:]
                                    if nzbprov == 'dognzb':
                                        # NOTE(review): '==' compares instead of
                                        # assigning -- filenzb stays unset for
                                        # dognzb and the next line can NameError.
                                        filenzb == str(filenamenzb)
                                    savefile = str(tmppath) + "/" + str(filenzb) + ".nzb"
                                else:
                                    #let's make the dir.
                                    try:
                                        os.makedirs(str(mylar.CACHE_DIR))
                                        logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
                                        savefile = str(mylar.CACHE_DIR) + "/" + str(filenzb) + ".nzb"

                                    # NOTE(review): 'except OSError.e:' is not valid
                                    # exception capture (should be 'except OSError, e:'
                                    # on py2); as written it raises AttributeError
                                    # the moment makedirs fails.
                                    except OSError.e:
                                        if e.errno != errno.EEXIST:
                                            raise
                                print ("savefile set to: " + str(savefile))
                                urllib.urlretrieve(linkapi, str(savefile))
                                #print (str(mylar.RENAME_FILES))
                                print ("sucessfully retrieve nzb to : " + str(savefile))
                                #check sab for current pause status
                                print ("sab host set to :" + str(mylar.SAB_HOST))
                                sabqstatusapi = str(mylar.SAB_HOST) + "/api?mode=qstatus&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                                from xml.dom.minidom import parseString
                                import urllib2
                                # NOTE(review): 'file' shadows the builtin here
                                file = urllib2.urlopen(sabqstatusapi);
                                data = file.read()
                                file.close()
                                dom = parseString(data)
                                for node in dom.getElementsByTagName('paused'):
                                    pausestatus = node.firstChild.wholeText
                                #print pausestatus
                                if pausestatus != 'True':
                                    #pause sab first because it downloads too quick (cbr's are small!)
                                    pauseapi = str(mylar.SAB_HOST) + "/api?mode=pause&apikey=" + str(mylar.SAB_APIKEY)
                                    urllib.urlopen(pauseapi);
                                    print "Queue paused"
                                else:
                                    print "Queue already paused"

                                if mylar.RENAME_FILES == 1:
                                    #print ("Saved file to: " + str(savefile))
                                    tmpapi = str(mylar.SAB_HOST) + "/api?mode=addlocalfile&name=" + str(savefile) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
                                else:
                                    tmpapi = str(mylar.SAB_HOST) + "/api?mode=addurl&name=" + str(linkapi) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
                                print ("sab api string:" + str(tmpapi))
                                time.sleep(5)
                                urllib.urlopen(tmpapi);
                                if mylar.RENAME_FILES == 1:
                                    #let's give it 5 extra seconds to retrieve the nzb data...

                                    time.sleep(5)

                                    outqueue = str(mylar.SAB_HOST) + "/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                                    print ("outqueue line generated")
                                    urllib.urlopen(outqueue);
                                    time.sleep(5)
                                    print ("passed api request to SAB")
                                    #<slots><slot><filename>.nzb filename
                                    #chang nzbfilename to include series(SAB will auto rename based on this)
                                    #api?mode=queue&name=rename&value=<filename_nzi22ks>&value2=NEWNAME
                                    from xml.dom.minidom import parseString
                                    import urllib2
                                    file = urllib2.urlopen(outqueue);
                                    data = file.read()
                                    file.close()
                                    dom = parseString(data)
                                    queue_slots = dom.getElementsByTagName('filename')
                                    queue_cnt = len(queue_slots)
                                    print ("there are " + str(queue_cnt) + " things in SABnzbd's queue")
                                    que = 0
                                    slotmatch = "no"
                                    # scan SAB's queue for the entry we just added
                                    for queue in queue_slots:
                                        #retrieve the first xml tag (<tag>data</tag>)
                                        #that the parser finds with name tagName:
                                        queue_file = dom.getElementsByTagName('filename')[que].firstChild.wholeText
                                        while ('Trying to fetch NZB' in queue_file):
                                            #let's keep waiting until nzbname is resolved by SABnzbd
                                            time.sleep(5)
                                            file = urllib2.urlopen(outqueue);
                                            data = file.read()
                                            file.close()
                                            dom = parseString(data)
                                            queue_file = dom.getElementsByTagName('filename')[que].firstChild.wholeText
                                        print (str(queue_file))
                                        print (str(filenzb))
                                        queue_file = queue_file.replace("_", " ")
                                        if str(queue_file) in str(filenzb):
                                            print ("matched")
                                            slotmatch = "yes"
                                            slot_nzoid = dom.getElementsByTagName('nzo_id')[que].firstChild.wholeText
                                            print ("slot_nzoid: " + str(slot_nzoid))
                                            break
                                        que+=1
                                    if slotmatch == "yes":
                                        if mylar.REPLACE_SPACES:
                                            repchar = mylar.REPLACE_CHAR
                                        else:
                                            repchar = ' '
                                        #let's make sure there's no crap in the ComicName since it's O.G.
                                        ComicNM = re.sub('[\:\,]', '', str(ComicName))
                                        renameit = str(ComicNM) + " " + str(IssueNumber) + " (" + str(SeriesYear) + ")" + " " + "(" + str(comyear) + ")"
                                        renameit = renameit.replace(' ', repchar)
                                        nzo_ren = str(mylar.SAB_HOST) + "/api?mode=queue&name=rename&apikey=" + str(mylar.SAB_APIKEY) + "&value=" + str(slot_nzoid) + "&value2=" + str(renameit)
                                        print ("attempting to rename queue to " + str(nzo_ren))
                                        urllib2.urlopen(nzo_ren);
                                        print ("renamed!")
                                        #delete the .nzb now.
                                        #delnzb = str(mylar.PROG_DIR) + "/" + str(filenzb) + ".nzb"
                                        #if mylar.PROG_DIR is not "/":
                                        #os.remove(delnzb)
                                        #we need to track nzo_id to make sure finished downloaded with SABnzbd.
                                        #controlValueDict = {"nzo_id": str(slot_nzoid)}
                                        #newValueDict = {"ComicName": str(ComicName),
                                        #                "ComicYEAR": str(comyear),
                                        #                "ComicIssue": str(IssueNumber),
                                        #                "name": str(filenamenzb)}
                                        #print ("updating SABLOG")
                                        #myDB = db.DBConnection()
                                        #myDB.upsert("sablog", newValueDict, controlValueDict)
                                    else: logger.info(u"Couldn't locate file in SAB - are you sure it's being downloaded?")
                                #resume sab if it was running before we started
                                if pausestatus != 'True':
                                    #let's unpause queue now that we did our jobs.
                                    resumeapi = str(mylar.SAB_HOST) + "/api?mode=resume&apikey=" + str(mylar.SAB_APIKEY)
                                    urllib.urlopen(resumeapi);
                                    #print "Queue resumed"
                                #else:
                                    #print "Queue already paused"

                            #raise an exception to break out of loop
                            foundc = "yes"
                            done = True
                            break
                        else:
                            #print ("issues don't match..")
                            foundc = "no"
            if done == True: break
            cmloopit-=1
        findloop+=1
        if foundc == "yes":
            foundcomic.append("yes")
            logger.info(u"Found :" + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov))
            break
        elif foundc == "no" and nzbpr <> 0:
            logger.info(u"More than one search provider given - trying next one.")
        elif foundc == "no" and nzbpr == 0:
            foundcomic.append("no")
            logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + "). Status kept as wanted." )
            break
    return foundc
def searchforissue(issueid=None, new=False):
    """Kick off an nzb search for wanted issue(s).

    With no issueid, every issue whose Status is "Wanted" in the database
    is searched; with an issueid, only that single issue is searched.
    Successful finds are recorded via updater.foundsearch().  Searching
    only happens when at least one provider AND a SABnzbd host are
    configured.
    """
    myDB = db.DBConnection()

    if not issueid:

        # NOTE(review): this second DBConnection shadows the one created above.
        myDB = db.DBConnection()

        results = myDB.select('SELECT * from issues WHERE Status="Wanted"')

        new = True

        for result in results:
            comic = myDB.action('SELECT * from comics WHERE ComicID=?', [result['ComicID']]).fetchone()
            foundNZB = "none"
            SeriesYear = comic['ComicYear']
            # prefer the issue's own cover year; fall back to the series year
            if result['IssueDate'] == None:
                ComicYear = comic['ComicYear']
            else:
                ComicYear = str(result['IssueDate'])[:4]

            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'])
                if foundNZB == "yes":
                    #print ("found!")
                    updater.foundsearch(result['ComicID'], result['IssueID'])
                else:
                    pass
                    #print ("not found!")
    else:
        # single-issue search path
        result = myDB.action('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
        ComicID = result['ComicID']
        comic = myDB.action('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
        SeriesYear = comic['ComicYear']
        # prefer the issue's own cover year; fall back to the series year
        if result['IssueDate'] == None:
            IssueYear = comic['ComicYear']
        else:
            IssueYear = str(result['IssueDate'])[:4]

        foundNZB = "none"
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
            foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'])
            if foundNZB == "yes":
                #print ("found!")
                updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'])
            else:
                pass
                #print ("not found!")