Mirror of https://github.com/evilhero/mylar, synced 2025-03-09 21:33:42 +00:00
FIX: (#304) Index out of range on recheck.
FIX: (#303) ComicVine link on the details page updated.
FIX: (#302) Query rate: Search Delay added as a configuration option.
FIX: (#300) Version-number error when searching a series that has no version number.
FIX: (#297) Manual rename of files now working.
FIX: (#294) 'AU' issue problems should be resolved now.
FIX: (#290) V#(year) and Vol#(year) added to file checking.
IMP: ComicVine-only data use now available as a hidden option (cv_only = 1 in config.ini).
IMP: $VolumeY and $VolumeN added as options for file naming.
IMP: Rough draft of Annuals support (annuals_on = 1 in config.ini).
Other fixes.
Parent: d9b2d9f0a4
Commit: 6e9833ee2e

12 changed files with 708 additions and 365 deletions
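For reference, a minimal sketch (not part of the commit) of reading the hidden options introduced here. The key names and defaults mirror the diff below; using Python's standard configparser instead of Mylar's own config loader is purely illustrative.

import configparser

cfg = configparser.ConfigParser()
cfg.read('config.ini')   # hypothetical path

# New/hidden [General] options surfaced by this commit (defaults follow the diff).
annuals_on   = cfg.getint('General', 'annuals_on', fallback=0)    # rough draft of Annuals support
cv_only      = cfg.getint('General', 'cv_only', fallback=0)       # use ComicVine data only
cv_onetimer  = cfg.getint('General', 'cv_onetimer', fallback=0)   # one-time issue refresh for cv_only
search_delay = cfg.getint('General', 'search_delay', fallback=1)  # minutes to pause between searches

print(annuals_on, cv_only, cv_onetimer, search_delay)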
@ -32,7 +32,7 @@
%endif
<div class="row">
<a href="http://comicvine.com/volume/49-${comic['ComicID']}" target="_blank">${comic['ComicName']} (${comic['ComicYear']})</a>
<a href="${comic['DetailURL']}" target="_blank">${comic['ComicName']} (${comic['ComicYear']})</a>
%if comic['Status'] == 'Loading':
<h3><i>(Comic information is currently being loaded)</i></h3>
%endif
@ -344,7 +344,27 @@
<td id="issuename">${annual['IssueName']}</td>
<td id="reldate">${annual['IssueDate']}</td>
<td id="status">${annual['Status']}</td>
<td id="options"></td>
%if annual['Status'] == 'Downloaded' or annual['Status'] == 'Archived':
<%Csize = mylar.helpers.human_size(annual['ComicSize'])%>
<a href="#" title="${annual['Location']} (${Csize})"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
%endif
</td>
<td id="options">
%if annual['Status'] == 'Skipped':
<a href="#" title="Mark issue as Wanted" onclick="doAjaxCall('queueissue?ComicID=${annual['ComicID']}&IssueID=${annual['IssueID']}&ComicIssue=${annual['Issue_Number']}&ComicYear=${annual['IssueDate']}&mode=want',$(this),'table')"><img src="interfaces/default/images/wanted_icon.png" height="25" width="25" /></a>
%elif (annual['Status'] == 'Wanted'):
<a href="#" title="Mark issue as Skipped" onclick="doAjaxCall('unqueueissue?IssueID=${annual['IssueID']}&ComicID=${annual['ComicID']}',$(this),'table')" data-success="'${annual['Issue_Number']}' has been marked as skipped"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
%elif (annual['Status'] == 'Snatched'):
<a href="#" title="Mark issue as Skipped" onclick="doAjaxCall('unqueueissue?IsssueID=${annual['IssueID']}&ComicID=${annual['ComicID']}',$(this),'table')" data-success="'${annual['Issue_Number']}' has been marked as skipped"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
%elif (annual['Status'] == 'Read'):
<a href="#" title="Add to Reading List"><img src="interfaces/default/images/glasses-icon.png" height="25" width="25" /></a>
%else:
<a href="#" title="Retry"><img src="interfaces/default/images/retry_icon.png" height="25" width="25" /></a>
<a href="#" title="Mark issue as Skipped"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
%endif
<a href="#"><img src="interfaces/default/images/archive_icon.png" height="25" width="25" title="Mark issue as Archived"/></a>
</td>

</tr>
%endfor
</tbody>
@ -137,6 +137,10 @@
<div class="row checkbox">
<input type="checkbox" name="nzb_startup_search" value="1" ${config['nzb_startup_search']} /><label>NZB Search on startup</label>
</div>
<div class="row">
<label>Search delay</label>
<input type="text" name="search_delay" value="${config['search_delay']}" size="4" />mins
</div>
</div>

</div>

@ -487,8 +491,13 @@
<div class="row">
<label> File Format</label>
<input type="text" name="file_format" value="${config['file_format']}" size="43">
<%
file_options = "$Series = SeriesName\n$Year = SeriesYear\n$Issue = IssueNumber\n$VolumeY = V{SeriesYear}\n$VolumeN = V{Volume#}"
%>
<a href="#" title="${file_options}"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
<small>Use: $Series, $Year, $Issue<br />
E.g.: $Series $Issue ($Year) = Animal Man 0 (2012) </small>
<small>if tag isn't available, won't create it</small>
</div>
</fieldset>
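A short illustrative sketch (not part of the commit) of what the File Format tokens above expand to; the substitution loop mirrors the helpers.replace_all call used further down in the diff, and the sample values come from the template's own example.

file_format = "$Series $Issue ($Year)"
file_values = {'$Series': 'Animal Man', '$Issue': '0', '$Year': '2012'}

# simple token-for-value substitution, as replace_all does
nfilename = file_format
for token, value in file_values.items():
    nfilename = nfilename.replace(token, value)

print(nfilename)   # Animal Man 0 (2012)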
@ -309,7 +309,20 @@ class PostProcessor(object):
comlocation = comicnzb['ComicLocation']
self._log("Comic Location: " + comlocation, logger.DEBUG)
logger.fdebug("Comic Location: " + str(comlocation))

comversion = comicnzb['ComicVersion']
self._log("Comic Version: " + str(comversion), logger.DEBUG)
logger.fdebug("Comic Version: " + str(comversion))
if comversion is None:
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug("No version # found for series, removing from filename", logger.DEBUG)
logger.fdebug("new format is now: " + str(chunk_file_format), logger.DEBUG)
else:
chunk_file_format = mylar.FILE_FORMAT
#Run Pre-script

if mylar.ENABLE_PRE_SCRIPTS:
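A standalone sketch (not part of the commit) of the $VolumeN handling above: when the series has no version number, the token is stripped from the file format and the leftover double space is collapsed. The format string here is hypothetical.

import re

file_format = '$Series $VolumeN $Issue ($Year)'   # hypothetical FILE_FORMAT value
comversion = None                                 # series has no version number

if comversion is None:
    comversion = 'None'

if comversion == 'None':
    # remove the $VolumeN token, then collapse the doubled whitespace it leaves behind
    chunk_f_f = re.sub(r'\$VolumeN', '', file_format)
    chunk_file_format = re.sub(r'\s+', ' ', chunk_f_f)
else:
    chunk_file_format = file_format

print(chunk_file_format)   # $Series $Issue ($Year)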
@ -340,7 +353,8 @@ class PostProcessor(object):
'$series': series.lower(),
'$Publisher': publisher,
'$publisher': publisher.lower(),
'$Volume': seriesyear
'$VolumeY': 'V' + str(seriesyear),
'$VolumeN': comversion
}

for root, dirnames, filenames in os.walk(self.nzb_folder):

@ -362,7 +376,7 @@ class PostProcessor(object):
else:
nfilename = ofilename
else:
nfilename = helpers.replace_all(mylar.FILE_FORMAT, file_values)
nfilename = helpers.replace_all(chunk_file_format, file_values)
if mylar.REPLACE_SPACES:
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
@ -78,6 +78,7 @@ CURRENT_VERSION = None
LATEST_VERSION = None
COMMITS_BEHIND = None
USER_AGENT = None
SEARCH_DELAY = 1

CHECK_GITHUB = False
CHECK_GITHUB_ON_STARTUP = False

@ -193,6 +194,8 @@ COUNT_HAVES = 0

COMICSORT = None
ANNUALS_ON = 0
CV_ONLY = 0
CV_ONETIMER = 0

def CheckSection(sec):
""" Check if INI section exists, if not create it """

@ -253,8 +256,8 @@ def initialize():
NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, ANNUALS_ON
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, SEARCH_DELAY, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, ANNUALS_ON, CV_ONLY, CV_ONETIMER

if __INITIALIZED__:
return False

@ -324,6 +327,7 @@ def initialize():
ZERO_LEVEL_N = check_setting_str(CFG, 'General', 'zero_level_n', '')
LOWERCASE_FILENAMES = bool(check_setting_int(CFG, 'General', 'lowercase_filenames', 0))
SYNO_FIX = bool(check_setting_int(CFG, 'General', 'syno_fix', 0))
SEARCH_DELAY = check_setting_int(CFG, 'General', 'search_delay', 1)

PROWL_ENABLED = bool(check_setting_int(CFG, 'Prowl', 'prowl_enabled', 0))
PROWL_KEYS = check_setting_str(CFG, 'Prowl', 'prowl_keys', '')

@ -345,6 +349,13 @@ def initialize():
if not ANNUALS_ON:
#default to off
ANNUALS_ON = 0
CV_ONLY = bool(check_setting_int(CFG, 'General', 'cv_only', 0))
if not CV_ONLY:
#default to off
CV_ONLY = 0
CV_ONETIMER = bool(check_setting_int(CFG, 'General', 'cv_onetimer', 0))
if not CV_ONETIMER:
CV_ONETIMER = 0
LOG_LEVEL = check_setting_str(CFG, 'General', 'log_level', '')
ENABLE_EXTRA_SCRIPTS = bool(check_setting_int(CFG, 'General', 'enable_extra_scripts', 0))
EXTRA_SCRIPTS = check_setting_str(CFG, 'General', 'extra_scripts', '')
@ -606,7 +617,9 @@ def config_write():
new_config['General']['logverbose'] = int(LOGVERBOSE)
new_config['General']['git_path'] = GIT_PATH
new_config['General']['cache_dir'] = CACHE_DIR
new_config['General']['annuals_on'] = ANNUALS_ON
new_config['General']['annuals_on'] = int(ANNUALS_ON)
new_config['General']['cv_only'] = int(CV_ONLY)
new_config['General']['cv_onetimer'] = int(CV_ONETIMER)

new_config['General']['check_github'] = int(CHECK_GITHUB)
new_config['General']['check_github_on_startup'] = int(CHECK_GITHUB_ON_STARTUP)

@ -643,6 +656,7 @@ def config_write():
new_config['General']['zero_level_n'] = ZERO_LEVEL_N
new_config['General']['lowercase_filenames'] = int(LOWERCASE_FILENAMES)
new_config['General']['syno_fix'] = int(SYNO_FIX)
new_config['General']['search_delay'] = SEARCH_DELAY

new_config['General']['use_minsize'] = int(USE_MINSIZE)
new_config['General']['minsize'] = MINSIZE
@ -775,7 +789,7 @@ def dbcheck():
# c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT)')

c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT)')
conn.commit
c.close
#new
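A minimal, self-contained sketch (not part of the commit) of the new annuals table and the kind of row the importer later upserts into it with Status 'Skipped'. The database path and values are hypothetical; the IssueID follows the importer's AnnualIssue + AnnualDate convention.

import sqlite3

conn = sqlite3.connect('mylar.db')   # hypothetical path
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT)')
# a rough-draft annual entry, defaulting to Skipped as the importer does
c.execute('INSERT INTO annuals (IssueID, Issue_Number, IssueName, IssueDate, Status, ComicID) VALUES (?, ?, ?, ?, ?, ?)',
          ('12012-08-00', '1', 'Annual 2012', '2012-08-00', 'Skipped', '12345'))
conn.commit()
c.close()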
@ -886,6 +900,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE readlist ADD COLUMN ComicID TEXT')

try:
c.execute('SELECT DetailURL from comics')
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN DetailURL TEXT')

#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere
#prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the
127 mylar/cv.py
|
@ -5,10 +5,9 @@
|
|||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Mylar is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
# Mylar is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
|
||||
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
|
||||
# License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
@ -21,12 +20,26 @@ import logger
|
|||
import string
|
||||
import urllib
|
||||
import lib.feedparser
|
||||
import mylar
|
||||
from bs4 import BeautifulSoup as Soup
|
||||
|
||||
def getComic(comicid,type):
|
||||
def getComic(comicid,type,issueid=None):
|
||||
comicapi='583939a3df0a25fc4e8b7a29934a13078002dc27'
|
||||
#api
|
||||
PULLURL='http://api.comicvine.com/volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description'
|
||||
if type == 'comic':
|
||||
PULLURL='http://api.comicvine.com/volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue'
|
||||
elif type == 'issue':
|
||||
if mylar.CV_ONLY:
|
||||
cv_type = 'issues'
|
||||
searchset = 'filter=volume:' + str(comicid) + '&field_list=cover_date,description,id,image,issue_number,name,date_last_updated,store_date'
|
||||
else:
|
||||
cv_type = 'volume/' + str(comicid)
|
||||
searchset = 'name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description'
|
||||
PULLURL = 'http://api.comicvine.com/' + str(cv_type) + '/?api_key=' + str(comicapi) + '&format=xml&' + str(searchset)
|
||||
elif type == 'firstissue':
|
||||
#this is used ONLY for CV_ONLY
|
||||
PULLURL = 'http://api.comicvine.com/issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(issueid) + '&field_list=cover_date'
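An illustrative sketch (not part of the commit) of how the CV_ONLY branch above assembles its ComicVine request: it queries the /issues/ endpoint filtered by volume instead of the /volume/<id>/ endpoint. The API key and volume id are placeholders.

comicapi = 'YOUR_COMICVINE_API_KEY'   # placeholder; the commit hard-codes a key
comicid = 18436                       # hypothetical ComicVine volume id

cv_type = 'issues'
searchset = ('filter=volume:' + str(comicid) +
             '&field_list=cover_date,description,id,image,issue_number,name,date_last_updated,store_date')
PULLURL = ('http://api.comicvine.com/' + cv_type + '/?api_key=' + comicapi +
           '&format=xml&' + searchset)
print(PULLURL)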
|
||||
|
||||
|
||||
#import library to do http requests:
|
||||
import urllib2
|
||||
|
@ -61,6 +74,7 @@ def getComic(comicid,type):
|
|||
|
||||
if type == 'comic': return GetComicInfo(comicid,dom)
|
||||
if type == 'issue': return GetIssuesInfo(comicid,dom)
|
||||
if type == 'firstissue': return GetFirstIssue(issueid,dom)
|
||||
|
||||
def GetComicInfo(comicid,dom):
|
||||
|
||||
|
@ -87,13 +101,13 @@ def GetComicInfo(comicid,dom):
|
|||
cntit = int(cntit)
|
||||
#retrieve the first xml tag (<tag>data</tag>)
|
||||
#that the parser finds with name tagName:
|
||||
comic['ComicName'] = dom.getElementsByTagName('name')[trackcnt].firstChild.wholeText
|
||||
comic['ComicName'] = dom.getElementsByTagName('name')[trackcnt+1].firstChild.wholeText
|
||||
comic['ComicName'] = comic['ComicName'].rstrip()
|
||||
try:
|
||||
comic['ComicYear'] = dom.getElementsByTagName('start_year')[0].firstChild.wholeText
|
||||
except:
|
||||
comic['ComicYear'] = '0000'
|
||||
comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
|
||||
comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[trackcnt].firstChild.wholeText
|
||||
#the description field actually holds the Volume# - so let's grab it
|
||||
try:
|
||||
comic['ComicDescription'] = dom.getElementsByTagName('description')[0].firstChild.wholeText
|
||||
|
@ -116,8 +130,8 @@ def GetComicInfo(comicid,dom):
|
|||
break
|
||||
if volconv != '':
|
||||
vfind = volconv
|
||||
|
||||
comic['ComicVersion'] = re.sub("[^0-9]", "", vfind)
|
||||
vf = re.findall('[^<>]+', vfind)
|
||||
comic['ComicVersion'] = re.sub("[^0-9]", "", vf[0])
|
||||
logger.info("Volume information found! Adding to series record : volume " + comic['ComicVersion'])
|
||||
else:
|
||||
comic['ComicVersion'] = "noversion"
|
||||
|
@ -127,8 +141,13 @@ def GetComicInfo(comicid,dom):
|
|||
else:
|
||||
comic['ComicIssues'] = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
|
||||
comic['ComicImage'] = dom.getElementsByTagName('super_url')[0].firstChild.wholeText
|
||||
comic['ComicPublisher'] = dom.getElementsByTagName('name')[trackcnt+1].firstChild.wholeText
|
||||
comic['ComicPublisher'] = dom.getElementsByTagName('name')[trackcnt+2].firstChild.wholeText
|
||||
|
||||
# comic['LastIssue'] = dom.getElementsByTagName('last_issue')[].firstChild.wholeText
|
||||
comic['FirstIssueID'] = dom.getElementsByTagName('id')[0].firstChild.wholeText
|
||||
|
||||
print ("fistIss:" + str(comic['FirstIssueID']))
|
||||
# print ("lastIss:" + str(comic['LastIssue']))
|
||||
# comicchoice.append({
|
||||
# 'ComicName': comic['ComicName'],
|
||||
# 'ComicYear': comic['ComicYear'],
|
||||
|
@ -145,41 +164,71 @@ def GetComicInfo(comicid,dom):
|
|||
|
||||
def GetIssuesInfo(comicid,dom):
|
||||
subtracks = dom.getElementsByTagName('issue')
|
||||
cntiss = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
|
||||
logger.fdebug("issues I've counted: " + str(len(subtracks)))
|
||||
logger.fdebug("issues CV says it has: " + str(int(cntiss)))
|
||||
if not mylar.CV_ONLY:
|
||||
cntiss = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
|
||||
logger.fdebug("issues I've counted: " + str(len(subtracks)))
|
||||
logger.fdebug("issues CV says it has: " + str(int(cntiss)))
|
||||
|
||||
if int(len(subtracks)) != int(cntiss):
|
||||
logger.fdebug("CV's count is wrong, I counted different...going with my count for physicals" + str(len(subtracks)))
|
||||
cntiss = len(subtracks) # assume count of issues is wrong, go with ACTUAL physical api count
|
||||
cntiss = int(cntiss)
|
||||
n = cntiss-1
|
||||
|
||||
if int(len(subtracks)) != int(cntiss):
|
||||
logger.fdebug("CV's count is wrong, I counted different...going with my count for physicals" + str(len(subtracks)))
|
||||
cntiss = len(subtracks) # assume count of issues is wrong, go with ACTUAL physical api count
|
||||
cntiss = int(cntiss)
|
||||
n = cntiss-1
|
||||
else:
|
||||
n = int(len(subtracks))-1
|
||||
issue = {}
|
||||
issuechoice = []
|
||||
firstdate = '2099-00-00'
|
||||
for subtrack in subtracks:
|
||||
if (dom.getElementsByTagName('name')[n].firstChild) is not None:
|
||||
issue['Issue_Name'] = dom.getElementsByTagName('name')[n].firstChild.wholeText
|
||||
else:
|
||||
issue['Issue_Name'] = 'None'
|
||||
issue['Issue_ID'] = dom.getElementsByTagName('id')[n].firstChild.wholeText
|
||||
try:
|
||||
if not mylar.CV_ONLY:
|
||||
if (dom.getElementsByTagName('name')[n].firstChild) is not None:
|
||||
issue['Issue_Name'] = dom.getElementsByTagName('name')[n].firstChild.wholeText
|
||||
else:
|
||||
issue['Issue_Name'] = 'None'
|
||||
|
||||
issue['Issue_ID'] = dom.getElementsByTagName('id')[n].firstChild.wholeText
|
||||
issue['Issue_Number'] = dom.getElementsByTagName('issue_number')[n].firstChild.wholeText
|
||||
|
||||
|
||||
issuechoice.append({
|
||||
'Issue_ID': issue['Issue_ID'],
|
||||
'Issue_Number': issue['Issue_Number'],
|
||||
'Issue_Name': issue['Issue_Name']
|
||||
})
|
||||
|
||||
issue['issuechoice'] = issuechoice
|
||||
except:
|
||||
#logger.fdebug("publisher...ignoring this.")
|
||||
#logger.fdebug("n value: " + str(n) + " ...subtracks: " + str(len(subtracks)))
|
||||
# in order to get ALL the issues, we need to increment the count back by 1 so it grabs the
|
||||
# last issue
|
||||
pass
|
||||
'Issue_ID': issue['Issue_ID'],
|
||||
'Issue_Number': issue['Issue_Number'],
|
||||
'Issue_Name': issue['Issue_Name']
|
||||
})
|
||||
else:
|
||||
try:
|
||||
issue['Issue_Name'] = subtrack.getElementsByTagName('name')[0].firstChild.wholeText
|
||||
except:
|
||||
issue['Issue_Name'] = 'None'
|
||||
issue['Issue_ID'] = subtrack.getElementsByTagName('id')[0].firstChild.wholeText
|
||||
try:
|
||||
issue['CoverDate'] = subtrack.getElementsByTagName('cover_date')[0].firstChild.wholeText
|
||||
except:
|
||||
issue['CoverDate'] = '0000-00-00'
|
||||
issue['Issue_Number'] = subtrack.getElementsByTagName('issue_number')[0].firstChild.wholeText
|
||||
issuechoice.append({
|
||||
'Issue_ID': issue['Issue_ID'],
|
||||
'Issue_Number': issue['Issue_Number'],
|
||||
'Issue_Date': issue['CoverDate'],
|
||||
'Issue_Name': issue['Issue_Name']
|
||||
})
|
||||
if issue['CoverDate'] < firstdate and issue['CoverDate'] != '0000-00-00':
|
||||
firstdate = issue['CoverDate']
|
||||
n-=1
|
||||
|
||||
issue['issuechoice'] = issuechoice
|
||||
issue['firstdate'] = firstdate
|
||||
return issue
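A small self-contained sketch (not part of the commit) of the per-issue parsing the CV_ONLY branch of GetIssuesInfo performs with minidom: each <issue> node supplies its own name, id, cover_date and issue_number, and the earliest cover date is tracked as firstdate. The XML here is a hypothetical two-issue response.

from xml.dom.minidom import parseString

xml = """<results>
  <issue><id>1001</id><name>Annual 2012</name><cover_date>2012-08-01</cover_date><issue_number>1</issue_number></issue>
  <issue><id>1002</id><name></name><cover_date>2013-01-01</cover_date><issue_number>2</issue_number></issue>
</results>"""

dom = parseString(xml)
issuechoice = []
firstdate = '2099-00-00'
for subtrack in dom.getElementsByTagName('issue'):
    try:
        name = subtrack.getElementsByTagName('name')[0].firstChild.wholeText
    except Exception:
        name = 'None'   # empty <name> nodes fall back to 'None'
    issueid = subtrack.getElementsByTagName('id')[0].firstChild.wholeText
    try:
        coverdate = subtrack.getElementsByTagName('cover_date')[0].firstChild.wholeText
    except Exception:
        coverdate = '0000-00-00'
    number = subtrack.getElementsByTagName('issue_number')[0].firstChild.wholeText
    issuechoice.append({'Issue_ID': issueid, 'Issue_Number': number,
                        'Issue_Date': coverdate, 'Issue_Name': name})
    if coverdate != '0000-00-00' and coverdate < firstdate:
        firstdate = coverdate

print(firstdate)        # 2012-08-01
print(issuechoice[1])   # second issue, with Issue_Name 'None'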
|
||||
|
||||
def GetFirstIssue(issueid,dom):
|
||||
#if the Series Year doesn't exist, get the first issue and take the date from that
|
||||
try:
|
||||
first_year = dom.getElementsByTagName('cover_date')[0].firstChild.wholeText
|
||||
except:
|
||||
first_year = '0000'
|
||||
return first_year
|
||||
|
||||
the_year = first_year[:4]
|
||||
the_month = first_year[5:7]
|
||||
the_date = the_year + '-' + the_month
|
||||
|
||||
return the_year
|
||||
|
|
|
@ -42,19 +42,29 @@ def listFiles(dir,watchcomic,AlternateSearch=None):
|
|||
#print item
|
||||
#subname = os.path.join(basedir, item)
|
||||
subname = item
|
||||
#print subname
|
||||
#versioning - remove it
|
||||
subsplit = subname.split()
|
||||
volrem = None
|
||||
for subit in subsplit:
|
||||
#print ("subit:" + str(subit))
|
||||
if 'v' in str(subit).lower():
|
||||
#print ("possible versioning detected.")
|
||||
if subit[1:].isdigit():
|
||||
#if in format v1, v2009 etc...
|
||||
#print (subit + " - assuming versioning. Removing from initial search pattern.")
|
||||
subname = re.sub(str(subit), '', subname)
|
||||
volrem = subit
|
||||
if subit.lower()[:3] == 'vol':
|
||||
#if in format vol.2013 etc
|
||||
#because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
|
||||
#print ("volume detected as version #:" + str(subit))
|
||||
subname = re.sub(subit, '', subname)
|
||||
volrem = subit
|
||||
|
||||
subname = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]',' ', str(subname))
|
||||
modwatchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', u_watchcomic)
|
||||
modwatchcomic = re.sub('\s+', ' ', str(modwatchcomic)).strip()
|
||||
#versioning - remove it
|
||||
subsplit = subname.split()
|
||||
for subit in subsplit:
|
||||
if 'v' in str(subit):
|
||||
#print ("possible versioning detected.")
|
||||
if subit[1:].isdigit():
|
||||
#print (subit + " - assuming versioning. Removing from initial search pattern.")
|
||||
subname = re.sub(str(subit), '', subname)
|
||||
|
||||
|
||||
subname = re.sub('\s+', ' ', str(subname)).strip()
|
||||
if AlternateSearch is not None:
|
||||
#same = encode.
|
||||
|
@ -77,10 +87,18 @@ def listFiles(dir,watchcomic,AlternateSearch=None):
|
|||
#print ("Comicsize:" + str(comicsize))
|
||||
comiccnt+=1
|
||||
if modwatchcomic.lower() in subname.lower():
|
||||
jtd_len = len(modwatchcomic)
|
||||
#remove versioning here
|
||||
if volrem != None:
|
||||
jtd_len = len(modwatchcomic) + len(volrem) + 1 #1 is to account for space btwn comic and vol #
|
||||
else:
|
||||
jtd_len = len(modwatchcomic)
|
||||
justthedigits = item[jtd_len:]
|
||||
elif altsearchcomic.lower() in subname.lower():
|
||||
jtd_len = len(altsearchcomic)
|
||||
#remove versioning here
|
||||
if volrem != None:
|
||||
jtd_len = len(altsearchcomic) + len(volrem) + 1
|
||||
else:
|
||||
jtd_len = len(altsearchcomic)
|
||||
justthedigits = item[jtd_len:]
|
||||
comiclist.append({
|
||||
'ComicFilename': item,
|
||||
|
|
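A compact sketch (not part of the commit) of the versioning removal added to filechecker above: a token such as 'V2013' or 'Vol.2013' is remembered as volrem, stripped from the comparison name, and later added back into the character count so the issue digits are sliced from the right spot. The filename is hypothetical.

import re

item = "Animal Man V2013 001 (2013).cbz"   # hypothetical filename
watchcomic = "Animal Man"

subname = item
volrem = None
for subit in subname.split():
    if subit.lower().startswith('v') and subit[1:].isdigit():
        # v1 / V2013 style versioning: remember it and strip it from the search name
        volrem = subit
        subname = re.sub(subit, '', subname)
    elif subit.lower()[:3] == 'vol':
        # Vol.2013 style versioning
        volrem = subit
        subname = re.sub(subit, '', subname)
subname = re.sub(r'\s+', ' ', subname).strip()

# account for the comic name plus the stripped version token (plus the space between them)
jtd_len = len(watchcomic) + (len(volrem) + 1 if volrem is not None else 0)
justthedigits = item[jtd_len:]
print(volrem, '|', justthedigits)   # V2013 |  001 (2013).cbz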
137 mylar/helpers.py
|
@ -224,26 +224,26 @@ def decimal_issue(iss):
|
|||
def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None):
|
||||
from mylar import db, logger
|
||||
myDB = db.DBConnection()
|
||||
print ("comicid: " + str(comicid))
|
||||
print ("issue#: " + str(issue))
|
||||
#print ("comicid: " + str(comicid))
|
||||
#print ("issue#: " + str(issue))
|
||||
# the issue here is a non-decimalized version, we need to see if it's got a decimal and if not, add '.00'
|
||||
iss_find = issue.find('.')
|
||||
if iss_find < 0:
|
||||
# no decimal in issue number
|
||||
iss = str(int(issue)) + ".00"
|
||||
else:
|
||||
iss_b4dec = issue[:iss_find]
|
||||
iss_decval = issue[iss_find+1:]
|
||||
if len(str(int(iss_decval))) == 1:
|
||||
iss = str(int(iss_b4dec)) + "." + str(int(iss_decval)*10)
|
||||
else:
|
||||
if issue.endswith(".00"):
|
||||
iss = issue
|
||||
else:
|
||||
iss = str(int(iss_b4dec)) + "." + iss_decval
|
||||
issue = iss
|
||||
# iss_find = issue.find('.')
|
||||
# if iss_find < 0:
|
||||
# # no decimal in issue number
|
||||
# iss = str(int(issue)) + ".00"
|
||||
# else:
|
||||
# iss_b4dec = issue[:iss_find]
|
||||
# iss_decval = issue[iss_find+1:]
|
||||
# if len(str(int(iss_decval))) == 1:
|
||||
# iss = str(int(iss_b4dec)) + "." + str(int(iss_decval)*10)
|
||||
# else:
|
||||
# if issue.endswith(".00"):
|
||||
# iss = issue
|
||||
# else:
|
||||
# iss = str(int(iss_b4dec)) + "." + iss_decval
|
||||
# issue = iss
|
||||
|
||||
print ("converted issue#: " + str(issue))
|
||||
# print ("converted issue#: " + str(issue))
|
||||
if issueid is None:
|
||||
chkissue = myDB.action("SELECT * from issues WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
|
||||
if chkissue is None:
|
||||
|
@ -257,25 +257,31 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
|
|||
#comicid = issuenzb['ComicID']
|
||||
issuenum = issuenzb['Issue_Number']
|
||||
#issueno = str(issuenum).split('.')[0]
|
||||
|
||||
iss_find = issuenum.find('.')
|
||||
iss_b4dec = issuenum[:iss_find]
|
||||
iss_decval = issuenum[iss_find+1:]
|
||||
if int(iss_decval) == 0:
|
||||
iss = iss_b4dec
|
||||
issdec = int(iss_decval)
|
||||
issueno = str(iss)
|
||||
logger.fdebug("Issue Number: " + str(issueno))
|
||||
else:
|
||||
if len(iss_decval) == 1:
|
||||
iss = iss_b4dec + "." + iss_decval
|
||||
issdec = int(iss_decval) * 10
|
||||
issue_except = 'None'
|
||||
if 'au' in issuenum.lower():
|
||||
issuenum = re.sub("[^0-9]", "", issuenum)
|
||||
issue_except = ' AU'
|
||||
if '.' in issuenum:
|
||||
iss_find = issuenum.find('.')
|
||||
iss_b4dec = issuenum[:iss_find]
|
||||
iss_decval = issuenum[iss_find+1:]
|
||||
if int(iss_decval) == 0:
|
||||
iss = iss_b4dec
|
||||
issdec = int(iss_decval)
|
||||
issueno = str(iss)
|
||||
logger.fdebug("Issue Number: " + str(issueno))
|
||||
else:
|
||||
iss = iss_b4dec + "." + iss_decval.rstrip('0')
|
||||
issdec = int(iss_decval.rstrip('0')) * 10
|
||||
issueno = iss_b4dec
|
||||
logger.fdebug("Issue Number: " + str(iss))
|
||||
|
||||
if len(iss_decval) == 1:
|
||||
iss = iss_b4dec + "." + iss_decval
|
||||
issdec = int(iss_decval) * 10
|
||||
else:
|
||||
iss = iss_b4dec + "." + iss_decval.rstrip('0')
|
||||
issdec = int(iss_decval.rstrip('0')) * 10
|
||||
issueno = iss_b4dec
|
||||
logger.fdebug("Issue Number: " + str(iss))
|
||||
else:
|
||||
iss = issuenum
|
||||
issueno = str(iss)
|
||||
# issue zero-suppression here
|
||||
if mylar.ZERO_LEVEL == "0":
|
||||
zeroadd = ""
|
||||
|
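A self-contained sketch (not part of the commit) of the new 'AU' handling in rename_param above: an issue number such as '5AU' is reduced to its digits, the ' AU' suffix is remembered as issue_except, and it is re-attached after the number is zero-padded. Padding is simplified to zfill here; the real code honours the zero-level settings.

import re

issuenum = '5AU'        # hypothetical issue number as stored in the DB
issue_except = 'None'

if 'au' in issuenum.lower():
    issue_except = ' AU'
    issuenum = re.sub("[^0-9]", "", issuenum)   # '5AU' -> '5'

prettycomiss = issuenum.zfill(3)                # '005'
if issue_except != 'None':
    prettycomiss = prettycomiss + issue_except

print(prettycomiss)     # 005 AU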
@ -289,11 +295,16 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
|
|||
if str(len(issueno)) > 1:
|
||||
if int(issueno) < 10:
|
||||
logger.fdebug("issue detected less than 10")
|
||||
if int(iss_decval) > 0:
|
||||
issueno = str(iss)
|
||||
prettycomiss = str(zeroadd) + str(iss)
|
||||
if '.' in iss:
|
||||
if int(iss_decval) > 0:
|
||||
issueno = str(iss)
|
||||
prettycomiss = str(zeroadd) + str(iss)
|
||||
else:
|
||||
prettycomiss = str(zeroadd) + str(int(issueno))
|
||||
else:
|
||||
prettycomiss = str(zeroadd) + str(int(issueno))
|
||||
prettycomiss = str(zeroadd) + str(iss)
|
||||
if issue_except != 'None':
|
||||
prettycomiss = str(prettycomiss) + issue_except
|
||||
logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
|
||||
elif int(issueno) >= 10 and int(issueno) < 100:
|
||||
logger.fdebug("issue detected greater than 10, but less than 100")
|
||||
|
@ -301,17 +312,25 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
|
|||
zeroadd = ""
|
||||
else:
|
||||
zeroadd = "0"
|
||||
if int(iss_decval) > 0:
|
||||
issueno = str(iss)
|
||||
prettycomiss = str(zeroadd) + str(iss)
|
||||
if '.' in iss:
|
||||
if int(iss_decval) > 0:
|
||||
issueno = str(iss)
|
||||
prettycomiss = str(zeroadd) + str(iss)
|
||||
else:
|
||||
prettycomiss = str(zeroadd) + str(int(issueno))
|
||||
else:
|
||||
prettycomiss = str(zeroadd) + str(int(issueno))
|
||||
prettycomiss = str(zeroadd) + str(iss)
|
||||
if issue_except != 'None':
|
||||
prettycomiss = str(prettycomiss) + issue_except
|
||||
logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss))
|
||||
else:
|
||||
logger.fdebug("issue detected greater than 100")
|
||||
if int(iss_decval) > 0:
|
||||
issueno = str(iss)
|
||||
if '.' in iss:
|
||||
if int(iss_decval) > 0:
|
||||
issueno = str(iss)
|
||||
prettycomiss = str(issueno)
|
||||
if issue_except != 'None':
|
||||
prettycomiss = str(prettycomiss) + issue_except
|
||||
logger.fdebug("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
|
||||
else:
|
||||
prettycomiss = str(issueno)
|
||||
|
@ -329,6 +348,18 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
|
|||
logger.fdebug("Year: " + str(seriesyear))
|
||||
comlocation = comicnzb['ComicLocation']
|
||||
logger.fdebug("Comic Location: " + str(comlocation))
|
||||
comversion = comicnzb['ComicVersion']
|
||||
if comversion is None:
|
||||
comversion = 'None'
|
||||
#if comversion is None, remove it so it doesn't populate with 'None'
|
||||
if comversion == 'None':
|
||||
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
|
||||
chunk_f = re.compile(r'\s+')
|
||||
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
|
||||
logger.fdebug("No version # found for series, removing from filename")
|
||||
print ("new format: " + str(chunk_file_format))
|
||||
else:
|
||||
chunk_file_format = mylar.FILE_FORMAT
|
||||
|
||||
file_values = {'$Series': series,
|
||||
'$Issue': prettycomiss,
|
||||
|
@ -336,7 +367,8 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
|
|||
'$series': series.lower(),
|
||||
'$Publisher': publisher,
|
||||
'$publisher': publisher.lower(),
|
||||
'$Volume': seriesyear
|
||||
'$VolumeY': 'V' + str(seriesyear),
|
||||
'$VolumeN': comversion
|
||||
}
|
||||
|
||||
extensions = ('.cbr', '.cbz')
|
||||
|
@ -352,7 +384,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
|
|||
else:
|
||||
nfilename = ofilename
|
||||
else:
|
||||
nfilename = replace_all(mylar.FILE_FORMAT, file_values)
|
||||
nfilename = replace_all(chunk_file_format, file_values)
|
||||
if mylar.REPLACE_SPACES:
|
||||
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
|
||||
nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
|
||||
|
@ -458,3 +490,12 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
|
|||
mylar.COMICSORT['LastOrderID'] = imported
|
||||
return
|
||||
|
||||
def fullmonth(monthno):
|
||||
#simple numerical to worded month conversion....
|
||||
basmonths = {'1':'January','2':'February','3':'March','4':'April','5':'May','6':'June','7':'July','8':'August','9':'September','10':'October','11':'November','12':'December'}
|
||||
|
||||
for numbs in basmonths:
|
||||
if numbs in str(int(monthno)):
|
||||
monthconv = basmonths[numbs]
|
||||
|
||||
return monthconv
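The fullmonth helper above matches by substring while looping over the dict, which can mis-resolve two-digit months; a direct dictionary lookup is the simpler pattern. A minimal alternative sketch (not part of the commit):

def fullmonth(monthno):
    # numerical month to worded month, keyed on the zero-stripped number
    basmonths = {'1': 'January', '2': 'February', '3': 'March', '4': 'April',
                 '5': 'May', '6': 'June', '7': 'July', '8': 'August',
                 '9': 'September', '10': 'October', '11': 'November', '12': 'December'}
    return basmonths.get(str(int(monthno)), '?')

print(fullmonth('08'))   # August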
|
||||
|
|
|
@ -91,37 +91,49 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
#--Now that we know ComicName, let's try some scraping
|
||||
#--Start
|
||||
# gcd will return issue details (most importantly publishing date)
|
||||
if mismatch == "no" or mismatch is None:
|
||||
gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
|
||||
#print ("gcdinfo: " + str(gcdinfo))
|
||||
mismatch_com = "no"
|
||||
if gcdinfo == "No Match":
|
||||
updater.no_searchresults(comicid)
|
||||
nomatch = "true"
|
||||
logger.info(u"There was an error when trying to add " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
|
||||
return nomatch
|
||||
else:
|
||||
mismatch_com = "yes"
|
||||
#print ("gcdinfo:" + str(gcdinfo))
|
||||
if not mylar.CV_ONLY:
|
||||
if mismatch == "no" or mismatch is None:
|
||||
gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
|
||||
#print ("gcdinfo: " + str(gcdinfo))
|
||||
mismatch_com = "no"
|
||||
if gcdinfo == "No Match":
|
||||
updater.no_searchresults(comicid)
|
||||
nomatch = "true"
|
||||
logger.info(u"There was an error when trying to add " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
|
||||
return nomatch
|
||||
else:
|
||||
mismatch_com = "yes"
|
||||
#print ("gcdinfo:" + str(gcdinfo))
|
||||
|
||||
elif mismatch == "yes":
|
||||
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
|
||||
if CV_EXcomicid['variloop'] is None: pass
|
||||
else:
|
||||
vari_loop = CV_EXcomicid['variloop']
|
||||
NewComicID = CV_EXcomicid['NewComicID']
|
||||
gcomicid = CV_EXcomicid['GComicID']
|
||||
resultURL = "/series/" + str(NewComicID) + "/"
|
||||
#print ("variloop" + str(CV_EXcomicid['variloop']))
|
||||
#if vari_loop == '99':
|
||||
gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None)
|
||||
elif mismatch == "yes":
|
||||
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
|
||||
if CV_EXcomicid['variloop'] is None: pass
|
||||
else:
|
||||
vari_loop = CV_EXcomicid['variloop']
|
||||
NewComicID = CV_EXcomicid['NewComicID']
|
||||
gcomicid = CV_EXcomicid['GComicID']
|
||||
resultURL = "/series/" + str(NewComicID) + "/"
|
||||
#print ("variloop" + str(CV_EXcomicid['variloop']))
|
||||
#if vari_loop == '99':
|
||||
gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None)
|
||||
|
||||
logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] )
|
||||
# print ("Series Published" + parseit.resultPublished)
|
||||
|
||||
CV_NoYearGiven = "no"
|
||||
#if the SeriesYear returned by CV is blank or none (0000), let's use the gcd one.
|
||||
if comic['ComicYear'] is None or comic['ComicYear'] == '0000':
|
||||
SeriesYear = gcdinfo['SeriesYear']
|
||||
if mylar.CV_ONLY:
|
||||
#we'll defer this until later when we grab all the issues and then figure it out
|
||||
logger.info("Uh-oh. I can't find a Series Year for this series. I'm going to try analyzing deeper.")
|
||||
SeriesYear = cv.getComic(comicid,'firstissue',comic['FirstIssueID'])
|
||||
if SeriesYear == '0000':
|
||||
logger.info("Ok - I couldn't find a Series Year at all. Loading in the issue data now and will figure out the Series Year.")
|
||||
CV_NoYearGiven = "yes"
|
||||
issued = cv.getComic(comicid,'issue')
|
||||
SeriesYear = issued['firstdate'][:4]
|
||||
else:
|
||||
SeriesYear = gcdinfo['SeriesYear']
|
||||
else:
|
||||
SeriesYear = comic['ComicYear']
|
||||
|
||||
|
@ -138,8 +150,9 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
newCtrl = {"IssueID": str(annualval['AnnualIssue'] + annualval['AnnualDate'])}
|
||||
newVals = {"Issue_Number": annualval['AnnualIssue'],
|
||||
"IssueDate": annualval['AnnualDate'],
|
||||
"IssueName": annualval['AnnualTitle'],
|
||||
"ComicID": comicid,
|
||||
"Status": "skipped"}
|
||||
"Status": "Skipped"}
|
||||
myDB.upsert("annuals", newVals, newCtrl)
|
||||
nb+=1
|
||||
|
||||
|
@ -208,10 +221,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
#try to account for CV not updating new issues as fast as GCD
|
||||
#seems CV doesn't update total counts
|
||||
#comicIssues = gcdinfo['totalissues']
|
||||
if gcdinfo['gcdvariation'] == "cv":
|
||||
comicIssues = str(int(comic['ComicIssues']) + 1)
|
||||
else:
|
||||
comicIssues = comic['ComicIssues']
|
||||
comicIssues = comic['ComicIssues']
|
||||
|
||||
if not mylar.CV_ONLY:
|
||||
if gcdinfo['gcdvariation'] == "cv":
|
||||
comicIssues = str(int(comic['ComicIssues']) + 1)
|
||||
|
||||
#let's download the image...
|
||||
if os.path.exists(mylar.CACHE_DIR):pass
|
||||
|
@ -258,7 +272,9 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
"ComicVersion": comicVol,
|
||||
"ComicLocation": comlocation,
|
||||
"ComicPublisher": comic['ComicPublisher'],
|
||||
"ComicPublished": gcdinfo['resultPublished'],
|
||||
"DetailURL": comic['ComicURL'],
|
||||
# "ComicPublished": gcdinfo['resultPublished'],
|
||||
"ComicPublished": 'Unknown',
|
||||
"DateAdded": helpers.today(),
|
||||
"Status": "Loading"}
|
||||
|
||||
|
@ -269,7 +285,9 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
if pullupd is None:
|
||||
helpers.ComicSort(sequence='update')
|
||||
|
||||
issued = cv.getComic(comicid,'issue')
|
||||
if CV_NoYearGiven == 'no':
|
||||
#if set to 'no' then we haven't pulled down the issues, otherwise we did it already
|
||||
issued = cv.getComic(comicid,'issue')
|
||||
logger.info(u"Sucessfully retrieved issue details for " + comic['ComicName'] )
|
||||
n = 0
|
||||
iscnt = int(comicIssues)
|
||||
|
@ -281,171 +299,273 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
|
||||
latestiss = "0"
|
||||
latestdate = "0000-00-00"
|
||||
firstiss = "10000000"
|
||||
firstdate = "2099-00-00"
|
||||
#print ("total issues:" + str(iscnt))
|
||||
#---removed NEW code here---
|
||||
logger.info(u"Now adding/updating issues for " + comic['ComicName'])
|
||||
|
||||
# file check to see if issue exists
|
||||
logger.info(u"Checking directory for existing issues.")
|
||||
#fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName'])
|
||||
#havefiles = 0
|
||||
|
||||
#fccnt = int(fc['comiccount'])
|
||||
#logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
|
||||
#fcnew = []
|
||||
if iscnt > 0: #if a series is brand new, it wont have any issues/details yet so skip this part
|
||||
while (n <= iscnt):
|
||||
#---NEW.code
|
||||
try:
|
||||
firstval = issued['issuechoice'][n]
|
||||
except IndexError:
|
||||
break
|
||||
cleanname = helpers.cleanName(firstval['Issue_Name'])
|
||||
issid = str(firstval['Issue_ID'])
|
||||
issnum = str(firstval['Issue_Number'])
|
||||
#print ("issnum: " + str(issnum))
|
||||
issname = cleanname
|
||||
if '.' in str(issnum):
|
||||
issn_st = str(issnum).find('.')
|
||||
issn_b4dec = str(issnum)[:issn_st]
|
||||
#if the length of decimal is only 1 digit, assume it's a tenth
|
||||
dec_is = str(issnum)[issn_st + 1:]
|
||||
if len(dec_is) == 1:
|
||||
dec_nisval = int(dec_is) * 10
|
||||
iss_naftdec = str(dec_nisval)
|
||||
if len(dec_is) == 2:
|
||||
dec_nisval = int(dec_is)
|
||||
iss_naftdec = str(dec_nisval)
|
||||
iss_issue = issn_b4dec + "." + iss_naftdec
|
||||
issis = (int(issn_b4dec) * 1000) + dec_nisval
|
||||
elif 'au' in issnum.lower():
|
||||
print ("au detected")
|
||||
stau = issnum.lower().find('au')
|
||||
issnum_au = issnum[:stau]
|
||||
print ("issnum_au: " + str(issnum_au))
|
||||
#account for Age of Ultron mucked up numbering
|
||||
issis = str(int(issnum_au) * 1000) + 'AU'
|
||||
else: issis = int(issnum) * 1000
|
||||
|
||||
bb = 0
|
||||
while (bb <= iscnt):
|
||||
try:
|
||||
gcdval = gcdinfo['gcdchoice'][bb]
|
||||
#print ("gcdval: " + str(gcdval))
|
||||
if not mylar.CV_ONLY:
|
||||
#fccnt = int(fc['comiccount'])
|
||||
#logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
|
||||
#fcnew = []
|
||||
if iscnt > 0: #if a series is brand new, it wont have any issues/details yet so skip this part
|
||||
while (n <= iscnt):
|
||||
#---NEW.code
|
||||
try:
|
||||
firstval = issued['issuechoice'][n]
|
||||
except IndexError:
|
||||
#account for gcd variation here
|
||||
if gcdinfo['gcdvariation'] == 'gcd':
|
||||
#logger.fdebug("gcd-variation accounted for.")
|
||||
issdate = '0000-00-00'
|
||||
int_issnum = int ( issis / 1000 )
|
||||
break
|
||||
if 'nn' in str(gcdval['GCDIssue']):
|
||||
#no number detected - GN, TP or the like
|
||||
logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
|
||||
updater.no_searchresults(comicid)
|
||||
return
|
||||
elif 'au' in gcdval['GCDIssue'].lower():
|
||||
#account for Age of Ultron mucked up numbering - this is in format of 5AU.00
|
||||
gstau = gcdval['GCDIssue'].lower().find('au')
|
||||
gcdis_au = gcdval['GCDIssue'][:gstau]
|
||||
gcdis = str(int(gcdis_au) * 1000) + 'AU'
|
||||
elif '.' in str(gcdval['GCDIssue']):
|
||||
#logger.fdebug("g-issue:" + str(gcdval['GCDIssue']))
|
||||
issst = str(gcdval['GCDIssue']).find('.')
|
||||
#logger.fdebug("issst:" + str(issst))
|
||||
issb4dec = str(gcdval['GCDIssue'])[:issst]
|
||||
#logger.fdebug("issb4dec:" + str(issb4dec))
|
||||
cleanname = helpers.cleanName(firstval['Issue_Name'])
|
||||
issid = str(firstval['Issue_ID'])
|
||||
issnum = str(firstval['Issue_Number'])
|
||||
#print ("issnum: " + str(issnum))
|
||||
issname = cleanname
|
||||
if '.' in str(issnum):
|
||||
issn_st = str(issnum).find('.')
|
||||
issn_b4dec = str(issnum)[:issn_st]
|
||||
#if the length of decimal is only 1 digit, assume it's a tenth
|
||||
decis = str(gcdval['GCDIssue'])[issst+1:]
|
||||
#logger.fdebug("decis:" + str(decis))
|
||||
if len(decis) == 1:
|
||||
decisval = int(decis) * 10
|
||||
issaftdec = str(decisval)
|
||||
if len(decis) == 2:
|
||||
decisval = int(decis)
|
||||
issaftdec = str(decisval)
|
||||
gcd_issue = issb4dec + "." + issaftdec
|
||||
#logger.fdebug("gcd_issue:" + str(gcd_issue))
|
||||
try:
|
||||
gcdis = (int(issb4dec) * 1000) + decisval
|
||||
except ValueError:
|
||||
logger.error("This has no issue #'s for me to get - Either a Graphic Novel or one-shot. This feature to allow these will be added in the near future.")
|
||||
dec_is = str(issnum)[issn_st + 1:]
|
||||
if len(dec_is) == 1:
|
||||
dec_nisval = int(dec_is) * 10
|
||||
iss_naftdec = str(dec_nisval)
|
||||
if len(dec_is) == 2:
|
||||
dec_nisval = int(dec_is)
|
||||
iss_naftdec = str(dec_nisval)
|
||||
iss_issue = issn_b4dec + "." + iss_naftdec
|
||||
issis = (int(issn_b4dec) * 1000) + dec_nisval
|
||||
elif 'au' in issnum.lower():
|
||||
print ("au detected")
|
||||
stau = issnum.lower().find('au')
|
||||
issnum_au = issnum[:stau]
|
||||
print ("issnum_au: " + str(issnum_au))
|
||||
#account for Age of Ultron mucked up numbering
|
||||
issis = str(int(issnum_au) * 1000) + 'AU'
|
||||
else: issis = int(issnum) * 1000
|
||||
|
||||
bb = 0
|
||||
while (bb <= iscnt):
|
||||
try:
|
||||
gcdval = gcdinfo['gcdchoice'][bb]
|
||||
#print ("gcdval: " + str(gcdval))
|
||||
except IndexError:
|
||||
#account for gcd variation here
|
||||
if gcdinfo['gcdvariation'] == 'gcd':
|
||||
#logger.fdebug("gcd-variation accounted for.")
|
||||
issdate = '0000-00-00'
|
||||
int_issnum = int ( issis / 1000 )
|
||||
break
|
||||
if 'nn' in str(gcdval['GCDIssue']):
|
||||
#no number detected - GN, TP or the like
|
||||
logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
|
||||
updater.no_searchresults(comicid)
|
||||
return
|
||||
elif 'au' in gcdval['GCDIssue'].lower():
|
||||
#account for Age of Ultron mucked up numbering
|
||||
gstau = gcdval['GCDIssue'].lower().find('au')
|
||||
gcdis_au = gcdval['GCDIssue'][:gstau]
|
||||
gcdis = str(int(gcdis_au) * 1000) + 'AU'
|
||||
print ("gcdis : " + str(gcdis))
|
||||
else:
|
||||
gcdis = int(str(gcdval['GCDIssue'])) * 1000
|
||||
if gcdis == issis:
|
||||
issdate = str(gcdval['GCDDate'])
|
||||
if str(issis).isdigit():
|
||||
int_issnum = int( gcdis / 1000 )
|
||||
else:
|
||||
if 'au' in issis.lower():
|
||||
int_issnum = str(int(gcdis[:-2]) / 1000) + 'AU'
|
||||
else:
|
||||
logger.error("this has an alpha-numeric in the issue # which I cannot account for. Get on github and log the issue for evilhero.")
|
||||
elif 'au' in gcdval['GCDIssue'].lower():
|
||||
#account for Age of Ultron mucked up numbering - this is in format of 5AU.00
|
||||
gstau = gcdval['GCDIssue'].lower().find('au')
|
||||
gcdis_au = gcdval['GCDIssue'][:gstau]
|
||||
gcdis = str(int(gcdis_au) * 1000) + 'AU'
|
||||
elif '.' in str(gcdval['GCDIssue']):
|
||||
#logger.fdebug("g-issue:" + str(gcdval['GCDIssue']))
|
||||
issst = str(gcdval['GCDIssue']).find('.')
|
||||
#logger.fdebug("issst:" + str(issst))
|
||||
issb4dec = str(gcdval['GCDIssue'])[:issst]
|
||||
#logger.fdebug("issb4dec:" + str(issb4dec))
|
||||
#if the length of decimal is only 1 digit, assume it's a tenth
|
||||
decis = str(gcdval['GCDIssue'])[issst+1:]
|
||||
#logger.fdebug("decis:" + str(decis))
|
||||
if len(decis) == 1:
|
||||
decisval = int(decis) * 10
|
||||
issaftdec = str(decisval)
|
||||
if len(decis) == 2:
|
||||
decisval = int(decis)
|
||||
issaftdec = str(decisval)
|
||||
gcd_issue = issb4dec + "." + issaftdec
|
||||
#logger.fdebug("gcd_issue:" + str(gcd_issue))
|
||||
try:
|
||||
gcdis = (int(issb4dec) * 1000) + decisval
|
||||
except ValueError:
|
||||
logger.error("This has no issue #'s for me to get - Either a Graphic Novel or one-shot. This feature to allow these will be added in the near future.")
|
||||
updater.no_searchresults(comicid)
|
||||
return
|
||||
#get the latest issue / date using the date.
|
||||
if gcdval['GCDDate'] > latestdate:
|
||||
latestiss = str(issnum)
|
||||
latestdate = str(gcdval['GCDDate'])
|
||||
break
|
||||
#bb = iscnt
|
||||
bb+=1
|
||||
#print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
|
||||
#---END.NEW.
|
||||
else:
|
||||
gcdis = int(str(gcdval['GCDIssue'])) * 1000
|
||||
if gcdis == issis:
|
||||
issdate = str(gcdval['GCDDate'])
|
||||
if str(issis).isdigit():
|
||||
int_issnum = int( gcdis / 1000 )
|
||||
else:
|
||||
if 'au' in issis.lower():
|
||||
int_issnum = str(int(gcdis[:-2]) / 1000) + 'AU'
|
||||
else:
|
||||
logger.error("this has an alpha-numeric in the issue # which I cannot account for. Get on github and log the issue for evilhero.")
|
||||
return
|
||||
#get the latest issue / date using the date.
|
||||
if gcdval['GCDDate'] > latestdate:
|
||||
latestiss = str(issnum)
|
||||
latestdate = str(gcdval['GCDDate'])
|
||||
break
|
||||
#bb = iscnt
|
||||
bb+=1
|
||||
#print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
|
||||
#---END.NEW.
|
||||
|
||||
# check if the issue already exists
|
||||
iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
|
||||
# check if the issue already exists
|
||||
iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
|
||||
|
||||
# Only change the status & add DateAdded if the issue is already in the database
|
||||
if iss_exists is None:
|
||||
newValueDict['DateAdded'] = helpers.today()
|
||||
# Only change the status & add DateAdded if the issue is already in the database
|
||||
if iss_exists is None:
|
||||
newValueDict['DateAdded'] = helpers.today()
|
||||
|
||||
controlValueDict = {"IssueID": issid}
|
||||
newValueDict = {"ComicID": comicid,
|
||||
"ComicName": comic['ComicName'],
|
||||
"IssueName": issname,
|
||||
"Issue_Number": issnum,
|
||||
"IssueDate": issdate,
|
||||
"Int_IssueNumber": int_issnum
|
||||
}
|
||||
if mylar.AUTOWANT_ALL:
|
||||
newValueDict['Status'] = "Wanted"
|
||||
elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
|
||||
newValueDict['Status'] = "Wanted"
|
||||
else:
|
||||
newValueDict['Status'] = "Skipped"
|
||||
controlValueDict = {"IssueID": issid}
|
||||
newValueDict = {"ComicID": comicid,
|
||||
"ComicName": comic['ComicName'],
|
||||
"IssueName": issname,
|
||||
"Issue_Number": issnum,
|
||||
"IssueDate": issdate,
|
||||
"Int_IssueNumber": int_issnum
|
||||
}
|
||||
if mylar.AUTOWANT_ALL:
|
||||
newValueDict['Status'] = "Wanted"
|
||||
elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
|
||||
newValueDict['Status'] = "Wanted"
|
||||
else:
|
||||
newValueDict['Status'] = "Skipped"
|
||||
|
||||
if iss_exists:
|
||||
#print ("Existing status : " + str(iss_exists['Status']))
|
||||
newValueDict['Status'] = iss_exists['Status']
|
||||
if iss_exists:
|
||||
#print ("Existing status : " + str(iss_exists['Status']))
|
||||
newValueDict['Status'] = iss_exists['Status']
|
||||
|
||||
try:
|
||||
myDB.upsert("issues", newValueDict, controlValueDict)
|
||||
except sqlite3.InterfaceError, e:
|
||||
#raise sqlite3.InterfaceError(e)
|
||||
logger.error("MAJOR error trying to get issue data, this is most likey a MULTI-VOLUME series and you need to use the custom_exceptions.csv file.")
|
||||
myDB.action("DELETE FROM comics WHERE ComicID=?", [comicid])
|
||||
return
|
||||
n+=1
|
||||
try:
|
||||
myDB.upsert("issues", newValueDict, controlValueDict)
|
||||
except sqlite3.InterfaceError, e:
|
||||
#raise sqlite3.InterfaceError(e)
|
||||
logger.error("MAJOR error trying to get issue data, this is most likey a MULTI-VOLUME series and you need to use the custom_exceptions.csv file.")
|
||||
myDB.action("DELETE FROM comics WHERE ComicID=?", [comicid])
|
||||
return
|
||||
n+=1
|
||||
|
||||
# logger.debug(u"Updating comic cache for " + comic['ComicName'])
|
||||
# cache.getThumb(ComicID=issue['issueid'])
|
||||
|
||||
# logger.debug(u"Updating cache for: " + comic['ComicName'])
|
||||
# cache.getThumb(ComicIDcomicid)
|
||||
else:
|
||||
if iscnt > 0: #if a series is brand new, it wont have any issues/details yet so skip this part
|
||||
while (n <= iscnt):
|
||||
#---NEW.code
|
||||
try:
|
||||
firstval = issued['issuechoice'][n]
|
||||
except IndexError:
|
||||
break
|
||||
cleanname = helpers.cleanName(firstval['Issue_Name'])
|
||||
issid = str(firstval['Issue_ID'])
|
||||
issnum = str(firstval['Issue_Number'])
|
||||
#print ("issnum: " + str(issnum))
|
||||
issname = cleanname
|
||||
issdate = str(firstval['Issue_Date'])
|
||||
if str(issnum).isdigit():
|
||||
int_issnum = int( issnum )
|
||||
else:
|
||||
if 'au' in issnum.lower():
|
||||
int_issnum = str(int(issnum[:-2])) + 'AU'
|
||||
elif '.' in str(issnum):
|
||||
issst = str(issnum).find('.')
|
||||
#logger.fdebug("issst:" + str(issst))
|
||||
issb4dec = str(issnum)[:issst]
|
||||
#logger.fdebug("issb4dec:" + str(issb4dec))
|
||||
#if the length of decimal is only 1 digit, assume it's a tenth
|
||||
decis = str(issnum)[issst+1:]
|
||||
#logger.fdebug("decis:" + str(decis))
|
||||
if len(decis) == 1:
|
||||
decisval = int(decis) * 10
|
||||
issaftdec = str(decisval)
|
||||
if len(decis) == 2:
|
||||
decisval = int(decis)
|
||||
issaftdec = str(decisval)
|
||||
try:
|
||||
int_issnum = str(issnum)
|
||||
except ValueError:
|
||||
logger.error("This has no issue #'s for me to get - Either a Graphic Novel or one-shot.")
|
||||
updater.no_searchresults(comicid)
|
||||
return
|
||||
else:
|
||||
logger.error(str(issnum) + "this has an alpha-numeric in the issue # which I cannot account for.")
|
||||
return
|
||||
#get the latest issue / date using the date.
|
||||
if firstval['Issue_Date'] > latestdate:
|
||||
latestiss = str(issnum)
|
||||
latestdate = str(firstval['Issue_Date'])
|
||||
if firstval['Issue_Date'] < firstdate:
|
||||
firstiss = str(issnum)
|
||||
firstdate = str(firstval['Issue_Date'])
|
||||
# check if the issue already exists
|
||||
iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
|
||||
|
||||
# Only change the status & add DateAdded if the issue is already in the database
|
||||
if iss_exists is None:
|
||||
newValueDict['DateAdded'] = helpers.today()
|
||||
|
||||
controlValueDict = {"IssueID": issid}
|
||||
newValueDict = {"ComicID": comicid,
|
||||
"ComicName": comic['ComicName'],
|
||||
"IssueName": issname,
|
||||
"Issue_Number": issnum,
|
||||
"IssueDate": issdate,
|
||||
"Int_IssueNumber": int_issnum
|
||||
}
|
||||
if mylar.AUTOWANT_ALL:
|
||||
newValueDict['Status'] = "Wanted"
|
||||
elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
|
||||
newValueDict['Status'] = "Wanted"
|
||||
else:
|
||||
newValueDict['Status'] = "Skipped"
|
||||
if iss_exists:
|
||||
#print ("Existing status : " + str(iss_exists['Status']))
|
||||
newValueDict['Status'] = iss_exists['Status']
|
||||
|
||||
try:
|
||||
myDB.upsert("issues", newValueDict, controlValueDict)
|
||||
except sqlite3.InterfaceError, e:
|
||||
#raise sqlite3.InterfaceError(e)
|
||||
logger.error("Something went wrong - I can't add the issue information into my DB.")
|
||||
myDB.action("DELETE FROM comics WHERE ComicID=?", [comicid])
|
||||
return
|
||||
n+=1
|
||||
|
||||
#figure publish dates here...
|
||||
styear = str(SeriesYear)
|
||||
#if SeriesYear == '0000':
|
||||
# styear = firstdate[:4]
|
||||
if firstdate[5:7] == '00':
|
||||
stmonth = "?"
|
||||
else:
|
||||
stmonth = helpers.fullmonth(firstdate[5:7])
|
||||
ltyear = re.sub('/s','', latestdate[:4])
|
||||
if latestdate[5:7] == '00':
|
||||
ltmonth = "?"
|
||||
else:
|
||||
ltmonth = helpers.fullmonth(latestdate[5:7])
|
||||
|
||||
#try to determine if it's an 'actively' published comic from above dates
|
||||
#threshold is if it's within a month (<45 days) let's assume it's recent.
|
||||
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
|
||||
n_date = datetime.date.today()
|
||||
recentchk = (n_date - c_date).days
|
||||
#print ("recentchk: " + str(recentchk))
|
||||
if recentchk <= 45:
|
||||
lastpubdate = 'Present'
|
||||
else:
|
||||
lastpubdate = str(ltmonth) + ' ' + str(ltyear)
|
||||
|
||||
publishfigure = str(stmonth) + ' ' + str(styear) + ' - ' + str(lastpubdate)
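A condensed sketch (not part of the commit) of the 'actively published' check used above: if the latest issue's cover month is within roughly 45 days of today, the publication range ends in 'Present'. Dates are hypothetical, and month names are left numeric here rather than run through fullmonth.

import datetime

firstdate = '2011-09-00'    # hypothetical first issue cover date
latestdate = '2013-04-00'   # hypothetical latest issue cover date (day is often 00)

c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
recentchk = (datetime.date.today() - c_date).days

if recentchk <= 45:
    lastpubdate = 'Present'
else:
    lastpubdate = latestdate[5:7] + '/' + latestdate[:4]

publishfigure = firstdate[:7] + ' - ' + lastpubdate
print(publishfigure)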
|
||||
|
||||
controlValueStat = {"ComicID": comicid}
|
||||
|
||||
newValueStat = {"Status": "Active",
|
||||
"LatestIssue": latestiss,
|
||||
"LatestDate": latestdate,
|
||||
"ComicPublished": publishfigure,
|
||||
"LastUpdated": helpers.now()
|
||||
}
|
||||
|
||||
|
@ -475,7 +595,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
|
|||
if pullupd is None:
|
||||
# lets' check the pullist for anything at this time as well since we're here.
|
||||
# do this for only Present comics....
|
||||
if mylar.AUTOWANT_UPCOMING and 'Present' in gcdinfo['resultPublished']:
|
||||
if mylar.AUTOWANT_UPCOMING: #and 'Present' in gcdinfo['resultPublished']:
|
||||
logger.info(u"Checking this week's pullist for new issues of " + comic['ComicName'])
|
||||
updater.newpullcheck(comic['ComicName'], comicid)
|
||||
|
||||
|
|
|
@ -386,6 +386,16 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
|
|||
request.add_header('User-Agent', str(mylar.USER_AGENT))
|
||||
opener = urllib2.build_opener()
|
||||
|
||||
#set a delay between searches here. Default is for 30 seconds...
|
||||
if mylar.SEARCH_DELAY == 'None' or mylar.SEARCH_DELAY is None:
|
||||
pause_the_search = 1 * 60 # (it's in seconds)
|
||||
elif str(mylar.SEARCH_DELAY).isdigit():
|
||||
pause_the_search = mylar.SEARCH_DELAY * 60
|
||||
else:
|
||||
logger.info("Check Search Delay - invalid numerical given. Force-setting to 1 minute.")
|
||||
pause_the_search = 1 * 60
|
||||
time.sleep(pause_the_search)
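A minimal sketch (not part of the commit) of the Search Delay pause added above: the configured value is in minutes, anything non-numeric falls back to one minute, and the sleep happens before each provider request.

import time

SEARCH_DELAY = 2    # hypothetical config value, in minutes

if SEARCH_DELAY is None or SEARCH_DELAY == 'None':
    pause_the_search = 1 * 60          # seconds
elif str(SEARCH_DELAY).isdigit():
    pause_the_search = int(SEARCH_DELAY) * 60
else:
    print("Check Search Delay - invalid numerical given. Force-setting to 1 minute.")
    pause_the_search = 1 * 60

time.sleep(pause_the_search)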
|
||||
|
||||
try:
|
||||
data = opener.open(request).read()
|
||||
except Exception, e:
|
||||
|
@ -608,7 +618,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
|
|||
logger.fdebug("watch comicversion is " + str(ComicVersion))
|
||||
fndcomicversion = str(splitit[n])
|
||||
logger.fdebug("version found: " + str(fndcomicversion))
|
||||
if ComicVersion is not None:
|
||||
if ComicVersion is not "None":
|
||||
F_ComicVersion = re.sub("[^0-9]", "", fndcomicversion)
|
||||
D_ComicVersion = re.sub("[^0-9]", "", ComicVersion)
|
||||
if int(F_ComicVersion) == int(D_ComicVersion):
|
||||
|
|
|
@@ -36,16 +36,39 @@ def dbUpdate():

        comicid = comic[0]
        mismatch = "no"
        CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
        if CV_EXcomicid is None: pass
        if not mylar.CV_ONLY or comicid[:1] == "G":
            CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
            if CV_EXcomicid is None: pass
            else:
                if CV_EXcomicid['variloop'] == '99':
                    mismatch = "yes"
                if comicid[:1] == "G":
                    mylar.importer.GCDimport(comicid)
                else:
                    mylar.importer.addComictoDB(comicid,mismatch)
        else:
            if CV_EXcomicid['variloop'] == '99':
                mismatch = "yes"
            if comicid[:1] == "G":
                mylar.importer.GCDimport(comicid)
            else:
                mylar.importer.addComictoDB(comicid,mismatch)

            if mylar.CV_ONETIMER == 1:
                #in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clean refresh.
                issues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [comicid])
                #store the issues' status for a given comicid, after deleting and readding, flip the status back to what it is currently.
                myDB.select('DELETE FROM issues WHERE ComicID=?', [comicid])
                mylar.importer.addComictoDB(comicid,mismatch)
                issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [comicid])
                icount = 0
                for issue in issues:
                    for issuenew in issues_new:
                        if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']:
                            #change the status to the previous status
                            ctrlVAL = {'IssueID': issue['IssueID']}
                            newVAL = {'Status': issue['Status']}
                            myDB.upsert("Issues", newVAL, ctrlVAL)
                            icount+=1
                            break
                logger.info("changed the status of " + str(icount) + " issues.")
                mylar.CV_ONETIMER = 0
            else:
                mylar.importer.addComictoDB(comicid,mismatch)
        time.sleep(5) #pause for 5 secs so dont hammer CV and get 500 error
    logger.info('Update complete')

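The CV_ONETIMER pass above wipes and re-adds a series so it picks up pure-ComicVine data, then writes each issue's previous Status back. The preserve-and-restore step boils down to the following sketch (plain dicts stand in for the database rows; this is not Mylar's DB API):

def restore_statuses(old_issues, new_issues):
    # old_issues / new_issues: lists of {'IssueID': ..., 'Status': ...} rows.
    # Returns the refreshed rows with any previously recorded Status carried over,
    # plus a count of how many were flipped back.
    old_by_id = dict((row['IssueID'], row['Status']) for row in old_issues)
    changed = 0
    for row in new_issues:
        previous = old_by_id.get(row['IssueID'])
        if previous is not None and previous != row['Status']:
            row['Status'] = previous
            changed += 1
    return new_issues, changed

# restore_statuses([{'IssueID': '1001', 'Status': 'Downloaded'}],
#                  [{'IssueID': '1001', 'Status': 'Skipped'}])
# -> ([{'IssueID': '1001', 'Status': 'Downloaded'}], 1)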
@@ -362,7 +385,7 @@ def forceRescan(ComicID,archive=None):
            # fcdigit = fcnew[som].lstrip('0')
            #fcdigit = str(int(fcnew[som]))
            fcdigit = int(fcnew[som]) * 1000
            if 'au' in fcnew[som+1].lower():
            if som+1 < len(fcnew) and 'au' in fcnew[som+1].lower():
                #print ("AU detected")
                #if the 'AU' is in 005AU vs 005 AU it will yield different results.
                fnd_iss_except = 'AU'

@@ -406,6 +429,12 @@ def forceRescan(ComicID,archive=None):
            #logger.fdebug("decval: " + str(isschk_decval))
            #logger.fdebug("intdec: " + str(intdec))
            #logger.fdebug("let's compare with this issue value: " + str(fcdigit))
            elif 'au' in fcnew[som].lower():
                austart = fcnew[som].lower().find('au')
                if fcnew[som][:austart].isdigit():
                    fcdigit = int(fcnew[som][:austart]) * 1000
                    fnd_iss_except = 'AU'
                    #if AU is part of issue (5AU instead of 5 AU)
                else:
                    # it's a word, skip it.
                    fcdigit = 19283838380101193

@@ -429,7 +458,10 @@ def forceRescan(ComicID,archive=None):
            #if issyear in fcnew[som+1]:
            # print "matched on year:" + str(issyear)
            #issuedupechk here.
            if int(fcdigit) in issuedupechk and fnd_iss_except.lower() == iss_except.lower():
            #print ("fcdigit:" + str(fcdigit))
            #print ("findiss_except:" + str(fnd_iss_except) + " = iss_except:" + str(iss_except))

            if int(fcdigit) in issuedupechk and str(fnd_iss_except) == str(iss_except):
                logger.fdebug("duplicate issue detected - not counting this: " + str(tmpfc['ComicFilename']))
                issuedupe = "yes"
                break

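FIX:(#294) and FIX:(#304) both deal with turning a filename token such as '005', '005AU' or '005 AU' into a comparable numeric key plus an 'AU' exception flag, with a bounds check before peeking at the next token. A compact sketch of that parsing (illustrative function; same *1000 scaling and word sentinel as the hunks above):

def parse_issue_token(tokens, idx):
    # Returns (numeric_key, exception_flag) for tokens[idx].
    token = tokens[idx].lower()
    if token.isdigit():
        key = int(token) * 1000
        # only look ahead when a next token actually exists (the #304 fix)
        if idx + 1 < len(tokens) and 'au' in tokens[idx + 1].lower():
            return key, 'AU'
        return key, ''
    austart = token.find('au')
    if austart > 0 and token[:austart].isdigit():
        return int(token[:austart]) * 1000, 'AU'   # '5AU' style, no space
    return 19283838380101193, ''                   # plain word: sentinel, never matches

# parse_issue_token(['005', 'AU'], 0) -> (5000, 'AU')
# parse_issue_token(['5AU'], 0)       -> (5000, 'AU')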
@@ -194,73 +194,74 @@ class WebInterface(object):
        #print ("comicyear: " + str(comicyear))
        #print ("comicissues: " + str(comicissues))
        #print ("comicimage: " + str(comicimage))
        if not mylar.CV_ONLY:
            #here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc).
            CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
            if CV_EXcomicid is None: # pass #
                gcdinfo=parseit.GCDScraper(comicname, comicyear, comicissues, comicid, quickmatch="yes")
                if gcdinfo == "No Match":
            CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
            if CV_EXcomicid is None: # pass #
                gcdinfo=parseit.GCDScraper(comicname, comicyear, comicissues, comicid, quickmatch="yes")
                if gcdinfo == "No Match":
                    #when it no matches, the image will always be blank...let's fix it.
                    cvdata = mylar.cv.getComic(comicid,'comic')
                    comicimage = cvdata['ComicImage']
                    updater.no_searchresults(comicid)
                    nomatch = "true"
                    u_comicname = comicname.encode('utf-8').strip()
                    logger.info("I couldn't find an exact match for " + u_comicname + " (" + str(comicyear) + ") - gathering data for Error-Checking screen (this could take a minute)..." )
                    i = 0
                    loopie, cnt = parseit.ComChk(comicname, comicyear, comicpublisher, comicissues, comicid)
                    logger.info("total count : " + str(cnt))
                    while (i < cnt):
                        try:
                            stoopie = loopie['comchkchoice'][i]
                        except (IndexError, TypeError):
                            break
                        cresults.append({
                               'ComicID' : stoopie['ComicID'],
                               'ComicName' : stoopie['ComicName'].decode('utf-8', 'replace'),
                               'ComicYear' : stoopie['ComicYear'],
                               'ComicIssues' : stoopie['ComicIssues'],
                               'ComicURL' : stoopie['ComicURL'],
                               'ComicPublisher' : stoopie['ComicPublisher'].decode('utf-8', 'replace'),
                               'GCDID' : stoopie['GCDID']
                               })
                        i+=1
                    if imported != 'None':
                    cvdata = mylar.cv.getComic(comicid,'comic')
                    comicimage = cvdata['ComicImage']
                    updater.no_searchresults(comicid)
                    nomatch = "true"
                    u_comicname = comicname.encode('utf-8').strip()
                    logger.info("I couldn't find an exact match for " + u_comicname + " (" + str(comicyear) + ") - gathering data for Error-Checking screen (this could take a minute)..." )
                    i = 0
                    loopie, cnt = parseit.ComChk(comicname, comicyear, comicpublisher, comicissues, comicid)
                    logger.info("total count : " + str(cnt))
                    while (i < cnt):
                        try:
                            stoopie = loopie['comchkchoice'][i]
                        except (IndexError, TypeError):
                            break
                        cresults.append({
                               'ComicID' : stoopie['ComicID'],
                               'ComicName' : stoopie['ComicName'].decode('utf-8', 'replace'),
                               'ComicYear' : stoopie['ComicYear'],
                               'ComicIssues' : stoopie['ComicIssues'],
                               'ComicURL' : stoopie['ComicURL'],
                               'ComicPublisher' : stoopie['ComicPublisher'].decode('utf-8', 'replace'),
                               'GCDID' : stoopie['GCDID']
                               })
                        i+=1
                    if imported != 'None':
                        #if it's from an import and it has to go through the UEC, return the values
                        #to the calling function and have that return the template
                        return cresults
                        return cresults
                    else:
                        return serve_template(templatename="searchfix.html", title="Error Check", comicname=comicname, comicid=comicid, comicyear=comicyear, comicimage=comicimage, comicissues=comicissues, cresults=cresults,imported=None,ogcname=None)
                    else:
                        return serve_template(templatename="searchfix.html", title="Error Check", comicname=comicname, comicid=comicid, comicyear=comicyear, comicimage=comicimage, comicissues=comicissues, cresults=cresults,imported=None,ogcname=None)
                nomatch = "false"
                logger.info(u"Quick match success..continuing.")
            else:
                nomatch = "false"
                logger.info(u"Quick match success..continuing.")
            else:
                if CV_EXcomicid['variloop'] == '99':
                    logger.info(u"mismatched name...autocorrecting to correct GID and auto-adding.")
                    mismatch = "yes"
                    if CV_EXcomicid['NewComicID'] == 'none':
                        logger.info(u"multi-volume series detected")
                        testspx = CV_EXcomicid['GComicID'].split('/')
                        for exc in testspx:
                            fakeit = parseit.GCDAdd(testspx)
                            howmany = int(CV_EXcomicid['variloop'])
                            t = 0
                            while (t <= howmany):
                                try:
                                    sres = fakeit['serieschoice'][t]
                                except IndexError:
                                    break
                                sresults.append({
                                       'ComicID' : sres['ComicID'],
                                       'ComicName' : sres['ComicName'],
                                       'ComicYear' : sres['ComicYear'],
                                       'ComicIssues' : sres['ComicIssues'],
                                       'ComicPublisher' : sres['ComicPublisher'],
                                       'ComicCover' : sres['ComicCover']
                                       })
                                t+=1
                            #searchfix(-1).html is for misnamed comics and wrong years.
                            #searchfix-2.html is for comics that span multiple volumes.
                            return serve_template(templatename="searchfix-2.html", title="In-Depth Results", sresults=sresults)
                if CV_EXcomicid['variloop'] == '99':
                    logger.info(u"mismatched name...autocorrecting to correct GID and auto-adding.")
                    mismatch = "yes"
                    if CV_EXcomicid['NewComicID'] == 'none':
                        logger.info(u"multi-volume series detected")
                        testspx = CV_EXcomicid['GComicID'].split('/')
                        for exc in testspx:
                            fakeit = parseit.GCDAdd(testspx)
                            howmany = int(CV_EXcomicid['variloop'])
                            t = 0
                            while (t <= howmany):
                                try:
                                    sres = fakeit['serieschoice'][t]
                                except IndexError:
                                    break
                                sresults.append({
                                       'ComicID' : sres['ComicID'],
                                       'ComicName' : sres['ComicName'],
                                       'ComicYear' : sres['ComicYear'],
                                       'ComicIssues' : sres['ComicIssues'],
                                       'ComicPublisher' : sres['ComicPublisher'],
                                       'ComicCover' : sres['ComicCover']
                                       })
                                t+=1
                            #searchfix(-1).html is for misnamed comics and wrong years.
                            #searchfix-2.html is for comics that span multiple volumes.
                            return serve_template(templatename="searchfix-2.html", title="In-Depth Results", sresults=sresults)
        #print ("imported is: " + str(imported))
        threading.Thread(target=importer.addComictoDB, args=[comicid,mismatch,None,imported,ogcname]).start()
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % comicid)

@@ -637,6 +638,7 @@ class WebInterface(object):
                logger.info("Not renaming " + str(filename) + " as it is in desired format already.")
                #continue
        logger.info("I have renamed " + str(filefind) + " issues of " + comicname)
        updater.forceRescan(comicid)
    manualRename.exposed = True

    def searchScan(self, name):

@@ -1192,6 +1194,7 @@ class WebInterface(object):
                    "nzb_search_interval" : mylar.SEARCH_INTERVAL,
                    "nzb_startup_search" : helpers.checked(mylar.NZB_STARTUP_SEARCH),
                    "libraryscan_interval" : mylar.LIBRARYSCAN_INTERVAL,
                    "search_delay" : mylar.SEARCH_DELAY,
                    "use_sabnzbd" : helpers.checked(mylar.USE_SABNZBD),
                    "sab_host" : mylar.SAB_HOST,
                    "sab_user" : mylar.SAB_USERNAME,

@@ -1349,7 +1352,7 @@ class WebInterface(object):
        else:
            newValues['UseFuzzy'] = str(fuzzy_year)

        if comic_version is None:
        if comic_version is None or comic_version == 'None':
            newValues['ComicVersion'] = "None"
        else:
            if comic_version[1:].isdigit() and comic_version[:1].lower() == 'v':

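The comic_version handling above now also treats the literal string 'None' (as posted back by the form) as "no version". A sketch of the normalization, assuming values outside the 'v<digits>' shape are discarded (hypothetical helper, not the actual handler):

def normalize_comic_version(comic_version):
    # '', None and the literal 'None' are stored as "None"; 'V2' / 'v12' are kept
    # as a lower-case 'v' plus digits; anything else is treated as unusable here.
    if comic_version is None or comic_version == 'None' or comic_version == '':
        return "None"
    if comic_version[:1].lower() == 'v' and comic_version[1:].isdigit():
        return 'v' + comic_version[1:]
    return "None"

# normalize_comic_version('V2')   -> 'v2'
# normalize_comic_version('None') -> 'None'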
@@ -1379,7 +1382,7 @@ class WebInterface(object):
        usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
        raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
        prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0,
        preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0,
        preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None,
        destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
        mylar.HTTP_HOST = http_host
        mylar.HTTP_PORT = http_port

@@ -1391,6 +1394,7 @@ class WebInterface(object):
        mylar.SEARCH_INTERVAL = nzb_search_interval
        mylar.NZB_STARTUP_SEARCH = nzb_startup_search
        mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
        mylar.SEARCH_DELAY = search_delay
        mylar.USE_SABNZBD = use_sabnzbd
        mylar.SAB_HOST = sab_host
        mylar.SAB_USERNAME = sab_username

@@ -1482,6 +1486,10 @@ class WebInterface(object):
            logger.info("Search interval too low. Resetting to 6 hour minimum")
            mylar.SEARCH_INTERVAL = 360

        if mylar.SEARCH_DELAY < 1:
            logger.info("Minimum search delay set for 1 minute to avoid hammering.")
            mylar.SEARCH_DELAY = 1

        # Write the config
        mylar.config_write()

@@ -410,10 +410,10 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
    #print ("----------THIS WEEK'S PUBLISHED COMICS------------")
    if w > 0:
        while (cnt > -1):
            lines[cnt] = str(lines[cnt]).upper()
            lines[cnt] = lines[cnt].upper()
            #llen[cnt] = str(llen[cnt])
            #logger.fdebug("looking for : " + str(lines[cnt]))
            sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', ' ', str(lines[cnt]))
            sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', ' ', lines[cnt])
            sqlsearch = re.sub(r'\s', '%', sqlsearch)
            if 'THE' in sqlsearch: sqlsearch = re.sub('THE', '', sqlsearch)
            if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)

@@ -442,23 +442,23 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):

            #-NEW-
            # strip out all special characters and compare
            watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', str(unlines[cnt]))
            comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', str(comicnm))
            watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', unlines[cnt])
            comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', comicnm)
            watchcomic = re.sub(r'\s', '', watchcomic)
            comicnm = re.sub(r'\s', '', comicnm)
            #logger.fdebug("Revised_Watch: " + str(watchcomic))
            #logger.fdebug("ComicNM: " + str(comicnm))
            if 'THE' in str(watchcomic).upper():
            #logger.fdebug("Revised_Watch: " + watchcomic)
            #logger.fdebug("ComicNM: " + comicnm)
            if 'THE' in watchcomic.upper():
                modwatchcomic = re.sub('THE', '', watchcomic.upper())
                modcomicnm = re.sub('THE', '', comicnm)
            else:
                modwatchcomic = watchcomic
                modcomicnm = comicnm
            #thnx to A+X for this...
            if '+' in str(watchcomic):
                if 'plus' in str(comicnm).lower():
            if '+' in watchcomic:
                if 'plus' in comicnm.lower():
                    modcomicnm = re.sub('plus', '+', comicnm)
            if str(comicnm) == str(watchcomic).upper() or str(modcomicnm) == str(modwatchcomic).upper():
            if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
                #logger.fdebug("matched on:" + str(comicnm) + "..." + str(watchcomic).upper())
                pass
            elif ("ANNUAL" in week['EXTRA']):

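The matching above compares pull-list names against the watchlist after stripping punctuation and whitespace, dropping 'THE', and folding 'plus' back to '+' (the A+X case). The whole normalization can be read as one small function (illustrative name; '+' is deliberately left out of the stripped character set here so the PLUS fold-back works):

import re

def normalize_title(name):
    # Strip roughly the same punctuation set as the hunk above (minus '+'), remove
    # whitespace, uppercase, drop 'THE', and turn 'PLUS' back into '+'.
    cleaned = re.sub(r"[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]", '', name)
    cleaned = re.sub(r'\s', '', cleaned).upper()
    cleaned = cleaned.replace('THE', '')
    cleaned = cleaned.replace('PLUS', '+')
    return cleaned

# normalize_title('A+X') == normalize_title('A Plus X') -> True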
@@ -483,10 +483,13 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
            watchfnd.append(comicnm)
            watchfndiss.append(week['ISSUE'])
            ComicID = comicid[cnt]
            ComicIssue = str(watchfndiss[tot -1] + ".00")
            if not mylar.CV_ONLY:
                ComicIssue = str(watchfndiss[tot -1] + ".00")
            else:
                ComicIssue = str(watchfndiss[tot -1])
            ComicDate = str(week['SHIPDATE'])
            ComicName = str(unlines[cnt])
            logger.fdebug("Watchlist hit for : " + str(ComicName) + " ISSUE: " + str(watchfndiss[tot -1]))
            logger.fdebug("Watchlist hit for : " + ComicName + " ISSUE: " + str(watchfndiss[tot -1]))
            # here we add to comics.latest
            updater.latest_update(ComicID=ComicID, LatestIssue=ComicIssue, LatestDate=ComicDate)
            # here we add to upcoming table...